Diffstat (limited to 'drivers/net')
-rw-r--r--drivers/net/3c501.c4
-rw-r--r--drivers/net/3c503.c4
-rw-r--r--drivers/net/3c507.c6
-rw-r--r--drivers/net/3c515.c2
-rw-r--r--drivers/net/3c527.c6
-rw-r--r--drivers/net/8139too.c3
-rw-r--r--drivers/net/82596.c2
-rw-r--r--drivers/net/Kconfig266
-rw-r--r--drivers/net/Space.c5
-rw-r--r--drivers/net/arm/am79c961a.c9
-rw-r--r--drivers/net/arm/ixp4xx_eth.c4
-rw-r--r--drivers/net/arm/w90p910_ether.c2
-rw-r--r--drivers/net/at1700.c8
-rw-r--r--drivers/net/atarilance.c2
-rw-r--r--drivers/net/atl1c/atl1c_main.c41
-rw-r--r--drivers/net/atl1e/atl1e_main.c2
-rw-r--r--drivers/net/atlx/atl1.c12
-rw-r--r--drivers/net/atlx/atl2.c4
-rw-r--r--drivers/net/au1000_eth.c2
-rw-r--r--drivers/net/ax88796.c8
-rw-r--r--drivers/net/bcm63xx_enet.c2
-rw-r--r--drivers/net/benet/be.h41
-rw-r--r--drivers/net/benet/be_cmds.c144
-rw-r--r--drivers/net/benet/be_cmds.h42
-rw-r--r--drivers/net/benet/be_ethtool.c4
-rw-r--r--drivers/net/benet/be_hw.h39
-rw-r--r--drivers/net/benet/be_main.c254
-rw-r--r--drivers/net/bfin_mac.c74
-rw-r--r--drivers/net/bfin_mac.h11
-rw-r--r--drivers/net/bna/bfa_defs.h22
-rw-r--r--drivers/net/bna/bfa_defs_mfg_comm.h22
-rw-r--r--drivers/net/bna/bfa_ioc.c1219
-rw-r--r--drivers/net/bna/bfa_ioc.h49
-rw-r--r--drivers/net/bna/bfa_ioc_ct.c102
-rw-r--r--drivers/net/bna/bfi_ctreg.h41
-rw-r--r--drivers/net/bna/bna.h6
-rw-r--r--drivers/net/bna/bna_ctrl.c377
-rw-r--r--drivers/net/bna/bna_txrx.c44
-rw-r--r--drivers/net/bna/bna_types.h11
-rw-r--r--drivers/net/bna/bnad.c427
-rw-r--r--drivers/net/bna/bnad.h31
-rw-r--r--drivers/net/bna/bnad_ethtool.c8
-rw-r--r--drivers/net/bnx2.c121
-rw-r--r--drivers/net/bnx2.h2
-rw-r--r--drivers/net/bnx2x/Makefile2
-rw-r--r--drivers/net/bnx2x/bnx2x.h166
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.c155
-rw-r--r--drivers/net/bnx2x/bnx2x_cmn.h73
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.c2118
-rw-r--r--drivers/net/bnx2x/bnx2x_dcb.h196
-rw-r--r--drivers/net/bnx2x/bnx2x_dump.h988
-rw-r--r--drivers/net/bnx2x/bnx2x_ethtool.c379
-rw-r--r--drivers/net/bnx2x/bnx2x_hsi.h327
-rw-r--r--drivers/net/bnx2x/bnx2x_init.h220
-rw-r--r--drivers/net/bnx2x/bnx2x_link.c666
-rw-r--r--drivers/net/bnx2x/bnx2x_link.h56
-rw-r--r--drivers/net/bnx2x/bnx2x_main.c770
-rw-r--r--drivers/net/bnx2x/bnx2x_reg.h126
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.c18
-rw-r--r--drivers/net/bnx2x/bnx2x_stats.h2
-rw-r--r--drivers/net/bonding/Makefile2
-rw-r--r--drivers/net/bonding/bond_3ad.c3
-rw-r--r--drivers/net/bonding/bond_alb.c36
-rw-r--r--drivers/net/bonding/bond_alb.h38
-rw-r--r--drivers/net/bonding/bond_debugfs.c146
-rw-r--r--drivers/net/bonding/bond_ipv6.c7
-rw-r--r--drivers/net/bonding/bond_main.c70
-rw-r--r--drivers/net/bonding/bonding.h16
-rw-r--r--drivers/net/caif/caif_shm_u5500.c2
-rw-r--r--drivers/net/can/Kconfig21
-rw-r--r--drivers/net/can/Makefile1
-rw-r--r--drivers/net/can/janz-ican3.c9
-rw-r--r--drivers/net/can/mscan/mscan.c2
-rw-r--r--drivers/net/can/pch_can.c1350
-rw-r--r--drivers/net/can/sja1000/plx_pci.c2
-rw-r--r--drivers/net/can/sja1000/sja1000_of_platform.c8
-rw-r--r--drivers/net/can/slcan.c756
-rw-r--r--drivers/net/cassini.c22
-rw-r--r--drivers/net/cassini.h3
-rw-r--r--drivers/net/chelsio/my3126.c2
-rw-r--r--drivers/net/chelsio/sge.c10
-rw-r--r--drivers/net/cnic.c782
-rw-r--r--drivers/net/cnic.h27
-rw-r--r--drivers/net/cnic_defs.h2095
-rw-r--r--drivers/net/cnic_if.h26
-rw-r--r--drivers/net/cris/eth_v10.c34
-rw-r--r--drivers/net/cxgb3/ael1002.c24
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c6
-rw-r--r--drivers/net/cxgb3/cxgb3_offload.c6
-rw-r--r--drivers/net/cxgb3/t3_hw.c2
-rw-r--r--drivers/net/cxgb4/cxgb4.h4
-rw-r--r--drivers/net/cxgb4/cxgb4_main.c140
-rw-r--r--drivers/net/cxgb4/sge.c22
-rw-r--r--drivers/net/cxgb4/t4_hw.c93
-rw-r--r--drivers/net/cxgb4/t4fw_api.h1
-rw-r--r--drivers/net/cxgb4vf/adapter.h2
-rw-r--r--drivers/net/cxgb4vf/cxgb4vf_main.c47
-rw-r--r--drivers/net/cxgb4vf/sge.c9
-rw-r--r--drivers/net/cxgb4vf/t4vf_hw.c18
-rw-r--r--drivers/net/depca.c2
-rw-r--r--drivers/net/dm9000.c2
-rw-r--r--drivers/net/e1000/e1000_hw.c348
-rw-r--r--drivers/net/e1000/e1000_hw.h59
-rw-r--r--drivers/net/e1000/e1000_main.c53
-rw-r--r--drivers/net/e1000/e1000_osdep.h19
-rw-r--r--drivers/net/e1000/e1000_param.c13
-rw-r--r--drivers/net/e1000e/82571.c264
-rw-r--r--drivers/net/e1000e/defines.h9
-rw-r--r--drivers/net/e1000e/e1000.h7
-rw-r--r--drivers/net/e1000e/es2lan.c12
-rw-r--r--drivers/net/e1000e/ethtool.c201
-rw-r--r--drivers/net/e1000e/hw.h1
-rw-r--r--drivers/net/e1000e/ich8lan.c100
-rw-r--r--drivers/net/e1000e/lib.c144
-rw-r--r--drivers/net/e1000e/netdev.c158
-rw-r--r--drivers/net/e1000e/param.c2
-rw-r--r--drivers/net/e1000e/phy.c90
-rw-r--r--drivers/net/e2100.c2
-rw-r--r--drivers/net/eepro.c11
-rw-r--r--drivers/net/eexpress.c2
-rw-r--r--drivers/net/ehea/ehea.h17
-rw-r--r--drivers/net/ehea/ehea_ethtool.c25
-rw-r--r--drivers/net/ehea/ehea_main.c439
-rw-r--r--drivers/net/ehea/ehea_phyp.c40
-rw-r--r--drivers/net/ehea/ehea_qmr.c89
-rw-r--r--drivers/net/enic/enic.h6
-rw-r--r--drivers/net/enic/enic_main.c247
-rw-r--r--drivers/net/enic/enic_res.h1
-rw-r--r--drivers/net/enic/vnic_vic.h31
-rw-r--r--drivers/net/epic100.c4
-rw-r--r--drivers/net/ethoc.c160
-rw-r--r--drivers/net/fec.c248
-rw-r--r--drivers/net/fec.h5
-rw-r--r--drivers/net/fec_mpc52xx.c19
-rw-r--r--drivers/net/forcedeth.c1168
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/gianfar.c11
-rw-r--r--drivers/net/hamachi.c4
-rw-r--r--drivers/net/hamradio/yam.c4
-rw-r--r--drivers/net/hp.c6
-rw-r--r--drivers/net/ibm_newemac/core.c6
-rw-r--r--drivers/net/ibmveth.c7
-rw-r--r--drivers/net/ifb.c46
-rw-r--r--drivers/net/igb/e1000_82575.c37
-rw-r--r--drivers/net/igb/e1000_82575.h5
-rw-r--r--drivers/net/igb/e1000_defines.h7
-rw-r--r--drivers/net/igb/e1000_hw.h6
-rw-r--r--drivers/net/igb/e1000_nvm.c93
-rw-r--r--drivers/net/igb/e1000_nvm.h2
-rw-r--r--drivers/net/igb/e1000_phy.c11
-rw-r--r--drivers/net/igb/e1000_regs.h1
-rw-r--r--drivers/net/igb/igb.h1
-rw-r--r--drivers/net/igb/igb_main.c100
-rw-r--r--drivers/net/igbvf/Makefile2
-rw-r--r--drivers/net/igbvf/defines.h2
-rw-r--r--drivers/net/igbvf/ethtool.c9
-rw-r--r--drivers/net/igbvf/igbvf.h4
-rw-r--r--drivers/net/igbvf/mbx.c2
-rw-r--r--drivers/net/igbvf/mbx.h2
-rw-r--r--drivers/net/igbvf/netdev.c33
-rw-r--r--drivers/net/igbvf/regs.h2
-rw-r--r--drivers/net/igbvf/vf.c6
-rw-r--r--drivers/net/igbvf/vf.h4
-rw-r--r--drivers/net/irda/act200l-sir.c2
-rw-r--r--drivers/net/irda/bfin_sir.h2
-rw-r--r--drivers/net/irda/donauboe.c4
-rw-r--r--drivers/net/irda/mcs7780.c2
-rw-r--r--drivers/net/irda/smsc-ircc2.c2
-rw-r--r--drivers/net/iseries_veth.c27
-rw-r--r--drivers/net/ixgb/ixgb_main.c61
-rw-r--r--drivers/net/ixgb/ixgb_param.c21
-rw-r--r--drivers/net/ixgbe/Makefile2
-rw-r--r--drivers/net/ixgbe/ixgbe.h143
-rw-r--r--drivers/net/ixgbe/ixgbe_82598.c58
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c887
-rw-r--r--drivers/net/ixgbe/ixgbe_common.c256
-rw-r--r--drivers/net/ixgbe/ixgbe_common.h10
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.c17
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb.h3
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82598.c12
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_82599.c12
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c55
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c439
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c15
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c2226
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.c42
-rw-r--r--drivers/net/ixgbe/ixgbe_mbx.h2
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.c52
-rw-r--r--drivers/net/ixgbe/ixgbe_phy.h5
-rw-r--r--drivers/net/ixgbe/ixgbe_sriov.c57
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h136
-rw-r--r--drivers/net/ixgbe/ixgbe_x540.c724
-rw-r--r--drivers/net/ixgbevf/Makefile2
-rw-r--r--drivers/net/ixgbevf/defines.h3
-rw-r--r--drivers/net/ixgbevf/ethtool.c18
-rw-r--r--drivers/net/ixgbevf/ixgbevf.h6
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c27
-rw-r--r--drivers/net/ixgbevf/mbx.c2
-rw-r--r--drivers/net/ixgbevf/mbx.h2
-rw-r--r--drivers/net/ixgbevf/regs.h2
-rw-r--r--drivers/net/ixgbevf/vf.c8
-rw-r--r--drivers/net/ixgbevf/vf.h3
-rw-r--r--drivers/net/jme.c20
-rw-r--r--drivers/net/ks8851.c33
-rw-r--r--drivers/net/ksz884x.c22
-rw-r--r--drivers/net/lance.c2
-rw-r--r--drivers/net/lib82596.c2
-rw-r--r--drivers/net/lib8390.c24
-rw-r--r--drivers/net/ll_temac_main.c4
-rw-r--r--drivers/net/macvlan.c113
-rw-r--r--drivers/net/macvtap.c3
-rw-r--r--drivers/net/mlx4/alloc.c3
-rw-r--r--drivers/net/mlx4/en_netdev.c3
-rw-r--r--drivers/net/mlx4/fw.c4
-rw-r--r--drivers/net/mv643xx_eth.c9
-rw-r--r--drivers/net/myri10ge/myri10ge.c4
-rw-r--r--drivers/net/ne-h8300.c12
-rw-r--r--drivers/net/netconsole.c8
-rw-r--r--drivers/net/netxen/netxen_nic.h5
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c26
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c21
-rw-r--r--drivers/net/netxen/netxen_nic_init.c7
-rw-r--r--drivers/net/netxen/netxen_nic_main.c12
-rw-r--r--drivers/net/ni52.c4
-rw-r--r--drivers/net/ni65.c4
-rw-r--r--drivers/net/niu.c4
-rw-r--r--drivers/net/pch_gbe/pch_gbe_ethtool.c19
-rw-r--r--drivers/net/pch_gbe/pch_gbe_main.c12
-rw-r--r--drivers/net/pcmcia/axnet_cs.c19
-rw-r--r--drivers/net/pcmcia/nmclan_cs.c2
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c2
-rw-r--r--drivers/net/phy/phy.c4
-rw-r--r--drivers/net/ppp_async.c10
-rw-r--r--drivers/net/ppp_deflate.c9
-rw-r--r--drivers/net/ppp_generic.c30
-rw-r--r--drivers/net/ppp_mppe.c7
-rw-r--r--drivers/net/ppp_synctty.c3
-rw-r--r--drivers/net/pptp.c5
-rw-r--r--drivers/net/pxa168_eth.c9
-rw-r--r--drivers/net/qla3xxx.c8
-rw-r--r--drivers/net/qlcnic/qlcnic.h63
-rw-r--r--drivers/net/qlcnic/qlcnic_ctx.c28
-rw-r--r--drivers/net/qlcnic/qlcnic_ethtool.c159
-rw-r--r--drivers/net/qlcnic/qlcnic_hdr.h27
-rw-r--r--drivers/net/qlcnic/qlcnic_hw.c91
-rw-r--r--drivers/net/qlcnic/qlcnic_init.c186
-rw-r--r--drivers/net/qlcnic/qlcnic_main.c99
-rw-r--r--drivers/net/qlge/qlge.h4
-rw-r--r--drivers/net/qlge/qlge_dbg.c21
-rw-r--r--drivers/net/qlge/qlge_ethtool.c19
-rw-r--r--drivers/net/qlge/qlge_main.c21
-rw-r--r--drivers/net/qlge/qlge_mpi.c2
-rw-r--r--drivers/net/r6040.c2
-rw-r--r--drivers/net/r8169.c1753
-rw-r--r--drivers/net/s2io.c79
-rw-r--r--drivers/net/s2io.h9
-rw-r--r--drivers/net/sc92031.c3
-rw-r--r--drivers/net/sfc/efx.c38
-rw-r--r--drivers/net/sfc/efx.h7
-rw-r--r--drivers/net/sfc/ethtool.c168
-rw-r--r--drivers/net/sfc/falcon.c183
-rw-r--r--drivers/net/sfc/falcon_boards.c120
-rw-r--r--drivers/net/sfc/falcon_xmac.c14
-rw-r--r--drivers/net/sfc/filter.c255
-rw-r--r--drivers/net/sfc/filter.h149
-rw-r--r--drivers/net/sfc/io.h153
-rw-r--r--drivers/net/sfc/mcdi.c3
-rw-r--r--drivers/net/sfc/mcdi_phy.c1
-rw-r--r--drivers/net/sfc/mdio_10g.c1
-rw-r--r--drivers/net/sfc/mtd.c98
-rw-r--r--drivers/net/sfc/net_driver.h87
-rw-r--r--drivers/net/sfc/nic.c90
-rw-r--r--drivers/net/sfc/nic.h12
-rw-r--r--drivers/net/sfc/qt202x_phy.c6
-rw-r--r--drivers/net/sfc/rx.c30
-rw-r--r--drivers/net/sfc/siena.c10
-rw-r--r--drivers/net/sfc/spi.h5
-rw-r--r--drivers/net/sfc/tenxpress.c2
-rw-r--r--drivers/net/sfc/tx.c122
-rw-r--r--drivers/net/sh_eth.c245
-rw-r--r--drivers/net/sh_eth.h1
-rw-r--r--drivers/net/sis190.c3
-rw-r--r--drivers/net/skfp/skfddi.c2
-rw-r--r--drivers/net/skfp/smt.c4
-rw-r--r--drivers/net/skge.c54
-rw-r--r--drivers/net/sky2.c300
-rw-r--r--drivers/net/sky2.h48
-rw-r--r--drivers/net/smc-ultra.c8
-rw-r--r--drivers/net/starfire.c2
-rw-r--r--drivers/net/stmmac/stmmac.h40
-rw-r--r--drivers/net/stmmac/stmmac_ethtool.c16
-rw-r--r--drivers/net/stmmac/stmmac_main.c267
-rw-r--r--drivers/net/stmmac/stmmac_mdio.c8
-rw-r--r--drivers/net/sundance.c27
-rw-r--r--drivers/net/sungem.c14
-rw-r--r--drivers/net/sunhme.c2
-rw-r--r--drivers/net/sunlance.c10
-rw-r--r--drivers/net/tehuti.c4
-rw-r--r--drivers/net/tg3.c424
-rw-r--r--drivers/net/tg3.h42
-rw-r--r--drivers/net/tokenring/ibmtr.c5
-rw-r--r--drivers/net/tulip/de2104x.c18
-rw-r--r--drivers/net/tulip/tulip_core.c15
-rw-r--r--drivers/net/tun.c2
-rw-r--r--drivers/net/typhoon.c1
-rw-r--r--drivers/net/ucc_geth.c1
-rw-r--r--drivers/net/usb/Kconfig19
-rw-r--r--drivers/net/usb/Makefile1
-rw-r--r--drivers/net/usb/asix.c4
-rw-r--r--drivers/net/usb/cdc_ether.c4
-rw-r--r--drivers/net/usb/cdc_ncm.c1213
-rw-r--r--drivers/net/usb/hso.c40
-rw-r--r--drivers/net/usb/ipheth.c2
-rw-r--r--drivers/net/usb/mcs7830.c14
-rw-r--r--drivers/net/usb/pegasus.c4
-rw-r--r--drivers/net/usb/sierra_net.c5
-rw-r--r--drivers/net/usb/smsc95xx.c7
-rw-r--r--drivers/net/usb/usbnet.c48
-rw-r--r--drivers/net/veth.c4
-rw-r--r--drivers/net/via-rhine.c326
-rw-r--r--drivers/net/virtio_net.c2
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c965
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c174
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h73
-rw-r--r--drivers/net/vxge/vxge-config.c3604
-rw-r--r--drivers/net/vxge/vxge-config.h169
-rw-r--r--drivers/net/vxge/vxge-ethtool.c112
-rw-r--r--drivers/net/vxge/vxge-main.c1106
-rw-r--r--drivers/net/vxge/vxge-main.h86
-rw-r--r--drivers/net/vxge/vxge-reg.h33
-rw-r--r--drivers/net/vxge/vxge-traffic.c775
-rw-r--r--drivers/net/vxge/vxge-traffic.h49
-rw-r--r--drivers/net/vxge/vxge-version.h33
-rw-r--r--drivers/net/wan/dscc4.c6
-rw-r--r--drivers/net/wd.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c96
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h19
-rw-r--r--drivers/net/wimax/i2400m/sdio.c1
-rw-r--r--drivers/net/wimax/i2400m/usb.c3
-rw-r--r--drivers/net/wireless/Kconfig1
-rw-r--r--drivers/net/wireless/Makefile1
-rw-r--r--drivers/net/wireless/airo.c20
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c6
-rw-r--r--drivers/net/wireless/ath/ath.h111
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig18
-rw-r--r--drivers/net/wireless/ath/ath5k/Makefile3
-rw-r--r--drivers/net/wireless/ath/ath5k/ahb.c219
-rw-r--r--drivers/net/wireless/ath/ath5k/ani.c40
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h292
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c28
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c1569
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h7
-rw-r--r--drivers/net/wireless/ath/ath5k/caps.c6
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.c34
-rw-r--r--drivers/net/wireless/ath/ath5k/debug.h2
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/desc.h18
-rw-r--r--drivers/net/wireless/ath/ath5k/dma.c180
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.c139
-rw-r--r--drivers/net/wireless/ath/ath5k/eeprom.h4
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c409
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c11
-rw-r--r--drivers/net/wireless/ath/ath5k/mac80211-ops.c774
-rw-r--r--drivers/net/wireless/ath/ath5k/pci.c327
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c571
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c754
-rw-r--r--drivers/net/wireless/ath/ath5k/qcu.c696
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h31
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c1221
-rw-r--r--drivers/net/wireless/ath/ath5k/rfbuffer.h1169
-rw-r--r--drivers/net/wireless/ath/ath5k/sysfs.c4
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c107
-rw-r--r--drivers/net/wireless/ath/ath9k/ar5008_phy.c305
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_calib.c220
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_hw.c27
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_mac.c144
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9002_phy.c25
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h104
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_calib.c605
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c3128
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h73
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_hw.c289
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.c123
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_mac.h6
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_paprd.c166
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.c344
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_phy.h78
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9485_initvals.h943
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h68
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c132
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c23
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c59
-rw-r--r--drivers/net/wireless/ath/ath9k/common.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/common.h15
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c83
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h21
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c219
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h63
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c302
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c295
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c353
-rw-r--r--drivers/net/wireless/ath/ath9k/gpio.c30
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.c229
-rw-r--r--drivers/net/wireless/ath/ath9k/hif_usb.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/htc.h35
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_beacon.c24
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_gpio.c342
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_init.c114
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_main.c652
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_drv_txrx.c44
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.c5
-rw-r--r--drivers/net/wireless/ath/ath9k/htc_hst.h22
-rw-r--r--drivers/net/wireless/ath/ath9k/hw-ops.h5
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c298
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h81
-rw-r--r--drivers/net/wireless/ath/ath9k/init.c169
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c210
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h14
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c597
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c110
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h1
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c96
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c111
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h30
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c16
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.c32
-rw-r--r--drivers/net/wireless/ath/ath9k/wmi.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c836
-rw-r--r--drivers/net/wireless/ath/carl9170/carl9170.h5
-rw-r--r--drivers/net/wireless/ath/carl9170/cmd.c2
-rw-r--r--drivers/net/wireless/ath/carl9170/fwcmd.h13
-rw-r--r--drivers/net/wireless/ath/carl9170/hw.h7
-rw-r--r--drivers/net/wireless/ath/carl9170/mac.c56
-rw-r--r--drivers/net/wireless/ath/carl9170/main.c19
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.c27
-rw-r--r--drivers/net/wireless/ath/carl9170/phy.h24
-rw-r--r--drivers/net/wireless/ath/carl9170/tx.c80
-rw-r--r--drivers/net/wireless/ath/carl9170/usb.c58
-rw-r--r--drivers/net/wireless/ath/carl9170/version.h6
-rw-r--r--drivers/net/wireless/ath/debug.c15
-rw-r--r--drivers/net/wireless/ath/debug.h90
-rw-r--r--drivers/net/wireless/ath/key.c39
-rw-r--r--drivers/net/wireless/ath/main.c20
-rw-r--r--drivers/net/wireless/ath/regd.c8
-rw-r--r--drivers/net/wireless/atmel.c6
-rw-r--r--drivers/net/wireless/b43/Kconfig13
-rw-r--r--drivers/net/wireless/b43/Makefile8
-rw-r--r--drivers/net/wireless/b43/b43.h21
-rw-r--r--drivers/net/wireless/b43/dma.c5
-rw-r--r--drivers/net/wireless/b43/main.c68
-rw-r--r--drivers/net/wireless/b43/phy_common.c22
-rw-r--r--drivers/net/wireless/b43/phy_common.h8
-rw-r--r--drivers/net/wireless/b43/phy_n.c594
-rw-r--r--drivers/net/wireless/b43/phy_n.h2
-rw-r--r--drivers/net/wireless/b43/radio_2055.c502
-rw-r--r--drivers/net/wireless/b43/radio_2056.c9062
-rw-r--r--drivers/net/wireless/b43/radio_2056.h1084
-rw-r--r--drivers/net/wireless/b43/rfkill.c19
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c228
-rw-r--r--drivers/net/wireless/b43legacy/main.c47
-rw-r--r--drivers/net/wireless/b43legacy/rfkill.c2
-rw-r--r--drivers/net/wireless/hostap/hostap_ap.c3
-rw-r--r--drivers/net/wireless/hostap/hostap_hw.c8
-rw-r--r--drivers/net/wireless/hostap/hostap_main.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig3
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile11
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c97
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c35
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h8
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c369
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c140
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c465
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-calib.c230
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c373
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c5
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-lib.c136
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c38
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rxon.c642
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-tx.c160
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-ucode.c69
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c614
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.h61
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h26
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c971
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h96
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c40
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h91
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-helpers.h51
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.c662
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-legacy.h79
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c190
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c14
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c88
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c2
-rw-r--r--drivers/net/wireless/libertas/cfg.c9
-rw-r--r--drivers/net/wireless/libertas/cmd.c8
-rw-r--r--drivers/net/wireless/libertas/dev.h1
-rw-r--r--drivers/net/wireless/libertas/if_spi.c2
-rw-r--r--drivers/net/wireless/libertas/if_usb.c13
-rw-r--r--drivers/net/wireless/libertas/main.c3
-rw-r--r--drivers/net/wireless/libertas/rx.c4
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c5
-rw-r--r--drivers/net/wireless/mwl8k.c670
-rw-r--r--drivers/net/wireless/orinoco/wext.c8
-rw-r--r--drivers/net/wireless/p54/p54usb.c8
-rw-r--r--drivers/net/wireless/ray_cs.c18
-rw-r--r--drivers/net/wireless/rndis_wlan.c402
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig72
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.c144
-rw-r--r--drivers/net/wireless/rt2x00/rt2400pci.h6
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c150
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2500usb.c98
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h218
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c223
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c215
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c363
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h114
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00config.c14
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00debug.c15
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c104
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00ht.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00lib.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00link.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c61
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.c11
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c270
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.h74
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00reg.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c6
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c305
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h12
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.c185
-rw-r--r--drivers/net/wireless/rt2x00/rt61pci.h64
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.c107
-rw-r--r--drivers/net/wireless/rt2x00/rt73usb.h38
-rw-r--r--drivers/net/wireless/rtl818x/Makefile9
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/Makefile5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/dev.c (renamed from drivers/net/wireless/rtl818x/rtl8180_dev.c)8
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.c (renamed from drivers/net/wireless/rtl818x/rtl8180_grf5101.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/grf5101.h (renamed from drivers/net/wireless/rtl818x/rtl8180_grf5101.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.c (renamed from drivers/net/wireless/rtl818x/rtl8180_max2820.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/max2820.h (renamed from drivers/net/wireless/rtl818x/rtl8180_max2820.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8180.h (renamed from drivers/net/wireless/rtl818x/rtl8180.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.c (renamed from drivers/net/wireless/rtl818x/rtl8180_rtl8225.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/rtl8225.h (renamed from drivers/net/wireless/rtl818x/rtl8180_rtl8225.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.c (renamed from drivers/net/wireless/rtl818x/rtl8180_sa2400.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8180/sa2400.h (renamed from drivers/net/wireless/rtl818x/rtl8180_sa2400.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/Makefile5
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/dev.c (renamed from drivers/net/wireless/rtl818x/rtl8187_dev.c)146
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.c (renamed from drivers/net/wireless/rtl818x/rtl8187_leds.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.h (renamed from drivers/net/wireless/rtl818x/rtl8187_leds.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rfkill.c (renamed from drivers/net/wireless/rtl818x/rtl8187_rfkill.c)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rfkill.h (renamed from drivers/net/wireless/rtl818x/rtl8187_rfkill.h)0
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8187.h (renamed from drivers/net/wireless/rtl818x/rtl8187.h)2
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.c (renamed from drivers/net/wireless/rtl818x/rtl8187_rtl8225.c)24
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/rtl8225.h (renamed from drivers/net/wireless/rtl818x/rtl8187_rtl8225.h)0
-rw-r--r--drivers/net/wireless/rtlwifi/Kconfig15
-rw-r--r--drivers/net/wireless/rtlwifi/Makefile13
-rw-r--r--drivers/net/wireless/rtlwifi/base.c956
-rw-r--r--drivers/net/wireless/rtlwifi/base.h120
-rw-r--r--drivers/net/wireless/rtlwifi/cam.c291
-rw-r--r--drivers/net/wireless/rtlwifi/cam.h53
-rw-r--r--drivers/net/wireless/rtlwifi/core.c1029
-rw-r--r--drivers/net/wireless/rtlwifi/core.h42
-rw-r--r--drivers/net/wireless/rtlwifi/debug.c50
-rw-r--r--drivers/net/wireless/rtlwifi/debug.h212
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.c1189
-rw-r--r--drivers/net/wireless/rtlwifi/efuse.h124
-rw-r--r--drivers/net/wireless/rtlwifi/pci.c1945
-rw-r--r--drivers/net/wireless/rtlwifi/pci.h302
-rw-r--r--drivers/net/wireless/rtlwifi/ps.c493
-rw-r--r--drivers/net/wireless/rtlwifi/ps.h43
-rw-r--r--drivers/net/wireless/rtlwifi/rc.c329
-rw-r--r--drivers/net/wireless/rtlwifi/rc.h40
-rw-r--r--drivers/net/wireless/rtlwifi/regd.c400
-rw-r--r--drivers/net/wireless/rtlwifi/regd.h61
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/Makefile12
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/def.h257
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.c1473
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/dm.h196
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/fw.c804
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/fw.h98
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.c2162
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/hw.h57
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.c144
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/led.h41
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.c2676
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/phy.h237
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/reg.h2065
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.c523
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/rf.h44
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.c282
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/sw.h37
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/table.c1224
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/table.h58
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.c1031
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8192ce/trx.h714
-rw-r--r--drivers/net/wireless/rtlwifi/wifi.h1532
-rw-r--r--drivers/net/wireless/wl1251/boot.c1
-rw-r--r--drivers/net/wireless/wl1251/main.c15
-rw-r--r--drivers/net/wireless/wl1251/sdio.c103
-rw-r--r--drivers/net/wireless/wl1251/spi.c9
-rw-r--r--drivers/net/wireless/wl1251/wl1251.h1
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig64
-rw-r--r--drivers/net/wireless/wl12xx/Makefile22
-rw-r--r--drivers/net/wireless/wl12xx/acx.c (renamed from drivers/net/wireless/wl12xx/wl1271_acx.c)99
-rw-r--r--drivers/net/wireless/wl12xx/acx.h (renamed from drivers/net/wireless/wl12xx/wl1271_acx.h)108
-rw-r--r--drivers/net/wireless/wl12xx/boot.c (renamed from drivers/net/wireless/wl12xx/wl1271_boot.c)38
-rw-r--r--drivers/net/wireless/wl12xx/boot.h (renamed from drivers/net/wireless/wl12xx/wl1271_boot.h)3
-rw-r--r--drivers/net/wireless/wl12xx/cmd.c (renamed from drivers/net/wireless/wl12xx/wl1271_cmd.c)81
-rw-r--r--drivers/net/wireless/wl12xx/cmd.h (renamed from drivers/net/wireless/wl12xx/wl1271_cmd.h)58
-rw-r--r--drivers/net/wireless/wl12xx/conf.h (renamed from drivers/net/wireless/wl12xx/wl1271_conf.h)4
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.c480
-rw-r--r--drivers/net/wireless/wl12xx/debugfs.h (renamed from drivers/net/wireless/wl12xx/wl1271_debugfs.h)6
-rw-r--r--drivers/net/wireless/wl12xx/event.c (renamed from drivers/net/wireless/wl12xx/wl1271_event.c)14
-rw-r--r--drivers/net/wireless/wl12xx/event.h (renamed from drivers/net/wireless/wl12xx/wl1271_event.h)4
-rw-r--r--drivers/net/wireless/wl12xx/ini.h (renamed from drivers/net/wireless/wl12xx/wl1271_ini.h)4
-rw-r--r--drivers/net/wireless/wl12xx/init.c (renamed from drivers/net/wireless/wl12xx/wl1271_init.c)23
-rw-r--r--drivers/net/wireless/wl12xx/init.h (renamed from drivers/net/wireless/wl12xx/wl1271_init.h)6
-rw-r--r--drivers/net/wireless/wl12xx/io.c (renamed from drivers/net/wireless/wl12xx/wl1271_io.c)5
-rw-r--r--drivers/net/wireless/wl12xx/io.h (renamed from drivers/net/wireless/wl12xx/wl1271_io.h)6
-rw-r--r--drivers/net/wireless/wl12xx/main.c (renamed from drivers/net/wireless/wl12xx/wl1271_main.c)494
-rw-r--r--drivers/net/wireless/wl12xx/ps.c (renamed from drivers/net/wireless/wl12xx/wl1271_ps.c)6
-rw-r--r--drivers/net/wireless/wl12xx/ps.h (renamed from drivers/net/wireless/wl12xx/wl1271_ps.h)8
-rw-r--r--drivers/net/wireless/wl12xx/reg.h (renamed from drivers/net/wireless/wl12xx/wl1271_reg.h)0
-rw-r--r--drivers/net/wireless/wl12xx/rx.c (renamed from drivers/net/wireless/wl12xx/wl1271_rx.c)38
-rw-r--r--drivers/net/wireless/wl12xx/rx.h (renamed from drivers/net/wireless/wl12xx/wl1271_rx.h)6
-rw-r--r--drivers/net/wireless/wl12xx/scan.c (renamed from drivers/net/wireless/wl12xx/wl1271_scan.c)17
-rw-r--r--drivers/net/wireless/wl12xx/scan.h (renamed from drivers/net/wireless/wl12xx/wl1271_scan.h)6
-rw-r--r--drivers/net/wireless/wl12xx/sdio.c (renamed from drivers/net/wireless/wl12xx/wl1271_sdio.c)4
-rw-r--r--drivers/net/wireless/wl12xx/sdio_test.c520
-rw-r--r--drivers/net/wireless/wl12xx/spi.c (renamed from drivers/net/wireless/wl12xx/wl1271_spi.c)6
-rw-r--r--drivers/net/wireless/wl12xx/testmode.c (renamed from drivers/net/wireless/wl12xx/wl1271_testmode.c)18
-rw-r--r--drivers/net/wireless/wl12xx/testmode.h (renamed from drivers/net/wireless/wl12xx/wl1271_testmode.h)4
-rw-r--r--drivers/net/wireless/wl12xx/tx.c (renamed from drivers/net/wireless/wl12xx/wl1271_tx.c)196
-rw-r--r--drivers/net/wireless/wl12xx/tx.h (renamed from drivers/net/wireless/wl12xx/wl1271_tx.h)7
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_debugfs.c583
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx.h (renamed from drivers/net/wireless/wl12xx/wl1271.h)150
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h17
-rw-r--r--drivers/net/wireless/zd1201.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c5
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c3
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c2
-rw-r--r--drivers/net/xen-netfront.c2
-rw-r--r--drivers/net/xilinx_emaclite.c37
-rw-r--r--drivers/net/yellowfin.c4
-rw-r--r--drivers/net/znet.c2
671 files changed, 83279 insertions, 27563 deletions
diff --git a/drivers/net/3c501.c b/drivers/net/3c501.c
index 1776ab61b05f..9e1c03eb97ae 100644
--- a/drivers/net/3c501.c
+++ b/drivers/net/3c501.c
@@ -158,8 +158,8 @@ static int mem_start;
 struct net_device * __init el1_probe(int unit)
 {
 	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
-	static unsigned ports[] = { 0x280, 0x300, 0};
-	unsigned *port;
+	static const unsigned ports[] = { 0x280, 0x300, 0};
+	const unsigned *port;
 	int err = 0;
 
 	if (!dev)
diff --git a/drivers/net/3c503.c b/drivers/net/3c503.c
index 4777a1cbcd8d..d84f6e8903a5 100644
--- a/drivers/net/3c503.c
+++ b/drivers/net/3c503.c
@@ -392,8 +392,8 @@ el2_open(struct net_device *dev)
 	int retval;
 
 	if (dev->irq < 2) {
-		int irqlist[] = {5, 9, 3, 4, 0};
-		int *irqp = irqlist;
+		static const int irqlist[] = {5, 9, 3, 4, 0};
+		const int *irqp = irqlist;
 
 		outb(EGACFR_NORM, E33G_GACFR);	/* Enable RAM and interrupts. */
 		do {
diff --git a/drivers/net/3c507.c b/drivers/net/3c507.c
index ea9b7a098c9b..1e945551c144 100644
--- a/drivers/net/3c507.c
+++ b/drivers/net/3c507.c
@@ -201,7 +201,7 @@ struct net_local {
 #define RX_BUF_SIZE	(1518+14+18)	/* packet+header+RBD */
 #define RX_BUF_END	(dev->mem_end - dev->mem_start)
 
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
 
 /*
 	That's it: only 86 bytes to set up the beast, including every extra
@@ -311,8 +311,8 @@ static int mem_start;
 struct net_device * __init el16_probe(int unit)
 {
 	struct net_device *dev = alloc_etherdev(sizeof(struct net_local));
-	static unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
-	unsigned *port;
+	static const unsigned ports[] = { 0x300, 0x320, 0x340, 0x280, 0};
+	const unsigned *port;
 	int err = -ENODEV;
 
 	if (!dev)
diff --git a/drivers/net/3c515.c b/drivers/net/3c515.c
index cdf7226a7c43..d2bb4b254c57 100644
--- a/drivers/net/3c515.c
+++ b/drivers/net/3c515.c
@@ -98,7 +98,7 @@ static int rx_nocopy, rx_copy, queued_packet;
 #define WAIT_TX_AVAIL 200
 
 /* Operational parameter that usually are not changed. */
-#define TX_TIMEOUT 40		/* Time in jiffies before concluding Tx hung */
+#define TX_TIMEOUT ((4*HZ)/10)	/* Time in jiffies before concluding Tx hung */
 
 /* The size here is somewhat misleading: the Corkscrew also uses the ISA
    aliased registers at <base>+0x400.
diff --git a/drivers/net/3c527.c b/drivers/net/3c527.c
index 013b7c396663..8c094bae8bf3 100644
--- a/drivers/net/3c527.c
+++ b/drivers/net/3c527.c
@@ -317,13 +317,13 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
 	u8 POS;
 	u32 base;
 	struct mc32_local *lp = netdev_priv(dev);
-	static u16 mca_io_bases[]={
+	static const u16 mca_io_bases[] = {
 		0x7280,0x7290,
 		0x7680,0x7690,
 		0x7A80,0x7A90,
 		0x7E80,0x7E90
 	};
-	static u32 mca_mem_bases[]={
+	static const u32 mca_mem_bases[] = {
 		0x00C0000,
 		0x00C4000,
 		0x00C8000,
@@ -333,7 +333,7 @@ static int __init mc32_probe1(struct net_device *dev, int slot)
 		0x00D8000,
 		0x00DC000
 	};
-	static char *failures[]={
+	static const char * const failures[] = {
 		"Processor instruction",
 		"Processor data bus",
 		"Processor data bus",
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index f5166dccd8df..98517a373473 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -1092,10 +1092,11 @@ err_out:
 static void __devexit rtl8139_remove_one (struct pci_dev *pdev)
 {
 	struct net_device *dev = pci_get_drvdata (pdev);
+	struct rtl8139_private *tp = netdev_priv(dev);
 
 	assert (dev != NULL);
 
-	flush_scheduled_work();
+	cancel_delayed_work_sync(&tp->thread);
 
 	unregister_netdev (dev);
 
diff --git a/drivers/net/82596.c b/drivers/net/82596.c
index e2c9c5b949f9..be1f1970c842 100644
--- a/drivers/net/82596.c
+++ b/drivers/net/82596.c
@@ -191,7 +191,7 @@ enum commands {
 #define RX_SUSPEND	0x0030
 #define RX_ABORT	0x0040
 
-#define TX_TIMEOUT 5
+#define TX_TIMEOUT (HZ/20)
 
 
 struct i596_reg {
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4f1755bddf6b..4c8bfc97fb4c 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1533,7 +1533,7 @@ config E100
 
 	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
 	  to identify the adapter.
 
 	  For the latest Intel PRO/100 network driver for Linux, see:
 
@@ -1786,17 +1786,17 @@ config KS8842
 	tristate "Micrel KSZ8841/42 with generic bus interface"
 	depends on HAS_IOMEM && DMA_ENGINE
 	help
 	  This platform driver is for KSZ8841(1-port) / KS8842(2-port)
 	  ethernet switch chip (managed, VLAN, QoS) from Micrel or
 	  Timberdale(FPGA).
 
 config KS8851
 	tristate "Micrel KS8851 SPI"
 	depends on SPI
 	select MII
 	select CRC32
 	help
 	  SPI driver for Micrel KS8851 SPI attached network chip.
 
 config KS8851_MLL
 	tristate "Micrel KS8851 MLL"
@@ -1944,19 +1944,12 @@ config 68360_ENET
 config FEC
 	bool "FEC ethernet controller (of ColdFire and some i.MX CPUs)"
 	depends on M523x || M527x || M5272 || M528x || M520x || M532x || \
-		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5
+		MACH_MX27 || ARCH_MX35 || ARCH_MX25 || ARCH_MX5 || SOC_IMX28
 	select PHYLIB
 	help
 	  Say Y here if you want to use the built-in 10/100 Fast ethernet
 	  controller on some Motorola ColdFire and Freescale i.MX processors.
 
-config FEC2
-	bool "Second FEC ethernet controller (on some ColdFire CPUs)"
-	depends on FEC
-	help
-	  Say Y here if you want to use the second built-in 10/100 Fast
-	  ethernet controller on some Motorola ColdFire processors.
-
 config FEC_MPC52xx
 	tristate "MPC52xx FEC driver"
 	depends on PPC_MPC52xx && PPC_BESTCOMM
@@ -2133,25 +2126,25 @@ config IP1000
 	  will be called ipg. This is recommended.
 
 config IGB
 	tristate "Intel(R) 82575/82576 PCI-Express Gigabit Ethernet support"
 	depends on PCI
 	---help---
 	  This driver supports Intel(R) 82575/82576 gigabit ethernet family of
 	  adapters. For more information on how to identify your adapter, go
 	  to the Adapter & Driver ID Guide at:
 
 	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
 	  For general information and support, go to the Intel support
 	  website at:
 
 	  <http://support.intel.com>
 
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/e1000.txt>.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called igb.
 
 config IGB_DCA
 	bool "Direct Cache Access (DCA) Support"
@@ -2163,25 +2156,25 @@ config IGB_DCA
 	  is used, with the intent of lessening the impact of cache misses.
 
 config IGBVF
 	tristate "Intel(R) 82576 Virtual Function Ethernet support"
 	depends on PCI
 	---help---
 	  This driver supports Intel(R) 82576 virtual functions. For more
 	  information on how to identify your adapter, go to the Adapter &
 	  Driver ID Guide at:
 
 	  <http://support.intel.com/support/network/adapter/pro100/21397.htm>
 
 	  For general information and support, go to the Intel support
 	  website at:
 
 	  <http://support.intel.com>
 
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/e1000.txt>.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called igbvf.
 
 source "drivers/net/ixp2000/Kconfig"
 
@@ -2233,6 +2226,7 @@ config YELLOWFIN
 config R8169
 	tristate "Realtek 8169 gigabit ethernet support"
 	depends on PCI
+	select FW_LOADER
 	select CRC32
 	select MII
 	---help---
@@ -2300,14 +2294,14 @@ config SKGE
 	  will be called skge. This is recommended.
 
 config SKGE_DEBUG
 	bool "Debugging interface"
 	depends on SKGE && DEBUG_FS
 	help
 	  This option adds the ability to dump driver state for debugging.
 	  The file /sys/kernel/debug/skge/ethX displays the state of the internal
 	  transmit and receive rings.
 
 	  If unsure, say N.
 
 config SKY2
 	tristate "SysKonnect Yukon2 support"
@@ -2326,14 +2320,14 @@ config SKY2
 	  will be called sky2. This is recommended.
 
 config SKY2_DEBUG
 	bool "Debugging interface"
 	depends on SKY2 && DEBUG_FS
 	help
 	  This option adds the ability to dump driver state for debugging.
 	  The file /sys/kernel/debug/sky2/ethX displays the state of the internal
 	  transmit and receive rings.
 
 	  If unsure, say N.
 
 config VIA_VELOCITY
 	tristate "VIA Velocity support"
@@ -2389,12 +2383,12 @@ config SPIDER_NET
 	  Cell Processor-Based Blades from IBM.
 
 config TSI108_ETH
 	tristate "Tundra TSI108 gigabit Ethernet support"
 	depends on TSI108_BRIDGE
 	help
 	  This driver supports Tundra TSI108 gigabit Ethernet ports.
 	  To compile this driver as a module, choose M here: the module
 	  will be called tsi108_eth.
 
 config GELIC_NET
 	tristate "PS3 Gigabit Ethernet driver"
@@ -2573,32 +2567,32 @@ config MDIO
 	tristate
 
 config CHELSIO_T1
 	tristate "Chelsio 10Gb Ethernet support"
 	depends on PCI
 	select CRC32
 	select MDIO
 	help
 	  This driver supports Chelsio gigabit and 10-gigabit
 	  Ethernet cards. More information about adapter features and
 	  performance tuning is in <file:Documentation/networking/cxgb.txt>.
 
 	  For general information about Chelsio and our products, visit
 	  our website at <http://www.chelsio.com>.
 
 	  For customer support, please visit our customer support page at
 	  <http://www.chelsio.com/support.html>.
 
 	  Please send feedback to <linux-bugs@chelsio.com>.
 
 	  To compile this driver as a module, choose M here: the module
 	  will be called cxgb.
 
 config CHELSIO_T1_1G
 	bool "Chelsio gigabit Ethernet support"
 	depends on CHELSIO_T1
 	help
 	  Enables support for Chelsio's gigabit Ethernet PCI cards. If you
 	  are using only 10G cards say 'N' here.
 
 config CHELSIO_T3_DEPENDS
 	tristate
@@ -2728,26 +2722,26 @@ config IXGBE_DCB
 	  If unsure, say N.
 
 config IXGBEVF
 	tristate "Intel(R) 82599 Virtual Function Ethernet support"
 	depends on PCI_MSI
 	---help---
 	  This driver supports Intel(R) 82599 virtual functions. For more
 	  information on how to identify your adapter, go to the Adapter &
 	  Driver ID Guide at:
 
 	  <http://support.intel.com/support/network/sb/CS-008441.htm>
 
 	  For general information and support, go to the Intel support
 	  website at:
 
 	  <http://support.intel.com>
 
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/ixgbevf.txt>.
 
 	  To compile this driver as a module, choose M here. The module
 	  will be called ixgbevf. MSI-X interrupt support is required
 	  for this driver to work correctly.
 
 config IXGB
 	tristate "Intel(R) PRO/10GbE support"
@@ -2772,29 +2766,38 @@ config IXGB
 	  will be called ixgb.
 
 config S2IO
-	tristate "S2IO 10Gbe XFrame NIC"
+	tristate "Exar Xframe 10Gb Ethernet Adapter"
 	depends on PCI
 	---help---
-	  This driver supports the 10Gbe XFrame NIC of S2IO.
+	  This driver supports Exar Corp's Xframe Series 10Gb Ethernet Adapters.
+
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/s2io.txt>.
 
+	  To compile this driver as a module, choose M here. The module
+	  will be called s2io.
+
 config VXGE
-	tristate "Neterion X3100 Series 10GbE PCIe Server Adapter"
+	tristate "Exar X3100 Series 10GbE PCIe Server Adapter"
 	depends on PCI && INET
 	---help---
-	  This driver supports Neterion Inc's X3100 Series 10 GbE PCIe
+	  This driver supports Exar Corp's X3100 Series 10 GbE PCIe
 	  I/O Virtualized Server Adapter.
+
 	  More specific information on configuring the driver is in
 	  <file:Documentation/networking/vxge.txt>.
 
+	  To compile this driver as a module, choose M here. The module
+	  will be called vxge.
+
 config VXGE_DEBUG_TRACE_ALL
 	bool "Enabling All Debug trace statments in driver"
 	default n
 	depends on VXGE
 	---help---
 	  Say Y here if you want to enabling all the debug trace statements in
-	  driver. By default only few debug trace statements are enabled.
+	  the vxge driver. By default only few debug trace statements are
+	  enabled.
 
 config MYRI10GE
 	tristate "Myricom Myri-10G Ethernet support"
@@ -2906,18 +2909,18 @@ config QLGE
 	  will be called qlge.
 
 config BNA
 	tristate "Brocade 1010/1020 10Gb Ethernet Driver support"
 	depends on PCI
 	---help---
 	  This driver supports Brocade 1010/1020 10Gb CEE capable Ethernet
 	  cards.
 	  To compile this driver as a module, choose M here: the module
 	  will be called bna.
 
 	  For general information and support, go to the Brocade support
 	  website at:
 
 	  <http://support.brocade.com>
 
 source "drivers/net/sfc/Kconfig"
 
@@ -2960,6 +2963,7 @@ config TILE_NET
 config XEN_NETDEV_FRONTEND
 	tristate "Xen network device frontend driver"
 	depends on XEN
+	select XEN_XENBUS_FRONTEND
 	default y
 	help
 	  The network device frontend driver allows the kernel to
@@ -3239,18 +3243,18 @@ config PPP_BSDCOMP
 	  modules once you have said "make modules". If unsure, say N.
 
 config PPP_MPPE
 	tristate "PPP MPPE compression (encryption) (EXPERIMENTAL)"
 	depends on PPP && EXPERIMENTAL
 	select CRYPTO
 	select CRYPTO_SHA1
 	select CRYPTO_ARC4
 	select CRYPTO_ECB
 	---help---
 	  Support for the MPPE Encryption protocol, as employed by the
 	  Microsoft Point-to-Point Tunneling Protocol.
 
 	  See http://pptpclient.sourceforge.net/ for information on
 	  configuring PPTP clients and servers to utilize this method.
 
 config PPPOE
 	tristate "PPP over Ethernet (EXPERIMENTAL)"
@@ -3409,14 +3413,14 @@ config VIRTIO_NET
 	depends on EXPERIMENTAL && VIRTIO
 	---help---
 	  This is the virtual network driver for virtio. It can be used with
 	  lguest or QEMU based VMMs (like KVM or Xen). Say Y or M.
 
 config VMXNET3
 	tristate "VMware VMXNET3 ethernet driver"
 	depends on PCI && INET
 	help
 	  This driver supports VMware's vmxnet3 virtual ethernet NIC.
 	  To compile this driver as a module, choose M here: the
 	  module will be called vmxnet3.
 
 endif # NETDEVICES
diff --git a/drivers/net/Space.c b/drivers/net/Space.c
index 9bb405bd664e..068c3563e00f 100644
--- a/drivers/net/Space.c
+++ b/drivers/net/Space.c
@@ -55,8 +55,6 @@ extern struct net_device *eth16i_probe(int unit);
55extern struct net_device *i82596_probe(int unit); 55extern struct net_device *i82596_probe(int unit);
56extern struct net_device *ewrk3_probe(int unit); 56extern struct net_device *ewrk3_probe(int unit);
57extern struct net_device *el1_probe(int unit); 57extern struct net_device *el1_probe(int unit);
58extern struct net_device *wavelan_probe(int unit);
59extern struct net_device *arlan_probe(int unit);
60extern struct net_device *el16_probe(int unit); 58extern struct net_device *el16_probe(int unit);
61extern struct net_device *elmc_probe(int unit); 59extern struct net_device *elmc_probe(int unit);
62extern struct net_device *elplus_probe(int unit); 60extern struct net_device *elplus_probe(int unit);
@@ -68,7 +66,6 @@ extern struct net_device *ni5010_probe(int unit);
68extern struct net_device *ni52_probe(int unit); 66extern struct net_device *ni52_probe(int unit);
69extern struct net_device *ni65_probe(int unit); 67extern struct net_device *ni65_probe(int unit);
70extern struct net_device *sonic_probe(int unit); 68extern struct net_device *sonic_probe(int unit);
71extern struct net_device *SK_init(int unit);
72extern struct net_device *seeq8005_probe(int unit); 69extern struct net_device *seeq8005_probe(int unit);
73extern struct net_device *smc_init(int unit); 70extern struct net_device *smc_init(int unit);
74extern struct net_device *atarilance_probe(int unit); 71extern struct net_device *atarilance_probe(int unit);
@@ -76,8 +73,6 @@ extern struct net_device *sun3lance_probe(int unit);
76extern struct net_device *sun3_82586_probe(int unit); 73extern struct net_device *sun3_82586_probe(int unit);
77extern struct net_device *apne_probe(int unit); 74extern struct net_device *apne_probe(int unit);
78extern struct net_device *cs89x0_probe(int unit); 75extern struct net_device *cs89x0_probe(int unit);
79extern struct net_device *hplance_probe(int unit);
80extern struct net_device *bagetlance_probe(int unit);
81extern struct net_device *mvme147lance_probe(int unit); 76extern struct net_device *mvme147lance_probe(int unit);
82extern struct net_device *tc515_probe(int unit); 77extern struct net_device *tc515_probe(int unit);
83extern struct net_device *lance_probe(int unit); 78extern struct net_device *lance_probe(int unit);
diff --git a/drivers/net/arm/am79c961a.c b/drivers/net/arm/am79c961a.c
index 62f21106efec..0c9217f48b72 100644
--- a/drivers/net/arm/am79c961a.c
+++ b/drivers/net/arm/am79c961a.c
@@ -340,14 +340,6 @@ am79c961_close(struct net_device *dev)
340 return 0; 340 return 0;
341} 341}
342 342
343/*
344 * Get the current statistics.
345 */
346static struct net_device_stats *am79c961_getstats (struct net_device *dev)
347{
348 return &dev->stats;
349}
350
351static void am79c961_mc_hash(char *addr, unsigned short *hash) 343static void am79c961_mc_hash(char *addr, unsigned short *hash)
352{ 344{
353 if (addr[0] & 0x01) { 345 if (addr[0] & 0x01) {
@@ -665,7 +657,6 @@ static const struct net_device_ops am79c961_netdev_ops = {
665 .ndo_open = am79c961_open, 657 .ndo_open = am79c961_open,
666 .ndo_stop = am79c961_close, 658 .ndo_stop = am79c961_close,
667 .ndo_start_xmit = am79c961_sendpacket, 659 .ndo_start_xmit = am79c961_sendpacket,
668 .ndo_get_stats = am79c961_getstats,
669 .ndo_set_multicast_list = am79c961_setmulticastlist, 660 .ndo_set_multicast_list = am79c961_setmulticastlist,
670 .ndo_tx_timeout = am79c961_timeout, 661 .ndo_tx_timeout = am79c961_timeout,
671 .ndo_validate_addr = eth_validate_addr, 662 .ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/arm/ixp4xx_eth.c b/drivers/net/arm/ixp4xx_eth.c
index 6028226a7270..9eb9b98a7ae3 100644
--- a/drivers/net/arm/ixp4xx_eth.c
+++ b/drivers/net/arm/ixp4xx_eth.c
@@ -1229,8 +1229,10 @@ static int __devinit eth_init_one(struct platform_device *pdev)
1229 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy); 1229 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy);
1230 port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0, 1230 port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
1231 PHY_INTERFACE_MODE_MII); 1231 PHY_INTERFACE_MODE_MII);
1232 if ((err = IS_ERR(port->phydev))) 1232 if (IS_ERR(port->phydev)) {
1233 err = PTR_ERR(port->phydev);
1233 goto err_free_mem; 1234 goto err_free_mem;
1235 }
1234 1236
1235 port->phydev->irq = PHY_POLL; 1237 port->phydev->irq = PHY_POLL;
1236 1238
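The ixp4xx_eth hunk above fixes a classic error-path bug: IS_ERR() only returns a boolean, so assigning its result to err propagated 1 instead of a negative errno to the caller. A minimal sketch of the corrected idiom, assuming the same phy_connect() call and err_free_mem label as in the driver:

	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);
	if (IS_ERR(port->phydev)) {
		err = PTR_ERR(port->phydev);	/* recover the encoded errno, e.g. -ENODEV */
		goto err_free_mem;
	}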
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 4545d5a06c24..bfea499a3513 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -117,7 +117,7 @@
117#define TX_DESC_SIZE 10 117#define TX_DESC_SIZE 10
118#define MAX_RBUFF_SZ 0x600 118#define MAX_RBUFF_SZ 0x600
119#define MAX_TBUFF_SZ 0x600 119#define MAX_TBUFF_SZ 0x600
120#define TX_TIMEOUT 50 120#define TX_TIMEOUT (HZ/2)
121#define DELAY 1000 121#define DELAY 1000
122#define CAM0 0x0 122#define CAM0 0x0
123 123
diff --git a/drivers/net/at1700.c b/drivers/net/at1700.c
index 89876897a6fe..f4744fc89768 100644
--- a/drivers/net/at1700.c
+++ b/drivers/net/at1700.c
@@ -150,7 +150,7 @@ struct net_local {
150#define PORT_OFFSET(o) (o) 150#define PORT_OFFSET(o) (o)
151 151
152 152
153#define TX_TIMEOUT 10 153#define TX_TIMEOUT (HZ/10)
154 154
155 155
156/* Index to functions, as function prototypes. */ 156/* Index to functions, as function prototypes. */
@@ -270,9 +270,9 @@ static const struct net_device_ops at1700_netdev_ops = {
270 270
271static int __init at1700_probe1(struct net_device *dev, int ioaddr) 271static int __init at1700_probe1(struct net_device *dev, int ioaddr)
272{ 272{
273 char fmv_irqmap[4] = {3, 7, 10, 15}; 273 static const char fmv_irqmap[4] = {3, 7, 10, 15};
274 char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15}; 274 static const char fmv_irqmap_pnp[8] = {3, 4, 5, 7, 9, 10, 11, 15};
275 char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15}; 275 static const char at1700_irqmap[8] = {3, 4, 5, 9, 10, 11, 14, 15};
276 unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0; 276 unsigned int i, irq, is_fmv18x = 0, is_at1700 = 0;
277 int slot, ret = -ENODEV; 277 int slot, ret = -ENODEV;
278 struct net_local *lp = netdev_priv(dev); 278 struct net_local *lp = netdev_priv(dev);
diff --git a/drivers/net/atarilance.c b/drivers/net/atarilance.c
index 8cb27cb7bca1..ce0091eb06f5 100644
--- a/drivers/net/atarilance.c
+++ b/drivers/net/atarilance.c
@@ -116,7 +116,7 @@ MODULE_LICENSE("GPL");
116#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5) 116#define RX_RING_LEN_BITS (RX_LOG_RING_SIZE << 5)
117#define RX_RING_MOD_MASK (RX_RING_SIZE - 1) 117#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
118 118
119#define TX_TIMEOUT 20 119#define TX_TIMEOUT (HZ/5)
120 120
121/* The LANCE Rx and Tx ring descriptors. */ 121/* The LANCE Rx and Tx ring descriptors. */
122struct lance_rx_head { 122struct lance_rx_head {
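The TX_TIMEOUT changes in w90p910_ether, at1700 and atarilance above all follow one pattern: dev->watchdog_timeo is measured in jiffies, so a bare constant such as 50 only means half a second when HZ happens to be 100. Writing the value in terms of HZ keeps the timeout the same across kernel configurations. A minimal sketch of how such a constant is consumed; example_setup() is a hypothetical helper:

	#define TX_TIMEOUT	(HZ / 2)	/* 500 ms regardless of the HZ setting */

	static void example_setup(struct net_device *dev)
	{
		/* ndo_tx_timeout fires once a TX queue has been stalled this long */
		dev->watchdog_timeo = TX_TIMEOUT;
	}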
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 09b099bfab2b..a699bbf20eb5 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -702,6 +702,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
702 702
703 703
704 adapter->wol = 0; 704 adapter->wol = 0;
705 device_set_wakeup_enable(&pdev->dev, false);
705 adapter->link_speed = SPEED_0; 706 adapter->link_speed = SPEED_0;
706 adapter->link_duplex = FULL_DUPLEX; 707 adapter->link_duplex = FULL_DUPLEX;
707 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE; 708 adapter->num_rx_queues = AT_DEF_RECEIVE_QUEUE;
@@ -2078,7 +2079,7 @@ static int atl1c_tso_csum(struct atl1c_adapter *adapter,
2078check_sum: 2079check_sum:
2079 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 2080 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2080 u8 css, cso; 2081 u8 css, cso;
2081 cso = skb_transport_offset(skb); 2082 cso = skb_checksum_start_offset(skb);
2082 2083
2083 if (unlikely(cso & 0x1)) { 2084 if (unlikely(cso & 0x1)) {
2084 if (netif_msg_tx_err(adapter)) 2085 if (netif_msg_tx_err(adapter))
@@ -2444,8 +2445,9 @@ static int atl1c_close(struct net_device *netdev)
2444 return 0; 2445 return 0;
2445} 2446}
2446 2447
2447static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state) 2448static int atl1c_suspend(struct device *dev)
2448{ 2449{
2450 struct pci_dev *pdev = to_pci_dev(dev);
2449 struct net_device *netdev = pci_get_drvdata(pdev); 2451 struct net_device *netdev = pci_get_drvdata(pdev);
2450 struct atl1c_adapter *adapter = netdev_priv(netdev); 2452 struct atl1c_adapter *adapter = netdev_priv(netdev);
2451 struct atl1c_hw *hw = &adapter->hw; 2453 struct atl1c_hw *hw = &adapter->hw;
@@ -2454,7 +2456,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2454 u32 wol_ctrl_data = 0; 2456 u32 wol_ctrl_data = 0;
2455 u16 mii_intr_status_data = 0; 2457 u16 mii_intr_status_data = 0;
2456 u32 wufc = adapter->wol; 2458 u32 wufc = adapter->wol;
2457 int retval = 0;
2458 2459
2459 atl1c_disable_l0s_l1(hw); 2460 atl1c_disable_l0s_l1(hw);
2460 if (netif_running(netdev)) { 2461 if (netif_running(netdev)) {
@@ -2462,9 +2463,6 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2462 atl1c_down(adapter); 2463 atl1c_down(adapter);
2463 } 2464 }
2464 netif_device_detach(netdev); 2465 netif_device_detach(netdev);
2465 retval = pci_save_state(pdev);
2466 if (retval)
2467 return retval;
2468 2466
2469 if (wufc) 2467 if (wufc)
2470 if (atl1c_phy_power_saving(hw) != 0) 2468 if (atl1c_phy_power_saving(hw) != 0)
@@ -2525,12 +2523,8 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2525 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data); 2523 AT_WRITE_REG(hw, REG_WOL_CTRL, wol_ctrl_data);
2526 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); 2524 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2527 2525
2528 /* pcie patch */
2529 device_set_wakeup_enable(&pdev->dev, 1);
2530
2531 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT | 2526 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_DEFAULT |
2532 GPHY_CTRL_EXT_RESET); 2527 GPHY_CTRL_EXT_RESET);
2533 pci_prepare_to_sleep(pdev);
2534 } else { 2528 } else {
2535 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING); 2529 AT_WRITE_REG(hw, REG_GPHY_CTRL, GPHY_CTRL_POWER_SAVING);
2536 master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS; 2530 master_ctrl_data |= MASTER_CTRL_CLK_SEL_DIS;
@@ -2540,25 +2534,17 @@ static int atl1c_suspend(struct pci_dev *pdev, pm_message_t state)
2540 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data); 2534 AT_WRITE_REG(hw, REG_MAC_CTRL, mac_ctrl_data);
2541 AT_WRITE_REG(hw, REG_WOL_CTRL, 0); 2535 AT_WRITE_REG(hw, REG_WOL_CTRL, 0);
2542 hw->phy_configured = false; /* re-init PHY when resume */ 2536 hw->phy_configured = false; /* re-init PHY when resume */
2543 pci_enable_wake(pdev, pci_choose_state(pdev, state), 0);
2544 } 2537 }
2545 2538
2546 pci_disable_device(pdev);
2547 pci_set_power_state(pdev, pci_choose_state(pdev, state));
2548
2549 return 0; 2539 return 0;
2550} 2540}
2551 2541
2552static int atl1c_resume(struct pci_dev *pdev) 2542static int atl1c_resume(struct device *dev)
2553{ 2543{
2544 struct pci_dev *pdev = to_pci_dev(dev);
2554 struct net_device *netdev = pci_get_drvdata(pdev); 2545 struct net_device *netdev = pci_get_drvdata(pdev);
2555 struct atl1c_adapter *adapter = netdev_priv(netdev); 2546 struct atl1c_adapter *adapter = netdev_priv(netdev);
2556 2547
2557 pci_set_power_state(pdev, PCI_D0);
2558 pci_restore_state(pdev);
2559 pci_enable_wake(pdev, PCI_D3hot, 0);
2560 pci_enable_wake(pdev, PCI_D3cold, 0);
2561
2562 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0); 2548 AT_WRITE_REG(&adapter->hw, REG_WOL_CTRL, 0);
2563 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE | 2549 atl1c_reset_pcie(&adapter->hw, ATL1C_PCIE_L0S_L1_DISABLE |
2564 ATL1C_PCIE_PHY_RESET); 2550 ATL1C_PCIE_PHY_RESET);
@@ -2582,7 +2568,12 @@ static int atl1c_resume(struct pci_dev *pdev)
2582 2568
2583static void atl1c_shutdown(struct pci_dev *pdev) 2569static void atl1c_shutdown(struct pci_dev *pdev)
2584{ 2570{
2585 atl1c_suspend(pdev, PMSG_SUSPEND); 2571 struct net_device *netdev = pci_get_drvdata(pdev);
2572 struct atl1c_adapter *adapter = netdev_priv(netdev);
2573
2574 atl1c_suspend(&pdev->dev);
2575 pci_wake_from_d3(pdev, adapter->wol);
2576 pci_set_power_state(pdev, PCI_D3hot);
2586} 2577}
2587 2578
2588static const struct net_device_ops atl1c_netdev_ops = { 2579static const struct net_device_ops atl1c_netdev_ops = {
@@ -2886,16 +2877,16 @@ static struct pci_error_handlers atl1c_err_handler = {
2886 .resume = atl1c_io_resume, 2877 .resume = atl1c_io_resume,
2887}; 2878};
2888 2879
2880static SIMPLE_DEV_PM_OPS(atl1c_pm_ops, atl1c_suspend, atl1c_resume);
2881
2889static struct pci_driver atl1c_driver = { 2882static struct pci_driver atl1c_driver = {
2890 .name = atl1c_driver_name, 2883 .name = atl1c_driver_name,
2891 .id_table = atl1c_pci_tbl, 2884 .id_table = atl1c_pci_tbl,
2892 .probe = atl1c_probe, 2885 .probe = atl1c_probe,
2893 .remove = __devexit_p(atl1c_remove), 2886 .remove = __devexit_p(atl1c_remove),
2894 /* Power Managment Hooks */
2895 .suspend = atl1c_suspend,
2896 .resume = atl1c_resume,
2897 .shutdown = atl1c_shutdown, 2887 .shutdown = atl1c_shutdown,
2898 .err_handler = &atl1c_err_handler 2888 .err_handler = &atl1c_err_handler,
2889 .driver.pm = &atl1c_pm_ops,
2899}; 2890};
2900 2891
2901/* 2892/*
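The atl1c changes above convert the driver from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops. Under dev_pm_ops the PCI core saves and restores config space and changes the device power state itself, which is why the explicit pci_save_state(), pci_set_power_state() and pci_enable_wake() calls disappear from the driver paths. A minimal sketch of the pattern; example_suspend(), example_resume() and example_driver are hypothetical names:

	static int example_suspend(struct device *dev)
	{
		struct pci_dev *pdev = to_pci_dev(dev);	/* dev_pm_ops callbacks receive a struct device */

		pci_clear_master(pdev);			/* stand-in for quiescing the NIC */
		return 0;
	}

	static int example_resume(struct device *dev)
	{
		/* reinitialise the NIC; config space was already restored by the core */
		return 0;
	}

	static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

	static struct pci_driver example_driver = {
		.name		= "example",
		.driver.pm	= &example_pm_ops,
	};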
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index ef6349bf3b33..e28f8baf394e 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1649,7 +1649,7 @@ check_sum:
1649 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1649 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1650 u8 css, cso; 1650 u8 css, cso;
1651 1651
1652 cso = skb_transport_offset(skb); 1652 cso = skb_checksum_start_offset(skb);
1653 if (unlikely(cso & 0x1)) { 1653 if (unlikely(cso & 0x1)) {
1654 netdev_err(adapter->netdev, 1654 netdev_err(adapter->netdev,
1655 "payload offset should not ant event number\n"); 1655 "payload offset should not ant event number\n");
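Both this atl1e hunk and the atl1.c hunk below switch the TX checksum-offload path to skb_checksum_start_offset(). For CHECKSUM_PARTIAL packets the hardware has to start summing at skb->csum_start, which is not always the transport header, so deriving the offset from csum_start is the value the NIC actually needs. Roughly what the helper computes, sketched from the skbuff API (see include/linux/skbuff.h):

	static inline u16 skb_checksum_start_offset(const struct sk_buff *skb)
	{
		/* offset from skb->data at which hardware checksumming starts */
		return skb->csum_start - skb_headroom(skb);
	}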
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 53363108994e..3b527687c28f 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -2174,7 +2174,7 @@ static int atl1_tx_csum(struct atl1_adapter *adapter, struct sk_buff *skb,
2174 u8 css, cso; 2174 u8 css, cso;
2175 2175
2176 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 2176 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2177 css = (u8) (skb->csum_start - skb_headroom(skb)); 2177 css = skb_checksum_start_offset(skb);
2178 cso = css + (u8) skb->csum_offset; 2178 cso = css + (u8) skb->csum_offset;
2179 if (unlikely(css & 0x1)) { 2179 if (unlikely(css & 0x1)) {
2180 /* L1 hardware requires an even number here */ 2180 /* L1 hardware requires an even number here */
@@ -3504,6 +3504,8 @@ static int atl1_set_ringparam(struct net_device *netdev,
3504 struct atl1_rfd_ring rfd_old, rfd_new; 3504 struct atl1_rfd_ring rfd_old, rfd_new;
3505 struct atl1_rrd_ring rrd_old, rrd_new; 3505 struct atl1_rrd_ring rrd_old, rrd_new;
3506 struct atl1_ring_header rhdr_old, rhdr_new; 3506 struct atl1_ring_header rhdr_old, rhdr_new;
3507 struct atl1_smb smb;
3508 struct atl1_cmb cmb;
3507 int err; 3509 int err;
3508 3510
3509 tpd_old = adapter->tpd_ring; 3511 tpd_old = adapter->tpd_ring;
@@ -3544,11 +3546,19 @@ static int atl1_set_ringparam(struct net_device *netdev,
3544 adapter->rrd_ring = rrd_old; 3546 adapter->rrd_ring = rrd_old;
3545 adapter->tpd_ring = tpd_old; 3547 adapter->tpd_ring = tpd_old;
3546 adapter->ring_header = rhdr_old; 3548 adapter->ring_header = rhdr_old;
3549 /*
3550 * Save SMB and CMB, since atl1_free_ring_resources
3551 * will clear them.
3552 */
3553 smb = adapter->smb;
3554 cmb = adapter->cmb;
3547 atl1_free_ring_resources(adapter); 3555 atl1_free_ring_resources(adapter);
3548 adapter->rfd_ring = rfd_new; 3556 adapter->rfd_ring = rfd_new;
3549 adapter->rrd_ring = rrd_new; 3557 adapter->rrd_ring = rrd_new;
3550 adapter->tpd_ring = tpd_new; 3558 adapter->tpd_ring = tpd_new;
3551 adapter->ring_header = rhdr_new; 3559 adapter->ring_header = rhdr_new;
3560 adapter->smb = smb;
3561 adapter->cmb = cmb;
3552 3562
3553 err = atl1_up(adapter); 3563 err = atl1_up(adapter);
3554 if (err) 3564 if (err)
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index 35b14bec1207..4e6f4e95a5a0 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -1504,8 +1504,8 @@ static void __devexit atl2_remove(struct pci_dev *pdev)
1504 1504
1505 del_timer_sync(&adapter->watchdog_timer); 1505 del_timer_sync(&adapter->watchdog_timer);
1506 del_timer_sync(&adapter->phy_config_timer); 1506 del_timer_sync(&adapter->phy_config_timer);
1507 1507 cancel_work_sync(&adapter->reset_task);
1508 flush_scheduled_work(); 1508 cancel_work_sync(&adapter->link_chg_task);
1509 1509
1510 unregister_netdev(netdev); 1510 unregister_netdev(netdev);
1511 1511
diff --git a/drivers/net/au1000_eth.c b/drivers/net/au1000_eth.c
index 53eff9ba6e95..b9debcfb61a0 100644
--- a/drivers/net/au1000_eth.c
+++ b/drivers/net/au1000_eth.c
@@ -106,8 +106,6 @@ MODULE_VERSION(DRV_VERSION);
106 * complete immediately. 106 * complete immediately.
107 */ 107 */
108 108
109struct au1000_private *au_macs[NUM_ETH_INTERFACES];
110
111/* 109/*
112 * board-specific configurations 110 * board-specific configurations
113 * 111 *
diff --git a/drivers/net/ax88796.c b/drivers/net/ax88796.c
index b6da4cf3694b..4bebff3faeab 100644
--- a/drivers/net/ax88796.c
+++ b/drivers/net/ax88796.c
@@ -325,7 +325,7 @@ static void ax_block_output(struct net_device *dev, int count,
325static void 325static void
326ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len) 326ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
327{ 327{
328 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 328 struct ei_device *ei_local = netdev_priv(dev);
329 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; 329 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
330 unsigned int memr; 330 unsigned int memr;
331 331
@@ -364,7 +364,7 @@ ax_mii_ei_outbits(struct net_device *dev, unsigned int bits, int len)
364static unsigned int 364static unsigned int
365ax_phy_ei_inbits(struct net_device *dev, int no) 365ax_phy_ei_inbits(struct net_device *dev, int no)
366{ 366{
367 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 367 struct ei_device *ei_local = netdev_priv(dev);
368 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR; 368 void __iomem *memr_addr = (void __iomem *)dev->base_addr + AX_MEMR;
369 unsigned int memr; 369 unsigned int memr;
370 unsigned int result = 0; 370 unsigned int result = 0;
@@ -412,7 +412,7 @@ ax_phy_issueaddr(struct net_device *dev, int phy_addr, int reg, int opc)
412static int 412static int
413ax_phy_read(struct net_device *dev, int phy_addr, int reg) 413ax_phy_read(struct net_device *dev, int phy_addr, int reg)
414{ 414{
415 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 415 struct ei_device *ei_local = netdev_priv(dev);
416 unsigned long flags; 416 unsigned long flags;
417 unsigned int result; 417 unsigned int result;
418 418
@@ -435,7 +435,7 @@ ax_phy_read(struct net_device *dev, int phy_addr, int reg)
435static void 435static void
436ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value) 436ax_phy_write(struct net_device *dev, int phy_addr, int reg, int value)
437{ 437{
438 struct ei_device *ei = (struct ei_device *) netdev_priv(dev); 438 struct ei_device *ei = netdev_priv(dev);
439 struct ax_device *ax = to_ax_dev(dev); 439 struct ax_device *ax = to_ax_dev(dev);
440 unsigned long flags; 440 unsigned long flags;
441 441
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ecfef240a303..e94a966af418 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -1097,7 +1097,7 @@ static int bcm_enet_stop(struct net_device *dev)
1097 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan)); 1097 enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));
1098 1098
1099 /* make sure no mib update is scheduled */ 1099 /* make sure no mib update is scheduled */
1100 flush_scheduled_work(); 1100 cancel_work_sync(&priv->mib_update_task);
1101 1101
1102 /* disable dma & mac */ 1102 /* disable dma & mac */
1103 bcm_enet_disable_dma(priv, priv->tx_chan); 1103 bcm_enet_disable_dma(priv, priv->tx_chan);
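The atl2 and bcm63xx_enet hunks above share one theme: flush_scheduled_work() waits for every item on the shared kernel workqueue, which is slower than necessary and a known deadlock source when called with locks held. Cancelling the driver's own work items with cancel_work_sync() waits only for those handlers. A minimal sketch with a hypothetical private structure:

	#include <linux/workqueue.h>

	struct example_priv {
		struct work_struct reset_task;	/* hypothetical per-device work item */
	};

	static void example_stop(struct example_priv *priv)
	{
		/* waits for reset_task if it is running right now, then cancels it */
		cancel_work_sync(&priv->reset_task);
	}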
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 4594a28b1f66..add0b93350dd 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -38,14 +38,17 @@
38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 38#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC" 39#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
40#define OC_NAME "Emulex OneConnect 10Gbps NIC" 40#define OC_NAME "Emulex OneConnect 10Gbps NIC"
41#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)" 41#define OC_NAME_BE OC_NAME "(be3)"
42#define OC_NAME_LANCER OC_NAME "(Lancer)"
42#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver" 43#define DRV_DESC "ServerEngines BladeEngine 10Gbps NIC Driver"
43 44
44#define BE_VENDOR_ID 0x19a2 45#define BE_VENDOR_ID 0x19a2
46#define EMULEX_VENDOR_ID 0x10df
45#define BE_DEVICE_ID1 0x211 47#define BE_DEVICE_ID1 0x211
46#define BE_DEVICE_ID2 0x221 48#define BE_DEVICE_ID2 0x221
47#define OC_DEVICE_ID1 0x700 49#define OC_DEVICE_ID1 0x700 /* Device Id for BE2 cards */
48#define OC_DEVICE_ID2 0x710 50#define OC_DEVICE_ID2 0x710 /* Device Id for BE3 cards */
51#define OC_DEVICE_ID3 0xe220 /* Device id for Lancer cards */
49 52
50static inline char *nic_name(struct pci_dev *pdev) 53static inline char *nic_name(struct pci_dev *pdev)
51{ 54{
@@ -53,7 +56,9 @@ static inline char *nic_name(struct pci_dev *pdev)
53 case OC_DEVICE_ID1: 56 case OC_DEVICE_ID1:
54 return OC_NAME; 57 return OC_NAME;
55 case OC_DEVICE_ID2: 58 case OC_DEVICE_ID2:
56 return OC_NAME1; 59 return OC_NAME_BE;
60 case OC_DEVICE_ID3:
61 return OC_NAME_LANCER;
57 case BE_DEVICE_ID2: 62 case BE_DEVICE_ID2:
58 return BE3_NAME; 63 return BE3_NAME;
59 default: 64 default:
@@ -149,6 +154,7 @@ struct be_eq_obj {
149 u16 min_eqd; /* in usecs */ 154 u16 min_eqd; /* in usecs */
150 u16 max_eqd; /* in usecs */ 155 u16 max_eqd; /* in usecs */
151 u16 cur_eqd; /* in usecs */ 156 u16 cur_eqd; /* in usecs */
157 u8 msix_vec_idx;
152 158
153 struct napi_struct napi; 159 struct napi_struct napi;
154}; 160};
@@ -214,7 +220,9 @@ struct be_rx_obj {
214 struct be_rx_stats stats; 220 struct be_rx_stats stats;
215 u8 rss_id; 221 u8 rss_id;
216 bool rx_post_starved; /* Zero rx frags have been posted to BE */ 222 bool rx_post_starved; /* Zero rx frags have been posted to BE */
217 u32 cache_line_barrier[16]; 223 u16 last_frag_index;
224 u16 rsvd;
225 u32 cache_line_barrier[15];
218}; 226};
219 227
220struct be_vf_cfg { 228struct be_vf_cfg {
@@ -234,7 +242,7 @@ struct be_adapter {
234 u8 __iomem *db; /* Door Bell */ 242 u8 __iomem *db; /* Door Bell */
235 u8 __iomem *pcicfg; /* PCI config space */ 243 u8 __iomem *pcicfg; /* PCI config space */
236 244
237 spinlock_t mbox_lock; /* For serializing mbox cmds to BE card */ 245 struct mutex mbox_lock; /* For serializing mbox cmds to BE card */
238 struct be_dma_mem mbox_mem; 246 struct be_dma_mem mbox_mem;
239 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr 247 /* Mbox mem is adjusted to align to 16 bytes. The allocated addr
240 * is stored for freeing purpose */ 248 * is stored for freeing purpose */
@@ -260,6 +268,8 @@ struct be_adapter {
260 u32 num_rx_qs; 268 u32 num_rx_qs;
261 u32 big_page_size; /* Compounded page size shared by rx wrbs */ 269 u32 big_page_size; /* Compounded page size shared by rx wrbs */
262 270
271 u8 msix_vec_next_idx;
272
263 struct vlan_group *vlan_grp; 273 struct vlan_group *vlan_grp;
264 u16 vlans_added; 274 u16 vlans_added;
265 u16 max_vlans; /* Number of vlans supported */ 275 u16 max_vlans; /* Number of vlans supported */
@@ -299,8 +309,8 @@ struct be_adapter {
299 309
300 bool sriov_enabled; 310 bool sriov_enabled;
301 struct be_vf_cfg vf_cfg[BE_MAX_VF]; 311 struct be_vf_cfg vf_cfg[BE_MAX_VF];
302 u8 base_eq_id;
303 u8 is_virtfn; 312 u8 is_virtfn;
313 u32 sli_family;
304}; 314};
305 315
306#define be_physfn(adapter) (!adapter->is_virtfn) 316#define be_physfn(adapter) (!adapter->is_virtfn)
@@ -309,6 +319,8 @@ struct be_adapter {
309#define BE_GEN2 2 319#define BE_GEN2 2
310#define BE_GEN3 3 320#define BE_GEN3 3
311 321
322#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3)
323
312extern const struct ethtool_ops be_ethtool_ops; 324extern const struct ethtool_ops be_ethtool_ops;
313 325
314#define tx_stats(adapter) (&adapter->tx_stats) 326#define tx_stats(adapter) (&adapter->tx_stats)
@@ -416,10 +428,17 @@ static inline u8 is_udp_pkt(struct sk_buff *skb)
416static inline void be_check_sriov_fn_type(struct be_adapter *adapter) 428static inline void be_check_sriov_fn_type(struct be_adapter *adapter)
417{ 429{
418 u8 data; 430 u8 data;
419 431 u32 sli_intf;
420 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA); 432
421 pci_read_config_byte(adapter->pdev, 0xFE, &data); 433 if (lancer_chip(adapter)) {
422 adapter->is_virtfn = (data != 0xAA); 434 pci_read_config_dword(adapter->pdev, SLI_INTF_REG_OFFSET,
435 &sli_intf);
436 adapter->is_virtfn = (sli_intf & SLI_INTF_FT_MASK) ? 1 : 0;
437 } else {
438 pci_write_config_byte(adapter->pdev, 0xFE, 0xAA);
439 pci_read_config_byte(adapter->pdev, 0xFE, &data);
440 adapter->is_virtfn = (data != 0xAA);
441 }
423} 442}
424 443
425static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac) 444static inline void be_vf_eth_addr_generate(struct be_adapter *adapter, u8 *mac)
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index e4465d222a7d..0c7811faf72c 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -323,7 +323,12 @@ static int be_mbox_notify_wait(struct be_adapter *adapter)
323 323
324static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage) 324static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
325{ 325{
326 u32 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET); 326 u32 sem;
327
328 if (lancer_chip(adapter))
329 sem = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
330 else
331 sem = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
327 332
328 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK; 333 *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
329 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK) 334 if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
@@ -462,7 +467,8 @@ int be_cmd_fw_init(struct be_adapter *adapter)
462 u8 *wrb; 467 u8 *wrb;
463 int status; 468 int status;
464 469
465 spin_lock(&adapter->mbox_lock); 470 if (mutex_lock_interruptible(&adapter->mbox_lock))
471 return -1;
466 472
467 wrb = (u8 *)wrb_from_mbox(adapter); 473 wrb = (u8 *)wrb_from_mbox(adapter);
468 *wrb++ = 0xFF; 474 *wrb++ = 0xFF;
@@ -476,7 +482,7 @@ int be_cmd_fw_init(struct be_adapter *adapter)
476 482
477 status = be_mbox_notify_wait(adapter); 483 status = be_mbox_notify_wait(adapter);
478 484
479 spin_unlock(&adapter->mbox_lock); 485 mutex_unlock(&adapter->mbox_lock);
480 return status; 486 return status;
481} 487}
482 488
@@ -491,7 +497,8 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
491 if (adapter->eeh_err) 497 if (adapter->eeh_err)
492 return -EIO; 498 return -EIO;
493 499
494 spin_lock(&adapter->mbox_lock); 500 if (mutex_lock_interruptible(&adapter->mbox_lock))
501 return -1;
495 502
496 wrb = (u8 *)wrb_from_mbox(adapter); 503 wrb = (u8 *)wrb_from_mbox(adapter);
497 *wrb++ = 0xFF; 504 *wrb++ = 0xFF;
@@ -505,7 +512,7 @@ int be_cmd_fw_clean(struct be_adapter *adapter)
505 512
506 status = be_mbox_notify_wait(adapter); 513 status = be_mbox_notify_wait(adapter);
507 514
508 spin_unlock(&adapter->mbox_lock); 515 mutex_unlock(&adapter->mbox_lock);
509 return status; 516 return status;
510} 517}
511int be_cmd_eq_create(struct be_adapter *adapter, 518int be_cmd_eq_create(struct be_adapter *adapter,
@@ -516,7 +523,8 @@ int be_cmd_eq_create(struct be_adapter *adapter,
516 struct be_dma_mem *q_mem = &eq->dma_mem; 523 struct be_dma_mem *q_mem = &eq->dma_mem;
517 int status; 524 int status;
518 525
519 spin_lock(&adapter->mbox_lock); 526 if (mutex_lock_interruptible(&adapter->mbox_lock))
527 return -1;
520 528
521 wrb = wrb_from_mbox(adapter); 529 wrb = wrb_from_mbox(adapter);
522 req = embedded_payload(wrb); 530 req = embedded_payload(wrb);
@@ -546,7 +554,7 @@ int be_cmd_eq_create(struct be_adapter *adapter,
546 eq->created = true; 554 eq->created = true;
547 } 555 }
548 556
549 spin_unlock(&adapter->mbox_lock); 557 mutex_unlock(&adapter->mbox_lock);
550 return status; 558 return status;
551} 559}
552 560
@@ -558,7 +566,8 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
558 struct be_cmd_req_mac_query *req; 566 struct be_cmd_req_mac_query *req;
559 int status; 567 int status;
560 568
561 spin_lock(&adapter->mbox_lock); 569 if (mutex_lock_interruptible(&adapter->mbox_lock))
570 return -1;
562 571
563 wrb = wrb_from_mbox(adapter); 572 wrb = wrb_from_mbox(adapter);
564 req = embedded_payload(wrb); 573 req = embedded_payload(wrb);
@@ -583,7 +592,7 @@ int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
583 memcpy(mac_addr, resp->mac.addr, ETH_ALEN); 592 memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
584 } 593 }
585 594
586 spin_unlock(&adapter->mbox_lock); 595 mutex_unlock(&adapter->mbox_lock);
587 return status; 596 return status;
588} 597}
589 598
@@ -667,7 +676,8 @@ int be_cmd_cq_create(struct be_adapter *adapter,
667 void *ctxt; 676 void *ctxt;
668 int status; 677 int status;
669 678
670 spin_lock(&adapter->mbox_lock); 679 if (mutex_lock_interruptible(&adapter->mbox_lock))
680 return -1;
671 681
672 wrb = wrb_from_mbox(adapter); 682 wrb = wrb_from_mbox(adapter);
673 req = embedded_payload(wrb); 683 req = embedded_payload(wrb);
@@ -680,16 +690,36 @@ int be_cmd_cq_create(struct be_adapter *adapter,
680 OPCODE_COMMON_CQ_CREATE, sizeof(*req)); 690 OPCODE_COMMON_CQ_CREATE, sizeof(*req));
681 691
682 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 692 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
693 if (lancer_chip(adapter)) {
694 req->hdr.version = 1;
695 req->page_size = 1; /* 1 for 4K */
696 AMAP_SET_BITS(struct amap_cq_context_lancer, coalescwm, ctxt,
697 coalesce_wm);
698 AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
699 no_delay);
700 AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
701 __ilog2_u32(cq->len/256));
702 AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
703 AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
704 ctxt, 1);
705 AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
706 ctxt, eq->id);
707 AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
708 } else {
709 AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
710 coalesce_wm);
711 AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
712 ctxt, no_delay);
713 AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
714 __ilog2_u32(cq->len/256));
715 AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
716 AMAP_SET_BITS(struct amap_cq_context_be, solevent,
717 ctxt, sol_evts);
718 AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
719 AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
720 AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
721 }
683 722
684 AMAP_SET_BITS(struct amap_cq_context, coalescwm, ctxt, coalesce_wm);
685 AMAP_SET_BITS(struct amap_cq_context, nodelay, ctxt, no_delay);
686 AMAP_SET_BITS(struct amap_cq_context, count, ctxt,
687 __ilog2_u32(cq->len/256));
688 AMAP_SET_BITS(struct amap_cq_context, valid, ctxt, 1);
689 AMAP_SET_BITS(struct amap_cq_context, solevent, ctxt, sol_evts);
690 AMAP_SET_BITS(struct amap_cq_context, eventable, ctxt, 1);
691 AMAP_SET_BITS(struct amap_cq_context, eqid, ctxt, eq->id);
692 AMAP_SET_BITS(struct amap_cq_context, armed, ctxt, 1);
693 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 723 be_dws_cpu_to_le(ctxt, sizeof(req->context));
694 724
695 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 725 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -701,7 +731,7 @@ int be_cmd_cq_create(struct be_adapter *adapter,
701 cq->created = true; 731 cq->created = true;
702 } 732 }
703 733
704 spin_unlock(&adapter->mbox_lock); 734 mutex_unlock(&adapter->mbox_lock);
705 735
706 return status; 736 return status;
707} 737}
@@ -724,7 +754,8 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
724 void *ctxt; 754 void *ctxt;
725 int status; 755 int status;
726 756
727 spin_lock(&adapter->mbox_lock); 757 if (mutex_lock_interruptible(&adapter->mbox_lock))
758 return -1;
728 759
729 wrb = wrb_from_mbox(adapter); 760 wrb = wrb_from_mbox(adapter);
730 req = embedded_payload(wrb); 761 req = embedded_payload(wrb);
@@ -737,13 +768,27 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
737 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req)); 768 OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));
738 769
739 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size)); 770 req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
771 if (lancer_chip(adapter)) {
772 req->hdr.version = 1;
773 req->cq_id = cpu_to_le16(cq->id);
774
775 AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
776 be_encoded_q_len(mccq->len));
777 AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
778 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
779 ctxt, cq->id);
780 AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
781 ctxt, 1);
782
783 } else {
784 AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
785 AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
786 be_encoded_q_len(mccq->len));
787 AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
788 }
740 789
741 AMAP_SET_BITS(struct amap_mcc_context, valid, ctxt, 1);
742 AMAP_SET_BITS(struct amap_mcc_context, ring_size, ctxt,
743 be_encoded_q_len(mccq->len));
744 AMAP_SET_BITS(struct amap_mcc_context, cq_id, ctxt, cq->id);
745 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */ 790 /* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
746 req->async_event_bitmap[0] |= 0x00000022; 791 req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
747 be_dws_cpu_to_le(ctxt, sizeof(req->context)); 792 be_dws_cpu_to_le(ctxt, sizeof(req->context));
748 793
749 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem); 794 be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
@@ -754,7 +799,7 @@ int be_cmd_mccq_create(struct be_adapter *adapter,
754 mccq->id = le16_to_cpu(resp->id); 799 mccq->id = le16_to_cpu(resp->id);
755 mccq->created = true; 800 mccq->created = true;
756 } 801 }
757 spin_unlock(&adapter->mbox_lock); 802 mutex_unlock(&adapter->mbox_lock);
758 803
759 return status; 804 return status;
760} 805}
@@ -769,7 +814,8 @@ int be_cmd_txq_create(struct be_adapter *adapter,
769 void *ctxt; 814 void *ctxt;
770 int status; 815 int status;
771 816
772 spin_lock(&adapter->mbox_lock); 817 if (mutex_lock_interruptible(&adapter->mbox_lock))
818 return -1;
773 819
774 wrb = wrb_from_mbox(adapter); 820 wrb = wrb_from_mbox(adapter);
775 req = embedded_payload(wrb); 821 req = embedded_payload(wrb);
@@ -801,7 +847,7 @@ int be_cmd_txq_create(struct be_adapter *adapter,
801 txq->created = true; 847 txq->created = true;
802 } 848 }
803 849
804 spin_unlock(&adapter->mbox_lock); 850 mutex_unlock(&adapter->mbox_lock);
805 851
806 return status; 852 return status;
807} 853}
@@ -816,7 +862,8 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
816 struct be_dma_mem *q_mem = &rxq->dma_mem; 862 struct be_dma_mem *q_mem = &rxq->dma_mem;
817 int status; 863 int status;
818 864
819 spin_lock(&adapter->mbox_lock); 865 if (mutex_lock_interruptible(&adapter->mbox_lock))
866 return -1;
820 867
821 wrb = wrb_from_mbox(adapter); 868 wrb = wrb_from_mbox(adapter);
822 req = embedded_payload(wrb); 869 req = embedded_payload(wrb);
@@ -843,7 +890,7 @@ int be_cmd_rxq_create(struct be_adapter *adapter,
843 *rss_id = resp->rss_id; 890 *rss_id = resp->rss_id;
844 } 891 }
845 892
846 spin_unlock(&adapter->mbox_lock); 893 mutex_unlock(&adapter->mbox_lock);
847 894
848 return status; 895 return status;
849} 896}
@@ -862,7 +909,8 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
862 if (adapter->eeh_err) 909 if (adapter->eeh_err)
863 return -EIO; 910 return -EIO;
864 911
865 spin_lock(&adapter->mbox_lock); 912 if (mutex_lock_interruptible(&adapter->mbox_lock))
913 return -1;
866 914
867 wrb = wrb_from_mbox(adapter); 915 wrb = wrb_from_mbox(adapter);
868 req = embedded_payload(wrb); 916 req = embedded_payload(wrb);
@@ -899,7 +947,7 @@ int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
899 947
900 status = be_mbox_notify_wait(adapter); 948 status = be_mbox_notify_wait(adapter);
901 949
902 spin_unlock(&adapter->mbox_lock); 950 mutex_unlock(&adapter->mbox_lock);
903 951
904 return status; 952 return status;
905} 953}
@@ -915,7 +963,8 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
915 struct be_cmd_req_if_create *req; 963 struct be_cmd_req_if_create *req;
916 int status; 964 int status;
917 965
918 spin_lock(&adapter->mbox_lock); 966 if (mutex_lock_interruptible(&adapter->mbox_lock))
967 return -1;
919 968
920 wrb = wrb_from_mbox(adapter); 969 wrb = wrb_from_mbox(adapter);
921 req = embedded_payload(wrb); 970 req = embedded_payload(wrb);
@@ -941,7 +990,7 @@ int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
941 *pmac_id = le32_to_cpu(resp->pmac_id); 990 *pmac_id = le32_to_cpu(resp->pmac_id);
942 } 991 }
943 992
944 spin_unlock(&adapter->mbox_lock); 993 mutex_unlock(&adapter->mbox_lock);
945 return status; 994 return status;
946} 995}
947 996
@@ -955,7 +1004,8 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
955 if (adapter->eeh_err) 1004 if (adapter->eeh_err)
956 return -EIO; 1005 return -EIO;
957 1006
958 spin_lock(&adapter->mbox_lock); 1007 if (mutex_lock_interruptible(&adapter->mbox_lock))
1008 return -1;
959 1009
960 wrb = wrb_from_mbox(adapter); 1010 wrb = wrb_from_mbox(adapter);
961 req = embedded_payload(wrb); 1011 req = embedded_payload(wrb);
@@ -970,7 +1020,7 @@ int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id)
970 1020
971 status = be_mbox_notify_wait(adapter); 1021 status = be_mbox_notify_wait(adapter);
972 1022
973 spin_unlock(&adapter->mbox_lock); 1023 mutex_unlock(&adapter->mbox_lock);
974 1024
975 return status; 1025 return status;
976} 1026}
@@ -1060,7 +1110,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1060 struct be_cmd_req_get_fw_version *req; 1110 struct be_cmd_req_get_fw_version *req;
1061 int status; 1111 int status;
1062 1112
1063 spin_lock(&adapter->mbox_lock); 1113 if (mutex_lock_interruptible(&adapter->mbox_lock))
1114 return -1;
1064 1115
1065 wrb = wrb_from_mbox(adapter); 1116 wrb = wrb_from_mbox(adapter);
1066 req = embedded_payload(wrb); 1117 req = embedded_payload(wrb);
@@ -1077,7 +1128,7 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1077 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN); 1128 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1078 } 1129 }
1079 1130
1080 spin_unlock(&adapter->mbox_lock); 1131 mutex_unlock(&adapter->mbox_lock);
1081 return status; 1132 return status;
1082} 1133}
1083 1134
@@ -1322,7 +1373,8 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1322 struct be_cmd_req_query_fw_cfg *req; 1373 struct be_cmd_req_query_fw_cfg *req;
1323 int status; 1374 int status;
1324 1375
1325 spin_lock(&adapter->mbox_lock); 1376 if (mutex_lock_interruptible(&adapter->mbox_lock))
1377 return -1;
1326 1378
1327 wrb = wrb_from_mbox(adapter); 1379 wrb = wrb_from_mbox(adapter);
1328 req = embedded_payload(wrb); 1380 req = embedded_payload(wrb);
@@ -1341,7 +1393,7 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
1341 *caps = le32_to_cpu(resp->function_caps); 1393 *caps = le32_to_cpu(resp->function_caps);
1342 } 1394 }
1343 1395
1344 spin_unlock(&adapter->mbox_lock); 1396 mutex_unlock(&adapter->mbox_lock);
1345 return status; 1397 return status;
1346} 1398}
1347 1399
@@ -1352,7 +1404,8 @@ int be_cmd_reset_function(struct be_adapter *adapter)
1352 struct be_cmd_req_hdr *req; 1404 struct be_cmd_req_hdr *req;
1353 int status; 1405 int status;
1354 1406
1355 spin_lock(&adapter->mbox_lock); 1407 if (mutex_lock_interruptible(&adapter->mbox_lock))
1408 return -1;
1356 1409
1357 wrb = wrb_from_mbox(adapter); 1410 wrb = wrb_from_mbox(adapter);
1358 req = embedded_payload(wrb); 1411 req = embedded_payload(wrb);
@@ -1365,7 +1418,7 @@ int be_cmd_reset_function(struct be_adapter *adapter)
1365 1418
1366 status = be_mbox_notify_wait(adapter); 1419 status = be_mbox_notify_wait(adapter);
1367 1420
1368 spin_unlock(&adapter->mbox_lock); 1421 mutex_unlock(&adapter->mbox_lock);
1369 return status; 1422 return status;
1370} 1423}
1371 1424
@@ -1376,7 +1429,8 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1376 u32 myhash[10]; 1429 u32 myhash[10];
1377 int status; 1430 int status;
1378 1431
1379 spin_lock(&adapter->mbox_lock); 1432 if (mutex_lock_interruptible(&adapter->mbox_lock))
1433 return -1;
1380 1434
1381 wrb = wrb_from_mbox(adapter); 1435 wrb = wrb_from_mbox(adapter);
1382 req = embedded_payload(wrb); 1436 req = embedded_payload(wrb);
@@ -1396,7 +1450,7 @@ int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1396 1450
1397 status = be_mbox_notify_wait(adapter); 1451 status = be_mbox_notify_wait(adapter);
1398 1452
1399 spin_unlock(&adapter->mbox_lock); 1453 mutex_unlock(&adapter->mbox_lock);
1400 return status; 1454 return status;
1401} 1455}
1402 1456
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index 8469ff061f30..83d15c8a9fa3 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -309,7 +309,7 @@ struct be_cmd_req_pmac_del {
309/******************** Create CQ ***************************/ 309/******************** Create CQ ***************************/
310/* Pseudo amap definition in which each bit of the actual structure is defined 310/* Pseudo amap definition in which each bit of the actual structure is defined
311 * as a byte: used to calculate offset/shift/mask of each field */ 311 * as a byte: used to calculate offset/shift/mask of each field */
312struct amap_cq_context { 312struct amap_cq_context_be {
313 u8 cidx[11]; /* dword 0*/ 313 u8 cidx[11]; /* dword 0*/
314 u8 rsvd0; /* dword 0*/ 314 u8 rsvd0; /* dword 0*/
315 u8 coalescwm[2]; /* dword 0*/ 315 u8 coalescwm[2]; /* dword 0*/
@@ -332,14 +332,32 @@ struct amap_cq_context {
332 u8 rsvd5[32]; /* dword 3*/ 332 u8 rsvd5[32]; /* dword 3*/
333} __packed; 333} __packed;
334 334
335struct amap_cq_context_lancer {
336 u8 rsvd0[12]; /* dword 0*/
337 u8 coalescwm[2]; /* dword 0*/
338 u8 nodelay; /* dword 0*/
339 u8 rsvd1[12]; /* dword 0*/
340 u8 count[2]; /* dword 0*/
341 u8 valid; /* dword 0*/
342 u8 rsvd2; /* dword 0*/
343 u8 eventable; /* dword 0*/
344 u8 eqid[16]; /* dword 1*/
345 u8 rsvd3[15]; /* dword 1*/
346 u8 armed; /* dword 1*/
347 u8 rsvd4[32]; /* dword 2*/
348 u8 rsvd5[32]; /* dword 3*/
349} __packed;
350
335struct be_cmd_req_cq_create { 351struct be_cmd_req_cq_create {
336 struct be_cmd_req_hdr hdr; 352 struct be_cmd_req_hdr hdr;
337 u16 num_pages; 353 u16 num_pages;
338 u16 rsvd0; 354 u8 page_size;
339 u8 context[sizeof(struct amap_cq_context) / 8]; 355 u8 rsvd0;
356 u8 context[sizeof(struct amap_cq_context_be) / 8];
340 struct phys_addr pages[8]; 357 struct phys_addr pages[8];
341} __packed; 358} __packed;
342 359
360
343struct be_cmd_resp_cq_create { 361struct be_cmd_resp_cq_create {
344 struct be_cmd_resp_hdr hdr; 362 struct be_cmd_resp_hdr hdr;
345 u16 cq_id; 363 u16 cq_id;
@@ -349,7 +367,7 @@ struct be_cmd_resp_cq_create {
349/******************** Create MCCQ ***************************/ 367/******************** Create MCCQ ***************************/
350/* Pseudo amap definition in which each bit of the actual structure is defined 368/* Pseudo amap definition in which each bit of the actual structure is defined
351 * as a byte: used to calculate offset/shift/mask of each field */ 369 * as a byte: used to calculate offset/shift/mask of each field */
352struct amap_mcc_context { 370struct amap_mcc_context_be {
353 u8 con_index[14]; 371 u8 con_index[14];
354 u8 rsvd0[2]; 372 u8 rsvd0[2];
355 u8 ring_size[4]; 373 u8 ring_size[4];
@@ -364,12 +382,23 @@ struct amap_mcc_context {
364 u8 rsvd2[32]; 382 u8 rsvd2[32];
365} __packed; 383} __packed;
366 384
385struct amap_mcc_context_lancer {
386 u8 async_cq_id[16];
387 u8 ring_size[4];
388 u8 rsvd0[12];
389 u8 rsvd1[31];
390 u8 valid;
391 u8 async_cq_valid[1];
392 u8 rsvd2[31];
393 u8 rsvd3[32];
394} __packed;
395
367struct be_cmd_req_mcc_create { 396struct be_cmd_req_mcc_create {
368 struct be_cmd_req_hdr hdr; 397 struct be_cmd_req_hdr hdr;
369 u16 num_pages; 398 u16 num_pages;
370 u16 rsvd0; 399 u16 cq_id;
371 u32 async_event_bitmap[1]; 400 u32 async_event_bitmap[1];
372 u8 context[sizeof(struct amap_mcc_context) / 8]; 401 u8 context[sizeof(struct amap_mcc_context_be) / 8];
373 struct phys_addr pages[8]; 402 struct phys_addr pages[8];
374} __packed; 403} __packed;
375 404
@@ -605,6 +634,7 @@ struct be_hw_stats {
605 struct be_rxf_stats rxf; 634 struct be_rxf_stats rxf;
606 u32 rsvd[48]; 635 u32 rsvd[48];
607 struct be_erx_stats erx; 636 struct be_erx_stats erx;
637 u32 rsvd1[6];
608}; 638};
609 639
610struct be_cmd_req_get_stats { 640struct be_cmd_req_get_stats {
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 0f46366ecc48..b4be0271efe0 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -549,7 +549,9 @@ be_test_ddr_dma(struct be_adapter *adapter)
549{ 549{
550 int ret, i; 550 int ret, i;
551 struct be_dma_mem ddrdma_cmd; 551 struct be_dma_mem ddrdma_cmd;
552 u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL}; 552 static const u64 pattern[2] = {
553 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL
554 };
553 555
554 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); 556 ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
555 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size, 557 ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
diff --git a/drivers/net/benet/be_hw.h b/drivers/net/benet/be_hw.h
index a2ec5df0d733..4096d9778234 100644
--- a/drivers/net/benet/be_hw.h
+++ b/drivers/net/benet/be_hw.h
@@ -32,10 +32,12 @@
32#define MPU_EP_CONTROL 0 32#define MPU_EP_CONTROL 0
33 33
34/********** MPU semaphore ******************/ 34/********** MPU semaphore ******************/
35#define MPU_EP_SEMAPHORE_OFFSET 0xac 35#define MPU_EP_SEMAPHORE_OFFSET 0xac
36#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF 36#define MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET 0x400
37#define EP_SEMAPHORE_POST_ERR_MASK 0x1 37#define EP_SEMAPHORE_POST_STAGE_MASK 0x0000FFFF
38#define EP_SEMAPHORE_POST_ERR_SHIFT 31 38#define EP_SEMAPHORE_POST_ERR_MASK 0x1
39#define EP_SEMAPHORE_POST_ERR_SHIFT 31
40
39/* MPU semaphore POST stage values */ 41/* MPU semaphore POST stage values */
40#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */ 42#define POST_STAGE_AWAITING_HOST_RDY 0x1 /* FW awaiting go-ahead from host */
41#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */ 43#define POST_STAGE_HOST_RDY 0x2 /* Host has given go-ahead to FW */
@@ -66,6 +68,28 @@
66#define PCICFG_UE_STATUS_LOW_MASK 0xA8 68#define PCICFG_UE_STATUS_LOW_MASK 0xA8
67#define PCICFG_UE_STATUS_HI_MASK 0xAC 69#define PCICFG_UE_STATUS_HI_MASK 0xAC
68 70
71/******** SLI_INTF ***********************/
72#define SLI_INTF_REG_OFFSET 0x58
73#define SLI_INTF_VALID_MASK 0xE0000000
74#define SLI_INTF_VALID 0xC0000000
75#define SLI_INTF_HINT2_MASK 0x1F000000
76#define SLI_INTF_HINT2_SHIFT 24
77#define SLI_INTF_HINT1_MASK 0x00FF0000
78#define SLI_INTF_HINT1_SHIFT 16
79#define SLI_INTF_FAMILY_MASK 0x00000F00
80#define SLI_INTF_FAMILY_SHIFT 8
81#define SLI_INTF_IF_TYPE_MASK 0x0000F000
82#define SLI_INTF_IF_TYPE_SHIFT 12
83#define SLI_INTF_REV_MASK 0x000000F0
84#define SLI_INTF_REV_SHIFT 4
85#define SLI_INTF_FT_MASK 0x00000001
86
87
88/* SLI family */
89#define BE_SLI_FAMILY 0x0
90#define LANCER_A0_SLI_FAMILY 0xA
91
92
69/********* ISR0 Register offset **********/ 93/********* ISR0 Register offset **********/
70#define CEV_ISR0_OFFSET 0xC18 94#define CEV_ISR0_OFFSET 0xC18
71#define CEV_ISR_SIZE 4 95#define CEV_ISR_SIZE 4
@@ -73,6 +97,9 @@
73/********* Event Q door bell *************/ 97/********* Event Q door bell *************/
74#define DB_EQ_OFFSET DB_CQ_OFFSET 98#define DB_EQ_OFFSET DB_CQ_OFFSET
75#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */ 99#define DB_EQ_RING_ID_MASK 0x1FF /* bits 0 - 8 */
100#define DB_EQ_RING_ID_EXT_MASK 0x3e00 /* bits 9-13 */
101#define DB_EQ_RING_ID_EXT_MASK_SHIFT (2) /* qid bits 9-13 placing at 11-15 */
102
76/* Clear the interrupt for this eq */ 103/* Clear the interrupt for this eq */
77#define DB_EQ_CLR_SHIFT (9) /* bit 9 */ 104#define DB_EQ_CLR_SHIFT (9) /* bit 9 */
78/* Must be 1 */ 105/* Must be 1 */
@@ -85,6 +112,10 @@
85/********* Compl Q door bell *************/ 112/********* Compl Q door bell *************/
86#define DB_CQ_OFFSET 0x120 113#define DB_CQ_OFFSET 0x120
87#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */ 114#define DB_CQ_RING_ID_MASK 0x3FF /* bits 0 - 9 */
115#define DB_CQ_RING_ID_EXT_MASK 0x7C00 /* bits 10-14 */
116#define DB_CQ_RING_ID_EXT_MASK_SHIFT (1) /* qid bits 10-14
117 placing at 11-15 */
118
88/* Number of event entries processed */ 119/* Number of event entries processed */
89#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */ 120#define DB_CQ_NUM_POPPED_SHIFT (16) /* bits 16 - 28 */
90/* Rearm bit */ 121/* Rearm bit */
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 93354eee2cfd..de40d3b7152f 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -41,6 +41,7 @@ static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) }, 41 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 42 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 43 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
44 { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 { 0 } 45 { 0 }
45}; 46};
46MODULE_DEVICE_TABLE(pci, be_dev_ids); 47MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -188,6 +189,8 @@ static void be_eq_notify(struct be_adapter *adapter, u16 qid,
188{ 189{
189 u32 val = 0; 190 u32 val = 0;
190 val |= qid & DB_EQ_RING_ID_MASK; 191 val |= qid & DB_EQ_RING_ID_MASK;
192 val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
193 DB_EQ_RING_ID_EXT_MASK_SHIFT);
191 194
192 if (adapter->eeh_err) 195 if (adapter->eeh_err)
193 return; 196 return;
@@ -205,6 +208,8 @@ void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
205{ 208{
206 u32 val = 0; 209 u32 val = 0;
207 val |= qid & DB_CQ_RING_ID_MASK; 210 val |= qid & DB_CQ_RING_ID_MASK;
211 val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
212 DB_CQ_RING_ID_EXT_MASK_SHIFT);
208 213
209 if (adapter->eeh_err) 214 if (adapter->eeh_err)
210 return; 215 return;
@@ -404,7 +409,8 @@ static void be_tx_stats_update(struct be_adapter *adapter,
404} 409}
405 410
406/* Determine number of WRB entries needed to xmit data in an skb */ 411/* Determine number of WRB entries needed to xmit data in an skb */
407static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy) 412static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
413 bool *dummy)
408{ 414{
409 int cnt = (skb->len > skb->data_len); 415 int cnt = (skb->len > skb->data_len);
410 416
@@ -412,12 +418,13 @@ static u32 wrb_cnt_for_skb(struct sk_buff *skb, bool *dummy)
412 418
413 /* to account for hdr wrb */ 419 /* to account for hdr wrb */
414 cnt++; 420 cnt++;
415 if (cnt & 1) { 421 if (lancer_chip(adapter) || !(cnt & 1)) {
422 *dummy = false;
423 } else {
416 /* add a dummy to make it an even num */ 424 /* add a dummy to make it an even num */
417 cnt++; 425 cnt++;
418 *dummy = true; 426 *dummy = true;
419 } else 427 }
420 *dummy = false;
421 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT); 428 BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
422 return cnt; 429 return cnt;
423} 430}
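The wrb_cnt_for_skb() change above encodes a hardware difference: BE2/BE3 queues want an even number of work request entries per packet, so an odd count gets a dummy WRB appended, while Lancer has no such restriction and never needs the padding entry. A sketch of the resulting rule (equivalent logic, not the literal driver code):

	/* cnt = one WRB per fragment, +1 if there is linear data, +1 header WRB */
	if (!lancer_chip(adapter) && (cnt & 1)) {
		cnt++;			/* pad to an even count on BE2/BE3 */
		*dummy = true;
	} else {
		*dummy = false;
	}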
@@ -443,8 +450,18 @@ static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
443 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1); 450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
444 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss, 451 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
445 hdr, skb_shinfo(skb)->gso_size); 452 hdr, skb_shinfo(skb)->gso_size);
446 if (skb_is_gso_v6(skb)) 453 if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
447 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1); 454 AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
455 if (lancer_chip(adapter) && adapter->sli_family ==
456 LANCER_A0_SLI_FAMILY) {
457 AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
458 if (is_tcp_pkt(skb))
459 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
460 tcpcs, hdr, 1);
461 else if (is_udp_pkt(skb))
462 AMAP_SET_BITS(struct amap_eth_hdr_wrb,
463 udpcs, hdr, 1);
464 }
448 } else if (skb->ip_summed == CHECKSUM_PARTIAL) { 465 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
449 if (is_tcp_pkt(skb)) 466 if (is_tcp_pkt(skb))
450 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1); 467 AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
@@ -566,7 +583,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
566 u32 start = txq->head; 583 u32 start = txq->head;
567 bool dummy_wrb, stopped = false; 584 bool dummy_wrb, stopped = false;
568 585
569 wrb_cnt = wrb_cnt_for_skb(skb, &dummy_wrb); 586 wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
570 587
571 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb); 588 copied = make_tx_wrbs(adapter, skb, wrb_cnt, dummy_wrb);
572 if (copied) { 589 if (copied) {
@@ -894,11 +911,17 @@ static void be_rx_compl_discard(struct be_adapter *adapter,
894 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 911 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
895 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 912 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
896 913
897 for (i = 0; i < num_rcvd; i++) { 914 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
898 page_info = get_rx_page_info(adapter, rxo, rxq_idx); 915 if (likely(rxq_idx != rxo->last_frag_index && num_rcvd != 0)) {
899 put_page(page_info->page); 916
900 memset(page_info, 0, sizeof(*page_info)); 917 rxo->last_frag_index = rxq_idx;
901 index_inc(&rxq_idx, rxq->len); 918
919 for (i = 0; i < num_rcvd; i++) {
920 page_info = get_rx_page_info(adapter, rxo, rxq_idx);
921 put_page(page_info->page);
922 memset(page_info, 0, sizeof(*page_info));
923 index_inc(&rxq_idx, rxq->len);
924 }
902 } 925 }
903} 926}
904 927
@@ -999,9 +1022,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
999 u8 vtm; 1022 u8 vtm;
1000 1023
1001 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 1024 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1002 /* Is it a flush compl that has no data */
1003 if (unlikely(num_rcvd == 0))
1004 return;
1005 1025
1006 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN); 1026 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
1007 if (unlikely(!skb)) { 1027 if (unlikely(!skb)) {
@@ -1035,7 +1055,8 @@ static void be_rx_compl_process(struct be_adapter *adapter,
1035 return; 1055 return;
1036 } 1056 }
1037 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1057 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1038 vid = swab16(vid); 1058 if (!lancer_chip(adapter))
1059 vid = swab16(vid);
1039 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid); 1060 vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
1040 } else { 1061 } else {
1041 netif_receive_skb(skb); 1062 netif_receive_skb(skb);
@@ -1057,10 +1078,6 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1057 u8 pkt_type; 1078 u8 pkt_type;
1058 1079
1059 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp); 1080 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags, rxcp);
1060 /* Is it a flush compl that has no data */
1061 if (unlikely(num_rcvd == 0))
1062 return;
1063
1064 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp); 1081 pkt_size = AMAP_GET_BITS(struct amap_eth_rx_compl, pktsize, rxcp);
1065 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp); 1082 vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl, vtp, rxcp);
1066 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp); 1083 rxq_idx = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx, rxcp);
@@ -1113,7 +1130,8 @@ static void be_rx_compl_process_gro(struct be_adapter *adapter,
1113 napi_gro_frags(&eq_obj->napi); 1130 napi_gro_frags(&eq_obj->napi);
1114 } else { 1131 } else {
1115 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp); 1132 vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
1116 vid = swab16(vid); 1133 if (!lancer_chip(adapter))
1134 vid = swab16(vid);
1117 1135
1118 if (!adapter->vlan_grp || adapter->vlans_added == 0) 1136 if (!adapter->vlan_grp || adapter->vlans_added == 0)
1119 return; 1137 return;
@@ -1330,7 +1348,7 @@ static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1330 while ((rxcp = be_rx_compl_get(rxo)) != NULL) { 1348 while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1331 be_rx_compl_discard(adapter, rxo, rxcp); 1349 be_rx_compl_discard(adapter, rxo, rxcp);
1332 be_rx_compl_reset(rxcp); 1350 be_rx_compl_reset(rxcp);
1333 be_cq_notify(adapter, rx_cq->id, true, 1); 1351 be_cq_notify(adapter, rx_cq->id, false, 1);
1334 } 1352 }
1335 1353
1336 /* Then free posted rx buffer that were not used */ 1354 /* Then free posted rx buffer that were not used */
@@ -1381,7 +1399,8 @@ static void be_tx_compl_clean(struct be_adapter *adapter)
1381 sent_skb = sent_skbs[txq->tail]; 1399 sent_skb = sent_skbs[txq->tail];
1382 end_idx = txq->tail; 1400 end_idx = txq->tail;
1383 index_adv(&end_idx, 1401 index_adv(&end_idx,
1384 wrb_cnt_for_skb(sent_skb, &dummy_wrb) - 1, txq->len); 1402 wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1403 txq->len);
1385 be_tx_compl_process(adapter, end_idx); 1404 be_tx_compl_process(adapter, end_idx);
1386 } 1405 }
1387} 1406}
@@ -1476,7 +1495,9 @@ static int be_tx_queues_create(struct be_adapter *adapter)
1476 /* Ask BE to create Tx Event queue */ 1495 /* Ask BE to create Tx Event queue */
1477 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd)) 1496 if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1478 goto tx_eq_free; 1497 goto tx_eq_free;
1479 adapter->base_eq_id = adapter->tx_eq.q.id; 1498
1499 adapter->tx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1500
1480 1501
1481 /* Alloc TX eth compl queue */ 1502 /* Alloc TX eth compl queue */
1482 cq = &adapter->tx_obj.cq; 1503 cq = &adapter->tx_obj.cq;
@@ -1554,6 +1575,9 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1554 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE; 1575 adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1555 for_all_rx_queues(adapter, rxo, i) { 1576 for_all_rx_queues(adapter, rxo, i) {
1556 rxo->adapter = adapter; 1577 rxo->adapter = adapter;
1578 /* Init last_frag_index so that the frag index in the first
1579 * completion will never match */
1580 rxo->last_frag_index = 0xffff;
1557 rxo->rx_eq.max_eqd = BE_MAX_EQD; 1581 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1558 rxo->rx_eq.enable_aic = true; 1582 rxo->rx_eq.enable_aic = true;
1559 1583
@@ -1568,6 +1592,8 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1568 if (rc) 1592 if (rc)
1569 goto err; 1593 goto err;
1570 1594
1595 rxo->rx_eq.msix_vec_idx = adapter->msix_vec_next_idx++;
1596
1571 /* CQ */ 1597 /* CQ */
1572 cq = &rxo->cq; 1598 cq = &rxo->cq;
1573 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN, 1599 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
@@ -1578,7 +1604,6 @@ static int be_rx_queues_create(struct be_adapter *adapter)
1578 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3); 1604 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1579 if (rc) 1605 if (rc)
1580 goto err; 1606 goto err;
1581
1582 /* Rx Q */ 1607 /* Rx Q */
1583 q = &rxo->q; 1608 q = &rxo->q;
1584 rc = be_queue_alloc(adapter, q, RX_Q_LEN, 1609 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
@@ -1611,29 +1636,45 @@ err:
1611 return -1; 1636 return -1;
1612} 1637}
1613 1638
1614/* There are 8 evt ids per func. Retruns the evt id's bit number */ 1639static bool event_peek(struct be_eq_obj *eq_obj)
1615static inline int be_evt_bit_get(struct be_adapter *adapter, u32 eq_id)
1616{ 1640{
1617 return eq_id - adapter->base_eq_id; 1641 struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1642 if (!eqe->evt)
1643 return false;
1644 else
1645 return true;
1618} 1646}
1619 1647
1620static irqreturn_t be_intx(int irq, void *dev) 1648static irqreturn_t be_intx(int irq, void *dev)
1621{ 1649{
1622 struct be_adapter *adapter = dev; 1650 struct be_adapter *adapter = dev;
1623 struct be_rx_obj *rxo; 1651 struct be_rx_obj *rxo;
1624 int isr, i; 1652 int isr, i, tx = 0 , rx = 0;
1625 1653
1626 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET + 1654 if (lancer_chip(adapter)) {
1627 (adapter->tx_eq.q.id/ 8) * CEV_ISR_SIZE); 1655 if (event_peek(&adapter->tx_eq))
1628 if (!isr) 1656 tx = event_handle(adapter, &adapter->tx_eq);
1629 return IRQ_NONE; 1657 for_all_rx_queues(adapter, rxo, i) {
1658 if (event_peek(&rxo->rx_eq))
1659 rx |= event_handle(adapter, &rxo->rx_eq);
1660 }
1630 1661
1631 if ((1 << be_evt_bit_get(adapter, adapter->tx_eq.q.id) & isr)) 1662 if (!(tx || rx))
1632 event_handle(adapter, &adapter->tx_eq); 1663 return IRQ_NONE;
1633 1664
1634 for_all_rx_queues(adapter, rxo, i) { 1665 } else {
1635 if ((1 << be_evt_bit_get(adapter, rxo->rx_eq.q.id) & isr)) 1666 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1636 event_handle(adapter, &rxo->rx_eq); 1667 (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1668 if (!isr)
1669 return IRQ_NONE;
1670
1671 if ((1 << adapter->tx_eq.msix_vec_idx & isr))
1672 event_handle(adapter, &adapter->tx_eq);
1673
1674 for_all_rx_queues(adapter, rxo, i) {
1675 if ((1 << rxo->rx_eq.msix_vec_idx & isr))
1676 event_handle(adapter, &rxo->rx_eq);
1677 }
1637 } 1678 }
1638 1679
1639 return IRQ_HANDLED; 1680 return IRQ_HANDLED;
@@ -1658,10 +1699,9 @@ static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1658 return IRQ_HANDLED; 1699 return IRQ_HANDLED;
1659} 1700}
1660 1701
1661static inline bool do_gro(struct be_adapter *adapter, struct be_rx_obj *rxo, 1702static inline bool do_gro(struct be_rx_obj *rxo,
1662 struct be_eth_rx_compl *rxcp) 1703 struct be_eth_rx_compl *rxcp, u8 err)
1663{ 1704{
1664 int err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1665 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp); 1705 int tcp_frame = AMAP_GET_BITS(struct amap_eth_rx_compl, tcpf, rxcp);
1666 1706
1667 if (err) 1707 if (err)
@@ -1678,6 +1718,8 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1678 struct be_queue_info *rx_cq = &rxo->cq; 1718 struct be_queue_info *rx_cq = &rxo->cq;
1679 struct be_eth_rx_compl *rxcp; 1719 struct be_eth_rx_compl *rxcp;
1680 u32 work_done; 1720 u32 work_done;
1721 u16 frag_index, num_rcvd;
1722 u8 err;
1681 1723
1682 rxo->stats.rx_polls++; 1724 rxo->stats.rx_polls++;
1683 for (work_done = 0; work_done < budget; work_done++) { 1725 for (work_done = 0; work_done < budget; work_done++) {
@@ -1685,10 +1727,22 @@ static int be_poll_rx(struct napi_struct *napi, int budget)
1685 if (!rxcp) 1727 if (!rxcp)
1686 break; 1728 break;
1687 1729
1688 if (do_gro(adapter, rxo, rxcp)) 1730 err = AMAP_GET_BITS(struct amap_eth_rx_compl, err, rxcp);
1689 be_rx_compl_process_gro(adapter, rxo, rxcp); 1731 frag_index = AMAP_GET_BITS(struct amap_eth_rx_compl, fragndx,
1690 else 1732 rxcp);
1691 be_rx_compl_process(adapter, rxo, rxcp); 1733 num_rcvd = AMAP_GET_BITS(struct amap_eth_rx_compl, numfrags,
1734 rxcp);
1735
1736 /* Skip out-of-buffer compl(lancer) or flush compl(BE) */
1737 if (likely(frag_index != rxo->last_frag_index &&
1738 num_rcvd != 0)) {
1739 rxo->last_frag_index = frag_index;
1740
1741 if (do_gro(rxo, rxcp, err))
1742 be_rx_compl_process_gro(adapter, rxo, rxcp);
1743 else
1744 be_rx_compl_process(adapter, rxo, rxcp);
1745 }
1692 1746
1693 be_rx_compl_reset(rxcp); 1747 be_rx_compl_reset(rxcp);
1694 } 1748 }
@@ -1830,8 +1884,7 @@ static void be_worker(struct work_struct *work)
1830 be_post_rx_frags(rxo); 1884 be_post_rx_frags(rxo);
1831 } 1885 }
1832 } 1886 }
1833 1887 if (!adapter->ue_detected && !lancer_chip(adapter))
1834 if (!adapter->ue_detected)
1835 be_detect_dump_ue(adapter); 1888 be_detect_dump_ue(adapter);
1836 1889
1837reschedule: 1890reschedule:
@@ -1910,10 +1963,10 @@ static void be_sriov_disable(struct be_adapter *adapter)
1910#endif 1963#endif
1911} 1964}
1912 1965
1913static inline int be_msix_vec_get(struct be_adapter *adapter, u32 eq_id) 1966static inline int be_msix_vec_get(struct be_adapter *adapter,
1967 struct be_eq_obj *eq_obj)
1914{ 1968{
1915 return adapter->msix_entries[ 1969 return adapter->msix_entries[eq_obj->msix_vec_idx].vector;
1916 be_evt_bit_get(adapter, eq_id)].vector;
1917} 1970}
1918 1971
1919static int be_request_irq(struct be_adapter *adapter, 1972static int be_request_irq(struct be_adapter *adapter,
@@ -1924,14 +1977,14 @@ static int be_request_irq(struct be_adapter *adapter,
1924 int vec; 1977 int vec;
1925 1978
1926 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc); 1979 sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
1927 vec = be_msix_vec_get(adapter, eq_obj->q.id); 1980 vec = be_msix_vec_get(adapter, eq_obj);
1928 return request_irq(vec, handler, 0, eq_obj->desc, context); 1981 return request_irq(vec, handler, 0, eq_obj->desc, context);
1929} 1982}
1930 1983
1931static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj, 1984static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
1932 void *context) 1985 void *context)
1933{ 1986{
1934 int vec = be_msix_vec_get(adapter, eq_obj->q.id); 1987 int vec = be_msix_vec_get(adapter, eq_obj);
1935 free_irq(vec, context); 1988 free_irq(vec, context);
1936} 1989}
1937 1990
@@ -2036,14 +2089,15 @@ static int be_close(struct net_device *netdev)
2036 netif_carrier_off(netdev); 2089 netif_carrier_off(netdev);
2037 adapter->link_up = false; 2090 adapter->link_up = false;
2038 2091
2039 be_intr_set(adapter, false); 2092 if (!lancer_chip(adapter))
2093 be_intr_set(adapter, false);
2040 2094
2041 if (adapter->msix_enabled) { 2095 if (adapter->msix_enabled) {
2042 vec = be_msix_vec_get(adapter, tx_eq->q.id); 2096 vec = be_msix_vec_get(adapter, tx_eq);
2043 synchronize_irq(vec); 2097 synchronize_irq(vec);
2044 2098
2045 for_all_rx_queues(adapter, rxo, i) { 2099 for_all_rx_queues(adapter, rxo, i) {
2046 vec = be_msix_vec_get(adapter, rxo->rx_eq.q.id); 2100 vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2047 synchronize_irq(vec); 2101 synchronize_irq(vec);
2048 } 2102 }
2049 } else { 2103 } else {
@@ -2082,7 +2136,8 @@ static int be_open(struct net_device *netdev)
2082 2136
2083 be_irq_register(adapter); 2137 be_irq_register(adapter);
2084 2138
2085 be_intr_set(adapter, true); 2139 if (!lancer_chip(adapter))
2140 be_intr_set(adapter, true);
2086 2141
2087 /* The evt queues are created in unarmed state; arm them */ 2142 /* The evt queues are created in unarmed state; arm them */
2088 for_all_rx_queues(adapter, rxo, i) { 2143 for_all_rx_queues(adapter, rxo, i) {
@@ -2343,10 +2398,10 @@ static int be_flash_data(struct be_adapter *adapter,
2343 int num_bytes; 2398 int num_bytes;
2344 const u8 *p = fw->data; 2399 const u8 *p = fw->data;
2345 struct be_cmd_write_flashrom *req = flash_cmd->va; 2400 struct be_cmd_write_flashrom *req = flash_cmd->va;
2346 struct flash_comp *pflashcomp; 2401 const struct flash_comp *pflashcomp;
2347 int num_comp; 2402 int num_comp;
2348 2403
2349 struct flash_comp gen3_flash_types[9] = { 2404 static const struct flash_comp gen3_flash_types[9] = {
2350 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE, 2405 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2351 FLASH_IMAGE_MAX_SIZE_g3}, 2406 FLASH_IMAGE_MAX_SIZE_g3},
2352 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT, 2407 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
@@ -2366,7 +2421,7 @@ static int be_flash_data(struct be_adapter *adapter,
2366 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW, 2421 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2367 FLASH_NCSI_IMAGE_MAX_SIZE_g3} 2422 FLASH_NCSI_IMAGE_MAX_SIZE_g3}
2368 }; 2423 };
2369 struct flash_comp gen2_flash_types[8] = { 2424 static const struct flash_comp gen2_flash_types[8] = {
2370 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE, 2425 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2371 FLASH_IMAGE_MAX_SIZE_g2}, 2426 FLASH_IMAGE_MAX_SIZE_g2},
2372 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT, 2427 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
@@ -2388,11 +2443,11 @@ static int be_flash_data(struct be_adapter *adapter,
2388 if (adapter->generation == BE_GEN3) { 2443 if (adapter->generation == BE_GEN3) {
2389 pflashcomp = gen3_flash_types; 2444 pflashcomp = gen3_flash_types;
2390 filehdr_size = sizeof(struct flash_file_hdr_g3); 2445 filehdr_size = sizeof(struct flash_file_hdr_g3);
2391 num_comp = 9; 2446 num_comp = ARRAY_SIZE(gen3_flash_types);
2392 } else { 2447 } else {
2393 pflashcomp = gen2_flash_types; 2448 pflashcomp = gen2_flash_types;
2394 filehdr_size = sizeof(struct flash_file_hdr_g2); 2449 filehdr_size = sizeof(struct flash_file_hdr_g2);
2395 num_comp = 8; 2450 num_comp = ARRAY_SIZE(gen2_flash_types);
2396 } 2451 }
2397 for (i = 0; i < num_comp; i++) { 2452 for (i = 0; i < num_comp; i++) {
2398 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) && 2453 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
@@ -2543,10 +2598,15 @@ static void be_netdev_init(struct net_device *netdev)
2543 int i; 2598 int i;
2544 2599
2545 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO | 2600 netdev->features |= NETIF_F_SG | NETIF_F_HW_VLAN_RX | NETIF_F_TSO |
2546 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER | NETIF_F_HW_CSUM | 2601 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_FILTER |
2602 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2547 NETIF_F_GRO | NETIF_F_TSO6; 2603 NETIF_F_GRO | NETIF_F_TSO6;
2548 2604
2549 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_HW_CSUM; 2605 netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO |
2606 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2607
2608 if (lancer_chip(adapter))
2609 netdev->vlan_features |= NETIF_F_TSO6;
2550 2610
2551 netdev->flags |= IFF_MULTICAST; 2611 netdev->flags |= IFF_MULTICAST;
2552 2612
@@ -2587,6 +2647,15 @@ static int be_map_pci_bars(struct be_adapter *adapter)
2587 u8 __iomem *addr; 2647 u8 __iomem *addr;
2588 int pcicfg_reg, db_reg; 2648 int pcicfg_reg, db_reg;
2589 2649
2650 if (lancer_chip(adapter)) {
2651 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
2652 pci_resource_len(adapter->pdev, 0));
2653 if (addr == NULL)
2654 return -ENOMEM;
2655 adapter->db = addr;
2656 return 0;
2657 }
2658
2590 if (be_physfn(adapter)) { 2659 if (be_physfn(adapter)) {
2591 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2), 2660 addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
2592 pci_resource_len(adapter->pdev, 2)); 2661 pci_resource_len(adapter->pdev, 2));
@@ -2677,7 +2746,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
2677 } 2746 }
2678 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size); 2747 memset(mc_cmd_mem->va, 0, mc_cmd_mem->size);
2679 2748
2680 spin_lock_init(&adapter->mbox_lock); 2749 mutex_init(&adapter->mbox_lock);
2681 spin_lock_init(&adapter->mcc_lock); 2750 spin_lock_init(&adapter->mcc_lock);
2682 spin_lock_init(&adapter->mcc_cq_lock); 2751 spin_lock_init(&adapter->mcc_cq_lock);
2683 2752
@@ -2783,6 +2852,44 @@ static int be_get_config(struct be_adapter *adapter)
2783 return 0; 2852 return 0;
2784} 2853}
2785 2854
2855static int be_dev_family_check(struct be_adapter *adapter)
2856{
2857 struct pci_dev *pdev = adapter->pdev;
2858 u32 sli_intf = 0, if_type;
2859
2860 switch (pdev->device) {
2861 case BE_DEVICE_ID1:
2862 case OC_DEVICE_ID1:
2863 adapter->generation = BE_GEN2;
2864 break;
2865 case BE_DEVICE_ID2:
2866 case OC_DEVICE_ID2:
2867 adapter->generation = BE_GEN3;
2868 break;
2869 case OC_DEVICE_ID3:
2870 pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
2871 if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
2872 SLI_INTF_IF_TYPE_SHIFT;
2873
2874 if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
2875 if_type != 0x02) {
2876 dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
2877 return -EINVAL;
2878 }
2879 if (num_vfs > 0) {
2880 dev_err(&pdev->dev, "VFs not supported\n");
2881 return -EINVAL;
2882 }
2883 adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
2884 SLI_INTF_FAMILY_SHIFT);
2885 adapter->generation = BE_GEN3;
2886 break;
2887 default:
2888 adapter->generation = 0;
2889 }
2890 return 0;
2891}
2892
2786static int __devinit be_probe(struct pci_dev *pdev, 2893static int __devinit be_probe(struct pci_dev *pdev,
2787 const struct pci_device_id *pdev_id) 2894 const struct pci_device_id *pdev_id)
2788{ 2895{
@@ -2805,22 +2912,13 @@ static int __devinit be_probe(struct pci_dev *pdev,
2805 goto rel_reg; 2912 goto rel_reg;
2806 } 2913 }
2807 adapter = netdev_priv(netdev); 2914 adapter = netdev_priv(netdev);
2808
2809 switch (pdev->device) {
2810 case BE_DEVICE_ID1:
2811 case OC_DEVICE_ID1:
2812 adapter->generation = BE_GEN2;
2813 break;
2814 case BE_DEVICE_ID2:
2815 case OC_DEVICE_ID2:
2816 adapter->generation = BE_GEN3;
2817 break;
2818 default:
2819 adapter->generation = 0;
2820 }
2821
2822 adapter->pdev = pdev; 2915 adapter->pdev = pdev;
2823 pci_set_drvdata(pdev, adapter); 2916 pci_set_drvdata(pdev, adapter);
2917
2918 status = be_dev_family_check(adapter);
2919 if (status)
2920 goto free_netdev;
2921
2824 adapter->netdev = netdev; 2922 adapter->netdev = netdev;
2825 SET_NETDEV_DEV(netdev, &pdev->dev); 2923 SET_NETDEV_DEV(netdev, &pdev->dev);
2826 2924
@@ -2895,7 +2993,7 @@ ctrl_clean:
2895 be_ctrl_cleanup(adapter); 2993 be_ctrl_cleanup(adapter);
2896free_netdev: 2994free_netdev:
2897 be_sriov_disable(adapter); 2995 be_sriov_disable(adapter);
2898 free_netdev(adapter->netdev); 2996 free_netdev(netdev);
2899 pci_set_drvdata(pdev, NULL); 2997 pci_set_drvdata(pdev, NULL);
2900rel_reg: 2998rel_reg:
2901 pci_release_regions(pdev); 2999 pci_release_regions(pdev);
diff --git a/drivers/net/bfin_mac.c b/drivers/net/bfin_mac.c
index ce1e5e9d06f6..0b9fc5173aef 100644
--- a/drivers/net/bfin_mac.c
+++ b/drivers/net/bfin_mac.c
@@ -8,6 +8,11 @@
8 * Licensed under the GPL-2 or later. 8 * Licensed under the GPL-2 or later.
9 */ 9 */
10 10
11#define DRV_VERSION "1.1"
12#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
11#include <linux/init.h> 16#include <linux/init.h>
12#include <linux/module.h> 17#include <linux/module.h>
13#include <linux/kernel.h> 18#include <linux/kernel.h>
@@ -41,12 +46,7 @@
41 46
42#include "bfin_mac.h" 47#include "bfin_mac.h"
43 48
44#define DRV_NAME "bfin_mac" 49MODULE_AUTHOR("Bryan Wu, Luke Yang");
45#define DRV_VERSION "1.1"
46#define DRV_AUTHOR "Bryan Wu, Luke Yang"
47#define DRV_DESC "Blackfin on-chip Ethernet MAC driver"
48
49MODULE_AUTHOR(DRV_AUTHOR);
50MODULE_LICENSE("GPL"); 50MODULE_LICENSE("GPL");
51MODULE_DESCRIPTION(DRV_DESC); 51MODULE_DESCRIPTION(DRV_DESC);
52MODULE_ALIAS("platform:bfin_mac"); 52MODULE_ALIAS("platform:bfin_mac");
@@ -189,8 +189,7 @@ static int desc_list_init(void)
189 /* allocate a new skb for next time receive */ 189 /* allocate a new skb for next time receive */
190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 190 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
191 if (!new_skb) { 191 if (!new_skb) {
192 printk(KERN_NOTICE DRV_NAME 192 pr_notice("init: low on mem - packet dropped\n");
193 ": init: low on mem - packet dropped\n");
194 goto init_error; 193 goto init_error;
195 } 194 }
196 skb_reserve(new_skb, NET_IP_ALIGN); 195 skb_reserve(new_skb, NET_IP_ALIGN);
@@ -240,7 +239,7 @@ static int desc_list_init(void)
240 239
241init_error: 240init_error:
242 desc_list_free(); 241 desc_list_free();
243 printk(KERN_ERR DRV_NAME ": kmalloc failed\n"); 242 pr_err("kmalloc failed\n");
244 return -ENOMEM; 243 return -ENOMEM;
245} 244}
246 245
@@ -259,8 +258,7 @@ static int bfin_mdio_poll(void)
259 while ((bfin_read_EMAC_STAADD()) & STABUSY) { 258 while ((bfin_read_EMAC_STAADD()) & STABUSY) {
260 udelay(1); 259 udelay(1);
261 if (timeout_cnt-- < 0) { 260 if (timeout_cnt-- < 0) {
262 printk(KERN_ERR DRV_NAME 261 pr_err("wait MDC/MDIO transaction to complete timeout\n");
263 ": wait MDC/MDIO transaction to complete timeout\n");
264 return -ETIMEDOUT; 262 return -ETIMEDOUT;
265 } 263 }
266 } 264 }
@@ -350,9 +348,9 @@ static void bfin_mac_adjust_link(struct net_device *dev)
350 opmode &= ~RMII_10; 348 opmode &= ~RMII_10;
351 break; 349 break;
352 default: 350 default:
353 printk(KERN_WARNING 351 netdev_warn(dev,
354 "%s: Ack! Speed (%d) is not 10/100!\n", 352 "Ack! Speed (%d) is not 10/100!\n",
355 DRV_NAME, phydev->speed); 353 phydev->speed);
356 break; 354 break;
357 } 355 }
358 bfin_write_EMAC_OPMODE(opmode); 356 bfin_write_EMAC_OPMODE(opmode);
@@ -417,14 +415,13 @@ static int mii_probe(struct net_device *dev, int phy_mode)
417 415
418 /* now we are supposed to have a proper phydev, to attach to... */ 416 /* now we are supposed to have a proper phydev, to attach to... */
419 if (!phydev) { 417 if (!phydev) {
420 printk(KERN_INFO "%s: Don't found any phy device at all\n", 418 netdev_err(dev, "no phy device found\n");
421 dev->name);
422 return -ENODEV; 419 return -ENODEV;
423 } 420 }
424 421
425 if (phy_mode != PHY_INTERFACE_MODE_RMII && 422 if (phy_mode != PHY_INTERFACE_MODE_RMII &&
426 phy_mode != PHY_INTERFACE_MODE_MII) { 423 phy_mode != PHY_INTERFACE_MODE_MII) {
427 printk(KERN_INFO "%s: Invalid phy interface mode\n", dev->name); 424 netdev_err(dev, "invalid phy interface mode\n");
428 return -EINVAL; 425 return -EINVAL;
429 } 426 }
430 427
@@ -432,7 +429,7 @@ static int mii_probe(struct net_device *dev, int phy_mode)
432 0, phy_mode); 429 0, phy_mode);
433 430
434 if (IS_ERR(phydev)) { 431 if (IS_ERR(phydev)) {
435 printk(KERN_ERR "%s: Could not attach to PHY\n", dev->name); 432 netdev_err(dev, "could not attach PHY\n");
436 return PTR_ERR(phydev); 433 return PTR_ERR(phydev);
437 } 434 }
438 435
@@ -453,11 +450,10 @@ static int mii_probe(struct net_device *dev, int phy_mode)
453 lp->old_duplex = -1; 450 lp->old_duplex = -1;
454 lp->phydev = phydev; 451 lp->phydev = phydev;
455 452
456 printk(KERN_INFO "%s: attached PHY driver [%s] " 453 pr_info("attached PHY driver [%s] "
457 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)" 454 "(mii_bus:phy_addr=%s, irq=%d, mdc_clk=%dHz(mdc_div=%d)@sclk=%dMHz)\n",
458 "@sclk=%dMHz)\n", 455 phydev->drv->name, dev_name(&phydev->dev), phydev->irq,
459 DRV_NAME, phydev->drv->name, dev_name(&phydev->dev), phydev->irq, 456 MDC_CLK, mdc_div, sclk/1000000);
460 MDC_CLK, mdc_div, sclk/1000000);
461 457
462 return 0; 458 return 0;
463} 459}
@@ -502,7 +498,7 @@ bfin_mac_ethtool_setsettings(struct net_device *dev, struct ethtool_cmd *cmd)
502static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev, 498static void bfin_mac_ethtool_getdrvinfo(struct net_device *dev,
503 struct ethtool_drvinfo *info) 499 struct ethtool_drvinfo *info)
504{ 500{
505 strcpy(info->driver, DRV_NAME); 501 strcpy(info->driver, KBUILD_MODNAME);
506 strcpy(info->version, DRV_VERSION); 502 strcpy(info->version, DRV_VERSION);
507 strcpy(info->fw_version, "N/A"); 503 strcpy(info->fw_version, "N/A");
508 strcpy(info->bus_info, dev_name(&dev->dev)); 504 strcpy(info->bus_info, dev_name(&dev->dev));
@@ -562,7 +558,7 @@ static const struct ethtool_ops bfin_mac_ethtool_ops = {
562}; 558};
563 559
564/**************************************************************************/ 560/**************************************************************************/
565void setup_system_regs(struct net_device *dev) 561static void setup_system_regs(struct net_device *dev)
566{ 562{
567 struct bfin_mac_local *lp = netdev_priv(dev); 563 struct bfin_mac_local *lp = netdev_priv(dev);
568 int i; 564 int i;
@@ -592,6 +588,10 @@ void setup_system_regs(struct net_device *dev)
592 588
593 bfin_write_EMAC_MMC_CTL(RSTC | CROLL); 589 bfin_write_EMAC_MMC_CTL(RSTC | CROLL);
594 590
591 /* Set vlan regs to let 1522 bytes long packets pass through */
592 bfin_write_EMAC_VLAN1(lp->vlan1_mask);
593 bfin_write_EMAC_VLAN2(lp->vlan2_mask);
594
595 /* Initialize the TX DMA channel registers */ 595 /* Initialize the TX DMA channel registers */
596 bfin_write_DMA2_X_COUNT(0); 596 bfin_write_DMA2_X_COUNT(0);
597 bfin_write_DMA2_X_MODIFY(4); 597 bfin_write_DMA2_X_MODIFY(4);
@@ -827,8 +827,7 @@ static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt)) 827 while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
828 udelay(1); 828 udelay(1);
829 if (timeout_cnt == 0) 829 if (timeout_cnt == 0)
830 printk(KERN_ERR DRV_NAME 830 netdev_err(netdev, "timestamp the TX packet failed\n");
831 ": fails to timestamp the TX packet\n");
832 else { 831 else {
833 struct skb_shared_hwtstamps shhwtstamps; 832 struct skb_shared_hwtstamps shhwtstamps;
834 u64 ns; 833 u64 ns;
@@ -1083,8 +1082,7 @@ static void bfin_mac_rx(struct net_device *dev)
1083 * we which case we simply drop the packet 1082 * we which case we simply drop the packet
1084 */ 1083 */
1085 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) { 1084 if (current_rx_ptr->status.status_word & RX_ERROR_MASK) {
1086 printk(KERN_NOTICE DRV_NAME 1085 netdev_notice(dev, "rx: receive error - packet dropped\n");
1087 ": rx: receive error - packet dropped\n");
1088 dev->stats.rx_dropped++; 1086 dev->stats.rx_dropped++;
1089 goto out; 1087 goto out;
1090 } 1088 }
@@ -1094,8 +1092,7 @@ static void bfin_mac_rx(struct net_device *dev)
1094 1092
1095 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN); 1093 new_skb = dev_alloc_skb(PKT_BUF_SZ + NET_IP_ALIGN);
1096 if (!new_skb) { 1094 if (!new_skb) {
1097 printk(KERN_NOTICE DRV_NAME 1095 netdev_notice(dev, "rx: low on mem - packet dropped\n");
1098 ": rx: low on mem - packet dropped\n");
1099 dev->stats.rx_dropped++; 1096 dev->stats.rx_dropped++;
1100 goto out; 1097 goto out;
1101 } 1098 }
@@ -1213,7 +1210,7 @@ static int bfin_mac_enable(struct phy_device *phydev)
1213 int ret; 1210 int ret;
1214 u32 opmode; 1211 u32 opmode;
1215 1212
1216 pr_debug("%s: %s\n", DRV_NAME, __func__); 1213 pr_debug("%s\n", __func__);
1217 1214
1218 /* Set RX DMA */ 1215 /* Set RX DMA */
1219 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a)); 1216 bfin_write_DMA1_NEXT_DESC_PTR(&(rx_list_head->desc_a));
@@ -1323,7 +1320,7 @@ static void bfin_mac_set_multicast_list(struct net_device *dev)
1323 u32 sysctl; 1320 u32 sysctl;
1324 1321
1325 if (dev->flags & IFF_PROMISC) { 1322 if (dev->flags & IFF_PROMISC) {
1326 printk(KERN_INFO "%s: set to promisc mode\n", dev->name); 1323 netdev_info(dev, "set promisc mode\n");
1327 sysctl = bfin_read_EMAC_OPMODE(); 1324 sysctl = bfin_read_EMAC_OPMODE();
1328 sysctl |= PR; 1325 sysctl |= PR;
1329 bfin_write_EMAC_OPMODE(sysctl); 1326 bfin_write_EMAC_OPMODE(sysctl);
@@ -1393,7 +1390,7 @@ static int bfin_mac_open(struct net_device *dev)
1393 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx 1390 * address using ifconfig eth0 hw ether xx:xx:xx:xx:xx:xx
1394 */ 1391 */
1395 if (!is_valid_ether_addr(dev->dev_addr)) { 1392 if (!is_valid_ether_addr(dev->dev_addr)) {
1396 printk(KERN_WARNING DRV_NAME ": no valid ethernet hw addr\n"); 1393 netdev_warn(dev, "no valid ethernet hw addr\n");
1397 return -EINVAL; 1394 return -EINVAL;
1398 } 1395 }
1399 1396
@@ -1527,6 +1524,9 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1527 goto out_err_mii_probe; 1524 goto out_err_mii_probe;
1528 } 1525 }
1529 1526
1527 lp->vlan1_mask = ETH_P_8021Q | mii_bus_data->vlan1_mask;
1528 lp->vlan2_mask = ETH_P_8021Q | mii_bus_data->vlan2_mask;
1529
1530 /* Fill in the fields of the device structure with ethernet values. */ 1530 /* Fill in the fields of the device structure with ethernet values. */
1531 ether_setup(ndev); 1531 ether_setup(ndev);
1532 1532
@@ -1558,7 +1558,7 @@ static int __devinit bfin_mac_probe(struct platform_device *pdev)
1558 bfin_mac_hwtstamp_init(ndev); 1558 bfin_mac_hwtstamp_init(ndev);
1559 1559
1560 /* now, print out the card info, in a short format.. */ 1560 /* now, print out the card info, in a short format.. */
1561 dev_info(&pdev->dev, "%s, Version %s\n", DRV_DESC, DRV_VERSION); 1561 netdev_info(ndev, "%s, Version %s\n", DRV_DESC, DRV_VERSION);
1562 1562
1563 return 0; 1563 return 0;
1564 1564
@@ -1650,7 +1650,7 @@ static int __devinit bfin_mii_bus_probe(struct platform_device *pdev)
1650 * so set the GPIO pins to Ethernet mode 1650 * so set the GPIO pins to Ethernet mode
1651 */ 1651 */
1652 pin_req = mii_bus_pd->mac_peripherals; 1652 pin_req = mii_bus_pd->mac_peripherals;
1653 rc = peripheral_request_list(pin_req, DRV_NAME); 1653 rc = peripheral_request_list(pin_req, KBUILD_MODNAME);
1654 if (rc) { 1654 if (rc) {
1655 dev_err(&pdev->dev, "Requesting peripherals failed!\n"); 1655 dev_err(&pdev->dev, "Requesting peripherals failed!\n");
1656 return rc; 1656 return rc;
@@ -1739,7 +1739,7 @@ static struct platform_driver bfin_mac_driver = {
1739 .resume = bfin_mac_resume, 1739 .resume = bfin_mac_resume,
1740 .suspend = bfin_mac_suspend, 1740 .suspend = bfin_mac_suspend,
1741 .driver = { 1741 .driver = {
1742 .name = DRV_NAME, 1742 .name = KBUILD_MODNAME,
1743 .owner = THIS_MODULE, 1743 .owner = THIS_MODULE,
1744 }, 1744 },
1745}; 1745};
diff --git a/drivers/net/bfin_mac.h b/drivers/net/bfin_mac.h
index aed68bed2365..f8559ac9a403 100644
--- a/drivers/net/bfin_mac.h
+++ b/drivers/net/bfin_mac.h
@@ -17,7 +17,14 @@
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/bfin_mac.h> 18#include <linux/bfin_mac.h>
19 19
20/*
21 * Disable hardware checksum for bug #5600 if writeback cache is
22 * enabled. Otherwise, corrupted RX packets will be sent up the stack
23 * without an error mark.
24 */
25#ifndef CONFIG_BFIN_EXTMEM_WRITEBACK
20#define BFIN_MAC_CSUM_OFFLOAD 26#define BFIN_MAC_CSUM_OFFLOAD
27#endif
21 28
22#define TX_RECLAIM_JIFFIES (HZ / 5) 29#define TX_RECLAIM_JIFFIES (HZ / 5)
23 30
@@ -68,7 +75,6 @@ struct bfin_mac_local {
68 */ 75 */
69 struct net_device_stats stats; 76 struct net_device_stats stats;
70 77
71 unsigned char Mac[6]; /* MAC address of the board */
72 spinlock_t lock; 78 spinlock_t lock;
73 79
74 int wol; /* Wake On Lan */ 80 int wol; /* Wake On Lan */
@@ -76,6 +82,9 @@ struct bfin_mac_local {
76 struct timer_list tx_reclaim_timer; 82 struct timer_list tx_reclaim_timer;
77 struct net_device *ndev; 83 struct net_device *ndev;
78 84
85 /* Data for EMAC_VLAN1 regs */
86 u16 vlan1_mask, vlan2_mask;
87
79 /* MII and PHY stuffs */ 88 /* MII and PHY stuffs */
80 int old_link; /* used by bf537_adjust_link */ 89 int old_link; /* used by bf537_adjust_link */
81 int old_speed; 90 int old_speed;
diff --git a/drivers/net/bna/bfa_defs.h b/drivers/net/bna/bfa_defs.h
index 29c1b8de2c2d..2ea0dfe1cedc 100644
--- a/drivers/net/bna/bfa_defs.h
+++ b/drivers/net/bna/bfa_defs.h
@@ -112,16 +112,18 @@ struct bfa_ioc_pci_attr {
112 * IOC states 112 * IOC states
113 */ 113 */
114enum bfa_ioc_state { 114enum bfa_ioc_state {
115 BFA_IOC_RESET = 1, /*!< IOC is in reset state */ 115 BFA_IOC_UNINIT = 1, /*!< IOC is in uninit state */
116 BFA_IOC_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */ 116 BFA_IOC_RESET = 2, /*!< IOC is in reset state */
117 BFA_IOC_HWINIT = 3, /*!< IOC h/w is being initialized */ 117 BFA_IOC_SEMWAIT = 3, /*!< Waiting for IOC h/w semaphore */
118 BFA_IOC_GETATTR = 4, /*!< IOC is being configured */ 118 BFA_IOC_HWINIT = 4, /*!< IOC h/w is being initialized */
119 BFA_IOC_OPERATIONAL = 5, /*!< IOC is operational */ 119 BFA_IOC_GETATTR = 5, /*!< IOC is being configured */
120 BFA_IOC_INITFAIL = 6, /*!< IOC hardware failure */ 120 BFA_IOC_OPERATIONAL = 6, /*!< IOC is operational */
121 BFA_IOC_HBFAIL = 7, /*!< IOC heart-beat failure */ 121 BFA_IOC_INITFAIL = 7, /*!< IOC hardware failure */
122 BFA_IOC_DISABLING = 8, /*!< IOC is being disabled */ 122 BFA_IOC_FAIL = 8, /*!< IOC heart-beat failure */
123 BFA_IOC_DISABLED = 9, /*!< IOC is disabled */ 123 BFA_IOC_DISABLING = 9, /*!< IOC is being disabled */
124 BFA_IOC_FWMISMATCH = 10, /*!< IOC f/w different from drivers */ 124 BFA_IOC_DISABLED = 10, /*!< IOC is disabled */
125 BFA_IOC_FWMISMATCH = 11, /*!< IOC f/w different from drivers */
126 BFA_IOC_ENABLING = 12, /*!< IOC is being enabled */
125}; 127};
126 128
127/** 129/**
diff --git a/drivers/net/bna/bfa_defs_mfg_comm.h b/drivers/net/bna/bfa_defs_mfg_comm.h
index 987978fcb3fe..fdd677618361 100644
--- a/drivers/net/bna/bfa_defs_mfg_comm.h
+++ b/drivers/net/bna/bfa_defs_mfg_comm.h
@@ -95,28 +95,6 @@ enum {
95 (type) == BFA_MFG_TYPE_CNA10P1 || \ 95 (type) == BFA_MFG_TYPE_CNA10P1 || \
96 bfa_mfg_is_mezz(type))) 96 bfa_mfg_is_mezz(type)))
97 97
98/**
99 * Check if the card having old wwn/mac handling
100 */
101#define bfa_mfg_is_old_wwn_mac_model(type) (( \
102 (type) == BFA_MFG_TYPE_FC8P2 || \
103 (type) == BFA_MFG_TYPE_FC8P1 || \
104 (type) == BFA_MFG_TYPE_FC4P2 || \
105 (type) == BFA_MFG_TYPE_FC4P1 || \
106 (type) == BFA_MFG_TYPE_CNA10P2 || \
107 (type) == BFA_MFG_TYPE_CNA10P1 || \
108 (type) == BFA_MFG_TYPE_JAYHAWK || \
109 (type) == BFA_MFG_TYPE_WANCHESE))
110
111#define bfa_mfg_increment_wwn_mac(m, i) \
112do { \
113 u32 t = ((m)[0] << 16) | ((m)[1] << 8) | (m)[2]; \
114 t += (i); \
115 (m)[0] = (t >> 16) & 0xFF; \
116 (m)[1] = (t >> 8) & 0xFF; \
117 (m)[2] = t & 0xFF; \
118} while (0)
119
120#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \ 98#define bfa_mfg_adapter_prop_init_flash(card_type, prop) \
121do { \ 99do { \
122 switch ((card_type)) { \ 100 switch ((card_type)) { \
diff --git a/drivers/net/bna/bfa_ioc.c b/drivers/net/bna/bfa_ioc.c
index e94e5aa97515..34933cb9569f 100644
--- a/drivers/net/bna/bfa_ioc.c
+++ b/drivers/net/bna/bfa_ioc.c
@@ -26,25 +26,6 @@
26 * IOC local definitions 26 * IOC local definitions
27 */ 27 */
28 28
29#define bfa_ioc_timer_start(__ioc) \
30 mod_timer(&(__ioc)->ioc_timer, jiffies + \
31 msecs_to_jiffies(BFA_IOC_TOV))
32#define bfa_ioc_timer_stop(__ioc) del_timer(&(__ioc)->ioc_timer)
33
34#define bfa_ioc_recovery_timer_start(__ioc) \
35 mod_timer(&(__ioc)->ioc_timer, jiffies + \
36 msecs_to_jiffies(BFA_IOC_TOV_RECOVER))
37
38#define bfa_sem_timer_start(__ioc) \
39 mod_timer(&(__ioc)->sem_timer, jiffies + \
40 msecs_to_jiffies(BFA_IOC_HWSEM_TOV))
41#define bfa_sem_timer_stop(__ioc) del_timer(&(__ioc)->sem_timer)
42
43#define bfa_hb_timer_start(__ioc) \
44 mod_timer(&(__ioc)->hb_timer, jiffies + \
45 msecs_to_jiffies(BFA_IOC_HB_TOV))
46#define bfa_hb_timer_stop(__ioc) del_timer(&(__ioc)->hb_timer)
47
48/** 29/**
49 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details. 30 * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
50 */ 31 */
@@ -55,11 +36,16 @@
55 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc)) 36 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
56#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc)) 37#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
57#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc)) 38#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
58#define bfa_ioc_notify_hbfail(__ioc) \ 39#define bfa_ioc_notify_fail(__ioc) \
59 ((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc)) 40 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
60 41#define bfa_ioc_sync_join(__ioc) \
61#define bfa_ioc_is_optrom(__ioc) \ 42 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
62 (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(__ioc)) < BFA_IOC_FWIMG_MINSZ) 43#define bfa_ioc_sync_leave(__ioc) \
44 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
45#define bfa_ioc_sync_ack(__ioc) \
46 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
47#define bfa_ioc_sync_complete(__ioc) \
48 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
63 49
64#define bfa_ioc_mbox_cmd_pending(__ioc) \ 50#define bfa_ioc_mbox_cmd_pending(__ioc) \
65 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \ 51 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
@@ -85,6 +71,12 @@ static void bfa_ioc_recover(struct bfa_ioc *ioc);
85static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc); 71static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
86static void bfa_ioc_disable_comp(struct bfa_ioc *ioc); 72static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
87static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc); 73static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
74static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
75static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
76static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
77static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
78static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
79static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
88static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, 80static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
89 u32 boot_param); 81 u32 boot_param);
90static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr); 82static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
@@ -101,72 +93,173 @@ static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
101 char *manufacturer); 93 char *manufacturer);
102static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model); 94static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
103static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc); 95static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
104static mac_t bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc);
105 96
106/** 97/**
107 * IOC state machine events 98 * IOC state machine definitions/declarations
108 */ 99 */
109enum ioc_event { 100enum ioc_event {
110 IOC_E_ENABLE = 1, /*!< IOC enable request */ 101 IOC_E_RESET = 1, /*!< IOC reset request */
111 IOC_E_DISABLE = 2, /*!< IOC disable request */ 102 IOC_E_ENABLE = 2, /*!< IOC enable request */
112 IOC_E_TIMEOUT = 3, /*!< f/w response timeout */ 103 IOC_E_DISABLE = 3, /*!< IOC disable request */
113 IOC_E_FWREADY = 4, /*!< f/w initialization done */ 104 IOC_E_DETACH = 4, /*!< driver detach cleanup */
114 IOC_E_FWRSP_GETATTR = 5, /*!< IOC get attribute response */ 105 IOC_E_ENABLED = 5, /*!< f/w enabled */
115 IOC_E_FWRSP_ENABLE = 6, /*!< enable f/w response */ 106 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
116 IOC_E_FWRSP_DISABLE = 7, /*!< disable f/w response */ 107 IOC_E_DISABLED = 7, /*!< f/w disabled */
117 IOC_E_HBFAIL = 8, /*!< heartbeat failure */ 108 IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
118 IOC_E_HWERROR = 9, /*!< hardware error interrupt */ 109 IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */
119 IOC_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */ 110 IOC_E_HBFAIL = 10, /*!< heartbeat failure */
120 IOC_E_DETACH = 11, /*!< driver detach cleanup */ 111 IOC_E_HWERROR = 11, /*!< hardware error interrupt */
112 IOC_E_TIMEOUT = 12, /*!< timeout */
121}; 113};
122 114
115bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
123bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event); 116bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
124bfa_fsm_state_decl(bfa_ioc, fwcheck, struct bfa_ioc, enum ioc_event);
125bfa_fsm_state_decl(bfa_ioc, mismatch, struct bfa_ioc, enum ioc_event);
126bfa_fsm_state_decl(bfa_ioc, semwait, struct bfa_ioc, enum ioc_event);
127bfa_fsm_state_decl(bfa_ioc, hwinit, struct bfa_ioc, enum ioc_event);
128bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event); 117bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
129bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event); 118bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
130bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event); 119bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
131bfa_fsm_state_decl(bfa_ioc, initfail, struct bfa_ioc, enum ioc_event); 120bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
132bfa_fsm_state_decl(bfa_ioc, hbfail, struct bfa_ioc, enum ioc_event); 121bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
133bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event); 122bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
134bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event); 123bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
135 124
136static struct bfa_sm_table ioc_sm_table[] = { 125static struct bfa_sm_table ioc_sm_table[] = {
126 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
137 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET}, 127 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
138 {BFA_SM(bfa_ioc_sm_fwcheck), BFA_IOC_FWMISMATCH}, 128 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
139 {BFA_SM(bfa_ioc_sm_mismatch), BFA_IOC_FWMISMATCH},
140 {BFA_SM(bfa_ioc_sm_semwait), BFA_IOC_SEMWAIT},
141 {BFA_SM(bfa_ioc_sm_hwinit), BFA_IOC_HWINIT},
142 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_HWINIT},
143 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR}, 129 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
144 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL}, 130 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
145 {BFA_SM(bfa_ioc_sm_initfail), BFA_IOC_INITFAIL}, 131 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
146 {BFA_SM(bfa_ioc_sm_hbfail), BFA_IOC_HBFAIL}, 132 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
147 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING}, 133 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
148 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED}, 134 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
149}; 135};
150 136
151/** 137/**
138 * IOCPF state machine definitions/declarations
139 */
140
141/*
142 * Forward declarations for iocpf state machine
143 */
144static void bfa_iocpf_enable(struct bfa_ioc *ioc);
145static void bfa_iocpf_disable(struct bfa_ioc *ioc);
146static void bfa_iocpf_fail(struct bfa_ioc *ioc);
147static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
148static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
149static void bfa_iocpf_stop(struct bfa_ioc *ioc);
150
151/**
152 * IOCPF state machine events
153 */
154enum iocpf_event {
155 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
156 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
157 IOCPF_E_STOP = 3, /*!< stop on driver detach */
158 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
159 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
160 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
161 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
162 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
163 IOCPF_E_GETATTRFAIL = 9, /*!< init fail notice by ioc sm */
164 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
165 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
166};
167
168/**
169 * IOCPF states
170 */
171enum bfa_iocpf_state {
172 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
173 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
174 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
175 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
176 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF failed */
177 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
178 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
179 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
180 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
181};
182
183bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
184bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
185bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
186bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
187bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
188bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
189bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
190bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
191 enum iocpf_event);
192bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
193bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
194bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
195bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
196bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
197 enum iocpf_event);
198bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
199
200static struct bfa_sm_table iocpf_sm_table[] = {
201 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
202 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
203 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
204 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
205 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
206 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
207 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
208 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
209 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
210 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
211 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
212 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
213 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
214 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
215};
216
217/**
218 * IOC State Machine
219 */
220
221/**
222 * Beginning state. IOC uninit state.
223 */
224static void
225bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
226{
227}
228
229/**
230 * IOC is in uninit state.
231 */
232static void
233bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
234{
235 switch (event) {
236 case IOC_E_RESET:
237 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
238 break;
239
240 default:
241 bfa_sm_fault(ioc, event);
242 }
243}
244
245/**
152 * Reset entry actions -- initialize state machine 246 * Reset entry actions -- initialize state machine
153 */ 247 */
154static void 248static void
155bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc) 249bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
156{ 250{
157 ioc->retry_count = 0; 251 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
158 ioc->auto_recover = bfa_nw_auto_recover;
159} 252}
160 253
161/** 254/**
162 * Beginning state. IOC is in reset state. 255 * IOC is in reset state.
163 */ 256 */
164static void 257static void
165bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event) 258bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
166{ 259{
167 switch (event) { 260 switch (event) {
168 case IOC_E_ENABLE: 261 case IOC_E_ENABLE:
169 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 262 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
170 break; 263 break;
171 264
172 case IOC_E_DISABLE: 265 case IOC_E_DISABLE:
@@ -174,6 +267,7 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
174 break; 267 break;
175 268
176 case IOC_E_DETACH: 269 case IOC_E_DETACH:
270 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
177 break; 271 break;
178 272
179 default: 273 default:
@@ -181,42 +275,43 @@ bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
181 } 275 }
182} 276}
183 277
184/**
185 * Semaphore should be acquired for version check.
186 */
187static void 278static void
188bfa_ioc_sm_fwcheck_entry(struct bfa_ioc *ioc) 279bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
189{ 280{
190 bfa_ioc_hw_sem_get(ioc); 281 bfa_iocpf_enable(ioc);
191} 282}
192 283
193/** 284/**
194 * Awaiting h/w semaphore to continue with version check. 285 * Host IOC function is being enabled, awaiting response from firmware.
286 * Semaphore is acquired.
195 */ 287 */
196static void 288static void
197bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event) 289bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
198{ 290{
199 switch (event) { 291 switch (event) {
200 case IOC_E_SEMLOCKED: 292 case IOC_E_ENABLED:
201 if (bfa_ioc_firmware_lock(ioc)) { 293 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
202 ioc->retry_count = 0; 294 break;
203 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 295
204 } else { 296 case IOC_E_PFAILED:
205 bfa_nw_ioc_hw_sem_release(ioc); 297 /* !!! fall through !!! */
206 bfa_fsm_set_state(ioc, bfa_ioc_sm_mismatch); 298 case IOC_E_HWERROR:
207 } 299 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
300 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
301 if (event != IOC_E_PFAILED)
302 bfa_iocpf_initfail(ioc);
208 break; 303 break;
209 304
210 case IOC_E_DISABLE: 305 case IOC_E_DISABLE:
211 bfa_ioc_disable_comp(ioc); 306 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
212 /* fall through */ 307 break;
213 308
214 case IOC_E_DETACH: 309 case IOC_E_DETACH:
215 bfa_ioc_hw_sem_get_cancel(ioc); 310 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
216 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 311 bfa_iocpf_stop(ioc);
217 break; 312 break;
218 313
219 case IOC_E_FWREADY: 314 case IOC_E_ENABLE:
220 break; 315 break;
221 316
222 default: 317 default:
@@ -225,41 +320,85 @@ bfa_ioc_sm_fwcheck(struct bfa_ioc *ioc, enum ioc_event event)
225} 320}
226 321
227/** 322/**
228 * Notify enable completion callback and generate mismatch AEN. 323 * Semaphore should be acquired for version check.
229 */ 324 */
230static void 325static void
231bfa_ioc_sm_mismatch_entry(struct bfa_ioc *ioc) 326bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
232{ 327{
233 /** 328 mod_timer(&ioc->ioc_timer, jiffies +
234 * Provide enable completion callback and AEN notification only once. 329 msecs_to_jiffies(BFA_IOC_TOV));
235 */ 330 bfa_ioc_send_getattr(ioc);
236 if (ioc->retry_count == 0)
237 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
238 ioc->retry_count++;
239 bfa_ioc_timer_start(ioc);
240} 331}
241 332
242/** 333/**
243 * Awaiting firmware version match. 334 * IOC configuration in progress. Timer is active.
244 */ 335 */
245static void 336static void
246bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event) 337bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
247{ 338{
248 switch (event) { 339 switch (event) {
340 case IOC_E_FWRSP_GETATTR:
341 del_timer(&ioc->ioc_timer);
342 bfa_ioc_check_attr_wwns(ioc);
343 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
344 break;
345
346 case IOC_E_PFAILED:
347 case IOC_E_HWERROR:
348 del_timer(&ioc->ioc_timer);
349 /* fall through */
249 case IOC_E_TIMEOUT: 350 case IOC_E_TIMEOUT:
250 bfa_fsm_set_state(ioc, bfa_ioc_sm_fwcheck); 351 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
352 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
353 if (event != IOC_E_PFAILED)
354 bfa_iocpf_getattrfail(ioc);
251 break; 355 break;
252 356
253 case IOC_E_DISABLE: 357 case IOC_E_DISABLE:
254 bfa_ioc_disable_comp(ioc); 358 del_timer(&ioc->ioc_timer);
255 /* fall through */ 359 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
360 break;
256 361
257 case IOC_E_DETACH: 362 case IOC_E_ENABLE:
258 bfa_ioc_timer_stop(ioc); 363 break;
259 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 364
365 default:
366 bfa_sm_fault(ioc, event);
367 }
368}
369
370static void
371bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
372{
373 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
374 bfa_ioc_hb_monitor(ioc);
375}
376
377static void
378bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
379{
380 switch (event) {
381 case IOC_E_ENABLE:
382 break;
383
384 case IOC_E_DISABLE:
385 bfa_ioc_hb_stop(ioc);
386 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
260 break; 387 break;
261 388
262 case IOC_E_FWREADY: 389 case IOC_E_PFAILED:
390 case IOC_E_HWERROR:
391 bfa_ioc_hb_stop(ioc);
392 /* !!! fall through !!! */
393 case IOC_E_HBFAIL:
394 bfa_ioc_fail_notify(ioc);
395 if (ioc->iocpf.auto_recover)
396 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
397 else
398 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
399
400 if (event != IOC_E_PFAILED)
401 bfa_iocpf_fail(ioc);
263 break; 402 break;
264 403
265 default: 404 default:
@@ -267,30 +406,61 @@ bfa_ioc_sm_mismatch(struct bfa_ioc *ioc, enum ioc_event event)
267 } 406 }
268} 407}
269 408
409static void
410bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
411{
412 bfa_iocpf_disable(ioc);
413}
414
270/** 415/**
271 * Request for semaphore. 416 * IOC is being disabled
272 */ 417 */
273static void 418static void
274bfa_ioc_sm_semwait_entry(struct bfa_ioc *ioc) 419bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
275{ 420{
276 bfa_ioc_hw_sem_get(ioc); 421 switch (event) {
422 case IOC_E_DISABLED:
423 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
424 break;
425
426 case IOC_E_HWERROR:
427 /*
428 * No state change. Will move to disabled state
429 * after iocpf sm completes failure processing and
430 * moves to disabled state.
431 */
432 bfa_iocpf_fail(ioc);
433 break;
434
435 default:
436 bfa_sm_fault(ioc, event);
437 }
277} 438}
278 439
279/** 440/**
280 * Awaiting semaphore for h/w initialzation. 441 * IOC disable completion entry.
281 */ 442 */
282static void 443static void
283bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event) 444bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
445{
446 bfa_ioc_disable_comp(ioc);
447}
448
449static void
450bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
284{ 451{
285 switch (event) { 452 switch (event) {
286 case IOC_E_SEMLOCKED: 453 case IOC_E_ENABLE:
287 ioc->retry_count = 0; 454 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
288 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit);
289 break; 455 break;
290 456
291 case IOC_E_DISABLE: 457 case IOC_E_DISABLE:
292 bfa_ioc_hw_sem_get_cancel(ioc); 458 ioc->cbfn->disable_cbfn(ioc->bfa);
293 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 459 break;
460
461 case IOC_E_DETACH:
462 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
463 bfa_iocpf_stop(ioc);
294 break; 464 break;
295 465
296 default: 466 default:
@@ -299,46 +469,45 @@ bfa_ioc_sm_semwait(struct bfa_ioc *ioc, enum ioc_event event)
299} 469}
300 470
301static void 471static void
302bfa_ioc_sm_hwinit_entry(struct bfa_ioc *ioc) 472bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
303{ 473{
304 bfa_ioc_timer_start(ioc);
305 bfa_ioc_reset(ioc, false);
306} 474}
307 475
308/** 476/**
309 * @brief 477 * Hardware initialization retry.
310 * Hardware is being initialized. Interrupts are enabled.
311 * Holding hardware semaphore lock.
312 */ 478 */
313static void 479static void
314bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event) 480bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
315{ 481{
316 switch (event) { 482 switch (event) {
317 case IOC_E_FWREADY: 483 case IOC_E_ENABLED:
318 bfa_ioc_timer_stop(ioc); 484 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
319 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
320 break; 485 break;
321 486
487 case IOC_E_PFAILED:
322 case IOC_E_HWERROR: 488 case IOC_E_HWERROR:
323 bfa_ioc_timer_stop(ioc); 489 /**
324 /* fall through */ 490 * Initialization retry failed.
491 */
492 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
493 if (event != IOC_E_PFAILED)
494 bfa_iocpf_initfail(ioc);
495 break;
325 496
326 case IOC_E_TIMEOUT: 497 case IOC_E_INITFAILED:
327 ioc->retry_count++; 498 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
328 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 499 break;
329 bfa_ioc_timer_start(ioc);
330 bfa_ioc_reset(ioc, true);
331 break;
332 }
333 500
334 bfa_nw_ioc_hw_sem_release(ioc); 501 case IOC_E_ENABLE:
335 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail);
336 break; 502 break;
337 503
338 case IOC_E_DISABLE: 504 case IOC_E_DISABLE:
339 bfa_nw_ioc_hw_sem_release(ioc); 505 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
340 bfa_ioc_timer_stop(ioc); 506 break;
341 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 507
508 case IOC_E_DETACH:
509 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
510 bfa_iocpf_stop(ioc);
342 break; 511 break;
343 512
344 default: 513 default:
@@ -347,51 +516,248 @@ bfa_ioc_sm_hwinit(struct bfa_ioc *ioc, enum ioc_event event)
347} 516}
348 517
349static void 518static void
350bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc) 519bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
351{ 520{
352 bfa_ioc_timer_start(ioc);
353 bfa_ioc_send_enable(ioc);
354} 521}
355 522
356/** 523/**
357 * Host IOC function is being enabled, awaiting response from firmware. 524 * IOC failure.
358 * Semaphore is acquired.
359 */ 525 */
360static void 526static void
361bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event) 527bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
362{ 528{
363 switch (event) { 529 switch (event) {
364 case IOC_E_FWRSP_ENABLE: 530 case IOC_E_ENABLE:
365 bfa_ioc_timer_stop(ioc); 531 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
366 bfa_nw_ioc_hw_sem_release(ioc); 532 break;
367 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr); 533
534 case IOC_E_DISABLE:
535 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
536 break;
537
538 case IOC_E_DETACH:
539 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
540 bfa_iocpf_stop(ioc);
368 break; 541 break;
369 542
370 case IOC_E_HWERROR: 543 case IOC_E_HWERROR:
371 bfa_ioc_timer_stop(ioc); 544 /* HB failure notification, ignore. */
372 /* fall through */ 545 break;
373 546
374 case IOC_E_TIMEOUT: 547 default:
375 ioc->retry_count++; 548 bfa_sm_fault(ioc, event);
376 if (ioc->retry_count < BFA_IOC_HWINIT_MAX) { 549 }
377 writel(BFI_IOC_UNINIT, 550}
378 ioc->ioc_regs.ioc_fwstate); 551
379 bfa_fsm_set_state(ioc, bfa_ioc_sm_hwinit); 552/**
380 break; 553 * IOCPF State Machine
554 */
555
556/**
557 * Reset entry actions -- initialize state machine
558 */
559static void
560bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
561{
562 iocpf->retry_count = 0;
563 iocpf->auto_recover = bfa_nw_auto_recover;
564}
565
566/**
567 * Beginning state. IOC is in reset state.
568 */
569static void
570bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
571{
572 switch (event) {
573 case IOCPF_E_ENABLE:
574 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
575 break;
576
577 case IOCPF_E_STOP:
578 break;
579
580 default:
581 bfa_sm_fault(iocpf->ioc, event);
582 }
583}
584
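The IOC state machine above and the IOCPF machine introduced below both lean on the driver's bfa_fsm helpers: bfa_fsm_set_state() records the new event handler and runs that state's *_entry() action, and bfa_fsm_send_event() dispatches an event to whichever handler is current. A minimal, self-contained sketch of that pattern follows; the helper names and the demo state are illustrative stand-ins, not the driver's actual macros.

/*
 * Entry-action / event-handler pattern used by the IOC and IOCPF
 * state machines (illustrative stand-ins, not the bfa_fsm macros).
 */
#include <stdio.h>

struct fsm;
typedef void (*fsm_fn)(struct fsm *sm, int event);

struct fsm {
	fsm_fn state;			/* current event handler */
};

/* Setting a state runs its entry action, then routes later events to it. */
#define fsm_set_state(sm, s)	do { (sm)->state = (s); s##_entry(sm); } while (0)
#define fsm_send_event(sm, e)	((sm)->state((sm), (e)))

static void sm_enabled_entry(struct fsm *sm)
{
	printf("enter enabled\n");
}

static void sm_enabled(struct fsm *sm, int event)
{
	printf("enabled: event %d\n", event);
}

int main(void)
{
	struct fsm sm;

	fsm_set_state(&sm, sm_enabled);	/* runs sm_enabled_entry() */
	fsm_send_event(&sm, 1);		/* handled by sm_enabled() */
	return 0;
}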
585/**
586 * Semaphore should be acquired for version check.
587 */
588static void
589bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
590{
591 bfa_ioc_hw_sem_get(iocpf->ioc);
592}
593
594/**
595 * Awaiting h/w semaphore to continue with version check.
596 */
597static void
598bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
599{
600 struct bfa_ioc *ioc = iocpf->ioc;
601
602 switch (event) {
603 case IOCPF_E_SEMLOCKED:
604 if (bfa_ioc_firmware_lock(ioc)) {
605 if (bfa_ioc_sync_complete(ioc)) {
606 iocpf->retry_count = 0;
607 bfa_ioc_sync_join(ioc);
608 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
609 } else {
610 bfa_ioc_firmware_unlock(ioc);
611 bfa_nw_ioc_hw_sem_release(ioc);
612 mod_timer(&ioc->sem_timer, jiffies +
613 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
614 }
615 } else {
616 bfa_nw_ioc_hw_sem_release(ioc);
617 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
381 } 618 }
619 break;
382 620
383 bfa_nw_ioc_hw_sem_release(ioc); 621 case IOCPF_E_DISABLE:
384 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 622 bfa_ioc_hw_sem_get_cancel(ioc);
623 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
624 bfa_ioc_pf_disabled(ioc);
385 break; 625 break;
386 626
387 case IOC_E_DISABLE: 627 case IOCPF_E_STOP:
388 bfa_ioc_timer_stop(ioc); 628 bfa_ioc_hw_sem_get_cancel(ioc);
629 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
630 break;
631
632 default:
633 bfa_sm_fault(ioc, event);
634 }
635}
636
637/**
638 * Notify enable completion callback
639 */
640static void
641bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
642{
643 /* Call only the first time sm enters fwmismatch state. */
644 if (iocpf->retry_count == 0)
645 bfa_ioc_pf_fwmismatch(iocpf->ioc);
646
647 iocpf->retry_count++;
648 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
649 msecs_to_jiffies(BFA_IOC_TOV));
650}
651
652/**
653 * Awaiting firmware version match.
654 */
655static void
656bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
657{
658 struct bfa_ioc *ioc = iocpf->ioc;
659
660 switch (event) {
661 case IOCPF_E_TIMEOUT:
662 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
663 break;
664
665 case IOCPF_E_DISABLE:
666 del_timer(&ioc->iocpf_timer);
667 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
668 bfa_ioc_pf_disabled(ioc);
669 break;
670
671 case IOCPF_E_STOP:
672 del_timer(&ioc->iocpf_timer);
673 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
674 break;
675
676 default:
677 bfa_sm_fault(ioc, event);
678 }
679}
680
681/**
682 * Request for semaphore.
683 */
684static void
685bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
686{
687 bfa_ioc_hw_sem_get(iocpf->ioc);
688}
689
690/**
691 * Awaiting semaphore for h/w initialization.
692 */
693static void
694bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
695{
696 struct bfa_ioc *ioc = iocpf->ioc;
697
698 switch (event) {
699 case IOCPF_E_SEMLOCKED:
700 if (bfa_ioc_sync_complete(ioc)) {
701 bfa_ioc_sync_join(ioc);
702 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
703 } else {
704 bfa_nw_ioc_hw_sem_release(ioc);
705 mod_timer(&ioc->sem_timer, jiffies +
706 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
707 }
708 break;
709
710 case IOCPF_E_DISABLE:
711 bfa_ioc_hw_sem_get_cancel(ioc);
712 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
713 break;
714
715 default:
716 bfa_sm_fault(ioc, event);
717 }
718}
719
720static void
721bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
722{
723 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
724 msecs_to_jiffies(BFA_IOC_TOV));
725 bfa_ioc_reset(iocpf->ioc, 0);
726}
727
728/**
729 * Hardware is being initialized. Interrupts are enabled.
730 * Holding hardware semaphore lock.
731 */
732static void
733bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
734{
735 struct bfa_ioc *ioc = iocpf->ioc;
736
737 switch (event) {
738 case IOCPF_E_FWREADY:
739 del_timer(&ioc->iocpf_timer);
740 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
741 break;
742
743 case IOCPF_E_INITFAIL:
744 del_timer(&ioc->iocpf_timer);
745 /*
746 * !!! fall through !!!
747 */
748
749 case IOCPF_E_TIMEOUT:
389 bfa_nw_ioc_hw_sem_release(ioc); 750 bfa_nw_ioc_hw_sem_release(ioc);
390 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 751 if (event == IOCPF_E_TIMEOUT)
752 bfa_ioc_pf_failed(ioc);
753 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
391 break; 754 break;
392 755
393 case IOC_E_FWREADY: 756 case IOCPF_E_DISABLE:
394 bfa_ioc_send_enable(ioc); 757 del_timer(&ioc->iocpf_timer);
758 bfa_ioc_sync_leave(ioc);
759 bfa_nw_ioc_hw_sem_release(ioc);
760 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
395 break; 761 break;
396 762
397 default: 763 default:
@@ -400,37 +766,49 @@ bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
400} 766}
401 767
402static void 768static void
403bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc) 769bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
404{ 770{
405 bfa_ioc_timer_start(ioc); 771 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
406 bfa_ioc_send_getattr(ioc); 772 msecs_to_jiffies(BFA_IOC_TOV));
773 bfa_ioc_send_enable(iocpf->ioc);
407} 774}
408 775
409/** 776/**
410 * @brief 777 * Host IOC function is being enabled, awaiting response from firmware.
411 * IOC configuration in progress. Timer is active. 778 * Semaphore is acquired.
412 */ 779 */
413static void 780static void
414bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event) 781bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
415{ 782{
783 struct bfa_ioc *ioc = iocpf->ioc;
784
416 switch (event) { 785 switch (event) {
417 case IOC_E_FWRSP_GETATTR: 786 case IOCPF_E_FWRSP_ENABLE:
418 bfa_ioc_timer_stop(ioc); 787 del_timer(&ioc->iocpf_timer);
419 bfa_ioc_check_attr_wwns(ioc); 788 bfa_nw_ioc_hw_sem_release(ioc);
420 bfa_fsm_set_state(ioc, bfa_ioc_sm_op); 789 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
421 break; 790 break;
422 791
423 case IOC_E_HWERROR: 792 case IOCPF_E_INITFAIL:
424 bfa_ioc_timer_stop(ioc); 793 del_timer(&ioc->iocpf_timer);
425 /* fall through */ 794 /*
795 * !!! fall through !!!
796 */
797 case IOCPF_E_TIMEOUT:
798 bfa_nw_ioc_hw_sem_release(ioc);
799 if (event == IOCPF_E_TIMEOUT)
800 bfa_ioc_pf_failed(ioc);
801 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
802 break;
426 803
427 case IOC_E_TIMEOUT: 804 case IOCPF_E_DISABLE:
428 bfa_fsm_set_state(ioc, bfa_ioc_sm_initfail); 805 del_timer(&ioc->iocpf_timer);
806 bfa_nw_ioc_hw_sem_release(ioc);
807 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
429 break; 808 break;
430 809
431 case IOC_E_DISABLE: 810 case IOCPF_E_FWREADY:
432 bfa_ioc_timer_stop(ioc); 811 bfa_ioc_send_enable(ioc);
433 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
434 break; 812 break;
435 813
436 default: 814 default:
@@ -438,36 +816,42 @@ bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
438 } 816 }
439} 817}
440 818
819static bool
820bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
821{
822 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
823}
824
441static void 825static void
442bfa_ioc_sm_op_entry(struct bfa_ioc *ioc) 826bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
443{ 827{
444 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK); 828 bfa_ioc_pf_enabled(iocpf->ioc);
445 bfa_ioc_hb_monitor(ioc);
446} 829}
447 830
448static void 831static void
449bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event) 832bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
450{ 833{
834 struct bfa_ioc *ioc = iocpf->ioc;
835
451 switch (event) { 836 switch (event) {
452 case IOC_E_ENABLE: 837 case IOCPF_E_DISABLE:
838 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
453 break; 839 break;
454 840
455 case IOC_E_DISABLE: 841 case IOCPF_E_GETATTRFAIL:
456 bfa_ioc_hb_stop(ioc); 842 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
457 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
458 break; 843 break;
459 844
460 case IOC_E_HWERROR: 845 case IOCPF_E_FAIL:
461 case IOC_E_FWREADY: 846 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
462 /** 847 break;
463 * Hard error or IOC recovery by other function.
464 * Treat it same as heartbeat failure.
465 */
466 bfa_ioc_hb_stop(ioc);
467 /* !!! fall through !!! */
468 848
469 case IOC_E_HBFAIL: 849 case IOCPF_E_FWREADY:
470 bfa_fsm_set_state(ioc, bfa_ioc_sm_hbfail); 850 bfa_ioc_pf_failed(ioc);
851 if (bfa_nw_ioc_is_operational(ioc))
852 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
853 else
854 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
471 break; 855 break;
472 856
473 default: 857 default:
@@ -476,33 +860,40 @@ bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
476} 860}
477 861
478static void 862static void
479bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc) 863bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
480{ 864{
481 bfa_ioc_timer_start(ioc); 865 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
482 bfa_ioc_send_disable(ioc); 866 msecs_to_jiffies(BFA_IOC_TOV));
867 bfa_ioc_send_disable(iocpf->ioc);
483} 868}
484 869
485/** 870/**
486 * IOC is being disabled 871 * IOC is being disabled
487 */ 872 */
488static void 873static void
489bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event) 874bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
490{ 875{
876 struct bfa_ioc *ioc = iocpf->ioc;
877
491 switch (event) { 878 switch (event) {
492 case IOC_E_FWRSP_DISABLE: 879 case IOCPF_E_FWRSP_DISABLE:
493 bfa_ioc_timer_stop(ioc); 880 case IOCPF_E_FWREADY:
494 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 881 del_timer(&ioc->iocpf_timer);
882 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
495 break; 883 break;
496 884
497 case IOC_E_HWERROR: 885 case IOCPF_E_FAIL:
498 bfa_ioc_timer_stop(ioc); 886 del_timer(&ioc->iocpf_timer);
499 /* 887 /*
500 * !!! fall through !!! 888 * !!! fall through !!!
501 */ 889 */
502 890
503 case IOC_E_TIMEOUT: 891 case IOCPF_E_TIMEOUT:
504 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 892 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
505 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 893 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
894 break;
895
896 case IOCPF_E_FWRSP_ENABLE:
506 break; 897 break;
507 898
508 default: 899 default:
@@ -510,33 +901,58 @@ bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
510 } 901 }
511} 902}
512 903
513/**
514 * IOC disable completion entry.
515 */
516static void 904static void
517bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc) 905bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
518{ 906{
519 bfa_ioc_disable_comp(ioc); 907 bfa_ioc_hw_sem_get(iocpf->ioc);
520} 908}
521 909
910/**
911 * IOC hb ack request is being removed.
912 */
522static void 913static void
523bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event) 914bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
524{ 915{
916 struct bfa_ioc *ioc = iocpf->ioc;
917
525 switch (event) { 918 switch (event) {
526 case IOC_E_ENABLE: 919 case IOCPF_E_SEMLOCKED:
527 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait); 920 bfa_ioc_sync_leave(ioc);
921 bfa_nw_ioc_hw_sem_release(ioc);
922 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
528 break; 923 break;
529 924
530 case IOC_E_DISABLE: 925 case IOCPF_E_FAIL:
531 ioc->cbfn->disable_cbfn(ioc->bfa);
532 break; 926 break;
533 927
534 case IOC_E_FWREADY: 928 default:
929 bfa_sm_fault(ioc, event);
930 }
931}
932
933/**
934 * IOC disable completion entry.
935 */
936static void
937bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
938{
939 bfa_ioc_pf_disabled(iocpf->ioc);
940}
941
942static void
943bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
944{
945 struct bfa_ioc *ioc = iocpf->ioc;
946
947 switch (event) {
948 case IOCPF_E_ENABLE:
949 iocpf->retry_count = 0;
950 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
535 break; 951 break;
536 952
537 case IOC_E_DETACH: 953 case IOCPF_E_STOP:
538 bfa_ioc_firmware_unlock(ioc); 954 bfa_ioc_firmware_unlock(ioc);
539 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 955 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
540 break; 956 break;
541 957
542 default: 958 default:
@@ -545,33 +961,50 @@ bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
545} 961}
546 962
547static void 963static void
548bfa_ioc_sm_initfail_entry(struct bfa_ioc *ioc) 964bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
549{ 965{
550 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 966 bfa_ioc_hw_sem_get(iocpf->ioc);
551 bfa_ioc_timer_start(ioc);
552} 967}
553 968
554/** 969/**
555 * @brief
556 * Hardware initialization failed. 970 * Hardware initialization failed.
557 */ 971 */
558static void 972static void
559bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event) 973bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
560{ 974{
975 struct bfa_ioc *ioc = iocpf->ioc;
976
561 switch (event) { 977 switch (event) {
562 case IOC_E_DISABLE: 978 case IOCPF_E_SEMLOCKED:
563 bfa_ioc_timer_stop(ioc); 979 bfa_ioc_notify_fail(ioc);
564 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled); 980 bfa_ioc_sync_ack(ioc);
981 iocpf->retry_count++;
982 if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
983 bfa_ioc_sync_leave(ioc);
984 bfa_nw_ioc_hw_sem_release(ioc);
985 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
986 } else {
987 if (bfa_ioc_sync_complete(ioc))
988 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
989 else {
990 bfa_nw_ioc_hw_sem_release(ioc);
991 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
992 }
993 }
565 break; 994 break;
566 995
567 case IOC_E_DETACH: 996 case IOCPF_E_DISABLE:
568 bfa_ioc_timer_stop(ioc); 997 bfa_ioc_hw_sem_get_cancel(ioc);
998 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
999 break;
1000
1001 case IOCPF_E_STOP:
1002 bfa_ioc_hw_sem_get_cancel(ioc);
569 bfa_ioc_firmware_unlock(ioc); 1003 bfa_ioc_firmware_unlock(ioc);
570 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1004 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
571 break; 1005 break;
572 1006
573 case IOC_E_TIMEOUT: 1007 case IOCPF_E_FAIL:
574 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
575 break; 1008 break;
576 1009
577 default: 1010 default:
@@ -580,80 +1013,108 @@ bfa_ioc_sm_initfail(struct bfa_ioc *ioc, enum ioc_event event)
580} 1013}
581 1014
582static void 1015static void
583bfa_ioc_sm_hbfail_entry(struct bfa_ioc *ioc) 1016bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
584{ 1017{
585 struct list_head *qe; 1018 bfa_ioc_pf_initfailed(iocpf->ioc);
586 struct bfa_ioc_hbfail_notify *notify; 1019}
587 1020
588 /** 1021/**
589 * Mark IOC as failed in hardware and stop firmware. 1022 * Hardware initialization failed.
590 */ 1023 */
591 bfa_ioc_lpu_stop(ioc); 1024static void
592 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate); 1025bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1026{
1027 struct bfa_ioc *ioc = iocpf->ioc;
593 1028
594 /** 1029 switch (event) {
595 * Notify other functions on HB failure. 1030 case IOCPF_E_DISABLE:
596 */ 1031 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
597 bfa_ioc_notify_hbfail(ioc); 1032 break;
598 1033
599 /** 1034 case IOCPF_E_STOP:
600 * Notify driver and common modules registered for notification. 1035 bfa_ioc_firmware_unlock(ioc);
601 */ 1036 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
602 ioc->cbfn->hbfail_cbfn(ioc->bfa); 1037 break;
603 list_for_each(qe, &ioc->hb_notify_q) { 1038
604 notify = (struct bfa_ioc_hbfail_notify *) qe; 1039 default:
605 notify->cbfn(notify->cbarg); 1040 bfa_sm_fault(ioc, event);
606 } 1041 }
1042}
607 1043
1044static void
1045bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1046{
608 /** 1047 /**
609 * Flush any queued up mailbox requests. 1048 * Mark IOC as failed in hardware and stop firmware.
610 */ 1049 */
611 bfa_ioc_mbox_hbfail(ioc); 1050 bfa_ioc_lpu_stop(iocpf->ioc);
612 1051
613 /** 1052 /**
614 * Trigger auto-recovery after a delay. 1053 * Flush any queued up mailbox requests.
615 */ 1054 */
616 if (ioc->auto_recover) 1055 bfa_ioc_mbox_hbfail(iocpf->ioc);
617 mod_timer(&ioc->ioc_timer, jiffies + 1056 bfa_ioc_hw_sem_get(iocpf->ioc);
618 msecs_to_jiffies(BFA_IOC_TOV_RECOVER));
619} 1057}
620 1058
621/** 1059/**
622 * @brief 1060 * IOC is in failed state.
623 * IOC heartbeat failure.
624 */ 1061 */
625static void 1062static void
626bfa_ioc_sm_hbfail(struct bfa_ioc *ioc, enum ioc_event event) 1063bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
627{ 1064{
628 switch (event) { 1065 struct bfa_ioc *ioc = iocpf->ioc;
629 1066
630 case IOC_E_ENABLE: 1067 switch (event) {
631 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE); 1068 case IOCPF_E_SEMLOCKED:
1069 iocpf->retry_count = 0;
1070 bfa_ioc_sync_ack(ioc);
1071 bfa_ioc_notify_fail(ioc);
1072 if (!iocpf->auto_recover) {
1073 bfa_ioc_sync_leave(ioc);
1074 bfa_nw_ioc_hw_sem_release(ioc);
1075 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1076 } else {
1077 if (bfa_ioc_sync_complete(ioc))
1078 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1079 else {
1080 bfa_nw_ioc_hw_sem_release(ioc);
1081 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1082 }
1083 }
632 break; 1084 break;
633 1085
634 case IOC_E_DISABLE: 1086 case IOCPF_E_DISABLE:
635 if (ioc->auto_recover) 1087 bfa_ioc_hw_sem_get_cancel(ioc);
636 bfa_ioc_timer_stop(ioc); 1088 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
637 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
638 break; 1089 break;
639 1090
640 case IOC_E_TIMEOUT: 1091 case IOCPF_E_FAIL:
641 bfa_fsm_set_state(ioc, bfa_ioc_sm_semwait);
642 break; 1092 break;
643 1093
644 case IOC_E_FWREADY: 1094 default:
645 /** 1095 bfa_sm_fault(ioc, event);
646 * Recovery is already initiated by other function. 1096 }
647 */ 1097}
648 break;
649 1098
650 case IOC_E_HWERROR: 1099static void
651 /* 1100bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
652 * HB failure notification, ignore. 1101{
653 */ 1102}
1103
1104/**
1105 * @brief
1106 * IOC is in failed state.
1107 */
1108static void
1109bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1110{
1111 switch (event) {
1112 case IOCPF_E_DISABLE:
1113 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
654 break; 1114 break;
1115
655 default: 1116 default:
656 bfa_sm_fault(ioc, event); 1117 bfa_sm_fault(iocpf->ioc, event);
657 } 1118 }
658} 1119}
659 1120
@@ -678,14 +1139,6 @@ bfa_ioc_disable_comp(struct bfa_ioc *ioc)
678 } 1139 }
679} 1140}
680 1141
681void
682bfa_nw_ioc_sem_timeout(void *ioc_arg)
683{
684 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
685
686 bfa_ioc_hw_sem_get(ioc);
687}
688
689bool 1142bool
690bfa_nw_ioc_sem_get(void __iomem *sem_reg) 1143bfa_nw_ioc_sem_get(void __iomem *sem_reg)
691{ 1144{
@@ -725,7 +1178,7 @@ bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
725 */ 1178 */
726 r32 = readl(ioc->ioc_regs.ioc_sem_reg); 1179 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
727 if (r32 == 0) { 1180 if (r32 == 0) {
728 bfa_fsm_send_event(ioc, IOC_E_SEMLOCKED); 1181 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
729 return; 1182 return;
730 } 1183 }
731 1184
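In the hunk above the semaphore grant is keyed off a zero read: when readl() on ioc_sem_reg returns 0 the lock is held, and the event now goes to the IOCPF machine as IOCPF_E_SEMLOCKED instead of IOC_E_SEMLOCKED; a non-zero read means the semaphore is busy and the request is retried from sem_timer. The sketch below models that read-to-acquire convention in user space; the register behaviour (0 = granted, released by a later write) is an assumption stated here, not something this hunk shows.

/*
 * User-space model of the assumed hardware semaphore convention:
 * reading the register claims it and returns 0 on success, non-zero
 * while another function holds it; a later write releases it.
 */
#include <stdbool.h>
#include <stdio.h>

static unsigned int sem_reg;		/* simulated register: 0 = free */

static unsigned int sem_read(void)	/* read claims the semaphore */
{
	unsigned int r32 = sem_reg;

	sem_reg = 1;
	return r32;
}

static void sem_release(void)
{
	sem_reg = 0;
}

static bool hw_sem_get(void)
{
	/* Mirrors the r32 == 0 test above: zero means we own the lock. */
	return sem_read() == 0;
}

int main(void)
{
	if (hw_sem_get())
		printf("locked -> send SEMLOCKED\n");
	else
		printf("busy -> re-arm sem_timer and retry\n");

	sem_release();
	return 0;
}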
@@ -865,12 +1318,6 @@ bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
865{ 1318{
866 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr; 1319 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
867 1320
868 /**
869 * If bios/efi boot (flash based) -- return true
870 */
871 if (bfa_ioc_is_optrom(ioc))
872 return true;
873
874 bfa_nw_ioc_fwver_get(ioc, &fwhdr); 1321 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
875 drv_fwhdr = (struct bfi_ioc_image_hdr *) 1322 drv_fwhdr = (struct bfi_ioc_image_hdr *)
876 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0); 1323 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
@@ -934,20 +1381,15 @@ bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
934 /** 1381 /**
935 * If IOC function is disabled and firmware version is same, 1382 * If IOC function is disabled and firmware version is same,
936 * just re-enable IOC. 1383 * just re-enable IOC.
937 *
938 * If option rom, IOC must not be in operational state. With
939 * convergence, IOC will be in operational state when 2nd driver
940 * is loaded.
941 */ 1384 */
942 if (ioc_fwstate == BFI_IOC_DISABLED || 1385 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
943 (!bfa_ioc_is_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
944 /** 1386 /**
945 * When using MSI-X any pending firmware ready event should 1387 * When using MSI-X any pending firmware ready event should
946 * be flushed. Otherwise MSI-X interrupts are not delivered. 1388 * be flushed. Otherwise MSI-X interrupts are not delivered.
947 */ 1389 */
948 bfa_ioc_msgflush(ioc); 1390 bfa_ioc_msgflush(ioc);
949 ioc->cbfn->reset_cbfn(ioc->bfa); 1391 ioc->cbfn->reset_cbfn(ioc->bfa);
950 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1392 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
951 return; 1393 return;
952 } 1394 }
953 1395
@@ -1033,7 +1475,6 @@ bfa_nw_ioc_hb_check(void *cbarg)
1033 1475
1034 hb_count = readl(ioc->ioc_regs.heartbeat); 1476 hb_count = readl(ioc->ioc_regs.heartbeat);
1035 if (ioc->hb_count == hb_count) { 1477 if (ioc->hb_count == hb_count) {
1036 pr_crit("Firmware heartbeat failure at %d", hb_count);
1037 bfa_ioc_recover(ioc); 1478 bfa_ioc_recover(ioc);
1038 return; 1479 return;
1039 } else { 1480 } else {
@@ -1078,11 +1519,6 @@ bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1078 */ 1519 */
1079 bfa_ioc_lmem_init(ioc); 1520 bfa_ioc_lmem_init(ioc);
1080 1521
1081 /**
1082 * Flash based firmware boot
1083 */
1084 if (bfa_ioc_is_optrom(ioc))
1085 boot_type = BFI_BOOT_TYPE_FLASH;
1086 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno); 1522 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1087 1523
1088 pgnum = bfa_ioc_smem_pgnum(ioc, loff); 1524 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
@@ -1209,6 +1645,55 @@ bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
1209 bfa_q_deq(&mod->cmd_q, &cmd); 1645 bfa_q_deq(&mod->cmd_q, &cmd);
1210} 1646}
1211 1647
1648static void
1649bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1650{
1651 struct list_head *qe;
1652 struct bfa_ioc_hbfail_notify *notify;
1653
1654 /**
1655 * Notify driver and common modules registered for notification.
1656 */
1657 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1658 list_for_each(qe, &ioc->hb_notify_q) {
1659 notify = (struct bfa_ioc_hbfail_notify *) qe;
1660 notify->cbfn(notify->cbarg);
1661 }
1662}
1663
1664static void
1665bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1666{
1667 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1668}
1669
1670static void
1671bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1672{
1673 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1674}
1675
1676static void
1677bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
1678{
1679 bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
1680}
1681
1682static void
1683bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1684{
1685 bfa_fsm_send_event(ioc, IOC_E_PFAILED);
1686}
1687
1688static void
1689bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1690{
1691 /**
1692 * Provide enable completion callback and AEN notification.
1693 */
1694 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1695}
1696
1212/** 1697/**
1213 * IOC public 1698 * IOC public
1214 */ 1699 */
@@ -1304,6 +1789,7 @@ static void
1304bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m) 1789bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1305{ 1790{
1306 union bfi_ioc_i2h_msg_u *msg; 1791 union bfi_ioc_i2h_msg_u *msg;
1792 struct bfa_iocpf *iocpf = &ioc->iocpf;
1307 1793
1308 msg = (union bfi_ioc_i2h_msg_u *) m; 1794 msg = (union bfi_ioc_i2h_msg_u *) m;
1309 1795
@@ -1314,15 +1800,15 @@ bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1314 break; 1800 break;
1315 1801
1316 case BFI_IOC_I2H_READY_EVENT: 1802 case BFI_IOC_I2H_READY_EVENT:
1317 bfa_fsm_send_event(ioc, IOC_E_FWREADY); 1803 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
1318 break; 1804 break;
1319 1805
1320 case BFI_IOC_I2H_ENABLE_REPLY: 1806 case BFI_IOC_I2H_ENABLE_REPLY:
1321 bfa_fsm_send_event(ioc, IOC_E_FWRSP_ENABLE); 1807 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
1322 break; 1808 break;
1323 1809
1324 case BFI_IOC_I2H_DISABLE_REPLY: 1810 case BFI_IOC_I2H_DISABLE_REPLY:
1325 bfa_fsm_send_event(ioc, IOC_E_FWRSP_DISABLE); 1811 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
1326 break; 1812 break;
1327 1813
1328 case BFI_IOC_I2H_GETATTR_REPLY: 1814 case BFI_IOC_I2H_GETATTR_REPLY:
@@ -1348,11 +1834,13 @@ bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
1348 ioc->fcmode = false; 1834 ioc->fcmode = false;
1349 ioc->pllinit = false; 1835 ioc->pllinit = false;
1350 ioc->dbg_fwsave_once = true; 1836 ioc->dbg_fwsave_once = true;
1837 ioc->iocpf.ioc = ioc;
1351 1838
1352 bfa_ioc_mbox_attach(ioc); 1839 bfa_ioc_mbox_attach(ioc);
1353 INIT_LIST_HEAD(&ioc->hb_notify_q); 1840 INIT_LIST_HEAD(&ioc->hb_notify_q);
1354 1841
1355 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset); 1842 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1843 bfa_fsm_send_event(ioc, IOC_E_RESET);
1356} 1844}
1357 1845
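With this hunk the IOC is created in bfa_ioc_sm_uninit and immediately driven to reset with an explicit IOC_E_RESET, while the embedded iocpf back-pointer is set up during attach. A hedged sketch of the resulting bring-up order for a caller is below; error handling, locking and the PCI/mailbox/timer glue provided by the bna/bnad code are omitted and assumed.

/*
 * Assumed bring-up order after this change (sketch only; the real
 * sequencing lives in the bna/bnad setup code, which is not shown).
 */
static void example_ioc_bringup(struct bfa_ioc *ioc, void *bfa,
				struct bfa_ioc_cbfn *cbfn)
{
	bfa_nw_ioc_attach(ioc, bfa, cbfn);	/* uninit state + IOC_E_RESET     */
	bfa_nw_ioc_set_ct_hwif(ioc);		/* install CT ops incl. sync hooks */
	/* ... PCI, mailbox and timer setup elided ... */
	bfa_nw_ioc_enable(ioc);			/* IOC_E_ENABLE -> IOCPF_E_ENABLE  */
}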
1358/** 1846/**
@@ -1657,7 +2145,40 @@ bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
1657static enum bfa_ioc_state 2145static enum bfa_ioc_state
1658bfa_ioc_get_state(struct bfa_ioc *ioc) 2146bfa_ioc_get_state(struct bfa_ioc *ioc)
1659{ 2147{
1660 return bfa_sm_to_state(ioc_sm_table, ioc->fsm); 2148 enum bfa_iocpf_state iocpf_st;
2149 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2150
2151 if (ioc_st == BFA_IOC_ENABLING ||
2152 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2153
2154 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2155
2156 switch (iocpf_st) {
2157 case BFA_IOCPF_SEMWAIT:
2158 ioc_st = BFA_IOC_SEMWAIT;
2159 break;
2160
2161 case BFA_IOCPF_HWINIT:
2162 ioc_st = BFA_IOC_HWINIT;
2163 break;
2164
2165 case BFA_IOCPF_FWMISMATCH:
2166 ioc_st = BFA_IOC_FWMISMATCH;
2167 break;
2168
2169 case BFA_IOCPF_FAIL:
2170 ioc_st = BFA_IOC_FAIL;
2171 break;
2172
2173 case BFA_IOCPF_INITFAIL:
2174 ioc_st = BFA_IOC_INITFAIL;
2175 break;
2176
2177 default:
2178 break;
2179 }
2180 }
2181 return ioc_st;
1661} 2182}
1662 2183
1663void 2184void
@@ -1689,28 +2210,7 @@ bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
1689mac_t 2210mac_t
1690bfa_nw_ioc_get_mac(struct bfa_ioc *ioc) 2211bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
1691{ 2212{
1692 /* 2213 return ioc->attr->mac;
1693 * Currently mfg mac is used as FCoE enode mac (not configured by PBC)
1694 */
1695 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_FCoE)
1696 return bfa_ioc_get_mfg_mac(ioc);
1697 else
1698 return ioc->attr->mac;
1699}
1700
1701static mac_t
1702bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
1703{
1704 mac_t m;
1705
1706 m = ioc->attr->mfg_mac;
1707 if (bfa_mfg_is_old_wwn_mac_model(ioc->attr->card_type))
1708 m.mac[MAC_ADDRLEN - 1] += bfa_ioc_pcifn(ioc);
1709 else
1710 bfa_mfg_increment_wwn_mac(&(m.mac[MAC_ADDRLEN-3]),
1711 bfa_ioc_pcifn(ioc));
1712
1713 return m;
1714} 2214}
1715 2215
1716/** 2216/**
@@ -1719,8 +2219,13 @@ bfa_ioc_get_mfg_mac(struct bfa_ioc *ioc)
1719static void 2219static void
1720bfa_ioc_recover(struct bfa_ioc *ioc) 2220bfa_ioc_recover(struct bfa_ioc *ioc)
1721{ 2221{
1722 bfa_ioc_stats(ioc, ioc_hbfails); 2222 u16 bdf;
1723 bfa_fsm_send_event(ioc, IOC_E_HBFAIL); 2223
2224 bdf = (ioc->pcidev.pci_slot << 8 | ioc->pcidev.pci_func << 3 |
2225 ioc->pcidev.device_id);
2226
2227 pr_crit("Firmware heartbeat failure at %d", bdf);
2228 BUG_ON(1);
1724} 2229}
1725 2230
1726static void 2231static void
@@ -1728,5 +2233,61 @@ bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
1728{ 2233{
1729 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL) 2234 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
1730 return; 2235 return;
2236}
2237
2238/**
2239 * @dg hal_iocpf_pvt BFA IOC PF private functions
2240 * @{
2241 */
2242
2243static void
2244bfa_iocpf_enable(struct bfa_ioc *ioc)
2245{
2246 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2247}
2248
2249static void
2250bfa_iocpf_disable(struct bfa_ioc *ioc)
2251{
2252 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2253}
2254
2255static void
2256bfa_iocpf_fail(struct bfa_ioc *ioc)
2257{
2258 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2259}
2260
2261static void
2262bfa_iocpf_initfail(struct bfa_ioc *ioc)
2263{
2264 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2265}
2266
2267static void
2268bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2269{
2270 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2271}
2272
2273static void
2274bfa_iocpf_stop(struct bfa_ioc *ioc)
2275{
2276 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2277}
1731 2278
2279void
2280bfa_nw_iocpf_timeout(void *ioc_arg)
2281{
2282 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2283
2284 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2285}
2286
2287void
2288bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2289{
2290 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2291
2292 bfa_ioc_hw_sem_get(ioc);
1732} 2293}
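bfa_nw_iocpf_timeout() and bfa_nw_iocpf_sem_timeout() replace the removed bfa_nw_ioc_sem_timeout() and are meant to be driven from the iocpf_timer and sem_timer that the header change below adds to struct bfa_ioc. A sketch of how a caller might arm the iocpf timer with the timer API of this kernel generation follows; the wrapper name and the assumption that the bnad glue owns this wiring are illustrative, not taken from this patch.

/*
 * Assumed wiring only: the driver glue owns the timer and forwards the
 * expiry to the new helper, which raises IOCPF_E_TIMEOUT.
 */
static void example_iocpf_timeout(unsigned long data)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *)data;

	bfa_nw_iocpf_timeout(ioc);
}

static void example_arm_iocpf_timer(struct bfa_ioc *ioc)
{
	setup_timer(&ioc->iocpf_timer, example_iocpf_timeout,
		    (unsigned long)ioc);
	mod_timer(&ioc->iocpf_timer, jiffies + msecs_to_jiffies(BFA_IOC_TOV));
}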
diff --git a/drivers/net/bna/bfa_ioc.h b/drivers/net/bna/bfa_ioc.h
index a73d84ec808c..e4974bc24ef6 100644
--- a/drivers/net/bna/bfa_ioc.h
+++ b/drivers/net/bna/bfa_ioc.h
@@ -26,16 +26,7 @@
26#define BFA_IOC_TOV 3000 /* msecs */ 26#define BFA_IOC_TOV 3000 /* msecs */
27#define BFA_IOC_HWSEM_TOV 500 /* msecs */ 27#define BFA_IOC_HWSEM_TOV 500 /* msecs */
28#define BFA_IOC_HB_TOV 500 /* msecs */ 28#define BFA_IOC_HB_TOV 500 /* msecs */
29#define BFA_IOC_HWINIT_MAX 2 29#define BFA_IOC_HWINIT_MAX 5
30#define BFA_IOC_TOV_RECOVER BFA_IOC_HB_TOV
31
32/**
33 * Generic Scatter Gather Element used by driver
34 */
35struct bfa_sge {
36 u32 sg_len;
37 void *sg_addr;
38};
39 30
40/** 31/**
41 * PCI device information required by IOC 32 * PCI device information required by IOC
@@ -65,19 +56,6 @@ struct bfa_dma {
65#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */ 56#define BFI_SMEM_CT_SIZE 0x280000U /* ! 2.5MB for catapult */
66 57
67/** 58/**
68 * @brief BFA dma address assignment macro
69 */
70#define bfa_dma_addr_set(dma_addr, pa) \
71 __bfa_dma_addr_set(&dma_addr, (u64)pa)
72
73static inline void
74__bfa_dma_addr_set(union bfi_addr_u *dma_addr, u64 pa)
75{
76 dma_addr->a32.addr_lo = (u32) pa;
77 dma_addr->a32.addr_hi = (u32) (upper_32_bits(pa));
78}
79
80/**
81 * @brief BFA dma address assignment macro. (big endian format) 59 * @brief BFA dma address assignment macro. (big endian format)
82 */ 60 */
83#define bfa_dma_be_addr_set(dma_addr, pa) \ 61#define bfa_dma_be_addr_set(dma_addr, pa) \
@@ -105,8 +83,11 @@ struct bfa_ioc_regs {
105 void __iomem *host_page_num_fn; 83 void __iomem *host_page_num_fn;
106 void __iomem *heartbeat; 84 void __iomem *heartbeat;
107 void __iomem *ioc_fwstate; 85 void __iomem *ioc_fwstate;
86 void __iomem *alt_ioc_fwstate;
108 void __iomem *ll_halt; 87 void __iomem *ll_halt;
88 void __iomem *alt_ll_halt;
109 void __iomem *err_set; 89 void __iomem *err_set;
90 void __iomem *ioc_fail_sync;
110 void __iomem *shirq_isr_next; 91 void __iomem *shirq_isr_next;
111 void __iomem *shirq_msk_next; 92 void __iomem *shirq_msk_next;
112 void __iomem *smem_page_start; 93 void __iomem *smem_page_start;
@@ -165,16 +146,22 @@ struct bfa_ioc_hbfail_notify {
165 (__notify)->cbarg = (__cbarg); \ 146 (__notify)->cbarg = (__cbarg); \
166} while (0) 147} while (0)
167 148
149struct bfa_iocpf {
150 bfa_fsm_t fsm;
151 struct bfa_ioc *ioc;
152 u32 retry_count;
153 bool auto_recover;
154};
155
168struct bfa_ioc { 156struct bfa_ioc {
169 bfa_fsm_t fsm; 157 bfa_fsm_t fsm;
170 struct bfa *bfa; 158 struct bfa *bfa;
171 struct bfa_pcidev pcidev; 159 struct bfa_pcidev pcidev;
172 struct bfa_timer_mod *timer_mod;
173 struct timer_list ioc_timer; 160 struct timer_list ioc_timer;
161 struct timer_list iocpf_timer;
174 struct timer_list sem_timer; 162 struct timer_list sem_timer;
175 struct timer_list hb_timer; 163 struct timer_list hb_timer;
176 u32 hb_count; 164 u32 hb_count;
177 u32 retry_count;
178 struct list_head hb_notify_q; 165 struct list_head hb_notify_q;
179 void *dbg_fwsave; 166 void *dbg_fwsave;
180 int dbg_fwsave_len; 167 int dbg_fwsave_len;
@@ -182,7 +169,6 @@ struct bfa_ioc {
182 enum bfi_mclass ioc_mc; 169 enum bfi_mclass ioc_mc;
183 struct bfa_ioc_regs ioc_regs; 170 struct bfa_ioc_regs ioc_regs;
184 struct bfa_ioc_drv_stats stats; 171 struct bfa_ioc_drv_stats stats;
185 bool auto_recover;
186 bool fcmode; 172 bool fcmode;
187 bool ctdev; 173 bool ctdev;
188 bool cna; 174 bool cna;
@@ -195,6 +181,7 @@ struct bfa_ioc {
195 struct bfa_ioc_cbfn *cbfn; 181 struct bfa_ioc_cbfn *cbfn;
196 struct bfa_ioc_mbox_mod mbox_mod; 182 struct bfa_ioc_mbox_mod mbox_mod;
197 struct bfa_ioc_hwif *ioc_hwif; 183 struct bfa_ioc_hwif *ioc_hwif;
184 struct bfa_iocpf iocpf;
198}; 185};
199 186
200struct bfa_ioc_hwif { 187struct bfa_ioc_hwif {
@@ -205,8 +192,12 @@ struct bfa_ioc_hwif {
205 void (*ioc_map_port) (struct bfa_ioc *ioc); 192 void (*ioc_map_port) (struct bfa_ioc *ioc);
206 void (*ioc_isr_mode_set) (struct bfa_ioc *ioc, 193 void (*ioc_isr_mode_set) (struct bfa_ioc *ioc,
207 bool msix); 194 bool msix);
208 void (*ioc_notify_hbfail) (struct bfa_ioc *ioc); 195 void (*ioc_notify_fail) (struct bfa_ioc *ioc);
209 void (*ioc_ownership_reset) (struct bfa_ioc *ioc); 196 void (*ioc_ownership_reset) (struct bfa_ioc *ioc);
197 void (*ioc_sync_join) (struct bfa_ioc *ioc);
198 void (*ioc_sync_leave) (struct bfa_ioc *ioc);
199 void (*ioc_sync_ack) (struct bfa_ioc *ioc);
200 bool (*ioc_sync_complete) (struct bfa_ioc *ioc);
210}; 201};
211 202
212#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func) 203#define bfa_ioc_pcifn(__ioc) ((__ioc)->pcidev.pci_func)
@@ -271,7 +262,6 @@ void bfa_nw_ioc_enable(struct bfa_ioc *ioc);
271void bfa_nw_ioc_disable(struct bfa_ioc *ioc); 262void bfa_nw_ioc_disable(struct bfa_ioc *ioc);
272 263
273void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc); 264void bfa_nw_ioc_error_isr(struct bfa_ioc *ioc);
274
275void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr); 265void bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr);
276void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc, 266void bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
277 struct bfa_ioc_hbfail_notify *notify); 267 struct bfa_ioc_hbfail_notify *notify);
@@ -289,7 +279,8 @@ mac_t bfa_nw_ioc_get_mac(struct bfa_ioc *ioc);
289 */ 279 */
290void bfa_nw_ioc_timeout(void *ioc); 280void bfa_nw_ioc_timeout(void *ioc);
291void bfa_nw_ioc_hb_check(void *ioc); 281void bfa_nw_ioc_hb_check(void *ioc);
292void bfa_nw_ioc_sem_timeout(void *ioc); 282void bfa_nw_iocpf_timeout(void *ioc);
283void bfa_nw_iocpf_sem_timeout(void *ioc);
293 284
294/* 285/*
295 * F/W Image Size & Chunk 286 * F/W Image Size & Chunk
diff --git a/drivers/net/bna/bfa_ioc_ct.c b/drivers/net/bna/bfa_ioc_ct.c
index 121cfd6d48b1..469997c4ffd1 100644
--- a/drivers/net/bna/bfa_ioc_ct.c
+++ b/drivers/net/bna/bfa_ioc_ct.c
@@ -22,6 +22,15 @@
22#include "bfi_ctreg.h" 22#include "bfi_ctreg.h"
23#include "bfa_defs.h" 23#include "bfa_defs.h"
24 24
25#define bfa_ioc_ct_sync_pos(__ioc) \
26 ((u32) (1 << bfa_ioc_pcifn(__ioc)))
27#define BFA_IOC_SYNC_REQD_SH 16
28#define bfa_ioc_ct_get_sync_ackd(__val) (__val & 0x0000ffff)
29#define bfa_ioc_ct_clear_sync_ackd(__val) (__val & 0xffff0000)
30#define bfa_ioc_ct_get_sync_reqd(__val) (__val >> BFA_IOC_SYNC_REQD_SH)
31#define bfa_ioc_ct_sync_reqd_pos(__ioc) \
32 (bfa_ioc_ct_sync_pos(__ioc) << BFA_IOC_SYNC_REQD_SH)
33
25/* 34/*
26 * forward declarations 35 * forward declarations
27 */ 36 */
@@ -30,8 +39,12 @@ static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc);
30static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc); 39static void bfa_ioc_ct_reg_init(struct bfa_ioc *ioc);
31static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc); 40static void bfa_ioc_ct_map_port(struct bfa_ioc *ioc);
32static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix); 41static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc *ioc, bool msix);
33static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc); 42static void bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc);
34static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc); 43static void bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc);
44static void bfa_ioc_ct_sync_join(struct bfa_ioc *ioc);
45static void bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc);
46static void bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc);
47static bool bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc);
35static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode); 48static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode);
36 49
37static struct bfa_ioc_hwif nw_hwif_ct; 50static struct bfa_ioc_hwif nw_hwif_ct;
@@ -48,8 +61,12 @@ bfa_nw_ioc_set_ct_hwif(struct bfa_ioc *ioc)
48 nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init; 61 nw_hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
49 nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port; 62 nw_hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
50 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set; 63 nw_hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
51 nw_hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail; 64 nw_hwif_ct.ioc_notify_fail = bfa_ioc_ct_notify_fail;
52 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset; 65 nw_hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;
66 nw_hwif_ct.ioc_sync_join = bfa_ioc_ct_sync_join;
67 nw_hwif_ct.ioc_sync_leave = bfa_ioc_ct_sync_leave;
68 nw_hwif_ct.ioc_sync_ack = bfa_ioc_ct_sync_ack;
69 nw_hwif_ct.ioc_sync_complete = bfa_ioc_ct_sync_complete;
53 70
54 ioc->ioc_hwif = &nw_hwif_ct; 71 ioc->ioc_hwif = &nw_hwif_ct;
55} 72}
@@ -86,6 +103,7 @@ bfa_ioc_ct_firmware_lock(struct bfa_ioc *ioc)
86 if (usecnt == 0) { 103 if (usecnt == 0) {
87 writel(1, ioc->ioc_regs.ioc_usage_reg); 104 writel(1, ioc->ioc_regs.ioc_usage_reg);
88 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg); 105 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
106 writel(0, ioc->ioc_regs.ioc_fail_sync);
89 return true; 107 return true;
90 } 108 }
91 109
@@ -149,12 +167,14 @@ bfa_ioc_ct_firmware_unlock(struct bfa_ioc *ioc)
149 * Notify other functions on HB failure. 167 * Notify other functions on HB failure.
150 */ 168 */
151static void 169static void
152bfa_ioc_ct_notify_hbfail(struct bfa_ioc *ioc) 170bfa_ioc_ct_notify_fail(struct bfa_ioc *ioc)
153{ 171{
154 if (ioc->cna) { 172 if (ioc->cna) {
155 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt); 173 writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
174 writel(__FW_INIT_HALT_P, ioc->ioc_regs.alt_ll_halt);
156 /* Wait for halt to take effect */ 175 /* Wait for halt to take effect */
157 readl(ioc->ioc_regs.ll_halt); 176 readl(ioc->ioc_regs.ll_halt);
177 readl(ioc->ioc_regs.alt_ll_halt);
158 } else { 178 } else {
159 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set); 179 writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
160 readl(ioc->ioc_regs.err_set); 180 readl(ioc->ioc_regs.err_set);
@@ -206,15 +226,19 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
206 if (ioc->port_id == 0) { 226 if (ioc->port_id == 0) {
207 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; 227 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
208 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; 228 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
229 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;
209 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn; 230 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
210 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu; 231 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
211 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0; 232 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
233 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P1;
212 } else { 234 } else {
213 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); 235 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
214 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); 236 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
237 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC0_STATE_REG;
215 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn; 238 ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
216 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu; 239 ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
217 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1; 240 ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
241 ioc->ioc_regs.alt_ll_halt = rb + FW_INIT_HALT_P0;
218 } 242 }
219 243
220 /* 244 /*
@@ -232,6 +256,7 @@ bfa_ioc_ct_reg_init(struct bfa_ioc *ioc)
232 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG); 256 ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
233 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG); 257 ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
234 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT); 258 ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
259 ioc->ioc_regs.ioc_fail_sync = (rb + BFA_IOC_FAIL_SYNC);
235 260
236 /** 261 /**
237 * sram memory access 262 * sram memory access
@@ -317,6 +342,77 @@ bfa_ioc_ct_ownership_reset(struct bfa_ioc *ioc)
317 bfa_nw_ioc_hw_sem_release(ioc); 342 bfa_nw_ioc_hw_sem_release(ioc);
318} 343}
319 344
345/**
346 * Synchronized IOC failure processing routines
347 */
348static void
349bfa_ioc_ct_sync_join(struct bfa_ioc *ioc)
350{
351 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
352 u32 sync_pos = bfa_ioc_ct_sync_reqd_pos(ioc);
353
354 writel((r32 | sync_pos), ioc->ioc_regs.ioc_fail_sync);
355}
356
357static void
358bfa_ioc_ct_sync_leave(struct bfa_ioc *ioc)
359{
360 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
361 u32 sync_msk = bfa_ioc_ct_sync_reqd_pos(ioc) |
362 bfa_ioc_ct_sync_pos(ioc);
363
364 writel((r32 & ~sync_msk), ioc->ioc_regs.ioc_fail_sync);
365}
366
367static void
368bfa_ioc_ct_sync_ack(struct bfa_ioc *ioc)
369{
370 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
371
372 writel((r32 | bfa_ioc_ct_sync_pos(ioc)), ioc->ioc_regs.ioc_fail_sync);
373}
374
375static bool
376bfa_ioc_ct_sync_complete(struct bfa_ioc *ioc)
377{
378 u32 r32 = readl(ioc->ioc_regs.ioc_fail_sync);
379 u32 sync_reqd = bfa_ioc_ct_get_sync_reqd(r32);
380 u32 sync_ackd = bfa_ioc_ct_get_sync_ackd(r32);
381 u32 tmp_ackd;
382
383 if (sync_ackd == 0)
384 return true;
385
386 /**
387 * The check below is to see whether any other PCI fn
388 * has reinitialized the ASIC (reset sync_ackd bits)
389 * and failed again while this IOC was waiting for hw
390 * semaphore (in bfa_iocpf_sm_semwait()).
391 */
392 tmp_ackd = sync_ackd;
393 if ((sync_reqd & bfa_ioc_ct_sync_pos(ioc)) &&
394 !(sync_ackd & bfa_ioc_ct_sync_pos(ioc)))
395 sync_ackd |= bfa_ioc_ct_sync_pos(ioc);
396
397 if (sync_reqd == sync_ackd) {
398 writel(bfa_ioc_ct_clear_sync_ackd(r32),
399 ioc->ioc_regs.ioc_fail_sync);
400 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
401 writel(BFI_IOC_FAIL, ioc->ioc_regs.alt_ioc_fwstate);
402 return true;
403 }
404
405 /**
406 * If another PCI fn reinitialized and failed again while
407 * this IOC was waiting for hw sem, the sync_ackd bit for
408 * this IOC needs to be set again to allow reinitialization.
409 */
410 if (tmp_ackd != sync_ackd)
411 writel((r32 | sync_ackd), ioc->ioc_regs.ioc_fail_sync);
412
413 return false;
414}
415
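The ioc_fail_sync word is split per PCI function: bit N in the low half (sync_ackd) records that function N has acknowledged the failure, and bit N+16 (sync_reqd) records that it still has to re-initialize. bfa_ioc_ct_sync_complete() lets the function that currently holds the hardware semaphore proceed once the two halves match, clearing the ack bits and marking both fwstate registers failed. A standalone worked example with two functions sharing the ASIC (values are illustrative):

/*
 * Worked example of the ioc_fail_sync layout used above:
 *   bits 15..0  : sync_ackd -- functions that acked the failure
 *   bits 31..16 : sync_reqd -- functions that must re-initialize
 */
#include <stdint.h>
#include <stdio.h>

#define SYNC_REQD_SH		16
#define sync_pos(fn)		((uint32_t)1 << (fn))
#define sync_reqd_pos(fn)	(sync_pos(fn) << SYNC_REQD_SH)

int main(void)
{
	uint32_t r32 = 0;

	r32 |= sync_reqd_pos(0) | sync_reqd_pos(1);	/* both join the sync */
	r32 |= sync_pos(0);				/* fn0 acks           */

	/* reqd (0x3) != ackd (0x1): the holder releases the sem and waits */
	printf("reqd=%#x ackd=%#x\n",
	       (unsigned)(r32 >> SYNC_REQD_SH), (unsigned)(r32 & 0xffff));

	r32 |= sync_pos(1);				/* fn1 acks as well   */

	/* reqd == ackd: whoever holds the semaphore now may clear the ack
	 * bits and set both ioc_fwstate registers to BFI_IOC_FAIL */
	printf("complete=%d\n", (r32 >> SYNC_REQD_SH) == (r32 & 0xffff));
	return 0;
}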
320static enum bfa_status 416static enum bfa_status
321bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode) 417bfa_ioc_ct_pll_init(void __iomem *rb, bool fcmode)
322{ 418{
diff --git a/drivers/net/bna/bfi_ctreg.h b/drivers/net/bna/bfi_ctreg.h
index 404ea351d4a1..5130d7918660 100644
--- a/drivers/net/bna/bfi_ctreg.h
+++ b/drivers/net/bna/bfi_ctreg.h
@@ -535,6 +535,7 @@ enum {
535#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG 535#define BFA_IOC1_HBEAT_REG HOST_SEM2_INFO_REG
536#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG 536#define BFA_IOC1_STATE_REG HOST_SEM3_INFO_REG
537#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG 537#define BFA_FW_USE_COUNT HOST_SEM4_INFO_REG
538#define BFA_IOC_FAIL_SYNC HOST_SEM5_INFO_REG
538 539
539#define CPE_DEPTH_Q(__n) \ 540#define CPE_DEPTH_Q(__n) \
540 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0)) 541 (CPE_DEPTH_Q0 + (__n) * (CPE_DEPTH_Q1 - CPE_DEPTH_Q0))
@@ -552,22 +553,30 @@ enum {
552 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0)) 553 (RME_PI_PTR_Q0 + (__n) * (RME_PI_PTR_Q1 - RME_PI_PTR_Q0))
553#define RME_CI_PTR_Q(__n) \ 554#define RME_CI_PTR_Q(__n) \
554 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0)) 555 (RME_CI_PTR_Q0 + (__n) * (RME_CI_PTR_Q1 - RME_CI_PTR_Q0))
555#define HQM_QSET_RXQ_DRBL_P0(__n) (HQM_QSET0_RXQ_DRBL_P0 + (__n) \ 556#define HQM_QSET_RXQ_DRBL_P0(__n) \
556 * (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0)) 557 (HQM_QSET0_RXQ_DRBL_P0 + (__n) * \
557#define HQM_QSET_TXQ_DRBL_P0(__n) (HQM_QSET0_TXQ_DRBL_P0 + (__n) \ 558 (HQM_QSET1_RXQ_DRBL_P0 - HQM_QSET0_RXQ_DRBL_P0))
558 * (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0)) 559#define HQM_QSET_TXQ_DRBL_P0(__n) \
559#define HQM_QSET_IB_DRBL_1_P0(__n) (HQM_QSET0_IB_DRBL_1_P0 + (__n) \ 560 (HQM_QSET0_TXQ_DRBL_P0 + (__n) * \
560 * (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0)) 561 (HQM_QSET1_TXQ_DRBL_P0 - HQM_QSET0_TXQ_DRBL_P0))
561#define HQM_QSET_IB_DRBL_2_P0(__n) (HQM_QSET0_IB_DRBL_2_P0 + (__n) \ 562#define HQM_QSET_IB_DRBL_1_P0(__n) \
562 * (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0)) 563 (HQM_QSET0_IB_DRBL_1_P0 + (__n) * \
563#define HQM_QSET_RXQ_DRBL_P1(__n) (HQM_QSET0_RXQ_DRBL_P1 + (__n) \ 564 (HQM_QSET1_IB_DRBL_1_P0 - HQM_QSET0_IB_DRBL_1_P0))
564 * (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1)) 565#define HQM_QSET_IB_DRBL_2_P0(__n) \
565#define HQM_QSET_TXQ_DRBL_P1(__n) (HQM_QSET0_TXQ_DRBL_P1 + (__n) \ 566 (HQM_QSET0_IB_DRBL_2_P0 + (__n) * \
566 * (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1)) 567 (HQM_QSET1_IB_DRBL_2_P0 - HQM_QSET0_IB_DRBL_2_P0))
567#define HQM_QSET_IB_DRBL_1_P1(__n) (HQM_QSET0_IB_DRBL_1_P1 + (__n) \ 568#define HQM_QSET_RXQ_DRBL_P1(__n) \
568 * (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1)) 569 (HQM_QSET0_RXQ_DRBL_P1 + (__n) * \
569#define HQM_QSET_IB_DRBL_2_P1(__n) (HQM_QSET0_IB_DRBL_2_P1 + (__n) \ 570 (HQM_QSET1_RXQ_DRBL_P1 - HQM_QSET0_RXQ_DRBL_P1))
570 * (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1)) 571#define HQM_QSET_TXQ_DRBL_P1(__n) \
572 (HQM_QSET0_TXQ_DRBL_P1 + (__n) * \
573 (HQM_QSET1_TXQ_DRBL_P1 - HQM_QSET0_TXQ_DRBL_P1))
574#define HQM_QSET_IB_DRBL_1_P1(__n) \
575 (HQM_QSET0_IB_DRBL_1_P1 + (__n) * \
576 (HQM_QSET1_IB_DRBL_1_P1 - HQM_QSET0_IB_DRBL_1_P1))
577#define HQM_QSET_IB_DRBL_2_P1(__n) \
578 (HQM_QSET0_IB_DRBL_2_P1 + (__n) * \
579 (HQM_QSET1_IB_DRBL_2_P1 - HQM_QSET0_IB_DRBL_2_P1))
571 580
572#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) 581#define CPE_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
573#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q)) 582#define RME_Q_NUM(__fn, __q) (((__fn) << 2) + (__q))
diff --git a/drivers/net/bna/bna.h b/drivers/net/bna/bna.h
index df6676bbc84e..a287f89b0289 100644
--- a/drivers/net/bna/bna.h
+++ b/drivers/net/bna/bna.h
@@ -32,8 +32,6 @@ extern const u32 bna_napi_dim_vector[][BNA_BIAS_T_MAX];
32/* Log string size */ 32/* Log string size */
33#define BNA_MESSAGE_SIZE 256 33#define BNA_MESSAGE_SIZE 256
34 34
35#define bna_device_timer(_dev) bfa_timer_beat(&((_dev)->timer_mod))
36
37/* MBOX API for PORT, TX, RX */ 35/* MBOX API for PORT, TX, RX */
38#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \ 36#define bna_mbox_qe_fill(_qe, _cmd, _cmd_len, _cbfn, _cbarg) \
39do { \ 37do { \
@@ -390,8 +388,8 @@ void bna_mbox_send(struct bna *bna, struct bna_mbox_qe *mbox_qe);
390 388
391/* API for RX */ 389/* API for RX */
392int bna_port_mtu_get(struct bna_port *port); 390int bna_port_mtu_get(struct bna_port *port);
393void bna_llport_admin_up(struct bna_llport *llport); 391void bna_llport_rx_started(struct bna_llport *llport);
394void bna_llport_admin_down(struct bna_llport *llport); 392void bna_llport_rx_stopped(struct bna_llport *llport);
395 393
396/* API for BNAD */ 394/* API for BNAD */
397void bna_port_enable(struct bna_port *port); 395void bna_port_enable(struct bna_port *port);
diff --git a/drivers/net/bna/bna_ctrl.c b/drivers/net/bna/bna_ctrl.c
index 07b26598546e..e1527472b961 100644
--- a/drivers/net/bna/bna_ctrl.c
+++ b/drivers/net/bna/bna_ctrl.c
@@ -59,14 +59,70 @@ bna_port_cb_link_down(struct bna_port *port, int status)
59 port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN); 59 port->link_cbfn(port->bna->bnad, BNA_LINK_DOWN);
60} 60}
61 61
62static inline int
63llport_can_be_up(struct bna_llport *llport)
64{
65 int ready = 0;
66 if (llport->type == BNA_PORT_T_REGULAR)
67 ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
68 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
69 (llport->flags & BNA_LLPORT_F_PORT_ENABLED));
70 else
71 ready = ((llport->flags & BNA_LLPORT_F_ADMIN_UP) &&
72 (llport->flags & BNA_LLPORT_F_RX_STARTED) &&
73 !(llport->flags & BNA_LLPORT_F_PORT_ENABLED));
74 return ready;
75}
76
77#define llport_is_up llport_can_be_up
78
79enum bna_llport_event {
80 LLPORT_E_START = 1,
81 LLPORT_E_STOP = 2,
82 LLPORT_E_FAIL = 3,
83 LLPORT_E_UP = 4,
84 LLPORT_E_DOWN = 5,
85 LLPORT_E_FWRESP_UP_OK = 6,
86 LLPORT_E_FWRESP_UP_FAIL = 7,
87 LLPORT_E_FWRESP_DOWN = 8
88};
89
90static void
91bna_llport_cb_port_enabled(struct bna_llport *llport)
92{
93 llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
94
95 if (llport_can_be_up(llport))
96 bfa_fsm_send_event(llport, LLPORT_E_UP);
97}
98
99static void
100bna_llport_cb_port_disabled(struct bna_llport *llport)
101{
102 int llport_up = llport_is_up(llport);
103
104 llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
105
106 if (llport_up)
107 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
108}
109
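llport_can_be_up() is the single readiness predicate behind the port enable/disable AEN callbacks above and the rx_started/rx_stopped hooks further down: a regular port needs ADMIN_UP, RX_STARTED and PORT_ENABLED, while a non-regular (loopback-type) port may only come up while the physical port is disabled; llport_is_up is an alias because the same test decides both the up and down transitions. A standalone illustration of that rule (the flag bits are stand-ins; only the logic mirrors the function above):

/*
 * Standalone illustration of the readiness rule above (flag values are
 * stand-ins; only the logic mirrors llport_can_be_up()).
 */
#include <stdbool.h>
#include <stdio.h>

#define F_ADMIN_UP	0x01
#define F_RX_STARTED	0x02
#define F_PORT_ENABLED	0x04

static bool can_be_up(bool regular, unsigned int flags)
{
	bool base = (flags & F_ADMIN_UP) && (flags & F_RX_STARTED);

	/* Regular ports need the physical port enabled; non-regular
	 * (loopback-type) ports may only come up while it is disabled. */
	return regular ? (base && (flags & F_PORT_ENABLED))
		       : (base && !(flags & F_PORT_ENABLED));
}

int main(void)
{
	printf("%d\n", can_be_up(true, F_ADMIN_UP | F_RX_STARTED | F_PORT_ENABLED));	/* 1 */
	printf("%d\n", can_be_up(true, F_ADMIN_UP | F_RX_STARTED));			/* 0 */
	printf("%d\n", can_be_up(false, F_ADMIN_UP | F_RX_STARTED));			/* 1 */
	return 0;
}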
62/** 110/**
63 * MBOX 111 * MBOX
64 */ 112 */
65static int 113static int
66bna_is_aen(u8 msg_id) 114bna_is_aen(u8 msg_id)
67{ 115{
68 return msg_id == BFI_LL_I2H_LINK_DOWN_AEN || 116 switch (msg_id) {
69 msg_id == BFI_LL_I2H_LINK_UP_AEN; 117 case BFI_LL_I2H_LINK_DOWN_AEN:
118 case BFI_LL_I2H_LINK_UP_AEN:
119 case BFI_LL_I2H_PORT_ENABLE_AEN:
120 case BFI_LL_I2H_PORT_DISABLE_AEN:
121 return 1;
122
123 default:
124 return 0;
125 }
70} 126}
71 127
72static void 128static void
@@ -81,6 +137,12 @@ bna_mbox_aen_callback(struct bna *bna, struct bfi_mbmsg *msg)
81 case BFI_LL_I2H_LINK_DOWN_AEN: 137 case BFI_LL_I2H_LINK_DOWN_AEN:
82 bna_port_cb_link_down(&bna->port, aen->reason); 138 bna_port_cb_link_down(&bna->port, aen->reason);
83 break; 139 break;
140 case BFI_LL_I2H_PORT_ENABLE_AEN:
141 bna_llport_cb_port_enabled(&bna->port.llport);
142 break;
143 case BFI_LL_I2H_PORT_DISABLE_AEN:
144 bna_llport_cb_port_disabled(&bna->port.llport);
145 break;
84 default: 146 default:
85 break; 147 break;
86 } 148 }
@@ -251,16 +313,6 @@ static void bna_llport_start(struct bna_llport *llport);
251static void bna_llport_stop(struct bna_llport *llport); 313static void bna_llport_stop(struct bna_llport *llport);
252static void bna_llport_fail(struct bna_llport *llport); 314static void bna_llport_fail(struct bna_llport *llport);
253 315
254enum bna_llport_event {
255 LLPORT_E_START = 1,
256 LLPORT_E_STOP = 2,
257 LLPORT_E_FAIL = 3,
258 LLPORT_E_UP = 4,
259 LLPORT_E_DOWN = 5,
260 LLPORT_E_FWRESP_UP = 6,
261 LLPORT_E_FWRESP_DOWN = 7
262};
263
264enum bna_llport_state { 316enum bna_llport_state {
265 BNA_LLPORT_STOPPED = 1, 317 BNA_LLPORT_STOPPED = 1,
266 BNA_LLPORT_DOWN = 2, 318 BNA_LLPORT_DOWN = 2,
@@ -320,7 +372,7 @@ bna_llport_sm_stopped(struct bna_llport *llport,
320 /* No-op */ 372 /* No-op */
321 break; 373 break;
322 374
323 case LLPORT_E_FWRESP_UP: 375 case LLPORT_E_FWRESP_UP_OK:
324 case LLPORT_E_FWRESP_DOWN: 376 case LLPORT_E_FWRESP_DOWN:
325 /** 377 /**
326 * These events are received due to flushing of mbox when 378 * These events are received due to flushing of mbox when
@@ -366,6 +418,7 @@ bna_llport_sm_down(struct bna_llport *llport,
366static void 418static void
367bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport) 419bna_llport_sm_up_resp_wait_entry(struct bna_llport *llport)
368{ 420{
421 BUG_ON(!llport_can_be_up(llport));
369 /** 422 /**
370 * NOTE: Do not call bna_fw_llport_up() here. That will over step 423 * NOTE: Do not call bna_fw_llport_up() here. That will over step
371 * mbox due to down_resp_wait -> up_resp_wait transition on event 424 * mbox due to down_resp_wait -> up_resp_wait transition on event
@@ -390,10 +443,14 @@ bna_llport_sm_up_resp_wait(struct bna_llport *llport,
390 bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait); 443 bfa_fsm_set_state(llport, bna_llport_sm_down_resp_wait);
391 break; 444 break;
392 445
393 case LLPORT_E_FWRESP_UP: 446 case LLPORT_E_FWRESP_UP_OK:
394 bfa_fsm_set_state(llport, bna_llport_sm_up); 447 bfa_fsm_set_state(llport, bna_llport_sm_up);
395 break; 448 break;
396 449
450 case LLPORT_E_FWRESP_UP_FAIL:
451 bfa_fsm_set_state(llport, bna_llport_sm_down);
452 break;
453
397 case LLPORT_E_FWRESP_DOWN: 454 case LLPORT_E_FWRESP_DOWN:
398 /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */ 455 /* down_resp_wait -> up_resp_wait transition on LLPORT_E_UP */
399 bna_fw_llport_up(llport); 456 bna_fw_llport_up(llport);
@@ -431,11 +488,12 @@ bna_llport_sm_down_resp_wait(struct bna_llport *llport,
431 bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait); 488 bfa_fsm_set_state(llport, bna_llport_sm_up_resp_wait);
432 break; 489 break;
433 490
434 case LLPORT_E_FWRESP_UP: 491 case LLPORT_E_FWRESP_UP_OK:
435 /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */ 492 /* up_resp_wait->down_resp_wait transition on LLPORT_E_DOWN */
436 bna_fw_llport_down(llport); 493 bna_fw_llport_down(llport);
437 break; 494 break;
438 495
496 case LLPORT_E_FWRESP_UP_FAIL:
439 case LLPORT_E_FWRESP_DOWN: 497 case LLPORT_E_FWRESP_DOWN:
440 bfa_fsm_set_state(llport, bna_llport_sm_down); 498 bfa_fsm_set_state(llport, bna_llport_sm_down);
441 break; 499 break;
@@ -496,11 +554,12 @@ bna_llport_sm_last_resp_wait(struct bna_llport *llport,
496 /* No-op */ 554 /* No-op */
497 break; 555 break;
498 556
499 case LLPORT_E_FWRESP_UP: 557 case LLPORT_E_FWRESP_UP_OK:
500 /* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */ 558 /* up_resp_wait->last_resp_wait transition on LLPORT_T_STOP */
501 bna_fw_llport_down(llport); 559 bna_fw_llport_down(llport);
502 break; 560 break;
503 561
562 case LLPORT_E_FWRESP_UP_FAIL:
504 case LLPORT_E_FWRESP_DOWN: 563 case LLPORT_E_FWRESP_DOWN:
505 bfa_fsm_set_state(llport, bna_llport_sm_stopped); 564 bfa_fsm_set_state(llport, bna_llport_sm_stopped);
506 break; 565 break;
@@ -541,7 +600,14 @@ bna_fw_cb_llport_up(void *arg, int status)
541 struct bna_llport *llport = (struct bna_llport *)arg; 600 struct bna_llport *llport = (struct bna_llport *)arg;
542 601
543 bfa_q_qe_init(&llport->mbox_qe.qe); 602 bfa_q_qe_init(&llport->mbox_qe.qe);
544 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP); 603 if (status == BFI_LL_CMD_FAIL) {
604 if (llport->type == BNA_PORT_T_REGULAR)
605 llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
606 else
607 llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
608 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_FAIL);
609 } else
610 bfa_fsm_send_event(llport, LLPORT_E_FWRESP_UP_OK);
545} 611}
546 612
547static void 613static void
@@ -588,13 +654,14 @@ bna_port_cb_llport_stopped(struct bna_port *port,
588static void 654static void
589bna_llport_init(struct bna_llport *llport, struct bna *bna) 655bna_llport_init(struct bna_llport *llport, struct bna *bna)
590{ 656{
591 llport->flags |= BNA_LLPORT_F_ENABLED; 657 llport->flags |= BNA_LLPORT_F_ADMIN_UP;
658 llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
592 llport->type = BNA_PORT_T_REGULAR; 659 llport->type = BNA_PORT_T_REGULAR;
593 llport->bna = bna; 660 llport->bna = bna;
594 661
595 llport->link_status = BNA_LINK_DOWN; 662 llport->link_status = BNA_LINK_DOWN;
596 663
597 llport->admin_up_count = 0; 664 llport->rx_started_count = 0;
598 665
599 llport->stop_cbfn = NULL; 666 llport->stop_cbfn = NULL;
600 667
@@ -606,7 +673,8 @@ bna_llport_init(struct bna_llport *llport, struct bna *bna)
606static void 673static void
607bna_llport_uninit(struct bna_llport *llport) 674bna_llport_uninit(struct bna_llport *llport)
608{ 675{
609 llport->flags &= ~BNA_LLPORT_F_ENABLED; 676 llport->flags &= ~BNA_LLPORT_F_ADMIN_UP;
677 llport->flags &= ~BNA_LLPORT_F_PORT_ENABLED;
610 678
611 llport->bna = NULL; 679 llport->bna = NULL;
612} 680}
@@ -628,6 +696,8 @@ bna_llport_stop(struct bna_llport *llport)
628static void 696static void
629bna_llport_fail(struct bna_llport *llport) 697bna_llport_fail(struct bna_llport *llport)
630{ 698{
699 /* Reset the physical port status to enabled */
700 llport->flags |= BNA_LLPORT_F_PORT_ENABLED;
631 bfa_fsm_send_event(llport, LLPORT_E_FAIL); 701 bfa_fsm_send_event(llport, LLPORT_E_FAIL);
632} 702}
633 703
@@ -638,25 +708,31 @@ bna_llport_state_get(struct bna_llport *llport)
638} 708}
639 709
640void 710void
641bna_llport_admin_up(struct bna_llport *llport) 711bna_llport_rx_started(struct bna_llport *llport)
642{ 712{
643 llport->admin_up_count++; 713 llport->rx_started_count++;
644 714
645 if (llport->admin_up_count == 1) { 715 if (llport->rx_started_count == 1) {
646 llport->flags |= BNA_LLPORT_F_RX_ENABLED; 716
647 if (llport->flags & BNA_LLPORT_F_ENABLED) 717 llport->flags |= BNA_LLPORT_F_RX_STARTED;
718
719 if (llport_can_be_up(llport))
648 bfa_fsm_send_event(llport, LLPORT_E_UP); 720 bfa_fsm_send_event(llport, LLPORT_E_UP);
649 } 721 }
650} 722}
651 723
652void 724void
653bna_llport_admin_down(struct bna_llport *llport) 725bna_llport_rx_stopped(struct bna_llport *llport)
654{ 726{
655 llport->admin_up_count--; 727 int llport_up = llport_is_up(llport);
728
729 llport->rx_started_count--;
656 730
657 if (llport->admin_up_count == 0) { 731 if (llport->rx_started_count == 0) {
658 llport->flags &= ~BNA_LLPORT_F_RX_ENABLED; 732
659 if (llport->flags & BNA_LLPORT_F_ENABLED) 733 llport->flags &= ~BNA_LLPORT_F_RX_STARTED;
734
735 if (llport_up)
660 bfa_fsm_send_event(llport, LLPORT_E_DOWN); 736 bfa_fsm_send_event(llport, LLPORT_E_DOWN);
661 } 737 }
662} 738}
@@ -2056,37 +2132,6 @@ rxf_fltr_mbox_cmd(struct bna_rxf *rxf, u8 cmd, enum bna_status status)
2056 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe); 2132 bna_mbox_send(rxf->rx->bna, &rxf->mbox_qe);
2057} 2133}
2058 2134
2059static void
2060__rxf_default_function_config(struct bna_rxf *rxf, enum bna_status status)
2061{
2062 struct bna_rx_fndb_ram *rx_fndb_ram;
2063 u32 ctrl_flags;
2064 int i;
2065
2066 rx_fndb_ram = (struct bna_rx_fndb_ram *)
2067 BNA_GET_MEM_BASE_ADDR(rxf->rx->bna->pcidev.pci_bar_kva,
2068 RX_FNDB_RAM_BASE_OFFSET);
2069
2070 for (i = 0; i < BFI_MAX_RXF; i++) {
2071 if (status == BNA_STATUS_T_ENABLED) {
2072 if (i == rxf->rxf_id)
2073 continue;
2074
2075 ctrl_flags =
2076 readl(&rx_fndb_ram[i].control_flags);
2077 ctrl_flags |= BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
2078 writel(ctrl_flags,
2079 &rx_fndb_ram[i].control_flags);
2080 } else {
2081 ctrl_flags =
2082 readl(&rx_fndb_ram[i].control_flags);
2083 ctrl_flags &= ~BNA_RXF_CF_DEFAULT_FUNCTION_ENABLE;
2084 writel(ctrl_flags,
2085 &rx_fndb_ram[i].control_flags);
2086 }
2087 }
2088}
2089
2090int 2135int
2091rxf_process_packet_filter_ucast(struct bna_rxf *rxf) 2136rxf_process_packet_filter_ucast(struct bna_rxf *rxf)
2092{ 2137{
@@ -2153,46 +2198,6 @@ rxf_process_packet_filter_promisc(struct bna_rxf *rxf)
2153} 2198}
2154 2199
2155int 2200int
2156rxf_process_packet_filter_default(struct bna_rxf *rxf)
2157{
2158 struct bna *bna = rxf->rx->bna;
2159
2160 /* Enable/disable default mode */
2161 if (is_default_enable(rxf->rxmode_pending,
2162 rxf->rxmode_pending_bitmask)) {
2163 /* move default configuration from pending -> active */
2164 default_inactive(rxf->rxmode_pending,
2165 rxf->rxmode_pending_bitmask);
2166 rxf->rxmode_active |= BNA_RXMODE_DEFAULT;
2167
2168 /* Disable VLAN filter to allow all VLANs */
2169 __rxf_vlan_filter_set(rxf, BNA_STATUS_T_DISABLED);
2170 /* Redirect all other RxF vlan filtering to this one */
2171 __rxf_default_function_config(rxf, BNA_STATUS_T_ENABLED);
2172 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
2173 BNA_STATUS_T_ENABLED);
2174 return 1;
2175 } else if (is_default_disable(rxf->rxmode_pending,
2176 rxf->rxmode_pending_bitmask)) {
2177 /* move default configuration from pending -> active */
2178 default_inactive(rxf->rxmode_pending,
2179 rxf->rxmode_pending_bitmask);
2180 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2181 bna->rxf_default_id = BFI_MAX_RXF;
2182
2183 /* Revert VLAN filter */
2184 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2185 /* Stop RxF vlan filter table redirection */
2186 __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
2187 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
2188 BNA_STATUS_T_DISABLED);
2189 return 1;
2190 }
2191
2192 return 0;
2193}
2194
2195int
2196rxf_process_packet_filter_allmulti(struct bna_rxf *rxf) 2201rxf_process_packet_filter_allmulti(struct bna_rxf *rxf)
2197{ 2202{
2198 /* Enable/disable allmulti mode */ 2203 /* Enable/disable allmulti mode */
@@ -2289,48 +2294,6 @@ rxf_clear_packet_filter_promisc(struct bna_rxf *rxf)
2289} 2294}
2290 2295
2291int 2296int
2292rxf_clear_packet_filter_default(struct bna_rxf *rxf)
2293{
2294 struct bna *bna = rxf->rx->bna;
2295
2296 /* 8. Execute pending default mode disable command */
2297 if (is_default_disable(rxf->rxmode_pending,
2298 rxf->rxmode_pending_bitmask)) {
2299 /* move default configuration from pending -> active */
2300 default_inactive(rxf->rxmode_pending,
2301 rxf->rxmode_pending_bitmask);
2302 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2303 bna->rxf_default_id = BFI_MAX_RXF;
2304
2305 /* Revert VLAN filter */
2306 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2307 /* Stop RxF vlan filter table redirection */
2308 __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
2309 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
2310 BNA_STATUS_T_DISABLED);
2311 return 1;
2312 }
2313
2314 /* 9. Clear active default mode; move it to pending enable */
2315 if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
2316 /* move default configuration from active -> pending */
2317 default_enable(rxf->rxmode_pending,
2318 rxf->rxmode_pending_bitmask);
2319 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2320
2321 /* Revert VLAN filter */
2322 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
2323 /* Stop RxF vlan filter table redirection */
2324 __rxf_default_function_config(rxf, BNA_STATUS_T_DISABLED);
2325 rxf_fltr_mbox_cmd(rxf, BFI_LL_H2I_RXF_DEFAULT_SET_REQ,
2326 BNA_STATUS_T_DISABLED);
2327 return 1;
2328 }
2329
2330 return 0;
2331}
2332
2333int
2334rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf) 2297rxf_clear_packet_filter_allmulti(struct bna_rxf *rxf)
2335{ 2298{
2336 /* 10. Execute pending allmulti mode disable command */ 2299 /* 10. Execute pending allmulti mode disable command */
@@ -2405,28 +2368,6 @@ rxf_reset_packet_filter_promisc(struct bna_rxf *rxf)
2405} 2368}
2406 2369
2407void 2370void
2408rxf_reset_packet_filter_default(struct bna_rxf *rxf)
2409{
2410 struct bna *bna = rxf->rx->bna;
2411
2412 /* 8. Clear pending default mode disable */
2413 if (is_default_disable(rxf->rxmode_pending,
2414 rxf->rxmode_pending_bitmask)) {
2415 default_inactive(rxf->rxmode_pending,
2416 rxf->rxmode_pending_bitmask);
2417 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2418 bna->rxf_default_id = BFI_MAX_RXF;
2419 }
2420
2421 /* 9. Move default mode config from active -> pending */
2422 if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
2423 default_enable(rxf->rxmode_pending,
2424 rxf->rxmode_pending_bitmask);
2425 rxf->rxmode_active &= ~BNA_RXMODE_DEFAULT;
2426 }
2427}
2428
2429void
2430rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf) 2371rxf_reset_packet_filter_allmulti(struct bna_rxf *rxf)
2431{ 2372{
2432 /* 10. Clear pending allmulti mode disable */ 2373 /* 10. Clear pending allmulti mode disable */
@@ -2523,76 +2464,6 @@ rxf_promisc_disable(struct bna_rxf *rxf)
2523 * 1 = need h/w change 2464 * 1 = need h/w change
2524 */ 2465 */
2525static int 2466static int
2526rxf_default_enable(struct bna_rxf *rxf)
2527{
2528 struct bna *bna = rxf->rx->bna;
2529 int ret = 0;
2530
2531 /* There can not be any pending disable command */
2532
2533 /* Do nothing if pending enable or already enabled */
2534 if (is_default_enable(rxf->rxmode_pending,
2535 rxf->rxmode_pending_bitmask) ||
2536 (rxf->rxmode_active & BNA_RXMODE_DEFAULT)) {
2537 /* Schedule enable */
2538 } else {
2539 /* Default mode should not be active in the system */
2540 default_enable(rxf->rxmode_pending,
2541 rxf->rxmode_pending_bitmask);
2542 bna->rxf_default_id = rxf->rxf_id;
2543 ret = 1;
2544 }
2545
2546 return ret;
2547}
2548
2549/**
2550 * Should only be called by bna_rxf_mode_set.
2551 * Helps deciding if h/w configuration is needed or not.
2552 * Returns:
2553 * 0 = no h/w change
2554 * 1 = need h/w change
2555 */
2556static int
2557rxf_default_disable(struct bna_rxf *rxf)
2558{
2559 struct bna *bna = rxf->rx->bna;
2560 int ret = 0;
2561
2562 /* There can not be any pending disable */
2563
2564 /* Turn off pending enable command , if any */
2565 if (is_default_enable(rxf->rxmode_pending,
2566 rxf->rxmode_pending_bitmask)) {
2567 /* Promisc mode should not be active */
2568 /* system default state should be pending */
2569 default_inactive(rxf->rxmode_pending,
2570 rxf->rxmode_pending_bitmask);
2571 /* Remove the default state from the system */
2572 bna->rxf_default_id = BFI_MAX_RXF;
2573
2574 /* Schedule disable */
2575 } else if (rxf->rxmode_active & BNA_RXMODE_DEFAULT) {
2576 /* Default mode should be active in the system */
2577 default_disable(rxf->rxmode_pending,
2578 rxf->rxmode_pending_bitmask);
2579 ret = 1;
2580
2581 /* Do nothing if already disabled */
2582 } else {
2583 }
2584
2585 return ret;
2586}
2587
2588/**
2589 * Should only be called by bna_rxf_mode_set.
2590 * Helps deciding if h/w configuration is needed or not.
2591 * Returns:
2592 * 0 = no h/w change
2593 * 1 = need h/w change
2594 */
2595static int
2596rxf_allmulti_enable(struct bna_rxf *rxf) 2467rxf_allmulti_enable(struct bna_rxf *rxf)
2597{ 2468{
2598 int ret = 0; 2469 int ret = 0;
@@ -2654,38 +2525,13 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2654 struct bna_rxf *rxf = &rx->rxf; 2525 struct bna_rxf *rxf = &rx->rxf;
2655 int need_hw_config = 0; 2526 int need_hw_config = 0;
2656 2527
2657 /* Error checks */ 2528 /* Process the commands */
2658 2529
2659 if (is_promisc_enable(new_mode, bitmask)) { 2530 if (is_promisc_enable(new_mode, bitmask)) {
2660 /* If promisc mode is already enabled elsewhere in the system */ 2531 /* If promisc mode is already enabled elsewhere in the system */
2661 if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) && 2532 if ((rx->bna->rxf_promisc_id != BFI_MAX_RXF) &&
2662 (rx->bna->rxf_promisc_id != rxf->rxf_id)) 2533 (rx->bna->rxf_promisc_id != rxf->rxf_id))
2663 goto err_return; 2534 goto err_return;
2664
2665 /* If default mode is already enabled in the system */
2666 if (rx->bna->rxf_default_id != BFI_MAX_RXF)
2667 goto err_return;
2668
2669 /* Trying to enable promiscuous and default mode together */
2670 if (is_default_enable(new_mode, bitmask))
2671 goto err_return;
2672 }
2673
2674 if (is_default_enable(new_mode, bitmask)) {
2675 /* If default mode is already enabled elsewhere in the system */
2676 if ((rx->bna->rxf_default_id != BFI_MAX_RXF) &&
2677 (rx->bna->rxf_default_id != rxf->rxf_id)) {
2678 goto err_return;
2679 }
2680
2681 /* If promiscuous mode is already enabled in the system */
2682 if (rx->bna->rxf_promisc_id != BFI_MAX_RXF)
2683 goto err_return;
2684 }
2685
2686 /* Process the commands */
2687
2688 if (is_promisc_enable(new_mode, bitmask)) {
2689 if (rxf_promisc_enable(rxf)) 2535 if (rxf_promisc_enable(rxf))
2690 need_hw_config = 1; 2536 need_hw_config = 1;
2691 } else if (is_promisc_disable(new_mode, bitmask)) { 2537 } else if (is_promisc_disable(new_mode, bitmask)) {
@@ -2693,14 +2539,6 @@ bna_rx_mode_set(struct bna_rx *rx, enum bna_rxmode new_mode,
2693 need_hw_config = 1; 2539 need_hw_config = 1;
2694 } 2540 }
2695 2541
2696 if (is_default_enable(new_mode, bitmask)) {
2697 if (rxf_default_enable(rxf))
2698 need_hw_config = 1;
2699 } else if (is_default_disable(new_mode, bitmask)) {
2700 if (rxf_default_disable(rxf))
2701 need_hw_config = 1;
2702 }
2703
2704 if (is_allmulti_enable(new_mode, bitmask)) { 2542 if (is_allmulti_enable(new_mode, bitmask)) {
2705 if (rxf_allmulti_enable(rxf)) 2543 if (rxf_allmulti_enable(rxf))
2706 need_hw_config = 1; 2544 need_hw_config = 1;
@@ -3126,7 +2964,6 @@ bna_init(struct bna *bna, struct bnad *bnad, struct bfa_pcidev *pcidev,
3126 2964
3127 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info); 2965 bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);
3128 2966
3129 bna->rxf_default_id = BFI_MAX_RXF;
3130 bna->rxf_promisc_id = BFI_MAX_RXF; 2967 bna->rxf_promisc_id = BFI_MAX_RXF;
3131 2968
3132 /* Mbox q element for posting stat request to f/w */ 2969 /* Mbox q element for posting stat request to f/w */
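The bna_ctrl.c hunks above split LLPORT_E_FWRESP_UP into LLPORT_E_FWRESP_UP_OK / LLPORT_E_FWRESP_UP_FAIL, replace the single BNA_LLPORT_F_ENABLED flag with ADMIN_UP / PORT_ENABLED / RX_STARTED, and rename admin_up_count to rx_started_count so the port is raised only while at least one Rx path is running. A minimal standalone sketch of that flag logic follows; the bodies of llport_can_be_up() and llport_is_up() are not part of this diff, so the predicate below is an assumption inferred from how the flags are set and cleared.

/*
 * Standalone sketch (not driver code) of the new llport flag handling.
 * Flag values match the bna_types.h hunk later in this patch.
 */
#include <stdio.h>

enum llport_flags {
	F_ADMIN_UP     = 1,		/* BNA_LLPORT_F_ADMIN_UP */
	F_PORT_ENABLED = 2,		/* BNA_LLPORT_F_PORT_ENABLED */
	F_RX_STARTED   = 4		/* BNA_LLPORT_F_RX_STARTED */
};

struct llport {
	unsigned int flags;
	int rx_started_count;
};

/* assumed body of llport_can_be_up(): all three conditions must hold */
static int can_be_up(const struct llport *p)
{
	return (p->flags & F_ADMIN_UP) &&
	       (p->flags & F_PORT_ENABLED) &&
	       (p->flags & F_RX_STARTED);
}

/* mirrors bna_llport_rx_started(): only the first Rx raises the port */
static void rx_started(struct llport *p)
{
	if (++p->rx_started_count == 1) {
		p->flags |= F_RX_STARTED;
		if (can_be_up(p))
			printf("send LLPORT_E_UP\n");
	}
}

/*
 * Mirrors bna_llport_rx_stopped(): the up/down decision is sampled before
 * the count drops (the driver uses llport_is_up(), which presumably also
 * checks the FSM state).
 */
static void rx_stopped(struct llport *p)
{
	int was_up = can_be_up(p);

	if (--p->rx_started_count == 0) {
		p->flags &= ~F_RX_STARTED;
		if (was_up)
			printf("send LLPORT_E_DOWN\n");
	}
}

int main(void)
{
	struct llport p = { .flags = F_ADMIN_UP | F_PORT_ENABLED };

	rx_started(&p);		/* first Rx      -> LLPORT_E_UP   */
	rx_started(&p);		/* second Rx     -> no event      */
	rx_stopped(&p);		/* one Rx left   -> no event      */
	rx_stopped(&p);		/* last Rx gone  -> LLPORT_E_DOWN */
	return 0;
}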
diff --git a/drivers/net/bna/bna_txrx.c b/drivers/net/bna/bna_txrx.c
index ad93fdb0f427..58c7664040dc 100644
--- a/drivers/net/bna/bna_txrx.c
+++ b/drivers/net/bna/bna_txrx.c
@@ -1226,8 +1226,7 @@ rxf_process_packet_filter_vlan(struct bna_rxf *rxf)
1226 /* Apply the VLAN filter */ 1226 /* Apply the VLAN filter */
1227 if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) { 1227 if (rxf->rxf_flags & BNA_RXF_FL_VLAN_CONFIG_PENDING) {
1228 rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING; 1228 rxf->rxf_flags &= ~BNA_RXF_FL_VLAN_CONFIG_PENDING;
1229 if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC) && 1229 if (!(rxf->rxmode_active & BNA_RXMODE_PROMISC))
1230 !(rxf->rxmode_active & BNA_RXMODE_DEFAULT))
1231 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status); 1230 __rxf_vlan_filter_set(rxf, rxf->vlan_filter_status);
1232 } 1231 }
1233 1232
@@ -1276,9 +1275,6 @@ rxf_process_packet_filter(struct bna_rxf *rxf)
1276 if (rxf_process_packet_filter_promisc(rxf)) 1275 if (rxf_process_packet_filter_promisc(rxf))
1277 return 1; 1276 return 1;
1278 1277
1279 if (rxf_process_packet_filter_default(rxf))
1280 return 1;
1281
1282 if (rxf_process_packet_filter_allmulti(rxf)) 1278 if (rxf_process_packet_filter_allmulti(rxf))
1283 return 1; 1279 return 1;
1284 1280
@@ -1340,9 +1336,6 @@ rxf_clear_packet_filter(struct bna_rxf *rxf)
1340 if (rxf_clear_packet_filter_promisc(rxf)) 1336 if (rxf_clear_packet_filter_promisc(rxf))
1341 return 1; 1337 return 1;
1342 1338
1343 if (rxf_clear_packet_filter_default(rxf))
1344 return 1;
1345
1346 if (rxf_clear_packet_filter_allmulti(rxf)) 1339 if (rxf_clear_packet_filter_allmulti(rxf))
1347 return 1; 1340 return 1;
1348 1341
@@ -1389,8 +1382,6 @@ rxf_reset_packet_filter(struct bna_rxf *rxf)
1389 1382
1390 rxf_reset_packet_filter_promisc(rxf); 1383 rxf_reset_packet_filter_promisc(rxf);
1391 1384
1392 rxf_reset_packet_filter_default(rxf);
1393
1394 rxf_reset_packet_filter_allmulti(rxf); 1385 rxf_reset_packet_filter_allmulti(rxf);
1395} 1386}
1396 1387
@@ -1441,12 +1432,16 @@ bna_rxf_init(struct bna_rxf *rxf,
1441 memset(rxf->vlan_filter_table, 0, 1432 memset(rxf->vlan_filter_table, 0,
1442 (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32))); 1433 (sizeof(u32) * ((BFI_MAX_VLAN + 1) / 32)));
1443 1434
1435 /* Set up VLAN 0 for pure priority tagged packets */
1436 rxf->vlan_filter_table[0] |= 1;
1437
1444 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped); 1438 bfa_fsm_set_state(rxf, bna_rxf_sm_stopped);
1445} 1439}
1446 1440
1447static void 1441static void
1448bna_rxf_uninit(struct bna_rxf *rxf) 1442bna_rxf_uninit(struct bna_rxf *rxf)
1449{ 1443{
1444 struct bna *bna = rxf->rx->bna;
1450 struct bna_mac *mac; 1445 struct bna_mac *mac;
1451 1446
1452 bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment); 1447 bna_rit_mod_seg_put(&rxf->rx->bna->rit_mod, rxf->rit_segment);
@@ -1473,6 +1468,27 @@ bna_rxf_uninit(struct bna_rxf *rxf)
1473 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac); 1468 bna_mcam_mod_mac_put(&rxf->rx->bna->mcam_mod, mac);
1474 } 1469 }
1475 1470
1471 /* Turn off pending promisc mode */
1472 if (is_promisc_enable(rxf->rxmode_pending,
1473 rxf->rxmode_pending_bitmask)) {
1474 /* system promisc state should be pending */
1475 BUG_ON(!(bna->rxf_promisc_id == rxf->rxf_id));
1476 promisc_inactive(rxf->rxmode_pending,
1477 rxf->rxmode_pending_bitmask);
1478 bna->rxf_promisc_id = BFI_MAX_RXF;
1479 }
1480 /* Promisc mode should not be active */
1481 BUG_ON(rxf->rxmode_active & BNA_RXMODE_PROMISC);
1482
1483 /* Turn off pending all-multi mode */
1484 if (is_allmulti_enable(rxf->rxmode_pending,
1485 rxf->rxmode_pending_bitmask)) {
1486 allmulti_inactive(rxf->rxmode_pending,
1487 rxf->rxmode_pending_bitmask);
1488 }
1489 /* Allmulti mode should not be active */
1490 BUG_ON(rxf->rxmode_active & BNA_RXMODE_ALLMULTI);
1491
1476 rxf->rx = NULL; 1492 rxf->rx = NULL;
1477} 1493}
1478 1494
@@ -1947,7 +1963,7 @@ bna_rx_sm_started_entry(struct bna_rx *rx)
1947 bna_ib_ack(&rxp->cq.ib->door_bell, 0); 1963 bna_ib_ack(&rxp->cq.ib->door_bell, 0);
1948 } 1964 }
1949 1965
1950 bna_llport_admin_up(&rx->bna->port.llport); 1966 bna_llport_rx_started(&rx->bna->port.llport);
1951} 1967}
1952 1968
1953void 1969void
@@ -1955,13 +1971,13 @@ bna_rx_sm_started(struct bna_rx *rx, enum bna_rx_event event)
1955{ 1971{
1956 switch (event) { 1972 switch (event) {
1957 case RX_E_FAIL: 1973 case RX_E_FAIL:
1958 bna_llport_admin_down(&rx->bna->port.llport); 1974 bna_llport_rx_stopped(&rx->bna->port.llport);
1959 bfa_fsm_set_state(rx, bna_rx_sm_stopped); 1975 bfa_fsm_set_state(rx, bna_rx_sm_stopped);
1960 rx_ib_fail(rx); 1976 rx_ib_fail(rx);
1961 bna_rxf_fail(&rx->rxf); 1977 bna_rxf_fail(&rx->rxf);
1962 break; 1978 break;
1963 case RX_E_STOP: 1979 case RX_E_STOP:
1964 bna_llport_admin_down(&rx->bna->port.llport); 1980 bna_llport_rx_stopped(&rx->bna->port.llport);
1965 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait); 1981 bfa_fsm_set_state(rx, bna_rx_sm_rxf_stop_wait);
1966 break; 1982 break;
1967 default: 1983 default:
@@ -3373,7 +3389,7 @@ __bna_txq_start(struct bna_tx *tx, struct bna_txq *txq)
3373 3389
3374 txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE; 3390 txq_cfg.cns_ptr2_n_q_state = BNA_Q_IDLE_STATE;
3375 txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) | 3391 txq_cfg.nxt_qid_n_fid_n_pri = (((tx->txf.txf_id & 0x3f) << 3) |
3376 (txq->priority & 0x3)); 3392 (txq->priority & 0x7));
3377 txq_cfg.wvc_n_cquota_n_rquota = 3393 txq_cfg.wvc_n_cquota_n_rquota =
3378 ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) | 3394 ((((u32)BFI_TX_MAX_WRR_QUOTA & 0xfff) << 12) |
3379 (BFI_TX_MAX_WRR_QUOTA & 0xfff)); 3395 (BFI_TX_MAX_WRR_QUOTA & 0xfff));
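The final bna_txrx.c hunk widens the Tx priority mask from 0x3 to 0x7: nxt_qid_n_fid_n_pri carries the Tx function id above a 3-bit priority field, so the old mask silently folded priorities 4-7 back onto 0-3. A small sketch of the packing follows; the layout of the remaining bits is not shown in this diff and is left out.

/* Sketch of the nxt_qid_n_fid_n_pri packing corrected above. */
#include <stdio.h>

static unsigned int pack_fid_pri(unsigned int txf_id, unsigned int priority)
{
	/* txf_id occupies bits 3..8, priority the low 3 bits */
	return ((txf_id & 0x3f) << 3) | (priority & 0x7);
}

int main(void)
{
	/* with the old 0x3 mask, priority 5 silently became priority 1 */
	printf("old mask: 0x%02x\n", ((4u & 0x3f) << 3) | (5u & 0x3));
	printf("new mask: 0x%02x\n", pack_fid_pri(4, 5));
	return 0;
}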
diff --git a/drivers/net/bna/bna_types.h b/drivers/net/bna/bna_types.h
index 6877310f6ef4..b9c134f7ad31 100644
--- a/drivers/net/bna/bna_types.h
+++ b/drivers/net/bna/bna_types.h
@@ -165,8 +165,7 @@ enum bna_rxp_type {
165 165
166enum bna_rxmode { 166enum bna_rxmode {
167 BNA_RXMODE_PROMISC = 1, 167 BNA_RXMODE_PROMISC = 1,
168 BNA_RXMODE_DEFAULT = 2, 168 BNA_RXMODE_ALLMULTI = 2
169 BNA_RXMODE_ALLMULTI = 4
170}; 169};
171 170
172enum bna_rx_event { 171enum bna_rx_event {
@@ -249,8 +248,9 @@ enum bna_link_status {
249}; 248};
250 249
251enum bna_llport_flags { 250enum bna_llport_flags {
252 BNA_LLPORT_F_ENABLED = 1, 251 BNA_LLPORT_F_ADMIN_UP = 1,
253 BNA_LLPORT_F_RX_ENABLED = 2 252 BNA_LLPORT_F_PORT_ENABLED = 2,
253 BNA_LLPORT_F_RX_STARTED = 4
254}; 254};
255 255
256enum bna_port_flags { 256enum bna_port_flags {
@@ -405,7 +405,7 @@ struct bna_llport {
405 405
406 enum bna_link_status link_status; 406 enum bna_link_status link_status;
407 407
408 int admin_up_count; 408 int rx_started_count;
409 409
410 void (*stop_cbfn)(struct bna_port *, enum bna_cb_status); 410 void (*stop_cbfn)(struct bna_port *, enum bna_cb_status);
411 411
@@ -1117,7 +1117,6 @@ struct bna {
1117 1117
1118 struct bna_rit_mod rit_mod; 1118 struct bna_rit_mod rit_mod;
1119 1119
1120 int rxf_default_id;
1121 int rxf_promisc_id; 1120 int rxf_promisc_id;
1122 1121
1123 struct bna_mbox_qe mbox_qe; 1122 struct bna_mbox_qe mbox_qe;
diff --git a/drivers/net/bna/bnad.c b/drivers/net/bna/bnad.c
index 7e839b9cec22..fad912656fe4 100644
--- a/drivers/net/bna/bnad.c
+++ b/drivers/net/bna/bnad.c
@@ -70,6 +70,8 @@ do { \
70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \ 70 (sizeof(struct bnad_skb_unmap) * ((_depth) - 1)); \
71} while (0) 71} while (0)
72 72
73#define BNAD_TXRX_SYNC_MDELAY 250 /* 250 msecs */
74
73/* 75/*
74 * Reinitialize completions in CQ, once Rx is taken down 76 * Reinitialize completions in CQ, once Rx is taken down
75 */ 77 */
@@ -107,7 +109,7 @@ static void
107bnad_free_all_txbufs(struct bnad *bnad, 109bnad_free_all_txbufs(struct bnad *bnad,
108 struct bna_tcb *tcb) 110 struct bna_tcb *tcb)
109{ 111{
110 u16 unmap_cons; 112 u32 unmap_cons;
111 struct bnad_unmap_q *unmap_q = tcb->unmap_q; 113 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
112 struct bnad_skb_unmap *unmap_array; 114 struct bnad_skb_unmap *unmap_array;
113 struct sk_buff *skb = NULL; 115 struct sk_buff *skb = NULL;
@@ -130,7 +132,9 @@ bnad_free_all_txbufs(struct bnad *bnad,
130 PCI_DMA_TODEVICE); 132 PCI_DMA_TODEVICE);
131 133
132 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0); 134 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 0);
133 unmap_cons++; 135 if (++unmap_cons >= unmap_q->q_depth)
136 break;
137
134 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { 138 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
135 pci_unmap_page(bnad->pcidev, 139 pci_unmap_page(bnad->pcidev,
136 pci_unmap_addr(&unmap_array[unmap_cons], 140 pci_unmap_addr(&unmap_array[unmap_cons],
@@ -139,7 +143,8 @@ bnad_free_all_txbufs(struct bnad *bnad,
139 PCI_DMA_TODEVICE); 143 PCI_DMA_TODEVICE);
140 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr, 144 pci_unmap_addr_set(&unmap_array[unmap_cons], dma_addr,
141 0); 145 0);
142 unmap_cons++; 146 if (++unmap_cons >= unmap_q->q_depth)
147 break;
143 } 148 }
144 dev_kfree_skb_any(skb); 149 dev_kfree_skb_any(skb);
145 } 150 }
@@ -167,11 +172,11 @@ bnad_free_txbufs(struct bnad *bnad,
167 /* 172 /*
168 * Just return if TX is stopped. This check is useful 173 * Just return if TX is stopped. This check is useful
169 * when bnad_free_txbufs() runs out of a tasklet scheduled 174 * when bnad_free_txbufs() runs out of a tasklet scheduled
170 * before bnad_cb_tx_cleanup() cleared BNAD_RF_TX_STARTED bit 175 * before bnad_cb_tx_cleanup() cleared BNAD_TXQ_TX_STARTED bit
171 * but this routine runs actually after the cleanup has been 176 * but this routine runs actually after the cleanup has been
172 * executed. 177 * executed.
173 */ 178 */
174 if (!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) 179 if (!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
175 return 0; 180 return 0;
176 181
177 updated_hw_cons = *(tcb->hw_consumer_index); 182 updated_hw_cons = *(tcb->hw_consumer_index);
@@ -239,7 +244,7 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
239{ 244{
240 struct bnad *bnad = (struct bnad *)bnad_ptr; 245 struct bnad *bnad = (struct bnad *)bnad_ptr;
241 struct bna_tcb *tcb; 246 struct bna_tcb *tcb;
242 u32 acked; 247 u32 acked = 0;
243 int i, j; 248 int i, j;
244 249
245 for (i = 0; i < bnad->num_tx; i++) { 250 for (i = 0; i < bnad->num_tx; i++) {
@@ -252,10 +257,26 @@ bnad_tx_free_tasklet(unsigned long bnad_ptr)
252 (!test_and_set_bit(BNAD_TXQ_FREE_SENT, 257 (!test_and_set_bit(BNAD_TXQ_FREE_SENT,
253 &tcb->flags))) { 258 &tcb->flags))) {
254 acked = bnad_free_txbufs(bnad, tcb); 259 acked = bnad_free_txbufs(bnad, tcb);
255 bna_ib_ack(tcb->i_dbell, acked); 260 if (likely(test_bit(BNAD_TXQ_TX_STARTED,
261 &tcb->flags)))
262 bna_ib_ack(tcb->i_dbell, acked);
256 smp_mb__before_clear_bit(); 263 smp_mb__before_clear_bit();
257 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 264 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
258 } 265 }
266 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED,
267 &tcb->flags)))
268 continue;
269 if (netif_queue_stopped(bnad->netdev)) {
270 if (acked && netif_carrier_ok(bnad->netdev) &&
271 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
272 BNAD_NETIF_WAKE_THRESHOLD) {
273 netif_wake_queue(bnad->netdev);
274 /* TODO */
275 /* Counters for individual TxQs? */
276 BNAD_UPDATE_CTR(bnad,
277 netif_queue_wakeup);
278 }
279 }
259 } 280 }
260 } 281 }
261} 282}
@@ -264,7 +285,7 @@ static u32
264bnad_tx(struct bnad *bnad, struct bna_tcb *tcb) 285bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
265{ 286{
266 struct net_device *netdev = bnad->netdev; 287 struct net_device *netdev = bnad->netdev;
267 u32 sent; 288 u32 sent = 0;
268 289
269 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) 290 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
270 return 0; 291 return 0;
@@ -275,12 +296,15 @@ bnad_tx(struct bnad *bnad, struct bna_tcb *tcb)
275 netif_carrier_ok(netdev) && 296 netif_carrier_ok(netdev) &&
276 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >= 297 BNA_QE_FREE_CNT(tcb, tcb->q_depth) >=
277 BNAD_NETIF_WAKE_THRESHOLD) { 298 BNAD_NETIF_WAKE_THRESHOLD) {
278 netif_wake_queue(netdev); 299 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
279 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup); 300 netif_wake_queue(netdev);
301 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
302 }
280 } 303 }
304 }
305
306 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
281 bna_ib_ack(tcb->i_dbell, sent); 307 bna_ib_ack(tcb->i_dbell, sent);
282 } else
283 bna_ib_ack(tcb->i_dbell, 0);
284 308
285 smp_mb__before_clear_bit(); 309 smp_mb__before_clear_bit();
286 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 310 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
@@ -313,25 +337,24 @@ bnad_reset_rcb(struct bnad *bnad, struct bna_rcb *rcb)
313} 337}
314 338
315static void 339static void
316bnad_free_rxbufs(struct bnad *bnad, struct bna_rcb *rcb) 340bnad_free_all_rxbufs(struct bnad *bnad, struct bna_rcb *rcb)
317{ 341{
318 struct bnad_unmap_q *unmap_q; 342 struct bnad_unmap_q *unmap_q;
319 struct sk_buff *skb; 343 struct sk_buff *skb;
344 int unmap_cons;
320 345
321 unmap_q = rcb->unmap_q; 346 unmap_q = rcb->unmap_q;
322 while (BNA_QE_IN_USE_CNT(unmap_q, unmap_q->q_depth)) { 347 for (unmap_cons = 0; unmap_cons < unmap_q->q_depth; unmap_cons++) {
323 skb = unmap_q->unmap_array[unmap_q->consumer_index].skb; 348 skb = unmap_q->unmap_array[unmap_cons].skb;
324 BUG_ON(!(skb)); 349 if (!skb)
325 unmap_q->unmap_array[unmap_q->consumer_index].skb = NULL; 350 continue;
351 unmap_q->unmap_array[unmap_cons].skb = NULL;
326 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q-> 352 pci_unmap_single(bnad->pcidev, pci_unmap_addr(&unmap_q->
327 unmap_array[unmap_q->consumer_index], 353 unmap_array[unmap_cons],
328 dma_addr), rcb->rxq->buffer_size + 354 dma_addr), rcb->rxq->buffer_size,
329 NET_IP_ALIGN, PCI_DMA_FROMDEVICE); 355 PCI_DMA_FROMDEVICE);
330 dev_kfree_skb(skb); 356 dev_kfree_skb(skb);
331 BNA_QE_INDX_ADD(unmap_q->consumer_index, 1, unmap_q->q_depth);
332 BNA_QE_INDX_ADD(rcb->consumer_index, 1, rcb->q_depth);
333 } 357 }
334
335 bnad_reset_rcb(bnad, rcb); 358 bnad_reset_rcb(bnad, rcb);
336} 359}
337 360
@@ -385,43 +408,11 @@ finishing:
385 unmap_q->producer_index = unmap_prod; 408 unmap_q->producer_index = unmap_prod;
386 rcb->producer_index = unmap_prod; 409 rcb->producer_index = unmap_prod;
387 smp_mb(); 410 smp_mb();
388 bna_rxq_prod_indx_doorbell(rcb); 411 if (likely(test_bit(BNAD_RXQ_STARTED, &rcb->flags)))
412 bna_rxq_prod_indx_doorbell(rcb);
389 } 413 }
390} 414}
391 415
392/*
393 * Locking is required in the enable path
394 * because it is called from a napi poll
395 * context, where the bna_lock is not held
396 * unlike the IRQ context.
397 */
398static void
399bnad_enable_txrx_irqs(struct bnad *bnad)
400{
401 struct bna_tcb *tcb;
402 struct bna_ccb *ccb;
403 int i, j;
404 unsigned long flags;
405
406 spin_lock_irqsave(&bnad->bna_lock, flags);
407 for (i = 0; i < bnad->num_tx; i++) {
408 for (j = 0; j < bnad->num_txq_per_tx; j++) {
409 tcb = bnad->tx_info[i].tcb[j];
410 bna_ib_coalescing_timer_set(tcb->i_dbell,
411 tcb->txq->ib->ib_config.coalescing_timeo);
412 bna_ib_ack(tcb->i_dbell, 0);
413 }
414 }
415
416 for (i = 0; i < bnad->num_rx; i++) {
417 for (j = 0; j < bnad->num_rxp_per_rx; j++) {
418 ccb = bnad->rx_info[i].rx_ctrl[j].ccb;
419 bnad_enable_rx_irq_unsafe(ccb);
420 }
421 }
422 spin_unlock_irqrestore(&bnad->bna_lock, flags);
423}
424
425static inline void 416static inline void
426bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb) 417bnad_refill_rxq(struct bnad *bnad, struct bna_rcb *rcb)
427{ 418{
@@ -448,6 +439,9 @@ bnad_poll_cq(struct bnad *bnad, struct bna_ccb *ccb, int budget)
448 u32 qid0 = ccb->rcb[0]->rxq->rxq_id; 439 u32 qid0 = ccb->rcb[0]->rxq->rxq_id;
449 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate; 440 struct bna_pkt_rate *pkt_rt = &ccb->pkt_rate;
450 441
442 if (!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))
443 return 0;
444
451 prefetch(bnad->netdev); 445 prefetch(bnad->netdev);
452 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl, 446 BNA_CQ_QPGE_PTR_GET(ccb->producer_index, ccb->sw_qpt, cmpl,
453 wi_range); 447 wi_range);
@@ -544,12 +538,15 @@ next:
544 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth); 538 BNA_QE_INDX_ADD(ccb->producer_index, wis, ccb->q_depth);
545 539
546 if (likely(ccb)) { 540 if (likely(ccb)) {
547 bna_ib_ack(ccb->i_dbell, packets); 541 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
542 bna_ib_ack(ccb->i_dbell, packets);
548 bnad_refill_rxq(bnad, ccb->rcb[0]); 543 bnad_refill_rxq(bnad, ccb->rcb[0]);
549 if (ccb->rcb[1]) 544 if (ccb->rcb[1])
550 bnad_refill_rxq(bnad, ccb->rcb[1]); 545 bnad_refill_rxq(bnad, ccb->rcb[1]);
551 } else 546 } else {
552 bna_ib_ack(ccb->i_dbell, 0); 547 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
548 bna_ib_ack(ccb->i_dbell, 0);
549 }
553 550
554 return packets; 551 return packets;
555} 552}
@@ -557,6 +554,9 @@ next:
557static void 554static void
558bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb) 555bnad_disable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
559{ 556{
557 if (unlikely(!test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags)))
558 return;
559
560 bna_ib_coalescing_timer_set(ccb->i_dbell, 0); 560 bna_ib_coalescing_timer_set(ccb->i_dbell, 0);
561 bna_ib_ack(ccb->i_dbell, 0); 561 bna_ib_ack(ccb->i_dbell, 0);
562} 562}
@@ -566,7 +566,8 @@ bnad_enable_rx_irq(struct bnad *bnad, struct bna_ccb *ccb)
566{ 566{
567 unsigned long flags; 567 unsigned long flags;
568 568
569 spin_lock_irqsave(&bnad->bna_lock, flags); /* Because of polling context */ 569 /* Because of polling context */
570 spin_lock_irqsave(&bnad->bna_lock, flags);
570 bnad_enable_rx_irq_unsafe(ccb); 571 bnad_enable_rx_irq_unsafe(ccb);
571 spin_unlock_irqrestore(&bnad->bna_lock, flags); 572 spin_unlock_irqrestore(&bnad->bna_lock, flags);
572} 573}
@@ -575,9 +576,11 @@ static void
575bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb) 576bnad_netif_rx_schedule_poll(struct bnad *bnad, struct bna_ccb *ccb)
576{ 577{
577 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl); 578 struct bnad_rx_ctrl *rx_ctrl = (struct bnad_rx_ctrl *)(ccb->ctrl);
578 if (likely(napi_schedule_prep((&rx_ctrl->napi)))) { 579 struct napi_struct *napi = &rx_ctrl->napi;
580
581 if (likely(napi_schedule_prep(napi))) {
579 bnad_disable_rx_irq(bnad, ccb); 582 bnad_disable_rx_irq(bnad, ccb);
580 __napi_schedule((&rx_ctrl->napi)); 583 __napi_schedule(napi);
581 } 584 }
582 BNAD_UPDATE_CTR(bnad, netif_rx_schedule); 585 BNAD_UPDATE_CTR(bnad, netif_rx_schedule);
583} 586}
@@ -602,12 +605,11 @@ bnad_msix_mbox_handler(int irq, void *data)
602{ 605{
603 u32 intr_status; 606 u32 intr_status;
604 unsigned long flags; 607 unsigned long flags;
605 struct net_device *netdev = data; 608 struct bnad *bnad = (struct bnad *)data;
606 struct bnad *bnad;
607 609
608 bnad = netdev_priv(netdev); 610 if (unlikely(test_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)))
611 return IRQ_HANDLED;
609 612
610 /* BNA_ISR_GET(bnad); Inc Ref count */
611 spin_lock_irqsave(&bnad->bna_lock, flags); 613 spin_lock_irqsave(&bnad->bna_lock, flags);
612 614
613 bna_intr_status_get(&bnad->bna, intr_status); 615 bna_intr_status_get(&bnad->bna, intr_status);
@@ -617,7 +619,6 @@ bnad_msix_mbox_handler(int irq, void *data)
617 619
618 spin_unlock_irqrestore(&bnad->bna_lock, flags); 620 spin_unlock_irqrestore(&bnad->bna_lock, flags);
619 621
620 /* BNAD_ISR_PUT(bnad); Dec Ref count */
621 return IRQ_HANDLED; 622 return IRQ_HANDLED;
622} 623}
623 624
@@ -627,8 +628,7 @@ bnad_isr(int irq, void *data)
627 int i, j; 628 int i, j;
628 u32 intr_status; 629 u32 intr_status;
629 unsigned long flags; 630 unsigned long flags;
630 struct net_device *netdev = data; 631 struct bnad *bnad = (struct bnad *)data;
631 struct bnad *bnad = netdev_priv(netdev);
632 struct bnad_rx_info *rx_info; 632 struct bnad_rx_info *rx_info;
633 struct bnad_rx_ctrl *rx_ctrl; 633 struct bnad_rx_ctrl *rx_ctrl;
634 634
@@ -642,16 +642,21 @@ bnad_isr(int irq, void *data)
642 642
643 spin_lock_irqsave(&bnad->bna_lock, flags); 643 spin_lock_irqsave(&bnad->bna_lock, flags);
644 644
645 if (BNA_IS_MBOX_ERR_INTR(intr_status)) { 645 if (BNA_IS_MBOX_ERR_INTR(intr_status))
646 bna_mbox_handler(&bnad->bna, intr_status); 646 bna_mbox_handler(&bnad->bna, intr_status);
647 if (!BNA_IS_INTX_DATA_INTR(intr_status)) { 647
648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
649 goto done;
650 }
651 }
652 spin_unlock_irqrestore(&bnad->bna_lock, flags); 648 spin_unlock_irqrestore(&bnad->bna_lock, flags);
653 649
650 if (!BNA_IS_INTX_DATA_INTR(intr_status))
651 return IRQ_HANDLED;
652
654 /* Process data interrupts */ 653 /* Process data interrupts */
654 /* Tx processing */
655 for (i = 0; i < bnad->num_tx; i++) {
656 for (j = 0; j < bnad->num_txq_per_tx; j++)
657 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
658 }
659 /* Rx processing */
655 for (i = 0; i < bnad->num_rx; i++) { 660 for (i = 0; i < bnad->num_rx; i++) {
656 rx_info = &bnad->rx_info[i]; 661 rx_info = &bnad->rx_info[i];
657 if (!rx_info->rx) 662 if (!rx_info->rx)
@@ -663,7 +668,6 @@ bnad_isr(int irq, void *data)
663 rx_ctrl->ccb); 668 rx_ctrl->ccb);
664 } 669 }
665 } 670 }
666done:
667 return IRQ_HANDLED; 671 return IRQ_HANDLED;
668} 672}
669 673
@@ -674,11 +678,7 @@ done:
674static void 678static void
675bnad_enable_mbox_irq(struct bnad *bnad) 679bnad_enable_mbox_irq(struct bnad *bnad)
676{ 680{
677 int irq = BNAD_GET_MBOX_IRQ(bnad); 681 clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
678
679 if (test_and_clear_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags))
680 if (bnad->cfg_flags & BNAD_CF_MSIX)
681 enable_irq(irq);
682 682
683 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled); 683 BNAD_UPDATE_CTR(bnad, mbox_intr_enabled);
684} 684}
@@ -690,14 +690,19 @@ bnad_enable_mbox_irq(struct bnad *bnad)
690static void 690static void
691bnad_disable_mbox_irq(struct bnad *bnad) 691bnad_disable_mbox_irq(struct bnad *bnad)
692{ 692{
693 int irq = BNAD_GET_MBOX_IRQ(bnad); 693 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
694 694
695 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
696}
695 697
696 if (!test_and_set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags)) 698static void
697 if (bnad->cfg_flags & BNAD_CF_MSIX) 699bnad_set_netdev_perm_addr(struct bnad *bnad)
698 disable_irq_nosync(irq); 700{
701 struct net_device *netdev = bnad->netdev;
699 702
700 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled); 703 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
704 if (is_zero_ether_addr(netdev->dev_addr))
705 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
701} 706}
702 707
703/* Control Path Handlers */ 708/* Control Path Handlers */
@@ -755,11 +760,14 @@ bnad_cb_port_link_status(struct bnad *bnad,
755 760
756 if (link_up) { 761 if (link_up) {
757 if (!netif_carrier_ok(bnad->netdev)) { 762 if (!netif_carrier_ok(bnad->netdev)) {
763 struct bna_tcb *tcb = bnad->tx_info[0].tcb[0];
764 if (!tcb)
765 return;
758 pr_warn("bna: %s link up\n", 766 pr_warn("bna: %s link up\n",
759 bnad->netdev->name); 767 bnad->netdev->name);
760 netif_carrier_on(bnad->netdev); 768 netif_carrier_on(bnad->netdev);
761 BNAD_UPDATE_CTR(bnad, link_toggle); 769 BNAD_UPDATE_CTR(bnad, link_toggle);
762 if (test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) { 770 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)) {
763 /* Force an immediate Transmit Schedule */ 771 /* Force an immediate Transmit Schedule */
764 pr_info("bna: %s TX_STARTED\n", 772 pr_info("bna: %s TX_STARTED\n",
765 bnad->netdev->name); 773 bnad->netdev->name);
@@ -807,6 +815,18 @@ bnad_cb_tcb_destroy(struct bnad *bnad, struct bna_tcb *tcb)
807{ 815{
808 struct bnad_tx_info *tx_info = 816 struct bnad_tx_info *tx_info =
809 (struct bnad_tx_info *)tcb->txq->tx->priv; 817 (struct bnad_tx_info *)tcb->txq->tx->priv;
818 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
819
820 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
821 cpu_relax();
822
823 bnad_free_all_txbufs(bnad, tcb);
824
825 unmap_q->producer_index = 0;
826 unmap_q->consumer_index = 0;
827
828 smp_mb__before_clear_bit();
829 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
810 830
811 tx_info->tcb[tcb->id] = NULL; 831 tx_info->tcb[tcb->id] = NULL;
812} 832}
@@ -822,6 +842,12 @@ bnad_cb_rcb_setup(struct bnad *bnad, struct bna_rcb *rcb)
822} 842}
823 843
824static void 844static void
845bnad_cb_rcb_destroy(struct bnad *bnad, struct bna_rcb *rcb)
846{
847 bnad_free_all_rxbufs(bnad, rcb);
848}
849
850static void
825bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb) 851bnad_cb_ccb_setup(struct bnad *bnad, struct bna_ccb *ccb)
826{ 852{
827 struct bnad_rx_info *rx_info = 853 struct bnad_rx_info *rx_info =
@@ -849,7 +875,7 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
849 if (tx_info != &bnad->tx_info[0]) 875 if (tx_info != &bnad->tx_info[0])
850 return; 876 return;
851 877
852 clear_bit(BNAD_RF_TX_STARTED, &bnad->run_flags); 878 clear_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
853 netif_stop_queue(bnad->netdev); 879 netif_stop_queue(bnad->netdev);
854 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name); 880 pr_info("bna: %s TX_STOPPED\n", bnad->netdev->name);
855} 881}
@@ -857,30 +883,15 @@ bnad_cb_tx_stall(struct bnad *bnad, struct bna_tcb *tcb)
857static void 883static void
858bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb) 884bnad_cb_tx_resume(struct bnad *bnad, struct bna_tcb *tcb)
859{ 885{
860 if (test_and_set_bit(BNAD_RF_TX_STARTED, &bnad->run_flags)) 886 struct bnad_unmap_q *unmap_q = tcb->unmap_q;
861 return;
862
863 if (netif_carrier_ok(bnad->netdev)) {
864 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
865 netif_wake_queue(bnad->netdev);
866 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
867 }
868}
869
870static void
871bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
872{
873 struct bnad_unmap_q *unmap_q;
874 887
875 if (!tcb || (!tcb->unmap_q)) 888 if (test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))
876 return; 889 return;
877 890
878 unmap_q = tcb->unmap_q; 891 clear_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags);
879 if (!unmap_q->unmap_array)
880 return;
881 892
882 if (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) 893 while (test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags))
883 return; 894 cpu_relax();
884 895
885 bnad_free_all_txbufs(bnad, tcb); 896 bnad_free_all_txbufs(bnad, tcb);
886 897
@@ -889,21 +900,45 @@ bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
889 900
890 smp_mb__before_clear_bit(); 901 smp_mb__before_clear_bit();
891 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 902 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
903
904 /*
905 * Workaround for first device enable failure & we
906 * get a 0 MAC address. We try to get the MAC address
907 * again here.
908 */
909 if (is_zero_ether_addr(&bnad->perm_addr.mac[0])) {
910 bna_port_mac_get(&bnad->bna.port, &bnad->perm_addr);
911 bnad_set_netdev_perm_addr(bnad);
912 }
913
914 set_bit(BNAD_TXQ_TX_STARTED, &tcb->flags);
915
916 if (netif_carrier_ok(bnad->netdev)) {
917 pr_info("bna: %s TX_STARTED\n", bnad->netdev->name);
918 netif_wake_queue(bnad->netdev);
919 BNAD_UPDATE_CTR(bnad, netif_queue_wakeup);
920 }
921}
922
923static void
924bnad_cb_tx_cleanup(struct bnad *bnad, struct bna_tcb *tcb)
925{
926 /* Delay only once for the whole Tx Path Shutdown */
927 if (!test_and_set_bit(BNAD_RF_TX_SHUTDOWN_DELAYED, &bnad->run_flags))
928 mdelay(BNAD_TXRX_SYNC_MDELAY);
892} 929}
893 930
894static void 931static void
895bnad_cb_rx_cleanup(struct bnad *bnad, 932bnad_cb_rx_cleanup(struct bnad *bnad,
896 struct bna_ccb *ccb) 933 struct bna_ccb *ccb)
897{ 934{
898 bnad_cq_cmpl_init(bnad, ccb);
899
900 bnad_free_rxbufs(bnad, ccb->rcb[0]);
901 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags); 935 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags);
902 936
903 if (ccb->rcb[1]) { 937 if (ccb->rcb[1])
904 bnad_free_rxbufs(bnad, ccb->rcb[1]);
905 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags); 938 clear_bit(BNAD_RXQ_STARTED, &ccb->rcb[1]->flags);
906 } 939
940 if (!test_and_set_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags))
941 mdelay(BNAD_TXRX_SYNC_MDELAY);
907} 942}
908 943
909static void 944static void
@@ -911,6 +946,13 @@ bnad_cb_rx_post(struct bnad *bnad, struct bna_rcb *rcb)
911{ 946{
912 struct bnad_unmap_q *unmap_q = rcb->unmap_q; 947 struct bnad_unmap_q *unmap_q = rcb->unmap_q;
913 948
949 clear_bit(BNAD_RF_RX_SHUTDOWN_DELAYED, &bnad->run_flags);
950
951 if (rcb == rcb->cq->ccb->rcb[0])
952 bnad_cq_cmpl_init(bnad, rcb->cq->ccb);
953
954 bnad_free_all_rxbufs(bnad, rcb);
955
914 set_bit(BNAD_RXQ_STARTED, &rcb->flags); 956 set_bit(BNAD_RXQ_STARTED, &rcb->flags);
915 957
916 /* Now allocate & post buffers for this RCB */ 958 /* Now allocate & post buffers for this RCB */
@@ -1047,7 +1089,7 @@ bnad_mbox_irq_free(struct bnad *bnad,
1047 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1089 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1048 1090
1049 irq = BNAD_GET_MBOX_IRQ(bnad); 1091 irq = BNAD_GET_MBOX_IRQ(bnad);
1050 free_irq(irq, bnad->netdev); 1092 free_irq(irq, bnad);
1051 1093
1052 kfree(intr_info->idl); 1094 kfree(intr_info->idl);
1053} 1095}
@@ -1061,7 +1103,7 @@ static int
1061bnad_mbox_irq_alloc(struct bnad *bnad, 1103bnad_mbox_irq_alloc(struct bnad *bnad,
1062 struct bna_intr_info *intr_info) 1104 struct bna_intr_info *intr_info)
1063{ 1105{
1064 int err; 1106 int err = 0;
1065 unsigned long flags; 1107 unsigned long flags;
1066 u32 irq; 1108 u32 irq;
1067 irq_handler_t irq_handler; 1109 irq_handler_t irq_handler;
@@ -1096,22 +1138,17 @@ bnad_mbox_irq_alloc(struct bnad *bnad,
1096 */ 1138 */
1097 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags); 1139 set_bit(BNAD_RF_MBOX_IRQ_DISABLED, &bnad->run_flags);
1098 1140
1141 BNAD_UPDATE_CTR(bnad, mbox_intr_disabled);
1142
1099 err = request_irq(irq, irq_handler, flags, 1143 err = request_irq(irq, irq_handler, flags,
1100 bnad->mbox_irq_name, bnad->netdev); 1144 bnad->mbox_irq_name, bnad);
1101 1145
1102 if (err) { 1146 if (err) {
1103 kfree(intr_info->idl); 1147 kfree(intr_info->idl);
1104 intr_info->idl = NULL; 1148 intr_info->idl = NULL;
1105 return err;
1106 } 1149 }
1107 1150
1108 spin_lock_irqsave(&bnad->bna_lock, flags); 1151 return err;
1109
1110 if (bnad->cfg_flags & BNAD_CF_MSIX)
1111 disable_irq_nosync(irq);
1112
1113 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1114 return 0;
1115} 1152}
1116 1153
1117static void 1154static void
@@ -1388,13 +1425,24 @@ bnad_ioc_hb_check(unsigned long data)
1388} 1425}
1389 1426
1390static void 1427static void
1391bnad_ioc_sem_timeout(unsigned long data) 1428bnad_iocpf_timeout(unsigned long data)
1429{
1430 struct bnad *bnad = (struct bnad *)data;
1431 unsigned long flags;
1432
1433 spin_lock_irqsave(&bnad->bna_lock, flags);
1434 bfa_nw_iocpf_timeout((void *) &bnad->bna.device.ioc);
1435 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1436}
1437
1438static void
1439bnad_iocpf_sem_timeout(unsigned long data)
1392{ 1440{
1393 struct bnad *bnad = (struct bnad *)data; 1441 struct bnad *bnad = (struct bnad *)data;
1394 unsigned long flags; 1442 unsigned long flags;
1395 1443
1396 spin_lock_irqsave(&bnad->bna_lock, flags); 1444 spin_lock_irqsave(&bnad->bna_lock, flags);
1397 bfa_nw_ioc_sem_timeout((void *) &bnad->bna.device.ioc); 1445 bfa_nw_iocpf_sem_timeout((void *) &bnad->bna.device.ioc);
1398 spin_unlock_irqrestore(&bnad->bna_lock, flags); 1446 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1399} 1447}
1400 1448
@@ -1555,62 +1603,19 @@ poll_exit:
1555 return rcvd; 1603 return rcvd;
1556} 1604}
1557 1605
1558static int
1559bnad_napi_poll_txrx(struct napi_struct *napi, int budget)
1560{
1561 struct bnad_rx_ctrl *rx_ctrl =
1562 container_of(napi, struct bnad_rx_ctrl, napi);
1563 struct bna_ccb *ccb;
1564 struct bnad *bnad;
1565 int rcvd = 0;
1566 int i, j;
1567
1568 ccb = rx_ctrl->ccb;
1569
1570 bnad = ccb->bnad;
1571
1572 if (!netif_carrier_ok(bnad->netdev))
1573 goto poll_exit;
1574
1575 /* Handle Tx Completions, if any */
1576 for (i = 0; i < bnad->num_tx; i++) {
1577 for (j = 0; j < bnad->num_txq_per_tx; j++)
1578 bnad_tx(bnad, bnad->tx_info[i].tcb[j]);
1579 }
1580
1581 /* Handle Rx Completions */
1582 rcvd = bnad_poll_cq(bnad, ccb, budget);
1583 if (rcvd == budget)
1584 return rcvd;
1585poll_exit:
1586 napi_complete((napi));
1587
1588 BNAD_UPDATE_CTR(bnad, netif_rx_complete);
1589
1590 bnad_enable_txrx_irqs(bnad);
1591 return rcvd;
1592}
1593
1594static void 1606static void
1595bnad_napi_enable(struct bnad *bnad, u32 rx_id) 1607bnad_napi_enable(struct bnad *bnad, u32 rx_id)
1596{ 1608{
1597 int (*napi_poll) (struct napi_struct *, int);
1598 struct bnad_rx_ctrl *rx_ctrl; 1609 struct bnad_rx_ctrl *rx_ctrl;
1599 int i; 1610 int i;
1600 unsigned long flags;
1601
1602 spin_lock_irqsave(&bnad->bna_lock, flags);
1603 if (bnad->cfg_flags & BNAD_CF_MSIX)
1604 napi_poll = bnad_napi_poll_rx;
1605 else
1606 napi_poll = bnad_napi_poll_txrx;
1607 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1608 1611
1609 /* Initialize & enable NAPI */ 1612 /* Initialize & enable NAPI */
1610 for (i = 0; i < bnad->num_rxp_per_rx; i++) { 1613 for (i = 0; i < bnad->num_rxp_per_rx; i++) {
1611 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i]; 1614 rx_ctrl = &bnad->rx_info[rx_id].rx_ctrl[i];
1615
1612 netif_napi_add(bnad->netdev, &rx_ctrl->napi, 1616 netif_napi_add(bnad->netdev, &rx_ctrl->napi,
1613 napi_poll, 64); 1617 bnad_napi_poll_rx, 64);
1618
1614 napi_enable(&rx_ctrl->napi); 1619 napi_enable(&rx_ctrl->napi);
1615 } 1620 }
1616} 1621}
@@ -1825,6 +1830,7 @@ bnad_setup_rx(struct bnad *bnad, uint rx_id)
1825 1830
1826 /* Initialize the Rx event handlers */ 1831 /* Initialize the Rx event handlers */
1827 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup; 1832 rx_cbfn.rcb_setup_cbfn = bnad_cb_rcb_setup;
1833 rx_cbfn.rcb_destroy_cbfn = bnad_cb_rcb_destroy;
1828 rx_cbfn.rcb_destroy_cbfn = NULL; 1834 rx_cbfn.rcb_destroy_cbfn = NULL;
1829 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup; 1835 rx_cbfn.ccb_setup_cbfn = bnad_cb_ccb_setup;
1830 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy; 1836 rx_cbfn.ccb_destroy_cbfn = bnad_cb_ccb_destroy;
@@ -1968,6 +1974,27 @@ bnad_enable_default_bcast(struct bnad *bnad)
1968 return 0; 1974 return 0;
1969} 1975}
1970 1976
1977/* Called with bnad_conf_lock() held */
1978static void
1979bnad_restore_vlans(struct bnad *bnad, u32 rx_id)
1980{
1981 u16 vlan_id;
1982 unsigned long flags;
1983
1984 if (!bnad->vlan_grp)
1985 return;
1986
1987 BUG_ON(!(VLAN_N_VID == (BFI_MAX_VLAN + 1)));
1988
1989 for (vlan_id = 0; vlan_id < VLAN_N_VID; vlan_id++) {
1990 if (!vlan_group_get_device(bnad->vlan_grp, vlan_id))
1991 continue;
1992 spin_lock_irqsave(&bnad->bna_lock, flags);
1993 bna_rx_vlan_add(bnad->rx_info[rx_id].rx, vlan_id);
1994 spin_unlock_irqrestore(&bnad->bna_lock, flags);
1995 }
1996}
1997
1971/* Statistics utilities */ 1998/* Statistics utilities */
1972void 1999void
1973bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats) 2000bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats)
@@ -2152,16 +2179,6 @@ bnad_q_num_adjust(struct bnad *bnad, int msix_vectors)
2152 bnad->num_rxp_per_rx = 1; 2179 bnad->num_rxp_per_rx = 1;
2153} 2180}
2154 2181
2155static void
2156bnad_set_netdev_perm_addr(struct bnad *bnad)
2157{
2158 struct net_device *netdev = bnad->netdev;
2159
2160 memcpy(netdev->perm_addr, &bnad->perm_addr, netdev->addr_len);
2161 if (is_zero_ether_addr(netdev->dev_addr))
2162 memcpy(netdev->dev_addr, &bnad->perm_addr, netdev->addr_len);
2163}
2164
2165/* Enable / disable device */ 2182/* Enable / disable device */
2166static void 2183static void
2167bnad_device_disable(struct bnad *bnad) 2184bnad_device_disable(struct bnad *bnad)
@@ -2353,6 +2370,9 @@ bnad_open(struct net_device *netdev)
2353 /* Enable broadcast */ 2370 /* Enable broadcast */
2354 bnad_enable_default_bcast(bnad); 2371 bnad_enable_default_bcast(bnad);
2355 2372
2373 /* Restore VLANs, if any */
2374 bnad_restore_vlans(bnad, 0);
2375
2356 /* Set the UCAST address */ 2376 /* Set the UCAST address */
2357 spin_lock_irqsave(&bnad->bna_lock, flags); 2377 spin_lock_irqsave(&bnad->bna_lock, flags);
2358 bnad_mac_addr_set_locked(bnad, netdev->dev_addr); 2378 bnad_mac_addr_set_locked(bnad, netdev->dev_addr);
@@ -2433,21 +2453,21 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2433 return NETDEV_TX_OK; 2453 return NETDEV_TX_OK;
2434 } 2454 }
2435 2455
2456 tx_id = 0;
2457
2458 tx_info = &bnad->tx_info[tx_id];
2459 tcb = tx_info->tcb[tx_id];
2460 unmap_q = tcb->unmap_q;
2461
2436 /* 2462 /*
2437 * Takes care of the Tx that is scheduled between clearing the flag 2463 * Takes care of the Tx that is scheduled between clearing the flag
2438 * and the netif_stop_queue() call. 2464 * and the netif_stop_queue() call.
2439 */ 2465 */
2440 if (unlikely(!test_bit(BNAD_RF_TX_STARTED, &bnad->run_flags))) { 2466 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags))) {
2441 dev_kfree_skb(skb); 2467 dev_kfree_skb(skb);
2442 return NETDEV_TX_OK; 2468 return NETDEV_TX_OK;
2443 } 2469 }
2444 2470
2445 tx_id = 0;
2446
2447 tx_info = &bnad->tx_info[tx_id];
2448 tcb = tx_info->tcb[tx_id];
2449 unmap_q = tcb->unmap_q;
2450
2451 vectors = 1 + skb_shinfo(skb)->nr_frags; 2471 vectors = 1 + skb_shinfo(skb)->nr_frags;
2452 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) { 2472 if (vectors > BFI_TX_MAX_VECTORS_PER_PKT) {
2453 dev_kfree_skb(skb); 2473 dev_kfree_skb(skb);
@@ -2462,7 +2482,8 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2462 tcb->consumer_index && 2482 tcb->consumer_index &&
2463 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) { 2483 !test_and_set_bit(BNAD_TXQ_FREE_SENT, &tcb->flags)) {
2464 acked = bnad_free_txbufs(bnad, tcb); 2484 acked = bnad_free_txbufs(bnad, tcb);
2465 bna_ib_ack(tcb->i_dbell, acked); 2485 if (likely(test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2486 bna_ib_ack(tcb->i_dbell, acked);
2466 smp_mb__before_clear_bit(); 2487 smp_mb__before_clear_bit();
2467 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags); 2488 clear_bit(BNAD_TXQ_FREE_SENT, &tcb->flags);
2468 } else { 2489 } else {
@@ -2624,6 +2645,10 @@ bnad_start_xmit(struct sk_buff *skb, struct net_device *netdev)
2624 tcb->producer_index = txq_prod; 2645 tcb->producer_index = txq_prod;
2625 2646
2626 smp_mb(); 2647 smp_mb();
2648
2649 if (unlikely(!test_bit(BNAD_TXQ_TX_STARTED, &tcb->flags)))
2650 return NETDEV_TX_OK;
2651
2627 bna_txq_prod_indx_doorbell(tcb); 2652 bna_txq_prod_indx_doorbell(tcb);
2628 2653
2629 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index) 2654 if ((u16) (*tcb->hw_consumer_index) != tcb->consumer_index)
@@ -3032,7 +3057,7 @@ static int __devinit
3032bnad_pci_probe(struct pci_dev *pdev, 3057bnad_pci_probe(struct pci_dev *pdev,
3033 const struct pci_device_id *pcidev_id) 3058 const struct pci_device_id *pcidev_id)
3034{ 3059{
3035 bool using_dac; 3060 bool using_dac = false;
3036 int err; 3061 int err;
3037 struct bnad *bnad; 3062 struct bnad *bnad;
3038 struct bna *bna; 3063 struct bna *bna;
@@ -3066,7 +3091,7 @@ bnad_pci_probe(struct pci_dev *pdev,
3066 /* 3091 /*
3067 * PCI initialization 3092 * PCI initialization
3068 * Output : using_dac = 1 for 64 bit DMA 3093 * Output : using_dac = 1 for 64 bit DMA
3069 * = 0 for 32 bit DMA 3094 * = 0 for 32 bit DMA
3070 */ 3095 */
3071 err = bnad_pci_init(bnad, pdev, &using_dac); 3096 err = bnad_pci_init(bnad, pdev, &using_dac);
3072 if (err) 3097 if (err)
@@ -3084,6 +3109,9 @@ bnad_pci_probe(struct pci_dev *pdev,
3084 /* Initialize netdev structure, set up ethtool ops */ 3109 /* Initialize netdev structure, set up ethtool ops */
3085 bnad_netdev_init(bnad, using_dac); 3110 bnad_netdev_init(bnad, using_dac);
3086 3111
3112 /* Set link to down state */
3113 netif_carrier_off(netdev);
3114
3087 bnad_enable_msix(bnad); 3115 bnad_enable_msix(bnad);
3088 3116
3089 /* Get resource requirement from bna */ 3117 /* Get resource requirement from bna */
@@ -3115,11 +3143,13 @@ bnad_pci_probe(struct pci_dev *pdev,
3115 ((unsigned long)bnad)); 3143 ((unsigned long)bnad));
3116 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check, 3144 setup_timer(&bnad->bna.device.ioc.hb_timer, bnad_ioc_hb_check,
3117 ((unsigned long)bnad)); 3145 ((unsigned long)bnad));
3118 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_ioc_sem_timeout, 3146 setup_timer(&bnad->bna.device.ioc.iocpf_timer, bnad_iocpf_timeout,
3147 ((unsigned long)bnad));
3148 setup_timer(&bnad->bna.device.ioc.sem_timer, bnad_iocpf_sem_timeout,
3119 ((unsigned long)bnad)); 3149 ((unsigned long)bnad));
3120 3150
3121 /* Now start the timer before calling IOC */ 3151 /* Now start the timer before calling IOC */
3122 mod_timer(&bnad->bna.device.ioc.ioc_timer, 3152 mod_timer(&bnad->bna.device.ioc.iocpf_timer,
3123 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ)); 3153 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
3124 3154
3125 /* 3155 /*
@@ -3137,11 +3167,6 @@ bnad_pci_probe(struct pci_dev *pdev,
3137 3167
3138 mutex_unlock(&bnad->conf_mutex); 3168 mutex_unlock(&bnad->conf_mutex);
3139 3169
3140 /*
3141 * Make sure the link appears down to the stack
3142 */
3143 netif_carrier_off(netdev);
3144
3145 /* Finally, register with net_device layer */ 3170 /* Finally, register with net_device layer */
3146 err = register_netdev(netdev); 3171 err = register_netdev(netdev);
3147 if (err) { 3172 if (err) {
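Among the bnad.c changes above, the mailbox MSI-X handling no longer calls disable_irq_nosync()/enable_irq(); instead bnad_enable_mbox_irq()/bnad_disable_mbox_irq() toggle the BNAD_RF_MBOX_IRQ_DISABLED bit in run_flags and bnad_msix_mbox_handler() returns early while the bit is set (the IRQ cookie also changes from the netdev to the bnad pointer). A standalone sketch of that pattern, with plain bit operations standing in for set_bit()/clear_bit()/test_bit():

/* Userspace sketch of the flag-gated mailbox interrupt handling. */
#include <stdio.h>

#define RF_MBOX_IRQ_DISABLED	(1u << 2)  /* BNAD_RF_MBOX_IRQ_DISABLED is bit 2 */

struct bnad_sketch {
	unsigned int run_flags;
};

static void disable_mbox_irq(struct bnad_sketch *b)
{
	b->run_flags |= RF_MBOX_IRQ_DISABLED;	/* set_bit() in the driver */
}

static void enable_mbox_irq(struct bnad_sketch *b)
{
	b->run_flags &= ~RF_MBOX_IRQ_DISABLED;	/* clear_bit() in the driver */
}

/* stands in for bnad_msix_mbox_handler(); 1 plays the role of IRQ_HANDLED */
static int mbox_handler(struct bnad_sketch *b)
{
	if (b->run_flags & RF_MBOX_IRQ_DISABLED)
		return 1;		/* interrupt arrived while "disabled": drop it */
	printf("process mailbox\n");	/* bna_mbox_handler() in the driver */
	return 1;
}

int main(void)
{
	/* the bit starts set, matching bnad_mbox_irq_alloc() above */
	struct bnad_sketch b = { .run_flags = RF_MBOX_IRQ_DISABLED };

	mbox_handler(&b);	/* ignored while disabled */
	enable_mbox_irq(&b);
	mbox_handler(&b);	/* processed */
	disable_mbox_irq(&b);
	return 0;
}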
diff --git a/drivers/net/bna/bnad.h b/drivers/net/bna/bnad.h
index ebc3a9078642..8b1d51557def 100644
--- a/drivers/net/bna/bnad.h
+++ b/drivers/net/bna/bnad.h
@@ -51,6 +51,7 @@
51 */ 51 */
52struct bnad_rx_ctrl { 52struct bnad_rx_ctrl {
53 struct bna_ccb *ccb; 53 struct bna_ccb *ccb;
54 unsigned long flags;
54 struct napi_struct napi; 55 struct napi_struct napi;
55}; 56};
56 57
@@ -64,7 +65,7 @@ struct bnad_rx_ctrl {
64#define BNAD_NAME "bna" 65#define BNAD_NAME "bna"
65#define BNAD_NAME_LEN 64 66#define BNAD_NAME_LEN 64
66 67
67#define BNAD_VERSION "2.3.2.0" 68#define BNAD_VERSION "2.3.2.3"
68 69
69#define BNAD_MAILBOX_MSIX_VECTORS 1 70#define BNAD_MAILBOX_MSIX_VECTORS 1
70 71
@@ -82,6 +83,7 @@ struct bnad_rx_ctrl {
82 83
83/* Bit positions for tcb->flags */ 84/* Bit positions for tcb->flags */
84#define BNAD_TXQ_FREE_SENT 0 85#define BNAD_TXQ_FREE_SENT 0
86#define BNAD_TXQ_TX_STARTED 1
85 87
86/* Bit positions for rcb->flags */ 88/* Bit positions for rcb->flags */
87#define BNAD_RXQ_REFILL 0 89#define BNAD_RXQ_REFILL 0
@@ -124,6 +126,7 @@ struct bnad_completion {
124struct bnad_drv_stats { 126struct bnad_drv_stats {
125 u64 netif_queue_stop; 127 u64 netif_queue_stop;
126 u64 netif_queue_wakeup; 128 u64 netif_queue_wakeup;
129 u64 netif_queue_stopped;
127 u64 tso4; 130 u64 tso4;
128 u64 tso6; 131 u64 tso6;
129 u64 tso_err; 132 u64 tso_err;
@@ -199,12 +202,12 @@ struct bnad_unmap_q {
199/* Set, tested & cleared using xxx_bit() functions */ 202/* Set, tested & cleared using xxx_bit() functions */
200/* Values indicated bit positions */ 203/* Values indicated bit positions */
201#define BNAD_RF_CEE_RUNNING 1 204#define BNAD_RF_CEE_RUNNING 1
202#define BNAD_RF_HW_ERROR 2 205#define BNAD_RF_MBOX_IRQ_DISABLED 2
203#define BNAD_RF_MBOX_IRQ_DISABLED 3 206#define BNAD_RF_RX_STARTED 3
204#define BNAD_RF_TX_STARTED 4 207#define BNAD_RF_DIM_TIMER_RUNNING 4
205#define BNAD_RF_RX_STARTED 5 208#define BNAD_RF_STATS_TIMER_RUNNING 5
206#define BNAD_RF_DIM_TIMER_RUNNING 6 209#define BNAD_RF_TX_SHUTDOWN_DELAYED 6
207#define BNAD_RF_STATS_TIMER_RUNNING 7 210#define BNAD_RF_RX_SHUTDOWN_DELAYED 7
208 211
209struct bnad { 212struct bnad {
210 struct net_device *netdev; 213 struct net_device *netdev;
@@ -306,8 +309,10 @@ extern void bnad_cleanup_rx(struct bnad *bnad, uint rx_id);
306extern void bnad_dim_timer_start(struct bnad *bnad); 309extern void bnad_dim_timer_start(struct bnad *bnad);
307 310
308/* Statistics */ 311/* Statistics */
309extern void bnad_netdev_qstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats); 312extern void bnad_netdev_qstats_fill(struct bnad *bnad,
310extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64 *stats); 313 struct rtnl_link_stats64 *stats);
314extern void bnad_netdev_hwstats_fill(struct bnad *bnad,
315 struct rtnl_link_stats64 *stats);
311 316
312/** 317/**
313 * MACROS 318 * MACROS
@@ -320,9 +325,11 @@ extern void bnad_netdev_hwstats_fill(struct bnad *bnad, struct rtnl_link_stats64
320 325
321#define bnad_enable_rx_irq_unsafe(_ccb) \ 326#define bnad_enable_rx_irq_unsafe(_ccb) \
322{ \ 327{ \
323 bna_ib_coalescing_timer_set((_ccb)->i_dbell, \ 328 if (likely(test_bit(BNAD_RXQ_STARTED, &ccb->rcb[0]->flags))) {\
324 (_ccb)->rx_coalescing_timeo); \ 329 bna_ib_coalescing_timer_set((_ccb)->i_dbell, \
325 bna_ib_ack((_ccb)->i_dbell, 0); \ 330 (_ccb)->rx_coalescing_timeo); \
331 bna_ib_ack((_ccb)->i_dbell, 0); \
332 } \
326} 333}
327 334
328#define bnad_dim_timer_running(_bnad) \ 335#define bnad_dim_timer_running(_bnad) \
diff --git a/drivers/net/bna/bnad_ethtool.c b/drivers/net/bna/bnad_ethtool.c
index 11fa2ea842c1..99be5ae91991 100644
--- a/drivers/net/bna/bnad_ethtool.c
+++ b/drivers/net/bna/bnad_ethtool.c
@@ -68,6 +68,7 @@ static char *bnad_net_stats_strings[BNAD_ETHTOOL_STATS_NUM] = {
68 68
69 "netif_queue_stop", 69 "netif_queue_stop",
70 "netif_queue_wakeup", 70 "netif_queue_wakeup",
71 "netif_queue_stopped",
71 "tso4", 72 "tso4",
72 "tso6", 73 "tso6",
73 "tso_err", 74 "tso_err",
@@ -330,10 +331,6 @@ do { \
330 331
331 BNAD_GET_REG(PCIE_MISC_REG); 332 BNAD_GET_REG(PCIE_MISC_REG);
332 333
333 BNAD_GET_REG(HOST_SEM0_REG);
334 BNAD_GET_REG(HOST_SEM1_REG);
335 BNAD_GET_REG(HOST_SEM2_REG);
336 BNAD_GET_REG(HOST_SEM3_REG);
337 BNAD_GET_REG(HOST_SEM0_INFO_REG); 334 BNAD_GET_REG(HOST_SEM0_INFO_REG);
338 BNAD_GET_REG(HOST_SEM1_INFO_REG); 335 BNAD_GET_REG(HOST_SEM1_INFO_REG);
339 BNAD_GET_REG(HOST_SEM2_INFO_REG); 336 BNAD_GET_REG(HOST_SEM2_INFO_REG);
@@ -1184,6 +1181,9 @@ bnad_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats,
1184 1181
1185 bi = sizeof(*net_stats64) / sizeof(u64); 1182 bi = sizeof(*net_stats64) / sizeof(u64);
1186 1183
1184 /* Get netif_queue_stopped from stack */
1185 bnad->stats.drv_stats.netif_queue_stopped = netif_queue_stopped(netdev);
1186
1187 /* Fill driver stats into ethtool buffers */ 1187 /* Fill driver stats into ethtool buffers */
1188 stats64 = (u64 *)&bnad->stats.drv_stats; 1188 stats64 = (u64 *)&bnad->stats.drv_stats;
1189 for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++) 1189 for (i = 0; i < sizeof(struct bnad_drv_stats) / sizeof(u64); i++)
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 062600be073b..df99edf3464a 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -56,11 +56,11 @@
56#include "bnx2_fw.h" 56#include "bnx2_fw.h"
57 57
58#define DRV_MODULE_NAME "bnx2" 58#define DRV_MODULE_NAME "bnx2"
59#define DRV_MODULE_VERSION "2.0.18" 59#define DRV_MODULE_VERSION "2.0.21"
60#define DRV_MODULE_RELDATE "Oct 7, 2010" 60#define DRV_MODULE_RELDATE "Dec 23, 2010"
61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.0.15.fw" 61#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-6.2.1.fw"
62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw" 62#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-6.0.15.fw"
63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.0.17.fw" 63#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-6.2.1.fw"
64#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw" 64#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-6.0.17.fw"
65#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw" 65#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-6.0.17.fw"
66 66
@@ -766,13 +766,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
766 int j; 766 int j;
767 767
768 rxr->rx_buf_ring = 768 rxr->rx_buf_ring =
769 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring); 769 vzalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
770 if (rxr->rx_buf_ring == NULL) 770 if (rxr->rx_buf_ring == NULL)
771 return -ENOMEM; 771 return -ENOMEM;
772 772
773 memset(rxr->rx_buf_ring, 0,
774 SW_RXBD_RING_SIZE * bp->rx_max_ring);
775
776 for (j = 0; j < bp->rx_max_ring; j++) { 773 for (j = 0; j < bp->rx_max_ring; j++) {
777 rxr->rx_desc_ring[j] = 774 rxr->rx_desc_ring[j] =
778 dma_alloc_coherent(&bp->pdev->dev, 775 dma_alloc_coherent(&bp->pdev->dev,
@@ -785,13 +782,11 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
785 } 782 }
786 783
787 if (bp->rx_pg_ring_size) { 784 if (bp->rx_pg_ring_size) {
788 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE * 785 rxr->rx_pg_ring = vzalloc(SW_RXPG_RING_SIZE *
789 bp->rx_max_pg_ring); 786 bp->rx_max_pg_ring);
790 if (rxr->rx_pg_ring == NULL) 787 if (rxr->rx_pg_ring == NULL)
791 return -ENOMEM; 788 return -ENOMEM;
792 789
793 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
794 bp->rx_max_pg_ring);
795 } 790 }
796 791
797 for (j = 0; j < bp->rx_max_pg_ring; j++) { 792 for (j = 0; j < bp->rx_max_pg_ring; j++) {
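The two hunks above drop an explicit vmalloc() + memset() pair in favour of vzalloc(), which returns virtually contiguous, already-zeroed memory. A small before/after sketch of what the conversion amounts to, with placeholder names:

	#include <linux/vmalloc.h>
	#include <linux/string.h>

	static void *alloc_ring_old(size_t size)
	{
		void *ring = vmalloc(size);

		if (ring)
			memset(ring, 0, size);	/* explicit zeroing */
		return ring;
	}

	static void *alloc_ring_new(size_t size)
	{
		return vzalloc(size);		/* allocate and zero in one call */
	}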
@@ -4645,13 +4640,28 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4645 4640
4646 /* Wait for the current PCI transaction to complete before 4641 /* Wait for the current PCI transaction to complete before
4647 * issuing a reset. */ 4642 * issuing a reset. */
4648 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS, 4643 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
4649 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE | 4644 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
4650 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE | 4645 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4651 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE | 4646 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4652 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE); 4647 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4653 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS); 4648 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4654 udelay(5); 4649 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4650 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4651 udelay(5);
4652 } else { /* 5709 */
4653 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4654 val &= ~BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4655 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4656 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4657
4658 for (i = 0; i < 100; i++) {
4659 msleep(1);
4660 val = REG_RD(bp, BNX2_PCICFG_DEVICE_CONTROL);
4661 if (!(val & BNX2_PCICFG_DEVICE_STATUS_NO_PEND))
4662 break;
4663 }
4664 }
4655 4665
4656 /* Wait for the firmware to tell us it is ok to issue a reset. */ 4666 /* Wait for the firmware to tell us it is ok to issue a reset. */
4657 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1); 4667 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
@@ -4673,7 +4683,7 @@ bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4673 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 4683 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4674 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP; 4684 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4675 4685
4676 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val); 4686 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4677 4687
4678 } else { 4688 } else {
4679 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ | 4689 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
@@ -6086,7 +6096,7 @@ bnx2_request_irq(struct bnx2 *bp)
6086} 6096}
6087 6097
6088static void 6098static void
6089bnx2_free_irq(struct bnx2 *bp) 6099__bnx2_free_irq(struct bnx2 *bp)
6090{ 6100{
6091 struct bnx2_irq *irq; 6101 struct bnx2_irq *irq;
6092 int i; 6102 int i;
@@ -6097,6 +6107,13 @@ bnx2_free_irq(struct bnx2 *bp)
6097 free_irq(irq->vector, &bp->bnx2_napi[i]); 6107 free_irq(irq->vector, &bp->bnx2_napi[i]);
6098 irq->requested = 0; 6108 irq->requested = 0;
6099 } 6109 }
6110}
6111
6112static void
6113bnx2_free_irq(struct bnx2 *bp)
6114{
6115
6116 __bnx2_free_irq(bp);
6100 if (bp->flags & BNX2_FLAG_USING_MSI) 6117 if (bp->flags & BNX2_FLAG_USING_MSI)
6101 pci_disable_msi(bp->pdev); 6118 pci_disable_msi(bp->pdev);
6102 else if (bp->flags & BNX2_FLAG_USING_MSIX) 6119 else if (bp->flags & BNX2_FLAG_USING_MSIX)
@@ -6801,28 +6818,30 @@ bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6801 u32 *p = _p, i, offset; 6818 u32 *p = _p, i, offset;
6802 u8 *orig_p = _p; 6819 u8 *orig_p = _p;
6803 struct bnx2 *bp = netdev_priv(dev); 6820 struct bnx2 *bp = netdev_priv(dev);
6804 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c, 6821 static const u32 reg_boundaries[] = {
6805 0x0800, 0x0880, 0x0c00, 0x0c10, 6822 0x0000, 0x0098, 0x0400, 0x045c,
6806 0x0c30, 0x0d08, 0x1000, 0x101c, 6823 0x0800, 0x0880, 0x0c00, 0x0c10,
6807 0x1040, 0x1048, 0x1080, 0x10a4, 6824 0x0c30, 0x0d08, 0x1000, 0x101c,
6808 0x1400, 0x1490, 0x1498, 0x14f0, 6825 0x1040, 0x1048, 0x1080, 0x10a4,
6809 0x1500, 0x155c, 0x1580, 0x15dc, 6826 0x1400, 0x1490, 0x1498, 0x14f0,
6810 0x1600, 0x1658, 0x1680, 0x16d8, 6827 0x1500, 0x155c, 0x1580, 0x15dc,
6811 0x1800, 0x1820, 0x1840, 0x1854, 6828 0x1600, 0x1658, 0x1680, 0x16d8,
6812 0x1880, 0x1894, 0x1900, 0x1984, 6829 0x1800, 0x1820, 0x1840, 0x1854,
6813 0x1c00, 0x1c0c, 0x1c40, 0x1c54, 6830 0x1880, 0x1894, 0x1900, 0x1984,
6814 0x1c80, 0x1c94, 0x1d00, 0x1d84, 6831 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6815 0x2000, 0x2030, 0x23c0, 0x2400, 6832 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6816 0x2800, 0x2820, 0x2830, 0x2850, 6833 0x2000, 0x2030, 0x23c0, 0x2400,
6817 0x2b40, 0x2c10, 0x2fc0, 0x3058, 6834 0x2800, 0x2820, 0x2830, 0x2850,
6818 0x3c00, 0x3c94, 0x4000, 0x4010, 6835 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6819 0x4080, 0x4090, 0x43c0, 0x4458, 6836 0x3c00, 0x3c94, 0x4000, 0x4010,
6820 0x4c00, 0x4c18, 0x4c40, 0x4c54, 6837 0x4080, 0x4090, 0x43c0, 0x4458,
6821 0x4fc0, 0x5010, 0x53c0, 0x5444, 6838 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6822 0x5c00, 0x5c18, 0x5c80, 0x5c90, 6839 0x4fc0, 0x5010, 0x53c0, 0x5444,
6823 0x5fc0, 0x6000, 0x6400, 0x6428, 6840 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6824 0x6800, 0x6848, 0x684c, 0x6860, 6841 0x5fc0, 0x6000, 0x6400, 0x6428,
6825 0x6888, 0x6910, 0x8000 }; 6842 0x6800, 0x6848, 0x684c, 0x6860,
6843 0x6888, 0x6910, 0x8000
6844 };
6826 6845
6827 regs->version = 0; 6846 regs->version = 0;
6828 6847
@@ -7080,6 +7099,7 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7080 7099
7081 bnx2_netif_stop(bp, true); 7100 bnx2_netif_stop(bp, true);
7082 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET); 7101 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7102 __bnx2_free_irq(bp);
7083 bnx2_free_skbs(bp); 7103 bnx2_free_skbs(bp);
7084 bnx2_free_mem(bp); 7104 bnx2_free_mem(bp);
7085 } 7105 }
@@ -7092,6 +7112,9 @@ bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7092 7112
7093 rc = bnx2_alloc_mem(bp); 7113 rc = bnx2_alloc_mem(bp);
7094 if (!rc) 7114 if (!rc)
7115 rc = bnx2_request_irq(bp);
7116
7117 if (!rc)
7095 rc = bnx2_init_nic(bp, 0); 7118 rc = bnx2_init_nic(bp, 0);
7096 7119
7097 if (rc) { 7120 if (rc) {
@@ -7914,15 +7937,15 @@ bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7914 goto err_out_release; 7937 goto err_out_release;
7915 } 7938 }
7916 7939
7940 bnx2_set_power_state(bp, PCI_D0);
7941
7917 /* Configure byte swap and enable write to the reg_window registers. 7942 /* Configure byte swap and enable write to the reg_window registers.
7918 * Rely on CPU to do target byte swapping on big endian systems 7943 * Rely on CPU to do target byte swapping on big endian systems
7919 * The chip's target access swapping will not swap all accesses 7944 * The chip's target access swapping will not swap all accesses
7920 */ 7945 */
7921 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, 7946 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG,
7922 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA | 7947 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7923 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP); 7948 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7924
7925 bnx2_set_power_state(bp, PCI_D0);
7926 7949
7927 bp->chip_id = REG_RD(bp, BNX2_MISC_ID); 7950 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7928 7951
@@ -8383,8 +8406,6 @@ bnx2_remove_one(struct pci_dev *pdev)
8383 struct net_device *dev = pci_get_drvdata(pdev); 8406 struct net_device *dev = pci_get_drvdata(pdev);
8384 struct bnx2 *bp = netdev_priv(dev); 8407 struct bnx2 *bp = netdev_priv(dev);
8385 8408
8386 flush_scheduled_work();
8387
8388 unregister_netdev(dev); 8409 unregister_netdev(dev);
8389 8410
8390 if (bp->mips_firmware) 8411 if (bp->mips_firmware)
@@ -8421,7 +8442,7 @@ bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8421 if (!netif_running(dev)) 8442 if (!netif_running(dev))
8422 return 0; 8443 return 0;
8423 8444
8424 flush_scheduled_work(); 8445 cancel_work_sync(&bp->reset_task);
8425 bnx2_netif_stop(bp, true); 8446 bnx2_netif_stop(bp, true);
8426 netif_device_detach(dev); 8447 netif_device_detach(dev);
8427 del_timer_sync(&bp->timer); 8448 del_timer_sync(&bp->timer);
diff --git a/drivers/net/bnx2.h b/drivers/net/bnx2.h
index bf4c3421067d..5488a2e82fe9 100644
--- a/drivers/net/bnx2.h
+++ b/drivers/net/bnx2.h
@@ -461,6 +461,8 @@ struct l2_fhdr {
461#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090 461#define BNX2_PCICFG_MAILBOX_QUEUE_ADDR 0x00000090
462#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094 462#define BNX2_PCICFG_MAILBOX_QUEUE_DATA 0x00000094
463 463
464#define BNX2_PCICFG_DEVICE_CONTROL 0x000000b4
465#define BNX2_PCICFG_DEVICE_STATUS_NO_PEND ((1L<<5)<<16)
464 466
465/* 467/*
466 * pci_reg definition 468 * pci_reg definition
diff --git a/drivers/net/bnx2x/Makefile b/drivers/net/bnx2x/Makefile
index 084afce89ae9..bb83a2961273 100644
--- a/drivers/net/bnx2x/Makefile
+++ b/drivers/net/bnx2x/Makefile
@@ -4,4 +4,4 @@
4 4
5obj-$(CONFIG_BNX2X) += bnx2x.o 5obj-$(CONFIG_BNX2X) += bnx2x.o
6 6
7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o 7bnx2x-objs := bnx2x_main.o bnx2x_link.o bnx2x_cmn.o bnx2x_ethtool.o bnx2x_stats.o bnx2x_dcb.o
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index d255428122fc..a6cd335c9436 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -13,6 +13,8 @@
13 13
14#ifndef BNX2X_H 14#ifndef BNX2X_H
15#define BNX2X_H 15#define BNX2X_H
16#include <linux/netdevice.h>
17#include <linux/types.h>
16 18
17/* compilation time flags */ 19/* compilation time flags */
18 20
@@ -20,15 +22,17 @@
20 * (you will need to reboot afterwards) */ 22 * (you will need to reboot afterwards) */
21/* #define BNX2X_STOP_ON_ERROR */ 23/* #define BNX2X_STOP_ON_ERROR */
22 24
23#define DRV_MODULE_VERSION "1.60.01-0" 25#define DRV_MODULE_VERSION "1.62.00-3"
24#define DRV_MODULE_RELDATE "2010/11/12" 26#define DRV_MODULE_RELDATE "2010/12/21"
25#define BNX2X_BC_VER 0x040200 27#define BNX2X_BC_VER 0x040200
26 28
27#define BNX2X_MULTI_QUEUE 29#define BNX2X_MULTI_QUEUE
28 30
29#define BNX2X_NEW_NAPI 31#define BNX2X_NEW_NAPI
30 32
31 33#if defined(CONFIG_DCB)
34#define BCM_DCB
35#endif
32#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE) 36#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
33#define BCM_CNIC 1 37#define BCM_CNIC 1
34#include "../cnic_if.h" 38#include "../cnic_if.h"
@@ -48,6 +52,7 @@
48#include "bnx2x_fw_defs.h" 52#include "bnx2x_fw_defs.h"
49#include "bnx2x_hsi.h" 53#include "bnx2x_hsi.h"
50#include "bnx2x_link.h" 54#include "bnx2x_link.h"
55#include "bnx2x_dcb.h"
51#include "bnx2x_stats.h" 56#include "bnx2x_stats.h"
52 57
53/* error/debug prints */ 58/* error/debug prints */
@@ -199,10 +204,25 @@ void bnx2x_panic_dump(struct bnx2x *bp);
199/* EQ completions */ 204/* EQ completions */
200#define HC_SP_INDEX_EQ_CONS 7 205#define HC_SP_INDEX_EQ_CONS 7
201 206
207/* FCoE L2 connection completions */
208#define HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS 6
209#define HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS 4
202/* iSCSI L2 */ 210/* iSCSI L2 */
203#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5 211#define HC_SP_INDEX_ETH_ISCSI_CQ_CONS 5
204#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1 212#define HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS 1
205 213
214/* Special clients parameters */
215
216/* SB indices */
217/* FCoE L2 */
218#define BNX2X_FCOE_L2_RX_INDEX \
219 (&bp->def_status_blk->sp_sb.\
220 index_values[HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS])
221
222#define BNX2X_FCOE_L2_TX_INDEX \
223 (&bp->def_status_blk->sp_sb.\
224 index_values[HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS])
225
206/** 226/**
207 * CIDs and CLIDs: 227 * CIDs and CLIDs:
208 * CLIDs below is a CLID for func 0, then the CLID for other 228 * CLIDs below is a CLID for func 0, then the CLID for other
@@ -215,12 +235,19 @@ void bnx2x_panic_dump(struct bnx2x *bp);
215#define BNX2X_ISCSI_ETH_CL_ID 17 235#define BNX2X_ISCSI_ETH_CL_ID 17
216#define BNX2X_ISCSI_ETH_CID 17 236#define BNX2X_ISCSI_ETH_CID 17
217 237
238/* FCoE L2 */
239#define BNX2X_FCOE_ETH_CL_ID 18
240#define BNX2X_FCOE_ETH_CID 18
241
218/** Additional rings budgeting */ 242/** Additional rings budgeting */
219#ifdef BCM_CNIC 243#ifdef BCM_CNIC
220#define CNIC_CONTEXT_USE 1 244#define CNIC_CONTEXT_USE 1
245#define FCOE_CONTEXT_USE 1
221#else 246#else
222#define CNIC_CONTEXT_USE 0 247#define CNIC_CONTEXT_USE 0
248#define FCOE_CONTEXT_USE 0
223#endif /* BCM_CNIC */ 249#endif /* BCM_CNIC */
250#define NONE_ETH_CONTEXT_USE (FCOE_CONTEXT_USE)
224 251
225#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \ 252#define AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR \
226 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR 253 AEU_INPUTS_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR
@@ -401,6 +428,17 @@ struct bnx2x_fastpath {
401}; 428};
402 429
403#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var) 430#define bnx2x_fp(bp, nr, var) (bp->fp[nr].var)
431#ifdef BCM_CNIC
432/* FCoE L2 `fastpath' is right after the eth entries */
433#define FCOE_IDX BNX2X_NUM_ETH_QUEUES(bp)
434#define bnx2x_fcoe_fp(bp) (&bp->fp[FCOE_IDX])
435#define bnx2x_fcoe(bp, var) (bnx2x_fcoe_fp(bp)->var)
436#define IS_FCOE_FP(fp) (fp->index == FCOE_IDX)
437#define IS_FCOE_IDX(idx) ((idx) == FCOE_IDX)
438#else
439#define IS_FCOE_FP(fp) false
440#define IS_FCOE_IDX(idx) false
441#endif
404 442
405 443
406/* MC hsi */ 444/* MC hsi */
@@ -598,6 +636,7 @@ struct bnx2x_common {
598 636
599#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0) 637#define CHIP_METAL(bp) (bp->common.chip_id & 0x00000ff0)
600#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f) 638#define CHIP_BOND_ID(bp) (bp->common.chip_id & 0x0000000f)
639#define CHIP_PARITY_ENABLED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
601 640
602 int flash_size; 641 int flash_size;
603#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */ 642#define NVRAM_1MB_SIZE 0x20000 /* 1M bit in bytes */
@@ -669,8 +708,14 @@ struct bnx2x_port {
669enum { 708enum {
670 CAM_ETH_LINE = 0, 709 CAM_ETH_LINE = 0,
671 CAM_ISCSI_ETH_LINE, 710 CAM_ISCSI_ETH_LINE,
672 CAM_MAX_PF_LINE = CAM_ISCSI_ETH_LINE 711 CAM_FIP_ETH_LINE,
712 CAM_FIP_MCAST_LINE,
713 CAM_MAX_PF_LINE = CAM_FIP_MCAST_LINE
673}; 714};
715/* number of MACs per function in NIG memory - used for SI mode */
716#define NIG_LLH_FUNC_MEM_SIZE 16
717/* number of entries in NIG_REG_LLHX_FUNC_MEM */
718#define NIG_LLH_FUNC_MEM_MAX_OFFSET 8
674 719
675#define BNX2X_VF_ID_INVALID 0xFF 720#define BNX2X_VF_ID_INVALID 0xFF
676 721
@@ -710,6 +755,14 @@ enum {
710 */ 755 */
711#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE) 756#define L2_FP_COUNT(cid_cnt) ((cid_cnt) - CNIC_CONTEXT_USE)
712 757
758/*
759 * The number of FP-SB allocated by the driver == max number of regular L2
760 * queues + 1 for the CNIC which also consumes an FP-SB
761 */
762#define FP_SB_COUNT(cid_cnt) ((cid_cnt) - FCOE_CONTEXT_USE)
763#define NUM_IGU_SB_REQUIRED(cid_cnt) \
764 (FP_SB_COUNT(cid_cnt) - NONE_ETH_CONTEXT_USE)
765
713union cdu_context { 766union cdu_context {
714 struct eth_context eth; 767 struct eth_context eth;
715 char pad[1024]; 768 char pad[1024];
@@ -722,7 +775,8 @@ union cdu_context {
722 775
723#ifdef BCM_CNIC 776#ifdef BCM_CNIC
724#define CNIC_ISCSI_CID_MAX 256 777#define CNIC_ISCSI_CID_MAX 256
725#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX) 778#define CNIC_FCOE_CID_MAX 2048
779#define CNIC_CID_MAX (CNIC_ISCSI_CID_MAX + CNIC_FCOE_CID_MAX)
726#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS) 780#define CNIC_ILT_LINES DIV_ROUND_UP(CNIC_CID_MAX, ILT_PAGE_CIDS)
727#endif 781#endif
728 782
@@ -770,6 +824,8 @@ struct bnx2x_slowpath {
770 824
771 u32 wb_comp; 825 u32 wb_comp;
772 u32 wb_data[4]; 826 u32 wb_data[4];
827 /* pfc configuration for DCBX ramrod */
828 struct flow_control_configuration pfc_config;
773}; 829};
774 830
775#define bnx2x_sp(bp, var) (&bp->slowpath->var) 831#define bnx2x_sp(bp, var) (&bp->slowpath->var)
@@ -918,6 +974,10 @@ struct bnx2x {
918#define DISABLE_MSI_FLAG 0x200 974#define DISABLE_MSI_FLAG 0x200
919#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) 975#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
920#define MF_FUNC_DIS 0x1000 976#define MF_FUNC_DIS 0x1000
977#define FCOE_MACS_SET 0x2000
978#define NO_FCOE_FLAG 0x4000
979
980#define NO_FCOE(bp) ((bp)->flags & NO_FCOE_FLAG)
921 981
922 int pf_num; /* absolute PF number */ 982 int pf_num; /* absolute PF number */
923 int pfid; /* per-path PF number */ 983 int pfid; /* per-path PF number */
@@ -967,6 +1027,8 @@ struct bnx2x {
967 u16 mf_ov; 1027 u16 mf_ov;
968 u8 mf_mode; 1028 u8 mf_mode;
969#define IS_MF(bp) (bp->mf_mode != 0) 1029#define IS_MF(bp) (bp->mf_mode != 0)
1030#define IS_MF_SI(bp) (bp->mf_mode == MULTI_FUNCTION_SI)
1031#define IS_MF_SD(bp) (bp->mf_mode == MULTI_FUNCTION_SD)
970 1032
971 u8 wol; 1033 u8 wol;
972 1034
@@ -1010,6 +1072,7 @@ struct bnx2x {
1010#define BNX2X_ACCEPT_ALL_UNICAST 0x0004 1072#define BNX2X_ACCEPT_ALL_UNICAST 0x0004
1011#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008 1073#define BNX2X_ACCEPT_ALL_MULTICAST 0x0008
1012#define BNX2X_ACCEPT_BROADCAST 0x0010 1074#define BNX2X_ACCEPT_BROADCAST 0x0010
1075#define BNX2X_ACCEPT_UNMATCHED_UCAST 0x0020
1013#define BNX2X_PROMISCUOUS_MODE 0x10000 1076#define BNX2X_PROMISCUOUS_MODE 0x10000
1014 1077
1015 u32 rx_mode; 1078 u32 rx_mode;
@@ -1062,7 +1125,8 @@ struct bnx2x {
1062 u16 cnic_kwq_pending; 1125 u16 cnic_kwq_pending;
1063 u16 cnic_spq_pending; 1126 u16 cnic_spq_pending;
1064 struct mutex cnic_mutex; 1127 struct mutex cnic_mutex;
1065 u8 iscsi_mac[6]; 1128 u8 iscsi_mac[ETH_ALEN];
1129 u8 fip_mac[ETH_ALEN];
1066#endif 1130#endif
1067 1131
1068 int dmae_ready; 1132 int dmae_ready;
@@ -1122,6 +1186,31 @@ struct bnx2x {
1122 1186
1123 char fw_ver[32]; 1187 char fw_ver[32];
1124 const struct firmware *firmware; 1188 const struct firmware *firmware;
1189 /* LLDP params */
1190 struct bnx2x_config_lldp_params lldp_config_params;
1191
1192 /* DCB support on/off */
1193 u16 dcb_state;
1194#define BNX2X_DCB_STATE_OFF 0
1195#define BNX2X_DCB_STATE_ON 1
1196
1197 /* DCBX engine mode */
1198 int dcbx_enabled;
1199#define BNX2X_DCBX_ENABLED_OFF 0
1200#define BNX2X_DCBX_ENABLED_ON_NEG_OFF 1
1201#define BNX2X_DCBX_ENABLED_ON_NEG_ON 2
1202#define BNX2X_DCBX_ENABLED_INVALID (-1)
1203
1204 bool dcbx_mode_uset;
1205
1206 struct bnx2x_config_dcbx_params dcbx_config_params;
1207
1208 struct bnx2x_dcbx_port_params dcbx_port_params;
1209 int dcb_version;
1210
1211 /* DCBX Negotiation results */
1212 struct dcbx_features dcbx_local_feat;
1213 u32 dcbx_error;
1125}; 1214};
1126 1215
1127/** 1216/**
@@ -1152,10 +1241,17 @@ struct bnx2x {
1152#define RSS_IPV6_TCP_CAP 0x0008 1241#define RSS_IPV6_TCP_CAP 0x0008
1153 1242
1154#define BNX2X_NUM_QUEUES(bp) (bp->num_queues) 1243#define BNX2X_NUM_QUEUES(bp) (bp->num_queues)
1244#define BNX2X_NUM_ETH_QUEUES(bp) (BNX2X_NUM_QUEUES(bp) - NONE_ETH_CONTEXT_USE)
1245
1246/* ethtool statistics are displayed for all regular ethernet queues and the
1247 * fcoe L2 queue if not disabled
1248 */
1249#define BNX2X_NUM_STAT_QUEUES(bp) (NO_FCOE(bp) ? BNX2X_NUM_ETH_QUEUES(bp) : \
1250 (BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE))
1251
1155#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1) 1252#define is_multi(bp) (BNX2X_NUM_QUEUES(bp) > 1)
1156 1253
1157#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE) 1254#define BNX2X_MAX_QUEUES(bp) (bp->igu_sb_cnt - CNIC_CONTEXT_USE)
1158#define is_eth_multi(bp) (BNX2X_NUM_ETH_QUEUES(bp) > 1)
1159 1255
1160#define RSS_IPV4_CAP_MASK \ 1256#define RSS_IPV4_CAP_MASK \
1161 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY 1257 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_IPV4_CAPABILITY
@@ -1248,6 +1344,7 @@ struct bnx2x_client_ramrod_params {
1248 u16 cl_id; 1344 u16 cl_id;
1249 u32 cid; 1345 u32 cid;
1250 u8 poll; 1346 u8 poll;
1347#define CLIENT_IS_FCOE 0x01
1251#define CLIENT_IS_LEADING_RSS 0x02 1348#define CLIENT_IS_LEADING_RSS 0x02
1252 u8 flags; 1349 u8 flags;
1253}; 1350};
@@ -1280,11 +1377,54 @@ struct bnx2x_func_init_params {
1280 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */ 1377 u16 spq_prod; /* valid iff FUNC_FLG_SPQ */
1281}; 1378};
1282 1379
1380#define for_each_eth_queue(bp, var) \
1381 for (var = 0; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
1382
1383#define for_each_nondefault_eth_queue(bp, var) \
1384 for (var = 1; var < BNX2X_NUM_ETH_QUEUES(bp); var++)
1385
1386#define for_each_napi_queue(bp, var) \
1387 for (var = 0; \
1388 var < BNX2X_NUM_ETH_QUEUES(bp) + FCOE_CONTEXT_USE; var++) \
1389 if (skip_queue(bp, var)) \
1390 continue; \
1391 else
1392
1283#define for_each_queue(bp, var) \ 1393#define for_each_queue(bp, var) \
1284 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) 1394 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
1395 if (skip_queue(bp, var)) \
1396 continue; \
1397 else
1398
1399#define for_each_rx_queue(bp, var) \
1400 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
1401 if (skip_rx_queue(bp, var)) \
1402 continue; \
1403 else
1404
1405#define for_each_tx_queue(bp, var) \
1406 for (var = 0; var < BNX2X_NUM_QUEUES(bp); var++) \
1407 if (skip_tx_queue(bp, var)) \
1408 continue; \
1409 else
1410
1285#define for_each_nondefault_queue(bp, var) \ 1411#define for_each_nondefault_queue(bp, var) \
1286 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) 1412 for (var = 1; var < BNX2X_NUM_QUEUES(bp); var++) \
1413 if (skip_queue(bp, var)) \
1414 continue; \
1415 else
1416
1417/* skip rx queue
1418 * if FCOE l2 support is disabled and this is the fcoe L2 queue
1419 */
1420#define skip_rx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1287 1421
1422/* skip tx queue
1423 * if FCOE l2 support is disabled and this is the fcoe L2 queue
1424 */
1425#define skip_tx_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1426
1427#define skip_queue(bp, idx) (NO_FCOE(bp) && IS_FCOE_IDX(idx))
1288 1428
1289#define WAIT_RAMROD_POLL 0x01 1429#define WAIT_RAMROD_POLL 0x01
1290#define WAIT_RAMROD_COMMON 0x02 1430#define WAIT_RAMROD_COMMON 0x02
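The skip-aware iterators added above end in an "if (skip) continue; else" tail so that the statement or block written after the macro becomes the loop body only for non-skipped indices. A generic sketch of the idiom, with hypothetical names:

	#define NUM_ITEMS	8
	#define skip_item(i)	((i) == 3)		/* placeholder predicate */

	#define for_each_active_item(var)			\
		for (var = 0; var < NUM_ITEMS; var++)		\
			if (skip_item(var))			\
				continue;			\
			else

	static int count_active(void)
	{
		int i, n = 0;

		for_each_active_item(i)
			n++;			/* index 3 is skipped */
		return n;			/* 7 */
	}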
@@ -1329,7 +1469,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1329 1469
1330#define BNX2X_ILT_ZALLOC(x, y, size) \ 1470#define BNX2X_ILT_ZALLOC(x, y, size) \
1331 do { \ 1471 do { \
1332 x = pci_alloc_consistent(bp->pdev, size, y); \ 1472 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
1333 if (x) \ 1473 if (x) \
1334 memset(x, 0, size); \ 1474 memset(x, 0, size); \
1335 } while (0) 1475 } while (0)
@@ -1337,7 +1477,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1337#define BNX2X_ILT_FREE(x, y, size) \ 1477#define BNX2X_ILT_FREE(x, y, size) \
1338 do { \ 1478 do { \
1339 if (x) { \ 1479 if (x) { \
1340 pci_free_consistent(bp->pdev, size, x, y); \ 1480 dma_free_coherent(&bp->pdev->dev, size, x, y); \
1341 x = NULL; \ 1481 x = NULL; \
1342 y = 0; \ 1482 y = 0; \
1343 } \ 1483 } \
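These hunks move BNX2X_ILT_ZALLOC/BNX2X_ILT_FREE from the legacy pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API. A sketch of equivalent helpers; the device, size and handle names are placeholders:

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>
	#include <linux/string.h>

	static void *ilt_zalloc(struct device *dev, size_t size,
				dma_addr_t *mapping)
	{
		void *vaddr = dma_alloc_coherent(dev, size, mapping, GFP_KERNEL);

		if (vaddr)
			memset(vaddr, 0, size);	/* keep the ZALLOC semantics */
		return vaddr;
	}

	static void ilt_free(struct device *dev, size_t size, void *vaddr,
			     dma_addr_t mapping)
	{
		if (vaddr)
			dma_free_coherent(dev, size, vaddr, mapping);
	}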
@@ -1608,10 +1748,6 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1608 MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \ 1748 MAC_CONFIGURATION_ENTRY_ACTION_TYPE) == \
1609 (T_ETH_MAC_COMMAND_INVALIDATE)) 1749 (T_ETH_MAC_COMMAND_INVALIDATE))
1610 1750
1611#define CAM_INVALIDATE(x) \
1612 (x.target_table_entry.flags = TSTORM_CAM_TARGET_TABLE_ENTRY_ACTION_TYPE)
1613
1614
1615/* Number of u32 elements in MC hash array */ 1751/* Number of u32 elements in MC hash array */
1616#define MC_HASH_SIZE 8 1752#define MC_HASH_SIZE 8
1617#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \ 1753#define MC_HASH_OFFSET(bp, i) (BAR_TSTRORM_INTMEM + \
diff --git a/drivers/net/bnx2x/bnx2x_cmn.c b/drivers/net/bnx2x/bnx2x_cmn.c
index 0af361e4e3d1..710ce5d04c53 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/bnx2x/bnx2x_cmn.c
@@ -698,6 +698,29 @@ void bnx2x_release_phy_lock(struct bnx2x *bp)
698 mutex_unlock(&bp->port.phy_mutex); 698 mutex_unlock(&bp->port.phy_mutex);
699} 699}
700 700
701/* calculates MF speed according to current linespeed and MF configuration */
702u16 bnx2x_get_mf_speed(struct bnx2x *bp)
703{
704 u16 line_speed = bp->link_vars.line_speed;
705 if (IS_MF(bp)) {
706 u16 maxCfg = (bp->mf_config[BP_VN(bp)] &
707 FUNC_MF_CFG_MAX_BW_MASK) >>
708 FUNC_MF_CFG_MAX_BW_SHIFT;
709 /* Calculate the current MAX line speed limit for the DCC
710 * capable devices
711 */
712 if (IS_MF_SD(bp)) {
713 u16 vn_max_rate = maxCfg * 100;
714
715 if (vn_max_rate < line_speed)
716 line_speed = vn_max_rate;
717 } else /* IS_MF_SI(bp)) */
718 line_speed = (line_speed * maxCfg) / 100;
719 }
720
721 return line_speed;
722}
723
701void bnx2x_link_report(struct bnx2x *bp) 724void bnx2x_link_report(struct bnx2x *bp)
702{ 725{
703 if (bp->flags & MF_FUNC_DIS) { 726 if (bp->flags & MF_FUNC_DIS) {
@@ -713,17 +736,8 @@ void bnx2x_link_report(struct bnx2x *bp)
713 netif_carrier_on(bp->dev); 736 netif_carrier_on(bp->dev);
714 netdev_info(bp->dev, "NIC Link is Up, "); 737 netdev_info(bp->dev, "NIC Link is Up, ");
715 738
716 line_speed = bp->link_vars.line_speed; 739 line_speed = bnx2x_get_mf_speed(bp);
717 if (IS_MF(bp)) {
718 u16 vn_max_rate;
719 740
720 vn_max_rate =
721 ((bp->mf_config[BP_VN(bp)] &
722 FUNC_MF_CFG_MAX_BW_MASK) >>
723 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
724 if (vn_max_rate < line_speed)
725 line_speed = vn_max_rate;
726 }
727 pr_cont("%d Mbps ", line_speed); 741 pr_cont("%d Mbps ", line_speed);
728 742
729 if (bp->link_vars.duplex == DUPLEX_FULL) 743 if (bp->link_vars.duplex == DUPLEX_FULL)
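bnx2x_get_mf_speed() above applies maxCfg differently per multi-function mode: in SD mode it is a cap in units of 100 Mbps, in SI mode a percentage of the current link speed. An illustrative-only helper with example values (a 10000 Mbps link and maxCfg of 25 yields 2500 Mbps either way):

	#include <linux/types.h>

	static u16 mf_speed_example(u16 line_speed, u16 max_cfg, bool is_sd)
	{
		if (is_sd) {
			/* SD: cap at max_cfg * 100, e.g. min(10000, 2500) = 2500 */
			u16 vn_max_rate = max_cfg * 100;

			return vn_max_rate < line_speed ? vn_max_rate : line_speed;
		}
		/* SI: scale by percentage, e.g. (10000 * 25) / 100 = 2500 */
		return (line_speed * max_cfg) / 100;
	}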
@@ -813,7 +827,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
813 DP(NETIF_MSG_IFUP, 827 DP(NETIF_MSG_IFUP,
814 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size); 828 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
815 829
816 for_each_queue(bp, j) { 830 for_each_rx_queue(bp, j) {
817 struct bnx2x_fastpath *fp = &bp->fp[j]; 831 struct bnx2x_fastpath *fp = &bp->fp[j];
818 832
819 if (!fp->disable_tpa) { 833 if (!fp->disable_tpa) {
@@ -866,7 +880,7 @@ void bnx2x_init_rx_rings(struct bnx2x *bp)
866 } 880 }
867 } 881 }
868 882
869 for_each_queue(bp, j) { 883 for_each_rx_queue(bp, j) {
870 struct bnx2x_fastpath *fp = &bp->fp[j]; 884 struct bnx2x_fastpath *fp = &bp->fp[j];
871 885
872 fp->rx_bd_cons = 0; 886 fp->rx_bd_cons = 0;
@@ -897,7 +911,7 @@ static void bnx2x_free_tx_skbs(struct bnx2x *bp)
897{ 911{
898 int i; 912 int i;
899 913
900 for_each_queue(bp, i) { 914 for_each_tx_queue(bp, i) {
901 struct bnx2x_fastpath *fp = &bp->fp[i]; 915 struct bnx2x_fastpath *fp = &bp->fp[i];
902 916
903 u16 bd_cons = fp->tx_bd_cons; 917 u16 bd_cons = fp->tx_bd_cons;
@@ -915,7 +929,7 @@ static void bnx2x_free_rx_skbs(struct bnx2x *bp)
915{ 929{
916 int i, j; 930 int i, j;
917 931
918 for_each_queue(bp, j) { 932 for_each_rx_queue(bp, j) {
919 struct bnx2x_fastpath *fp = &bp->fp[j]; 933 struct bnx2x_fastpath *fp = &bp->fp[j];
920 934
921 for (i = 0; i < NUM_RX_BD; i++) { 935 for (i = 0; i < NUM_RX_BD; i++) {
@@ -956,7 +970,7 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
956#ifdef BCM_CNIC 970#ifdef BCM_CNIC
957 offset++; 971 offset++;
958#endif 972#endif
959 for_each_queue(bp, i) { 973 for_each_eth_queue(bp, i) {
960 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " 974 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
961 "state %x\n", i, bp->msix_table[i + offset].vector, 975 "state %x\n", i, bp->msix_table[i + offset].vector,
962 bnx2x_fp(bp, i, state)); 976 bnx2x_fp(bp, i, state));
@@ -990,14 +1004,14 @@ int bnx2x_enable_msix(struct bnx2x *bp)
990 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry); 1004 bp->msix_table[msix_vec].entry, bp->msix_table[msix_vec].entry);
991 msix_vec++; 1005 msix_vec++;
992#endif 1006#endif
993 for_each_queue(bp, i) { 1007 for_each_eth_queue(bp, i) {
994 bp->msix_table[msix_vec].entry = msix_vec; 1008 bp->msix_table[msix_vec].entry = msix_vec;
995 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d " 1009 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
996 "(fastpath #%u)\n", msix_vec, msix_vec, i); 1010 "(fastpath #%u)\n", msix_vec, msix_vec, i);
997 msix_vec++; 1011 msix_vec++;
998 } 1012 }
999 1013
1000 req_cnt = BNX2X_NUM_QUEUES(bp) + CNIC_CONTEXT_USE + 1; 1014 req_cnt = BNX2X_NUM_ETH_QUEUES(bp) + CNIC_CONTEXT_USE + 1;
1001 1015
1002 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt); 1016 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0], req_cnt);
1003 1017
@@ -1053,7 +1067,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1053#ifdef BCM_CNIC 1067#ifdef BCM_CNIC
1054 offset++; 1068 offset++;
1055#endif 1069#endif
1056 for_each_queue(bp, i) { 1070 for_each_eth_queue(bp, i) {
1057 struct bnx2x_fastpath *fp = &bp->fp[i]; 1071 struct bnx2x_fastpath *fp = &bp->fp[i];
1058 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d", 1072 snprintf(fp->name, sizeof(fp->name), "%s-fp-%d",
1059 bp->dev->name, i); 1073 bp->dev->name, i);
@@ -1070,7 +1084,7 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
1070 fp->state = BNX2X_FP_STATE_IRQ; 1084 fp->state = BNX2X_FP_STATE_IRQ;
1071 } 1085 }
1072 1086
1073 i = BNX2X_NUM_QUEUES(bp); 1087 i = BNX2X_NUM_ETH_QUEUES(bp);
1074 offset = 1 + CNIC_CONTEXT_USE; 1088 offset = 1 + CNIC_CONTEXT_USE;
1075 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d" 1089 netdev_info(bp->dev, "using MSI-X IRQs: sp %d fp[%d] %d"
1076 " ... fp[%d] %d\n", 1090 " ... fp[%d] %d\n",
@@ -1117,7 +1131,7 @@ static void bnx2x_napi_enable(struct bnx2x *bp)
1117{ 1131{
1118 int i; 1132 int i;
1119 1133
1120 for_each_queue(bp, i) 1134 for_each_napi_queue(bp, i)
1121 napi_enable(&bnx2x_fp(bp, i, napi)); 1135 napi_enable(&bnx2x_fp(bp, i, napi));
1122} 1136}
1123 1137
@@ -1125,7 +1139,7 @@ static void bnx2x_napi_disable(struct bnx2x *bp)
1125{ 1139{
1126 int i; 1140 int i;
1127 1141
1128 for_each_queue(bp, i) 1142 for_each_napi_queue(bp, i)
1129 napi_disable(&bnx2x_fp(bp, i, napi)); 1143 napi_disable(&bnx2x_fp(bp, i, napi));
1130} 1144}
1131 1145
@@ -1153,6 +1167,35 @@ void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
1153 netif_tx_disable(bp->dev); 1167 netif_tx_disable(bp->dev);
1154} 1168}
1155 1169
1170u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb)
1171{
1172#ifdef BCM_CNIC
1173 struct bnx2x *bp = netdev_priv(dev);
1174 if (NO_FCOE(bp))
1175 return skb_tx_hash(dev, skb);
1176 else {
1177 struct ethhdr *hdr = (struct ethhdr *)skb->data;
1178 u16 ether_type = ntohs(hdr->h_proto);
1179
1180 /* Skip VLAN tag if present */
1181 if (ether_type == ETH_P_8021Q) {
1182 struct vlan_ethhdr *vhdr =
1183 (struct vlan_ethhdr *)skb->data;
1184
1185 ether_type = ntohs(vhdr->h_vlan_encapsulated_proto);
1186 }
1187
1188 /* If ethertype is FCoE or FIP - use FCoE ring */
1189 if ((ether_type == ETH_P_FCOE) || (ether_type == ETH_P_FIP))
1190 return bnx2x_fcoe(bp, index);
1191 }
1192#endif
1193 /* Select a non-FCoE queue: if FCoE is enabled, exclude FCoE L2 ring
1194 */
1195 return __skb_tx_hash(dev, skb,
1196 dev->real_num_tx_queues - FCOE_CONTEXT_USE);
1197}
1198
1156void bnx2x_set_num_queues(struct bnx2x *bp) 1199void bnx2x_set_num_queues(struct bnx2x *bp)
1157{ 1200{
1158 switch (bp->multi_mode) { 1201 switch (bp->multi_mode) {
@@ -1167,7 +1210,22 @@ void bnx2x_set_num_queues(struct bnx2x *bp)
1167 bp->num_queues = 1; 1210 bp->num_queues = 1;
1168 break; 1211 break;
1169 } 1212 }
1213
1214 /* Add special queues */
1215 bp->num_queues += NONE_ETH_CONTEXT_USE;
1216}
1217
1218#ifdef BCM_CNIC
1219static inline void bnx2x_set_fcoe_eth_macs(struct bnx2x *bp)
1220{
1221 if (!NO_FCOE(bp)) {
1222 if (!IS_MF_SD(bp))
1223 bnx2x_set_fip_eth_mac_addr(bp, 1);
1224 bnx2x_set_all_enode_macs(bp, 1);
1225 bp->flags |= FCOE_MACS_SET;
1226 }
1170} 1227}
1228#endif
1171 1229
1172static void bnx2x_release_firmware(struct bnx2x *bp) 1230static void bnx2x_release_firmware(struct bnx2x *bp)
1173{ 1231{
@@ -1177,6 +1235,20 @@ static void bnx2x_release_firmware(struct bnx2x *bp)
1177 release_firmware(bp->firmware); 1235 release_firmware(bp->firmware);
1178} 1236}
1179 1237
1238static inline int bnx2x_set_real_num_queues(struct bnx2x *bp)
1239{
1240 int rc, num = bp->num_queues;
1241
1242#ifdef BCM_CNIC
1243 if (NO_FCOE(bp))
1244 num -= FCOE_CONTEXT_USE;
1245
1246#endif
1247 netif_set_real_num_tx_queues(bp->dev, num);
1248 rc = netif_set_real_num_rx_queues(bp->dev, num);
1249 return rc;
1250}
1251
1180/* must be called with rtnl_lock */ 1252/* must be called with rtnl_lock */
1181int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 1253int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1182{ 1254{
@@ -1203,10 +1275,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1203 if (bnx2x_alloc_mem(bp)) 1275 if (bnx2x_alloc_mem(bp))
1204 return -ENOMEM; 1276 return -ENOMEM;
1205 1277
1206 netif_set_real_num_tx_queues(bp->dev, bp->num_queues); 1278 rc = bnx2x_set_real_num_queues(bp);
1207 rc = netif_set_real_num_rx_queues(bp->dev, bp->num_queues);
1208 if (rc) { 1279 if (rc) {
1209 BNX2X_ERR("Unable to update real_num_rx_queues\n"); 1280 BNX2X_ERR("Unable to set real_num_queues\n");
1210 goto load_error0; 1281 goto load_error0;
1211 } 1282 }
1212 1283
@@ -1214,6 +1285,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1214 bnx2x_fp(bp, i, disable_tpa) = 1285 bnx2x_fp(bp, i, disable_tpa) =
1215 ((bp->flags & TPA_ENABLE_FLAG) == 0); 1286 ((bp->flags & TPA_ENABLE_FLAG) == 0);
1216 1287
1288#ifdef BCM_CNIC
1289 /* We don't want TPA on FCoE L2 ring */
1290 bnx2x_fcoe(bp, disable_tpa) = 1;
1291#endif
1217 bnx2x_napi_enable(bp); 1292 bnx2x_napi_enable(bp);
1218 1293
1219 /* Send LOAD_REQUEST command to MCP 1294 /* Send LOAD_REQUEST command to MCP
@@ -1296,6 +1371,8 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1296 } 1371 }
1297 } 1372 }
1298 1373
1374 bnx2x_dcbx_init(bp);
1375
1299 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT; 1376 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
1300 1377
1301 rc = bnx2x_func_start(bp); 1378 rc = bnx2x_func_start(bp);
@@ -1344,6 +1421,10 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
1344 /* Now when Clients are configured we are ready to work */ 1421 /* Now when Clients are configured we are ready to work */
1345 bp->state = BNX2X_STATE_OPEN; 1422 bp->state = BNX2X_STATE_OPEN;
1346 1423
1424#ifdef BCM_CNIC
1425 bnx2x_set_fcoe_eth_macs(bp);
1426#endif
1427
1347 bnx2x_set_eth_mac(bp, 1); 1428 bnx2x_set_eth_mac(bp, 1);
1348 1429
1349 if (bp->port.pmf) 1430 if (bp->port.pmf)
@@ -1402,7 +1483,7 @@ load_error3:
1402 1483
1403 /* Free SKBs, SGEs, TPA pool and driver internals */ 1484 /* Free SKBs, SGEs, TPA pool and driver internals */
1404 bnx2x_free_skbs(bp); 1485 bnx2x_free_skbs(bp);
1405 for_each_queue(bp, i) 1486 for_each_rx_queue(bp, i)
1406 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 1487 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1407 1488
1408 /* Release IRQs */ 1489 /* Release IRQs */
@@ -1473,7 +1554,7 @@ int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
1473 1554
1474 /* Free SKBs, SGEs, TPA pool and driver internals */ 1555 /* Free SKBs, SGEs, TPA pool and driver internals */
1475 bnx2x_free_skbs(bp); 1556 bnx2x_free_skbs(bp);
1476 for_each_queue(bp, i) 1557 for_each_rx_queue(bp, i)
1477 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 1558 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
1478 1559
1479 bnx2x_free_mem(bp); 1560 bnx2x_free_mem(bp);
@@ -1577,6 +1658,17 @@ int bnx2x_poll(struct napi_struct *napi, int budget)
1577 1658
1578 /* Fall out from the NAPI loop if needed */ 1659 /* Fall out from the NAPI loop if needed */
1579 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) { 1660 if (!(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp))) {
1661#ifdef BCM_CNIC
1662 /* No need to update SB for FCoE L2 ring as long as
1663 * it's connected to the default SB and the SB
1664 * has been updated when NAPI was scheduled.
1665 */
1666 if (IS_FCOE_FP(fp)) {
1667 napi_complete(napi);
1668 break;
1669 }
1670#endif
1671
1580 bnx2x_update_fpsb_idx(fp); 1672 bnx2x_update_fpsb_idx(fp);
1581 /* bnx2x_has_rx_work() reads the status block, 1673 /* bnx2x_has_rx_work() reads the status block,
1582 * thus we need to ensure that status block indices 1674 * thus we need to ensure that status block indices
@@ -1692,11 +1784,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
1692 } 1784 }
1693 } 1785 }
1694 1786
1695 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 1787 if (skb_is_gso_v6(skb))
1696 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP); 1788 rc |= XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6;
1697 1789 else if (skb_is_gso(skb))
1698 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 1790 rc |= XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP;
1699 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
1700 1791
1701 return rc; 1792 return rc;
1702} 1793}
@@ -2242,7 +2333,7 @@ int __devinit bnx2x_alloc_mem_bp(struct bnx2x *bp)
2242 bp->fp = fp; 2333 bp->fp = fp;
2243 2334
2244 /* msix table */ 2335 /* msix table */
2245 tbl = kzalloc((bp->l2_cid_count + 1) * sizeof(*tbl), 2336 tbl = kzalloc((FP_SB_COUNT(bp->l2_cid_count) + 1) * sizeof(*tbl),
2246 GFP_KERNEL); 2337 GFP_KERNEL);
2247 if (!tbl) 2338 if (!tbl)
2248 goto alloc_err; 2339 goto alloc_err;
diff --git a/drivers/net/bnx2x/bnx2x_cmn.h b/drivers/net/bnx2x/bnx2x_cmn.h
index 6b28739c5302..03eb4d68e6bb 100644
--- a/drivers/net/bnx2x/bnx2x_cmn.h
+++ b/drivers/net/bnx2x/bnx2x_cmn.h
@@ -73,6 +73,16 @@ void bnx2x__link_status_update(struct bnx2x *bp);
73void bnx2x_link_report(struct bnx2x *bp); 73void bnx2x_link_report(struct bnx2x *bp);
74 74
75/** 75/**
76 * calculates MF speed according to current linespeed and MF
77 * configuration
78 *
79 * @param bp
80 *
81 * @return u16
82 */
83u16 bnx2x_get_mf_speed(struct bnx2x *bp);
84
85/**
76 * MSI-X slowpath interrupt handler 86 * MSI-X slowpath interrupt handler
77 * 87 *
78 * @param irq 88 * @param irq
@@ -232,6 +242,30 @@ int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource);
232 */ 242 */
233void bnx2x_set_eth_mac(struct bnx2x *bp, int set); 243void bnx2x_set_eth_mac(struct bnx2x *bp, int set);
234 244
245#ifdef BCM_CNIC
246/**
247 * Set/Clear FIP MAC(s) at the next enties in the CAM after the ETH
248 * MAC(s). This function will wait until the ramdord completion
249 * returns.
250 *
251 * @param bp driver handle
252 * @param set set or clear the CAM entry
253 *
254 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
255 */
256int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set);
257
258/**
259 * Set/Clear ALL_ENODE mcast MAC.
260 *
261 * @param bp
262 * @param set
263 *
264 * @return int
265 */
266int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set);
267#endif
268
235/** 269/**
236 * Set MAC filtering configurations. 270 * Set MAC filtering configurations.
237 * 271 *
@@ -290,6 +324,13 @@ int bnx2x_func_start(struct bnx2x *bp);
290void bnx2x_ilt_set_info(struct bnx2x *bp); 324void bnx2x_ilt_set_info(struct bnx2x *bp);
291 325
292/** 326/**
327 * Initialize dcbx protocol
328 *
329 * @param bp
330 */
331void bnx2x_dcbx_init(struct bnx2x *bp);
332
333/**
293 * Set power state to the requested value. Currently only D0 and 334 * Set power state to the requested value. Currently only D0 and
294 * D3hot are supported. 335 * D3hot are supported.
295 * 336 *
@@ -309,6 +350,9 @@ int bnx2x_nic_load(struct bnx2x *bp, int load_mode);
309/* hard_xmit callback */ 350/* hard_xmit callback */
310netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev); 351netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev);
311 352
353/* select_queue callback */
354u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb);
355
312int bnx2x_change_mac_addr(struct net_device *dev, void *p); 356int bnx2x_change_mac_addr(struct net_device *dev, void *p);
313 357
314/* NAPI poll Rx part */ 358/* NAPI poll Rx part */
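The bnx2x_select_queue() prototype declared above is meant to be plugged into the driver's net_device_ops so the stack consults it for every transmitted skb; the wiring itself lives elsewhere (presumably bnx2x_main.c), so the sketch below only illustrates the hook, omitting the many other handlers a real driver fills in:

	#include <linux/netdevice.h>

	/* Only the fields relevant here; unset members stay NULL. */
	static const struct net_device_ops demo_netdev_ops = {
		.ndo_start_xmit		= bnx2x_start_xmit,
		.ndo_select_queue	= bnx2x_select_queue,	/* steer FCoE/FIP frames */
	};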
@@ -685,7 +729,7 @@ static inline void bnx2x_add_all_napi(struct bnx2x *bp)
685 int i; 729 int i;
686 730
687 /* Add NAPI objects */ 731 /* Add NAPI objects */
688 for_each_queue(bp, i) 732 for_each_napi_queue(bp, i)
689 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi), 733 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
690 bnx2x_poll, BNX2X_NAPI_WEIGHT); 734 bnx2x_poll, BNX2X_NAPI_WEIGHT);
691} 735}
@@ -694,7 +738,7 @@ static inline void bnx2x_del_all_napi(struct bnx2x *bp)
694{ 738{
695 int i; 739 int i;
696 740
697 for_each_queue(bp, i) 741 for_each_napi_queue(bp, i)
698 netif_napi_del(&bnx2x_fp(bp, i, napi)); 742 netif_napi_del(&bnx2x_fp(bp, i, napi));
699} 743}
700 744
@@ -860,7 +904,7 @@ static inline void bnx2x_init_tx_rings(struct bnx2x *bp)
860{ 904{
861 int i, j; 905 int i, j;
862 906
863 for_each_queue(bp, j) { 907 for_each_tx_queue(bp, j) {
864 struct bnx2x_fastpath *fp = &bp->fp[j]; 908 struct bnx2x_fastpath *fp = &bp->fp[j];
865 909
866 for (i = 1; i <= NUM_TX_RINGS; i++) { 910 for (i = 1; i <= NUM_TX_RINGS; i++) {
@@ -939,7 +983,30 @@ static inline void bnx2x_set_next_page_rx_cq(struct bnx2x_fastpath *fp)
939 } 983 }
940} 984}
941 985
986#ifdef BCM_CNIC
987static inline void bnx2x_init_fcoe_fp(struct bnx2x *bp)
988{
989 bnx2x_fcoe(bp, cl_id) = BNX2X_FCOE_ETH_CL_ID +
990 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
991 bnx2x_fcoe(bp, cid) = BNX2X_FCOE_ETH_CID;
992 bnx2x_fcoe(bp, fw_sb_id) = DEF_SB_ID;
993 bnx2x_fcoe(bp, igu_sb_id) = bp->igu_dsb_id;
994 bnx2x_fcoe(bp, bp) = bp;
995 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
996 bnx2x_fcoe(bp, index) = FCOE_IDX;
997 bnx2x_fcoe(bp, rx_cons_sb) = BNX2X_FCOE_L2_RX_INDEX;
998 bnx2x_fcoe(bp, tx_cons_sb) = BNX2X_FCOE_L2_TX_INDEX;
999 /* qZone id equals to FW (per path) client id */
1000 bnx2x_fcoe(bp, cl_qzone_id) = bnx2x_fcoe(bp, cl_id) +
1001 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
1002 ETH_MAX_RX_CLIENTS_E1H);
1003 /* init shortcut */
1004 bnx2x_fcoe(bp, ustorm_rx_prods_offset) = CHIP_IS_E2(bp) ?
1005 USTORM_RX_PRODS_E2_OFFSET(bnx2x_fcoe(bp, cl_qzone_id)) :
1006 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), bnx2x_fcoe_fp(bp)->cl_id);
942 1007
1008}
1009#endif
943 1010
944static inline void __storm_memset_struct(struct bnx2x *bp, 1011static inline void __storm_memset_struct(struct bnx2x *bp,
945 u32 addr, size_t size, u32 *data) 1012 u32 addr, size_t size, u32 *data)
diff --git a/drivers/net/bnx2x/bnx2x_dcb.c b/drivers/net/bnx2x/bnx2x_dcb.c
new file mode 100644
index 000000000000..fb60021f81fb
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.c
@@ -0,0 +1,2118 @@
1/* bnx2x_dcb.c: Broadcom Everest network driver.
2 *
3 * Copyright 2009-2010 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Dmitry Kravkov
17 *
18 */
19#include <linux/netdevice.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22
23#include "bnx2x.h"
24#include "bnx2x_cmn.h"
25#include "bnx2x_dcb.h"
26
27
28/* forward declarations of dcbx related functions */
29static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp);
30static void bnx2x_pfc_set_pfc(struct bnx2x *bp);
31static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp);
32static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp);
33static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
34 u32 *set_configuration_ets_pg,
35 u32 *pri_pg_tbl);
36static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
37 u32 *pg_pri_orginal_spread,
38 struct pg_help_data *help_data);
39static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
40 struct pg_help_data *help_data,
41 struct dcbx_ets_feature *ets,
42 u32 *pg_pri_orginal_spread);
43static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
44 struct cos_help_data *cos_data,
45 u32 *pg_pri_orginal_spread,
46 struct dcbx_ets_feature *ets);
47static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp);
48
49
50static void bnx2x_pfc_set(struct bnx2x *bp)
51{
52 struct bnx2x_nig_brb_pfc_port_params pfc_params = {0};
53 u32 pri_bit, val = 0;
54 u8 pri;
55
56 /* Tx COS configuration */
57 if (bp->dcbx_port_params.ets.cos_params[0].pauseable)
58 pfc_params.rx_cos0_priority_mask =
59 bp->dcbx_port_params.ets.cos_params[0].pri_bitmask;
60 if (bp->dcbx_port_params.ets.cos_params[1].pauseable)
61 pfc_params.rx_cos1_priority_mask =
62 bp->dcbx_port_params.ets.cos_params[1].pri_bitmask;
63
64
65 /**
66 * Rx COS configuration
 67 * Changing PFC RX configuration.
 68 * In RX, COS0 will always be configured to lossy and COS1 to lossless
69 */
70 for (pri = 0 ; pri < MAX_PFC_PRIORITIES ; pri++) {
71 pri_bit = 1 << pri;
72
73 if (pri_bit & DCBX_PFC_PRI_PAUSE_MASK(bp))
74 val |= 1 << (pri * 4);
75 }
76
77 pfc_params.pkt_priority_to_cos = val;
78
79 /* RX COS0 */
80 pfc_params.llfc_low_priority_classes = 0;
81 /* RX COS1 */
82 pfc_params.llfc_high_priority_classes = DCBX_PFC_PRI_PAUSE_MASK(bp);
83
84 /* BRB configuration */
85 pfc_params.cos0_pauseable = false;
86 pfc_params.cos1_pauseable = true;
87
88 bnx2x_acquire_phy_lock(bp);
89 bp->link_params.feature_config_flags |= FEATURE_CONFIG_PFC_ENABLED;
90 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &pfc_params);
91 bnx2x_release_phy_lock(bp);
92}
93
94static void bnx2x_pfc_clear(struct bnx2x *bp)
95{
96 struct bnx2x_nig_brb_pfc_port_params nig_params = {0};
97 nig_params.pause_enable = 1;
98#ifdef BNX2X_SAFC
99 if (bp->flags & SAFC_TX_FLAG) {
100 u32 high = 0, low = 0;
101 int i;
102
103 for (i = 0; i < BNX2X_MAX_PRIORITY; i++) {
104 if (bp->pri_map[i] == 1)
105 high |= (1 << i);
106 if (bp->pri_map[i] == 0)
107 low |= (1 << i);
108 }
109
110 nig_params.llfc_low_priority_classes = high;
111 nig_params.llfc_low_priority_classes = low;
112
113 nig_params.pause_enable = 0;
114 nig_params.llfc_enable = 1;
115 nig_params.llfc_out_en = 1;
116 }
117#endif /* BNX2X_SAFC */
118 bnx2x_acquire_phy_lock(bp);
119 bp->link_params.feature_config_flags &= ~FEATURE_CONFIG_PFC_ENABLED;
120 bnx2x_update_pfc(&bp->link_params, &bp->link_vars, &nig_params);
121 bnx2x_release_phy_lock(bp);
122}
123
124static void bnx2x_dump_dcbx_drv_param(struct bnx2x *bp,
125 struct dcbx_features *features,
126 u32 error)
127{
128 u8 i = 0;
129 DP(NETIF_MSG_LINK, "local_mib.error %x\n", error);
130
131 /* PG */
132 DP(NETIF_MSG_LINK,
133 "local_mib.features.ets.enabled %x\n", features->ets.enabled);
134 for (i = 0; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++)
135 DP(NETIF_MSG_LINK,
136 "local_mib.features.ets.pg_bw_tbl[%d] %d\n", i,
137 DCBX_PG_BW_GET(features->ets.pg_bw_tbl, i));
138 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++)
139 DP(NETIF_MSG_LINK,
140 "local_mib.features.ets.pri_pg_tbl[%d] %d\n", i,
141 DCBX_PRI_PG_GET(features->ets.pri_pg_tbl, i));
142
143 /* pfc */
144 DP(NETIF_MSG_LINK, "dcbx_features.pfc.pri_en_bitmap %x\n",
145 features->pfc.pri_en_bitmap);
146 DP(NETIF_MSG_LINK, "dcbx_features.pfc.pfc_caps %x\n",
147 features->pfc.pfc_caps);
148 DP(NETIF_MSG_LINK, "dcbx_features.pfc.enabled %x\n",
149 features->pfc.enabled);
150
151 DP(NETIF_MSG_LINK, "dcbx_features.app.default_pri %x\n",
152 features->app.default_pri);
153 DP(NETIF_MSG_LINK, "dcbx_features.app.tc_supported %x\n",
154 features->app.tc_supported);
155 DP(NETIF_MSG_LINK, "dcbx_features.app.enabled %x\n",
156 features->app.enabled);
157 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++) {
158 DP(NETIF_MSG_LINK,
159 "dcbx_features.app.app_pri_tbl[%x].app_id %x\n",
160 i, features->app.app_pri_tbl[i].app_id);
161 DP(NETIF_MSG_LINK,
162 "dcbx_features.app.app_pri_tbl[%x].pri_bitmap %x\n",
163 i, features->app.app_pri_tbl[i].pri_bitmap);
164 DP(NETIF_MSG_LINK,
165 "dcbx_features.app.app_pri_tbl[%x].appBitfield %x\n",
166 i, features->app.app_pri_tbl[i].appBitfield);
167 }
168}
169
170static void bnx2x_dcbx_get_ap_priority(struct bnx2x *bp,
171 u8 pri_bitmap,
172 u8 llfc_traf_type)
173{
174 u32 pri = MAX_PFC_PRIORITIES;
175 u32 index = MAX_PFC_PRIORITIES - 1;
176 u32 pri_mask;
177 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
178
179 /* Choose the highest priority */
180 while ((MAX_PFC_PRIORITIES == pri) && (0 != index)) {
181 pri_mask = 1 << index;
182 if (GET_FLAGS(pri_bitmap, pri_mask))
183 pri = index ;
184 index--;
185 }
186
187 if (pri < MAX_PFC_PRIORITIES)
188 ttp[llfc_traf_type] = max_t(u32, ttp[llfc_traf_type], pri);
189}
190
191static void bnx2x_dcbx_get_ap_feature(struct bnx2x *bp,
192 struct dcbx_app_priority_feature *app,
193 u32 error) {
194 u8 index;
195 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
196
197 if (GET_FLAGS(error, DCBX_LOCAL_APP_ERROR))
198 DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_ERROR\n");
199
200 if (app->enabled && !GET_FLAGS(error, DCBX_LOCAL_APP_ERROR)) {
201
202 bp->dcbx_port_params.app.enabled = true;
203
204 for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
205 ttp[index] = 0;
206
207 if (app->default_pri < MAX_PFC_PRIORITIES)
208 ttp[LLFC_TRAFFIC_TYPE_NW] = app->default_pri;
209
210 for (index = 0 ; index < DCBX_MAX_APP_PROTOCOL; index++) {
211 struct dcbx_app_priority_entry *entry =
212 app->app_pri_tbl;
213
214 if (GET_FLAGS(entry[index].appBitfield,
215 DCBX_APP_SF_ETH_TYPE) &&
216 ETH_TYPE_FCOE == entry[index].app_id)
217 bnx2x_dcbx_get_ap_priority(bp,
218 entry[index].pri_bitmap,
219 LLFC_TRAFFIC_TYPE_FCOE);
220
221 if (GET_FLAGS(entry[index].appBitfield,
222 DCBX_APP_SF_PORT) &&
223 TCP_PORT_ISCSI == entry[index].app_id)
224 bnx2x_dcbx_get_ap_priority(bp,
225 entry[index].pri_bitmap,
226 LLFC_TRAFFIC_TYPE_ISCSI);
227 }
228 } else {
229 DP(NETIF_MSG_LINK, "DCBX_LOCAL_APP_DISABLED\n");
230 bp->dcbx_port_params.app.enabled = false;
231 for (index = 0 ; index < LLFC_DRIVER_TRAFFIC_TYPE_MAX; index++)
232 ttp[index] = INVALID_TRAFFIC_TYPE_PRIORITY;
233 }
234}
235
236static void bnx2x_dcbx_get_ets_feature(struct bnx2x *bp,
237 struct dcbx_ets_feature *ets,
238 u32 error) {
239 int i = 0;
240 u32 pg_pri_orginal_spread[DCBX_MAX_NUM_PG_BW_ENTRIES] = {0};
241 struct pg_help_data pg_help_data;
242 struct bnx2x_dcbx_cos_params *cos_params =
243 bp->dcbx_port_params.ets.cos_params;
244
245 memset(&pg_help_data, 0, sizeof(struct pg_help_data));
246
247
248 if (GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR))
249 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ERROR\n");
250
251
252 /* Clean up old settings of ets on COS */
253 for (i = 0; i < E2_NUM_OF_COS ; i++) {
254
255 cos_params[i].pauseable = false;
256 cos_params[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
257 cos_params[i].bw_tbl = DCBX_INVALID_COS_BW;
258 cos_params[i].pri_bitmask = DCBX_PFC_PRI_GET_NON_PAUSE(bp, 0);
259 }
260
261 if (bp->dcbx_port_params.app.enabled &&
262 !GET_FLAGS(error, DCBX_LOCAL_ETS_ERROR) &&
263 ets->enabled) {
264 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_ENABLE\n");
265 bp->dcbx_port_params.ets.enabled = true;
266
267 bnx2x_dcbx_get_ets_pri_pg_tbl(bp,
268 pg_pri_orginal_spread,
269 ets->pri_pg_tbl);
270
271 bnx2x_dcbx_get_num_pg_traf_type(bp,
272 pg_pri_orginal_spread,
273 &pg_help_data);
274
275 bnx2x_dcbx_fill_cos_params(bp, &pg_help_data,
276 ets, pg_pri_orginal_spread);
277
278 } else {
279 DP(NETIF_MSG_LINK, "DCBX_LOCAL_ETS_DISABLED\n");
280 bp->dcbx_port_params.ets.enabled = false;
281 ets->pri_pg_tbl[0] = 0;
282
283 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES ; i++)
284 DCBX_PG_BW_SET(ets->pg_bw_tbl, i, 1);
285 }
286}
287
288static void bnx2x_dcbx_get_pfc_feature(struct bnx2x *bp,
289 struct dcbx_pfc_feature *pfc, u32 error)
290{
291
292 if (GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR))
293 DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_ERROR\n");
294
295 if (bp->dcbx_port_params.app.enabled &&
296 !GET_FLAGS(error, DCBX_LOCAL_PFC_ERROR) &&
297 pfc->enabled) {
298 bp->dcbx_port_params.pfc.enabled = true;
299 bp->dcbx_port_params.pfc.priority_non_pauseable_mask =
300 ~(pfc->pri_en_bitmap);
301 } else {
302 DP(NETIF_MSG_LINK, "DCBX_LOCAL_PFC_DISABLED\n");
303 bp->dcbx_port_params.pfc.enabled = false;
304 bp->dcbx_port_params.pfc.priority_non_pauseable_mask = 0;
305 }
306}
307
308static void bnx2x_get_dcbx_drv_param(struct bnx2x *bp,
309 struct dcbx_features *features,
310 u32 error)
311{
312 bnx2x_dcbx_get_ap_feature(bp, &features->app, error);
313
314 bnx2x_dcbx_get_pfc_feature(bp, &features->pfc, error);
315
316 bnx2x_dcbx_get_ets_feature(bp, &features->ets, error);
317}
318
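/* Copy the local or remote DCBX MIB from shmem into the supplied buffer,
 * re-reading until the prefix and suffix sequence numbers match so that a
 * consistent snapshot is returned (bounded by the retry limit below).
 */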
319#define DCBX_LOCAL_MIB_MAX_TRY_READ (100)
320static int bnx2x_dcbx_read_mib(struct bnx2x *bp,
321 u32 *base_mib_addr,
322 u32 offset,
323 int read_mib_type)
324{
325 int max_try_read = 0, i;
326 u32 *buff, mib_size, prefix_seq_num, suffix_seq_num;
327 struct lldp_remote_mib *remote_mib;
328 struct lldp_local_mib *local_mib;
329
330
331 switch (read_mib_type) {
332 case DCBX_READ_LOCAL_MIB:
333 mib_size = sizeof(struct lldp_local_mib);
334 break;
335 case DCBX_READ_REMOTE_MIB:
336 mib_size = sizeof(struct lldp_remote_mib);
337 break;
338 default:
339 return 1; /*error*/
340 }
341
342 offset += BP_PORT(bp) * mib_size;
343
344 do {
345 buff = base_mib_addr;
346 for (i = 0; i < mib_size; i += 4, buff++)
347 *buff = REG_RD(bp, offset + i);
348
349 max_try_read++;
350
351 switch (read_mib_type) {
352 case DCBX_READ_LOCAL_MIB:
353 local_mib = (struct lldp_local_mib *) base_mib_addr;
354 prefix_seq_num = local_mib->prefix_seq_num;
355 suffix_seq_num = local_mib->suffix_seq_num;
356 break;
357 case DCBX_READ_REMOTE_MIB:
358 remote_mib = (struct lldp_remote_mib *) base_mib_addr;
359 prefix_seq_num = remote_mib->prefix_seq_num;
360 suffix_seq_num = remote_mib->suffix_seq_num;
361 break;
362 default:
363 return 1; /*error*/
364 }
365 } while ((prefix_seq_num != suffix_seq_num) &&
366 (max_try_read < DCBX_LOCAL_MIB_MAX_TRY_READ));
367
368 if (max_try_read >= DCBX_LOCAL_MIB_MAX_TRY_READ) {
369 BNX2X_ERR("MIB could not be read\n");
370 return 1;
371 }
372
373 return 0;
374}
375
376static void bnx2x_pfc_set_pfc(struct bnx2x *bp)
377{
378 if (CHIP_IS_E2(bp)) {
379 if (BP_PORT(bp)) {
380 BNX2X_ERR("4 port mode is not supported");
381 return;
382 }
383
384 if (bp->dcbx_port_params.pfc.enabled)
385
386 /* 1. Fills up common PFC structures if required.*/
387 /* 2. Configure NIG, MAC and BRB via the elink:
388 * elink must first check if BMAC is not in reset
389 * and only then configures the BMAC
390 * Or, configure EMAC.
391 */
392 bnx2x_pfc_set(bp);
393
394 else
395 bnx2x_pfc_clear(bp);
396 }
397}
398
399static void bnx2x_dcbx_stop_hw_tx(struct bnx2x *bp)
400{
401 DP(NETIF_MSG_LINK, "sending STOP TRAFFIC\n");
402 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STOP_TRAFFIC,
403 0 /* connectionless */,
404 0 /* dataHi is zero */,
405 0 /* dataLo is zero */,
406 1 /* common */);
407}
408
409static void bnx2x_dcbx_resume_hw_tx(struct bnx2x *bp)
410{
411 bnx2x_pfc_fw_struct_e2(bp);
412 DP(NETIF_MSG_LINK, "sending START TRAFFIC\n");
413 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_START_TRAFFIC,
414 0, /* connectionless */
415 U64_HI(bnx2x_sp_mapping(bp, pfc_config)),
416 U64_LO(bnx2x_sp_mapping(bp, pfc_config)),
417 1 /* common */);
418}
419
420static void bnx2x_dcbx_update_ets_params(struct bnx2x *bp)
421{
422 struct bnx2x_dcbx_pg_params *ets = &(bp->dcbx_port_params.ets);
423 u8 status = 0;
424
425 bnx2x_ets_disabled(&bp->link_params);
426
427 if (!ets->enabled)
428 return;
429
430 if ((ets->num_of_cos == 0) || (ets->num_of_cos > E2_NUM_OF_COS)) {
431 BNX2X_ERR("illegal num of cos= %x", ets->num_of_cos);
432 return;
433 }
434
435 /* valid COS entries */
436 if (ets->num_of_cos == 1) /* no ETS */
437 return;
438
439 /* sanity */
440 if (((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[0].strict) &&
441 (DCBX_INVALID_COS_BW == ets->cos_params[0].bw_tbl)) ||
442 ((BNX2X_DCBX_COS_NOT_STRICT == ets->cos_params[1].strict) &&
443 (DCBX_INVALID_COS_BW == ets->cos_params[1].bw_tbl))) {
444 BNX2X_ERR("all COS should have at least bw_limit or strict "
445 "ets->cos_params[0].strict= %x "
446 "ets->cos_params[0].bw_tbl= %x "
447 "ets->cos_params[1].strict= %x "
448 "ets->cos_params[1].bw_tbl= %x",
449 ets->cos_params[0].strict,
450 ets->cos_params[0].bw_tbl,
451 ets->cos_params[1].strict,
452 ets->cos_params[1].bw_tbl);
453 return;
454 }
455 /* If we join a group and there is bw_tbl and strict then bw rules */
456 if ((DCBX_INVALID_COS_BW != ets->cos_params[0].bw_tbl) &&
457 (DCBX_INVALID_COS_BW != ets->cos_params[1].bw_tbl)) {
458 u32 bw_tbl_0 = ets->cos_params[0].bw_tbl;
459 u32 bw_tbl_1 = ets->cos_params[1].bw_tbl;
460 /* Do not allow 0-100 configuration
461 * since PBF does not support it
462 * force 1-99 instead
463 */
464 if (bw_tbl_0 == 0) {
465 bw_tbl_0 = 1;
466 bw_tbl_1 = 99;
467 } else if (bw_tbl_1 == 0) {
468 bw_tbl_1 = 1;
469 bw_tbl_0 = 99;
470 }
471
472 bnx2x_ets_bw_limit(&bp->link_params, bw_tbl_0, bw_tbl_1);
473 } else {
474 if (ets->cos_params[0].strict == BNX2X_DCBX_COS_HIGH_STRICT)
475 status = bnx2x_ets_strict(&bp->link_params, 0);
476 else if (ets->cos_params[1].strict
477 == BNX2X_DCBX_COS_HIGH_STRICT)
478 status = bnx2x_ets_strict(&bp->link_params, 1);
479
480 if (status)
481 BNX2X_ERR("update_ets_params failed\n");
482 }
483}
484
485static int bnx2x_dcbx_read_shmem_neg_results(struct bnx2x *bp)
486{
487 struct lldp_local_mib local_mib = {0};
488 u32 dcbx_neg_res_offset = SHMEM2_RD(bp, dcbx_neg_res_offset);
489 int rc;
490
491 DP(NETIF_MSG_LINK, "dcbx_neg_res_offset 0x%x\n", dcbx_neg_res_offset);
492
493 if (SHMEM_DCBX_NEG_RES_NONE == dcbx_neg_res_offset) {
494 BNX2X_ERR("FW doesn't support dcbx_neg_res_offset\n");
495 return -EINVAL;
496 }
497 rc = bnx2x_dcbx_read_mib(bp, (u32 *)&local_mib, dcbx_neg_res_offset,
498 DCBX_READ_LOCAL_MIB);
499
500 if (rc) {
501 BNX2X_ERR("Failed to read local mib from FW\n");
502 return rc;
503 }
504
505 /* save features and error */
506 bp->dcbx_local_feat = local_mib.features;
507 bp->dcbx_error = local_mib.error;
508 return 0;
509}
510
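/* DCBX negotiation state machine: NEG_RECEIVED -> TX_PAUSED -> TX_RELEASED.
 * During the initial load each case falls through to the next one; at
 * runtime the caller invokes this function again for each later state.
 */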
511void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state)
512{
513 switch (state) {
514 case BNX2X_DCBX_STATE_NEG_RECEIVED:
515 {
516 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_NEG_RECEIVED\n");
517
518 /* Read neg results if dcbx is in the FW */
519 if (bnx2x_dcbx_read_shmem_neg_results(bp))
520 return;
521
522 bnx2x_dump_dcbx_drv_param(bp, &bp->dcbx_local_feat,
523 bp->dcbx_error);
524
525 bnx2x_get_dcbx_drv_param(bp, &bp->dcbx_local_feat,
526 bp->dcbx_error);
527
528 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
529 bnx2x_dcbx_stop_hw_tx(bp);
530 return;
531 }
532 /* fall through */
533 }
534 case BNX2X_DCBX_STATE_TX_PAUSED:
535 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_PAUSED\n");
536 bnx2x_pfc_set_pfc(bp);
537
538 bnx2x_dcbx_update_ets_params(bp);
539 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD) {
540 bnx2x_dcbx_resume_hw_tx(bp);
541 return;
542 }
543 /* fall through */
544 case BNX2X_DCBX_STATE_TX_RELEASED:
545 DP(NETIF_MSG_LINK, "BNX2X_DCBX_STATE_TX_RELEASED\n");
546 if (bp->state != BNX2X_STATE_OPENING_WAIT4_LOAD)
547 bnx2x_fw_command(bp, DRV_MSG_CODE_DCBX_PMF_DRV_OK, 0);
548
549 return;
550 default:
551 BNX2X_ERR("Unknown DCBX_STATE\n");
552 }
553}
554
555
556#define LLDP_STATS_OFFSET(bp) (BP_PORT(bp)*\
557 sizeof(struct lldp_dcbx_stat))
558
559/* calculate struct offset in array according to chip information */
560#define LLDP_PARAMS_OFFSET(bp) (BP_PORT(bp)*sizeof(struct lldp_params))
561
562#define LLDP_ADMIN_MIB_OFFSET(bp) (PORT_MAX*sizeof(struct lldp_params) + \
563 BP_PORT(bp)*sizeof(struct lldp_admin_mib))
564
565static void bnx2x_dcbx_lldp_updated_params(struct bnx2x *bp,
566 u32 dcbx_lldp_params_offset)
567{
568 struct lldp_params lldp_params = {0};
569 u32 i = 0, *buff = NULL;
570 u32 offset = dcbx_lldp_params_offset + LLDP_PARAMS_OFFSET(bp);
571
572 DP(NETIF_MSG_LINK, "lldp_offset 0x%x\n", offset);
573
574 if ((bp->lldp_config_params.overwrite_settings ==
575 BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE)) {
576 /* Read the data first */
577 buff = (u32 *)&lldp_params;
578 for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
579 *buff = REG_RD(bp, (offset + i));
580
581 lldp_params.msg_tx_hold =
582 (u8)bp->lldp_config_params.msg_tx_hold;
583 lldp_params.msg_fast_tx_interval =
584 (u8)bp->lldp_config_params.msg_fast_tx;
585 lldp_params.tx_crd_max =
586 (u8)bp->lldp_config_params.tx_credit_max;
587 lldp_params.msg_tx_interval =
588 (u8)bp->lldp_config_params.msg_tx_interval;
589 lldp_params.tx_fast =
590 (u8)bp->lldp_config_params.tx_fast;
591
592 /* Write the data.*/
593 buff = (u32 *)&lldp_params;
594 for (i = 0; i < sizeof(struct lldp_params); i += 4, buff++)
595 REG_WR(bp, (offset + i) , *buff);
596
597
598 } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
599 bp->lldp_config_params.overwrite_settings)
600 bp->lldp_config_params.overwrite_settings =
601 BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
602}
603
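/* Read the admin MIB back from shmem and, when local overrides are enabled,
 * rebuild it from the driver's dcbx_config_params (ETS, PFC and APP admin
 * settings) before writing the result back to shmem.
 */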
604static void bnx2x_dcbx_admin_mib_updated_params(struct bnx2x *bp,
605 u32 dcbx_lldp_params_offset)
606{
607 struct lldp_admin_mib admin_mib;
608 u32 i, other_traf_type = PREDEFINED_APP_IDX_MAX, traf_type = 0;
609 u32 *buff;
610 u32 offset = dcbx_lldp_params_offset + LLDP_ADMIN_MIB_OFFSET(bp);
611
612 /*shortcuts*/
613 struct dcbx_features *af = &admin_mib.features;
614 struct bnx2x_config_dcbx_params *dp = &bp->dcbx_config_params;
615
616 memset(&admin_mib, 0, sizeof(struct lldp_admin_mib));
617 buff = (u32 *)&admin_mib;
618 /* Read the data first */
619 for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
620 *buff = REG_RD(bp, (offset + i));
621
622 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON)
623 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
624 else
625 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_DCBX_ENABLED);
626
627 if ((BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
628 dp->overwrite_settings)) {
629 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_CEE_VERSION_MASK);
630 admin_mib.ver_cfg_flags |=
631 (dp->admin_dcbx_version << DCBX_CEE_VERSION_SHIFT) &
632 DCBX_CEE_VERSION_MASK;
633
634 af->ets.enabled = (u8)dp->admin_ets_enable;
635
636 af->pfc.enabled = (u8)dp->admin_pfc_enable;
637
638 /* FOR IEEE dp->admin_tc_supported_tx_enable */
639 if (dp->admin_ets_configuration_tx_enable)
640 SET_FLAGS(admin_mib.ver_cfg_flags,
641 DCBX_ETS_CONFIG_TX_ENABLED);
642 else
643 RESET_FLAGS(admin_mib.ver_cfg_flags,
644 DCBX_ETS_CONFIG_TX_ENABLED);
645 /* For IEEE admin_ets_recommendation_tx_enable */
646 if (dp->admin_pfc_tx_enable)
647 SET_FLAGS(admin_mib.ver_cfg_flags,
648 DCBX_PFC_CONFIG_TX_ENABLED);
649 else
650 RESET_FLAGS(admin_mib.ver_cfg_flags,
651 DCBX_PFC_CONFIG_TX_ENABLED);
652
653 if (dp->admin_application_priority_tx_enable)
654 SET_FLAGS(admin_mib.ver_cfg_flags,
655 DCBX_APP_CONFIG_TX_ENABLED);
656 else
657 RESET_FLAGS(admin_mib.ver_cfg_flags,
658 DCBX_APP_CONFIG_TX_ENABLED);
659
660 if (dp->admin_ets_willing)
661 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
662 else
663 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_ETS_WILLING);
664 /* For IEEE admin_ets_reco_valid */
665 if (dp->admin_pfc_willing)
666 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
667 else
668 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_PFC_WILLING);
669
670 if (dp->admin_app_priority_willing)
671 SET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
672 else
673 RESET_FLAGS(admin_mib.ver_cfg_flags, DCBX_APP_WILLING);
674
675 for (i = 0 ; i < DCBX_MAX_NUM_PG_BW_ENTRIES; i++) {
676 DCBX_PG_BW_SET(af->ets.pg_bw_tbl, i,
677 (u8)dp->admin_configuration_bw_precentage[i]);
678
679 DP(NETIF_MSG_LINK, "pg_bw_tbl[%d] = %02x\n",
680 i, DCBX_PG_BW_GET(af->ets.pg_bw_tbl, i));
681 }
682
683 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
684 DCBX_PRI_PG_SET(af->ets.pri_pg_tbl, i,
685 (u8)dp->admin_configuration_ets_pg[i]);
686
687 DP(NETIF_MSG_LINK, "pri_pg_tbl[%d] = %02x\n",
688 i, DCBX_PRI_PG_GET(af->ets.pri_pg_tbl, i));
689 }
690
691 /*For IEEE admin_recommendation_bw_precentage
692 *For IEEE admin_recommendation_ets_pg */
693 af->pfc.pri_en_bitmap = (u8)dp->admin_pfc_bitmap;
694 for (i = 0; i < 4; i++) {
695 if (dp->admin_priority_app_table[i].valid) {
696 struct bnx2x_admin_priority_app_table *table =
697 dp->admin_priority_app_table;
698 if ((ETH_TYPE_FCOE == table[i].app_id) &&
699 (TRAFFIC_TYPE_ETH == table[i].traffic_type))
700 traf_type = FCOE_APP_IDX;
701 else if ((TCP_PORT_ISCSI == table[i].app_id) &&
702 (TRAFFIC_TYPE_PORT == table[i].traffic_type))
703 traf_type = ISCSI_APP_IDX;
704 else
705 traf_type = other_traf_type++;
706
707 af->app.app_pri_tbl[traf_type].app_id =
708 table[i].app_id;
709
710 af->app.app_pri_tbl[traf_type].pri_bitmap =
711 (u8)(1 << table[i].priority);
712
713 af->app.app_pri_tbl[traf_type].appBitfield =
714 (DCBX_APP_ENTRY_VALID);
715
716 af->app.app_pri_tbl[traf_type].appBitfield |=
717 (TRAFFIC_TYPE_ETH == table[i].traffic_type) ?
718 DCBX_APP_SF_ETH_TYPE : DCBX_APP_SF_PORT;
719 }
720 }
721
722 af->app.default_pri = (u8)dp->admin_default_priority;
723
724 } else if (BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE ==
725 dp->overwrite_settings)
726 dp->overwrite_settings = BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID;
727
728 /* Write the data. */
729 buff = (u32 *)&admin_mib;
730 for (i = 0; i < sizeof(struct lldp_admin_mib); i += 4, buff++)
731 REG_WR(bp, (offset + i), *buff);
732}
733
734void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled)
735{
736 if (CHIP_IS_E2(bp) && !CHIP_MODE_IS_4_PORT(bp)) {
737 bp->dcb_state = dcb_on;
738 bp->dcbx_enabled = dcbx_enabled;
739 } else {
740 bp->dcb_state = false;
741 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_INVALID;
742 }
743 DP(NETIF_MSG_LINK, "DCB state [%s:%s]\n",
744 dcb_on ? "ON" : "OFF",
745 dcbx_enabled == BNX2X_DCBX_ENABLED_OFF ? "user-mode" :
746 dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF ? "on-chip static" :
747 dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_ON ?
748 "on-chip with negotiation" : "invalid");
749}
750
751void bnx2x_dcbx_init_params(struct bnx2x *bp)
752{
753 bp->dcbx_config_params.admin_dcbx_version = 0x0; /* 0 - CEE; 1 - IEEE */
754 bp->dcbx_config_params.admin_ets_willing = 1;
755 bp->dcbx_config_params.admin_pfc_willing = 1;
756 bp->dcbx_config_params.overwrite_settings = 1;
757 bp->dcbx_config_params.admin_ets_enable = 1;
758 bp->dcbx_config_params.admin_pfc_enable = 1;
759 bp->dcbx_config_params.admin_tc_supported_tx_enable = 1;
760 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
761 bp->dcbx_config_params.admin_pfc_tx_enable = 1;
762 bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
763 bp->dcbx_config_params.admin_ets_reco_valid = 1;
764 bp->dcbx_config_params.admin_app_priority_willing = 1;
765 bp->dcbx_config_params.admin_configuration_bw_precentage[0] = 00;
766 bp->dcbx_config_params.admin_configuration_bw_precentage[1] = 50;
767 bp->dcbx_config_params.admin_configuration_bw_precentage[2] = 50;
768 bp->dcbx_config_params.admin_configuration_bw_precentage[3] = 0;
769 bp->dcbx_config_params.admin_configuration_bw_precentage[4] = 0;
770 bp->dcbx_config_params.admin_configuration_bw_precentage[5] = 0;
771 bp->dcbx_config_params.admin_configuration_bw_precentage[6] = 0;
772 bp->dcbx_config_params.admin_configuration_bw_precentage[7] = 0;
773 bp->dcbx_config_params.admin_configuration_ets_pg[0] = 1;
774 bp->dcbx_config_params.admin_configuration_ets_pg[1] = 0;
775 bp->dcbx_config_params.admin_configuration_ets_pg[2] = 0;
776 bp->dcbx_config_params.admin_configuration_ets_pg[3] = 2;
777 bp->dcbx_config_params.admin_configuration_ets_pg[4] = 0;
778 bp->dcbx_config_params.admin_configuration_ets_pg[5] = 0;
779 bp->dcbx_config_params.admin_configuration_ets_pg[6] = 0;
780 bp->dcbx_config_params.admin_configuration_ets_pg[7] = 0;
781 bp->dcbx_config_params.admin_recommendation_bw_precentage[0] = 0;
782 bp->dcbx_config_params.admin_recommendation_bw_precentage[1] = 1;
783 bp->dcbx_config_params.admin_recommendation_bw_precentage[2] = 2;
784 bp->dcbx_config_params.admin_recommendation_bw_precentage[3] = 0;
785 bp->dcbx_config_params.admin_recommendation_bw_precentage[4] = 7;
786 bp->dcbx_config_params.admin_recommendation_bw_precentage[5] = 5;
787 bp->dcbx_config_params.admin_recommendation_bw_precentage[6] = 6;
788 bp->dcbx_config_params.admin_recommendation_bw_precentage[7] = 7;
789 bp->dcbx_config_params.admin_recommendation_ets_pg[0] = 0;
790 bp->dcbx_config_params.admin_recommendation_ets_pg[1] = 1;
791 bp->dcbx_config_params.admin_recommendation_ets_pg[2] = 2;
792 bp->dcbx_config_params.admin_recommendation_ets_pg[3] = 3;
793 bp->dcbx_config_params.admin_recommendation_ets_pg[4] = 4;
794 bp->dcbx_config_params.admin_recommendation_ets_pg[5] = 5;
795 bp->dcbx_config_params.admin_recommendation_ets_pg[6] = 6;
796 bp->dcbx_config_params.admin_recommendation_ets_pg[7] = 7;
797 bp->dcbx_config_params.admin_pfc_bitmap = 0x8; /* FCoE(3) enable */
798 bp->dcbx_config_params.admin_priority_app_table[0].valid = 1;
799 bp->dcbx_config_params.admin_priority_app_table[1].valid = 1;
800 bp->dcbx_config_params.admin_priority_app_table[2].valid = 0;
801 bp->dcbx_config_params.admin_priority_app_table[3].valid = 0;
802 bp->dcbx_config_params.admin_priority_app_table[0].priority = 3;
803 bp->dcbx_config_params.admin_priority_app_table[1].priority = 0;
804 bp->dcbx_config_params.admin_priority_app_table[2].priority = 0;
805 bp->dcbx_config_params.admin_priority_app_table[3].priority = 0;
806 bp->dcbx_config_params.admin_priority_app_table[0].traffic_type = 0;
807 bp->dcbx_config_params.admin_priority_app_table[1].traffic_type = 1;
808 bp->dcbx_config_params.admin_priority_app_table[2].traffic_type = 0;
809 bp->dcbx_config_params.admin_priority_app_table[3].traffic_type = 0;
810 bp->dcbx_config_params.admin_priority_app_table[0].app_id = 0x8906;
811 bp->dcbx_config_params.admin_priority_app_table[1].app_id = 3260;
812 bp->dcbx_config_params.admin_priority_app_table[2].app_id = 0;
813 bp->dcbx_config_params.admin_priority_app_table[3].app_id = 0;
814 bp->dcbx_config_params.admin_default_priority =
815 bp->dcbx_config_params.admin_priority_app_table[1].priority;
816}
817
818void bnx2x_dcbx_init(struct bnx2x *bp)
819{
820 u32 dcbx_lldp_params_offset = SHMEM_LLDP_DCBX_PARAMS_NONE;
821
822 if (bp->dcbx_enabled <= 0)
823 return;
824
825 /* validate:
826 * the chip supports this dcbx version,
827 * dcb is enabled,
828 * the function is the pmf,
829 * shmem2 contains DCBX support fields
830 */
831 DP(NETIF_MSG_LINK, "dcb_state %d bp->port.pmf %d\n",
832 bp->dcb_state, bp->port.pmf);
833
834 if (bp->dcb_state == BNX2X_DCB_STATE_ON && bp->port.pmf &&
835 SHMEM2_HAS(bp, dcbx_lldp_params_offset)) {
836 dcbx_lldp_params_offset =
837 SHMEM2_RD(bp, dcbx_lldp_params_offset);
838
839 DP(NETIF_MSG_LINK, "dcbx_lldp_params_offset 0x%x\n",
840 dcbx_lldp_params_offset);
841
842 if (SHMEM_LLDP_DCBX_PARAMS_NONE != dcbx_lldp_params_offset) {
843 bnx2x_dcbx_lldp_updated_params(bp,
844 dcbx_lldp_params_offset);
845
846 bnx2x_dcbx_admin_mib_updated_params(bp,
847 dcbx_lldp_params_offset);
848
849 /* set the default configuration provided by the bootcode (BC) */
850 bnx2x_dcbx_set_params(bp,
851 BNX2X_DCBX_STATE_NEG_RECEIVED);
852
853 bnx2x_fw_command(bp,
854 DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG, 0);
855 }
856 }
857}
858
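/* Reset the per-port traffic-type-to-priority/COS table in XSTORM internal
 * memory and mark both LLFC and DCB as disabled for this port.
 */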
859void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp)
860{
861 struct priority_cos pricos[MAX_PFC_TRAFFIC_TYPES];
862 u32 i = 0, addr;
863 memset(pricos, 0, sizeof(pricos));
864 /* Default initialization */
865 for (i = 0; i < MAX_PFC_TRAFFIC_TYPES; i++)
866 pricos[i].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
867
868 /* Store per port struct to internal memory */
869 addr = BAR_XSTRORM_INTMEM +
870 XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
871 offsetof(struct cmng_struct_per_port,
872 traffic_type_to_priority_cos);
873 __storm_memset_struct(bp, addr, sizeof(pricos), (u32 *)pricos);
874
875
876 /* LLFC disabled.*/
877 REG_WR8(bp, BAR_XSTRORM_INTMEM +
878 XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
879 offsetof(struct cmng_struct_per_port, llfc_mode),
880 LLFC_MODE_NONE);
881
882 /* DCBX disabled.*/
883 REG_WR8(bp, BAR_XSTRORM_INTMEM +
884 XSTORM_CMNG_PER_PORT_VARS_OFFSET(BP_PORT(bp)) +
885 offsetof(struct cmng_struct_per_port, dcb_enabled),
886 DCB_DISABLED);
887}
888
889static void
890bnx2x_dcbx_print_cos_params(struct bnx2x *bp,
891 struct flow_control_configuration *pfc_fw_cfg)
892{
893 u8 pri = 0;
894 u8 cos = 0;
895
896 DP(NETIF_MSG_LINK,
897 "pfc_fw_cfg->dcb_version %x\n", pfc_fw_cfg->dcb_version);
898 DP(NETIF_MSG_LINK,
899 "pdev->params.dcbx_port_params.pfc."
900 "priority_non_pauseable_mask %x\n",
901 bp->dcbx_port_params.pfc.priority_non_pauseable_mask);
902
903 for (cos = 0 ; cos < bp->dcbx_port_params.ets.num_of_cos ; cos++) {
904 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
905 "cos_params[%d].pri_bitmask %x\n", cos,
906 bp->dcbx_port_params.ets.cos_params[cos].pri_bitmask);
907
908 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
909 "cos_params[%d].bw_tbl %x\n", cos,
910 bp->dcbx_port_params.ets.cos_params[cos].bw_tbl);
911
912 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
913 "cos_params[%d].strict %x\n", cos,
914 bp->dcbx_port_params.ets.cos_params[cos].strict);
915
916 DP(NETIF_MSG_LINK, "pdev->params.dcbx_port_params.ets."
917 "cos_params[%d].pauseable %x\n", cos,
918 bp->dcbx_port_params.ets.cos_params[cos].pauseable);
919 }
920
921 for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
922 DP(NETIF_MSG_LINK,
923 "pfc_fw_cfg->traffic_type_to_priority_cos[%d]."
924 "priority %x\n", pri,
925 pfc_fw_cfg->traffic_type_to_priority_cos[pri].priority);
926
927 DP(NETIF_MSG_LINK,
928 "pfc_fw_cfg->traffic_type_to_priority_cos[%d].cos %x\n",
929 pri, pfc_fw_cfg->traffic_type_to_priority_cos[pri].cos);
930 }
931}
932
933/* fills help_data according to pg_info */
934static void bnx2x_dcbx_get_num_pg_traf_type(struct bnx2x *bp,
935 u32 *pg_pri_orginal_spread,
936 struct pg_help_data *help_data)
937{
938 bool pg_found = false;
939 u32 i, traf_type, add_traf_type, add_pg;
940 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
941 struct pg_entry_help_data *data = help_data->data; /* shortcut */
942
943 /* Set to invalid */
944 for (i = 0; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++)
945 data[i].pg = DCBX_ILLEGAL_PG;
946
947 for (add_traf_type = 0;
948 add_traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX; add_traf_type++) {
949 pg_found = false;
950 if (ttp[add_traf_type] < MAX_PFC_PRIORITIES) {
951 add_pg = (u8)pg_pri_orginal_spread[ttp[add_traf_type]];
952 for (traf_type = 0;
953 traf_type < LLFC_DRIVER_TRAFFIC_TYPE_MAX;
954 traf_type++) {
955 if (data[traf_type].pg == add_pg) {
956 if (!(data[traf_type].pg_priority &
957 (1 << ttp[add_traf_type])))
958 data[traf_type].
959 num_of_dif_pri++;
960 data[traf_type].pg_priority |=
961 (1 << ttp[add_traf_type]);
962 pg_found = true;
963 break;
964 }
965 }
966 if (false == pg_found) {
967 data[help_data->num_of_pg].pg = add_pg;
968 data[help_data->num_of_pg].pg_priority =
969 (1 << ttp[add_traf_type]);
970 data[help_data->num_of_pg].num_of_dif_pri = 1;
971 help_data->num_of_pg++;
972 }
973 }
974 DP(NETIF_MSG_LINK,
975 "add_traf_type %d pg_found %s num_of_pg %d\n",
976 add_traf_type, (false == pg_found) ? "NO" : "YES",
977 help_data->num_of_pg);
978 }
979}
980
981
982/*******************************************************************************
983 * Description: ETS disabled - all priorities are mapped to a single COS
984 *
985 * Return:
986 ******************************************************************************/
987static void bnx2x_dcbx_ets_disabled_entry_data(struct bnx2x *bp,
988 struct cos_help_data *cos_data,
989 u32 pri_join_mask)
990{
991 /* Only one priority, hence only one COS */
992 cos_data->data[0].pausable =
993 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
994 cos_data->data[0].pri_join_mask = pri_join_mask;
995 cos_data->data[0].cos_bw = 100;
996 cos_data->num_of_cos = 1;
997}
998
999/*******************************************************************************
1000 * Description: updating the cos bw
1001 *
1002 * Return:
1003 ******************************************************************************/
1004static inline void bnx2x_dcbx_add_to_cos_bw(struct bnx2x *bp,
1005 struct cos_entry_help_data *data,
1006 u8 pg_bw)
1007{
1008 if (data->cos_bw == DCBX_INVALID_COS_BW)
1009 data->cos_bw = pg_bw;
1010 else
1011 data->cos_bw += pg_bw;
1012}
1013
1014/*******************************************************************************
1015 * Description: separate pauseable from non-pauseable priorities
1016 *
1017 * Return:
1018 ******************************************************************************/
1019static void bnx2x_dcbx_separate_pauseable_from_non(struct bnx2x *bp,
1020 struct cos_help_data *cos_data,
1021 u32 *pg_pri_orginal_spread,
1022 struct dcbx_ets_feature *ets)
1023{
1024 u32 pri_tested = 0;
1025 u8 i = 0;
1026 u8 entry = 0;
1027 u8 pg_entry = 0;
1028 u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
1029
1030 cos_data->data[0].pausable = true;
1031 cos_data->data[1].pausable = false;
1032 cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
1033
1034 for (i = 0 ; i < num_of_pri ; i++) {
1035 pri_tested = 1 << bp->dcbx_port_params.
1036 app.traffic_type_priority[i];
1037
1038 if (pri_tested & DCBX_PFC_PRI_NON_PAUSE_MASK(bp)) {
1039 cos_data->data[1].pri_join_mask |= pri_tested;
1040 entry = 1;
1041 } else {
1042 cos_data->data[0].pri_join_mask |= pri_tested;
1043 entry = 0;
1044 }
1045 pg_entry = (u8)pg_pri_orginal_spread[bp->dcbx_port_params.
1046 app.traffic_type_priority[i]];
1047 /* There can be only one strict pg */
1048 if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES)
1049 bnx2x_dcbx_add_to_cos_bw(bp, &cos_data->data[entry],
1050 DCBX_PG_BW_GET(ets->pg_bw_tbl, pg_entry));
1051 else
1052 /* If we join a group and one is strict
1053 * then the bw rules */
1054 cos_data->data[entry].strict =
1055 BNX2X_DCBX_COS_HIGH_STRICT;
1056 }
1057 if ((0 == cos_data->data[0].pri_join_mask) &&
1058 (0 == cos_data->data[1].pri_join_mask))
1059 BNX2X_ERR("dcbx error: Both groups must have priorities\n");
1060}
1061
1062
1063#ifndef POWER_OF_2
1064#define POWER_OF_2(x) ((0 != x) && (0 == (x & (x-1))))
1065#endif
1066
1067static void bxn2x_dcbx_single_pg_to_cos_params(struct bnx2x *bp,
1068 struct pg_help_data *pg_help_data,
1069 struct cos_help_data *cos_data,
1070 u32 pri_join_mask,
1071 u8 num_of_dif_pri)
1072{
1073 u8 i = 0;
1074 u32 pri_tested = 0;
1075 u32 pri_mask_without_pri = 0;
1076 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
1077 /*debug*/
1078 if (num_of_dif_pri == 1) {
1079 bnx2x_dcbx_ets_disabled_entry_data(bp, cos_data, pri_join_mask);
1080 return;
1081 }
1082 /* single priority group */
1083 if (pg_help_data->data[0].pg < DCBX_MAX_NUM_PG_BW_ENTRIES) {
1084 /* If there are both pauseable and non-pauseable priorities,
1085 * the pauseable priorities go to the first queue and
1086 * the non-pauseable priorities go to the second queue.
1087 */
1088 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
1089 /* Pauseable */
1090 cos_data->data[0].pausable = true;
1091 /* Non pauseable.*/
1092 cos_data->data[1].pausable = false;
1093
1094 if (2 == num_of_dif_pri) {
1095 cos_data->data[0].cos_bw = 50;
1096 cos_data->data[1].cos_bw = 50;
1097 }
1098
1099 if (3 == num_of_dif_pri) {
1100 if (POWER_OF_2(DCBX_PFC_PRI_GET_PAUSE(bp,
1101 pri_join_mask))) {
1102 cos_data->data[0].cos_bw = 33;
1103 cos_data->data[1].cos_bw = 67;
1104 } else {
1105 cos_data->data[0].cos_bw = 67;
1106 cos_data->data[1].cos_bw = 33;
1107 }
1108 }
1109
1110 } else if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask)) {
1111 /* If there are only pauseable priorities,
1112 * then one/two priorities go to the first queue
1113 * and one priority goes to the second queue.
1114 */
1115 if (2 == num_of_dif_pri) {
1116 cos_data->data[0].cos_bw = 50;
1117 cos_data->data[1].cos_bw = 50;
1118 } else {
1119 cos_data->data[0].cos_bw = 67;
1120 cos_data->data[1].cos_bw = 33;
1121 }
1122 cos_data->data[1].pausable = true;
1123 cos_data->data[0].pausable = true;
1124 /* All priorities except FCOE */
1125 cos_data->data[0].pri_join_mask = (pri_join_mask &
1126 ((u8)~(1 << ttp[LLFC_TRAFFIC_TYPE_FCOE])));
1127 /* Only FCOE priority.*/
1128 cos_data->data[1].pri_join_mask =
1129 (1 << ttp[LLFC_TRAFFIC_TYPE_FCOE]);
1130 } else
1131 /* If there are only non-pauseable priorities,
1132 * they will all go to the same queue.
1133 */
1134 bnx2x_dcbx_ets_disabled_entry_data(bp,
1135 cos_data, pri_join_mask);
1136 } else {
1137 /* priority group which is not BW limited (PG#15):*/
1138 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
1139 /* If there are both pauseable and non-pauseable
1140 * priorities, the pauseable priorities go to the first
1141 * queue and the non-pauseable priorities
1142 * go to the second queue.
1143 */
1144 if (DCBX_PFC_PRI_GET_PAUSE(bp, pri_join_mask) >
1145 DCBX_PFC_PRI_GET_NON_PAUSE(bp, pri_join_mask)) {
1146 cos_data->data[0].strict =
1147 BNX2X_DCBX_COS_HIGH_STRICT;
1148 cos_data->data[1].strict =
1149 BNX2X_DCBX_COS_LOW_STRICT;
1150 } else {
1151 cos_data->data[0].strict =
1152 BNX2X_DCBX_COS_LOW_STRICT;
1153 cos_data->data[1].strict =
1154 BNX2X_DCBX_COS_HIGH_STRICT;
1155 }
1156 /* Pauseable */
1157 cos_data->data[0].pausable = true;
1158 /* Non pause-able.*/
1159 cos_data->data[1].pausable = false;
1160 } else {
1161 /* If there are only pauseable priorities or
1162 * only non-pauseable, the lower priorities go
1163 * to the first queue and the higher priorities go
1164 * to the second queue.
1165 */
1166 cos_data->data[0].pausable =
1167 cos_data->data[1].pausable =
1168 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
1169
1170 for (i = 0 ; i < LLFC_DRIVER_TRAFFIC_TYPE_MAX; i++) {
1171 pri_tested = 1 << bp->dcbx_port_params.
1172 app.traffic_type_priority[i];
1173 /* Remove priority tested */
1174 pri_mask_without_pri =
1175 (pri_join_mask & ((u8)(~pri_tested)));
1176 if (pri_mask_without_pri < pri_tested)
1177 break;
1178 }
1179
1180 if (i == LLFC_DRIVER_TRAFFIC_TYPE_MAX)
1181 BNX2X_ERR("Invalid value for pri_join_mask -"
1182 " could not find a priority\n");
1183
1184 cos_data->data[0].pri_join_mask = pri_mask_without_pri;
1185 cos_data->data[1].pri_join_mask = pri_tested;
1186 /* Both queues are strict priority,
1187 * and that with the highest priority
1188 * gets the highest strict priority in the arbiter.
1189 */
1190 cos_data->data[0].strict = BNX2X_DCBX_COS_LOW_STRICT;
1191 cos_data->data[1].strict = BNX2X_DCBX_COS_HIGH_STRICT;
1192 }
1193 }
1194}
1195
1196static void bnx2x_dcbx_two_pg_to_cos_params(
1197 struct bnx2x *bp,
1198 struct pg_help_data *pg_help_data,
1199 struct dcbx_ets_feature *ets,
1200 struct cos_help_data *cos_data,
1201 u32 *pg_pri_orginal_spread,
1202 u32 pri_join_mask,
1203 u8 num_of_dif_pri)
1204{
1205 u8 i = 0;
1206 u8 pg[E2_NUM_OF_COS] = {0};
1207
1208 /* If there are both pauseable and non-pauseable priorities,
1209 * the pauseable priorities go to the first queue and
1210 * the non-pauseable priorities go to the second queue.
1211 */
1212 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask)) {
1213 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
1214 pg_help_data->data[0].pg_priority) ||
1215 IS_DCBX_PFC_PRI_MIX_PAUSE(bp,
1216 pg_help_data->data[1].pg_priority)) {
1217 /* If one PG contains both pauseable and
1218 * non-pauseable priorities then ETS is disabled.
1219 */
1220 bnx2x_dcbx_separate_pauseable_from_non(bp, cos_data,
1221 pg_pri_orginal_spread, ets);
1222 bp->dcbx_port_params.ets.enabled = false;
1223 return;
1224 }
1225
1226 /* Pauseable */
1227 cos_data->data[0].pausable = true;
1228 /* Non pauseable. */
1229 cos_data->data[1].pausable = false;
1230 if (IS_DCBX_PFC_PRI_ONLY_PAUSE(bp,
1231 pg_help_data->data[0].pg_priority)) {
1232 /* 0 is pauseable */
1233 cos_data->data[0].pri_join_mask =
1234 pg_help_data->data[0].pg_priority;
1235 pg[0] = pg_help_data->data[0].pg;
1236 cos_data->data[1].pri_join_mask =
1237 pg_help_data->data[1].pg_priority;
1238 pg[1] = pg_help_data->data[1].pg;
1239 } else {/* 1 is pauseable */
1240 cos_data->data[0].pri_join_mask =
1241 pg_help_data->data[1].pg_priority;
1242 pg[0] = pg_help_data->data[1].pg;
1243 cos_data->data[1].pri_join_mask =
1244 pg_help_data->data[0].pg_priority;
1245 pg[1] = pg_help_data->data[0].pg;
1246 }
1247 } else {
1248 /* If there are only pauseable priorities or
1249 * only non-pauseable, each PG goes to a queue.
1250 */
1251 cos_data->data[0].pausable = cos_data->data[1].pausable =
1252 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
1253 cos_data->data[0].pri_join_mask =
1254 pg_help_data->data[0].pg_priority;
1255 pg[0] = pg_help_data->data[0].pg;
1256 cos_data->data[1].pri_join_mask =
1257 pg_help_data->data[1].pg_priority;
1258 pg[1] = pg_help_data->data[1].pg;
1259 }
1260
1261 /* There can be only one strict pg */
1262 for (i = 0 ; i < E2_NUM_OF_COS; i++) {
1263 if (pg[i] < DCBX_MAX_NUM_PG_BW_ENTRIES)
1264 cos_data->data[i].cos_bw =
1265 DCBX_PG_BW_GET(ets->pg_bw_tbl, pg[i]);
1266 else
1267 cos_data->data[i].strict = BNX2X_DCBX_COS_HIGH_STRICT;
1268 }
1269}
1270
1271/*******************************************************************************
1272 * Description: three priority groups to COS mapping
1273 *
1274 * Return:
1275 ******************************************************************************/
1276static void bnx2x_dcbx_three_pg_to_cos_params(
1277 struct bnx2x *bp,
1278 struct pg_help_data *pg_help_data,
1279 struct dcbx_ets_feature *ets,
1280 struct cos_help_data *cos_data,
1281 u32 *pg_pri_orginal_spread,
1282 u32 pri_join_mask,
1283 u8 num_of_dif_pri)
1284{
1285 u8 i = 0;
1286 u32 pri_tested = 0;
1287 u8 entry = 0;
1288 u8 pg_entry = 0;
1289 bool b_found_strict = false;
1290 u8 num_of_pri = LLFC_DRIVER_TRAFFIC_TYPE_MAX;
1291
1292 cos_data->data[0].pri_join_mask = cos_data->data[1].pri_join_mask = 0;
1293 /* If there are both pauseable and non-pauseable priorities,
1294 * the pauseable priorities go to the first queue and the
1295 * non-pauseable priorities go to the second queue.
1296 */
1297 if (IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pri_join_mask))
1298 bnx2x_dcbx_separate_pauseable_from_non(bp,
1299 cos_data, pg_pri_orginal_spread, ets);
1300 else {
1301 /* If two BW-limited PG-s were combined to one queue,
1302 * the BW is their sum.
1303 *
1304 * If there are only pauseable priorities or only non-pauseable,
1305 * and there are both BW-limited and non-BW-limited PG-s,
1306 * the BW-limited PG/s go to one queue and the non-BW-limited
1307 * PG/s go to the second queue.
1308 *
1309 * If there are only pauseable priorities or only non-pauseable
1310 * and all are BW limited, then two priorities go to the first
1311 * queue and one priority goes to the second queue.
1312 *
1313 * We will join these two cases:
1314 * if one is BW limited it will go to the second queue,
1315 * otherwise the last priority will get it
1316 */
1317
1318 cos_data->data[0].pausable = cos_data->data[1].pausable =
1319 IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pri_join_mask);
1320
1321 for (i = 0 ; i < num_of_pri; i++) {
1322 pri_tested = 1 << bp->dcbx_port_params.
1323 app.traffic_type_priority[i];
1324 pg_entry = (u8)pg_pri_orginal_spread[bp->
1325 dcbx_port_params.app.traffic_type_priority[i]];
1326
1327 if (pg_entry < DCBX_MAX_NUM_PG_BW_ENTRIES) {
1328 entry = 0;
1329
1330 if (i == (num_of_pri-1) &&
1331 false == b_found_strict)
1332 /* last entry will be handled separately
1333 * If no priority is strict then the last
1334 * entry goes to the last queue. */
1335 entry = 1;
1336 cos_data->data[entry].pri_join_mask |=
1337 pri_tested;
1338 bnx2x_dcbx_add_to_cos_bw(bp,
1339 &cos_data->data[entry],
1340 DCBX_PG_BW_GET(ets->pg_bw_tbl,
1341 pg_entry));
1342 } else {
1343 b_found_strict = true;
1344 cos_data->data[1].pri_join_mask |= pri_tested;
1345 /* If we join a group and one is strict
1346 * then the bw rules */
1347 cos_data->data[1].strict =
1348 BNX2X_DCBX_COS_HIGH_STRICT;
1349 }
1350 }
1351 }
1352}
1353
1354
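/* Translate the priority-group help data gathered from the negotiated ETS
 * feature into the two hardware COS entries (bandwidth, strict priority and
 * pauseable/non-pauseable priority masks), according to the number of PGs.
 */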
1355static void bnx2x_dcbx_fill_cos_params(struct bnx2x *bp,
1356 struct pg_help_data *help_data,
1357 struct dcbx_ets_feature *ets,
1358 u32 *pg_pri_orginal_spread)
1359{
1360 struct cos_help_data cos_data;
1361 u8 i = 0;
1362 u32 pri_join_mask = 0;
1363 u8 num_of_dif_pri = 0;
1364
1365 memset(&cos_data, 0, sizeof(cos_data));
1366 /* Validate the pg value */
1367 for (i = 0; i < help_data->num_of_pg ; i++) {
1368 if (DCBX_STRICT_PRIORITY != help_data->data[i].pg &&
1369 DCBX_MAX_NUM_PG_BW_ENTRIES <= help_data->data[i].pg)
1370 BNX2X_ERR("Invalid pg[%d] data %x\n", i,
1371 help_data->data[i].pg);
1372 pri_join_mask |= help_data->data[i].pg_priority;
1373 num_of_dif_pri += help_data->data[i].num_of_dif_pri;
1374 }
1375
1376 /* default settings */
1377 cos_data.num_of_cos = 2;
1378 for (i = 0; i < E2_NUM_OF_COS ; i++) {
1379 cos_data.data[i].pri_join_mask = pri_join_mask;
1380 cos_data.data[i].pausable = false;
1381 cos_data.data[i].strict = BNX2X_DCBX_COS_NOT_STRICT;
1382 cos_data.data[i].cos_bw = DCBX_INVALID_COS_BW;
1383 }
1384
1385 switch (help_data->num_of_pg) {
1386 case 1:
1387
1388 bxn2x_dcbx_single_pg_to_cos_params(
1389 bp,
1390 help_data,
1391 &cos_data,
1392 pri_join_mask,
1393 num_of_dif_pri);
1394 break;
1395 case 2:
1396 bnx2x_dcbx_two_pg_to_cos_params(
1397 bp,
1398 help_data,
1399 ets,
1400 &cos_data,
1401 pg_pri_orginal_spread,
1402 pri_join_mask,
1403 num_of_dif_pri);
1404 break;
1405
1406 case 3:
1407 bnx2x_dcbx_three_pg_to_cos_params(
1408 bp,
1409 help_data,
1410 ets,
1411 &cos_data,
1412 pg_pri_orginal_spread,
1413 pri_join_mask,
1414 num_of_dif_pri);
1415
1416 break;
1417 default:
1418 BNX2X_ERR("Wrong pg_help_data.num_of_pg\n");
1419 bnx2x_dcbx_ets_disabled_entry_data(bp,
1420 &cos_data, pri_join_mask);
1421 }
1422
1423 for (i = 0; i < cos_data.num_of_cos ; i++) {
1424 struct bnx2x_dcbx_cos_params *params =
1425 &bp->dcbx_port_params.ets.cos_params[i];
1426
1427 params->pauseable = cos_data.data[i].pausable;
1428 params->strict = cos_data.data[i].strict;
1429 params->bw_tbl = cos_data.data[i].cos_bw;
1430 if (params->pauseable) {
1431 params->pri_bitmask =
1432 DCBX_PFC_PRI_GET_PAUSE(bp,
1433 cos_data.data[i].pri_join_mask);
1434 DP(NETIF_MSG_LINK, "COS %d PAUSABLE prijoinmask 0x%x\n",
1435 i, cos_data.data[i].pri_join_mask);
1436 } else {
1437 params->pri_bitmask =
1438 DCBX_PFC_PRI_GET_NON_PAUSE(bp,
1439 cos_data.data[i].pri_join_mask);
1440 DP(NETIF_MSG_LINK, "COS %d NONPAUSABLE prijoinmask "
1441 "0x%x\n",
1442 i, cos_data.data[i].pri_join_mask);
1443 }
1444 }
1445
1446 bp->dcbx_port_params.ets.num_of_cos = cos_data.num_of_cos;
1447}
1448
1449static void bnx2x_dcbx_get_ets_pri_pg_tbl(struct bnx2x *bp,
1450 u32 *set_configuration_ets_pg,
1451 u32 *pri_pg_tbl)
1452{
1453 int i;
1454
1455 for (i = 0; i < DCBX_MAX_NUM_PRI_PG_ENTRIES; i++) {
1456 set_configuration_ets_pg[i] = DCBX_PRI_PG_GET(pri_pg_tbl, i);
1457
1458 DP(NETIF_MSG_LINK, "set_configuration_ets_pg[%d] = 0x%x\n",
1459 i, set_configuration_ets_pg[i]);
1460 }
1461}
1462
1463/*******************************************************************************
1464 * Description: Fill pfc_config struct that will be sent in DCBX start ramrod
1465 *
1466 * Return:
1467 ******************************************************************************/
1468static void bnx2x_pfc_fw_struct_e2(struct bnx2x *bp)
1469{
1470 struct flow_control_configuration *pfc_fw_cfg = NULL;
1471 u16 pri_bit = 0;
1472 u8 cos = 0, pri = 0;
1473 struct priority_cos *tt2cos;
1474 u32 *ttp = bp->dcbx_port_params.app.traffic_type_priority;
1475
1476 pfc_fw_cfg = (struct flow_control_configuration *)
1477 bnx2x_sp(bp, pfc_config);
1478 memset(pfc_fw_cfg, 0, sizeof(struct flow_control_configuration));
1479
1480 /*shortcut*/
1481 tt2cos = pfc_fw_cfg->traffic_type_to_priority_cos;
1482
1483 /* Fw version should be incremented each update */
1484 pfc_fw_cfg->dcb_version = ++bp->dcb_version;
1485 pfc_fw_cfg->dcb_enabled = DCB_ENABLED;
1486
1487 /* Default initialization */
1488 for (pri = 0; pri < MAX_PFC_TRAFFIC_TYPES ; pri++) {
1489 tt2cos[pri].priority = LLFC_TRAFFIC_TYPE_TO_PRIORITY_UNMAPPED;
1490 tt2cos[pri].cos = 0;
1491 }
1492
1493 /* Fill priority parameters */
1494 for (pri = 0; pri < LLFC_DRIVER_TRAFFIC_TYPE_MAX; pri++) {
1495 tt2cos[pri].priority = ttp[pri];
1496 pri_bit = 1 << tt2cos[pri].priority;
1497
1498 /* Fill COS parameters based on the calculated COS to
1499 * make it more general for future use */
1500 for (cos = 0; cos < bp->dcbx_port_params.ets.num_of_cos; cos++)
1501 if (bp->dcbx_port_params.ets.cos_params[cos].
1502 pri_bitmask & pri_bit)
1503 tt2cos[pri].cos = cos;
1504 }
1505 bnx2x_dcbx_print_cos_params(bp, pfc_fw_cfg);
1506}
1507/* DCB netlink */
1508#ifdef BCM_DCB
1509#include <linux/dcbnl.h>
1510
1511#define BNX2X_DCBX_CAPS (DCB_CAP_DCBX_LLD_MANAGED | \
1512 DCB_CAP_DCBX_VER_CEE | DCB_CAP_DCBX_STATIC)
1513
1514static inline bool bnx2x_dcbnl_set_valid(struct bnx2x *bp)
1515{
1516 /* validate dcbnl call that may change HW state:
1517 * DCB is on and DCBX mode was SUCCESSFULLY set by the user.
1518 */
1519 return bp->dcb_state && bp->dcbx_mode_uset;
1520}
1521
1522static u8 bnx2x_dcbnl_get_state(struct net_device *netdev)
1523{
1524 struct bnx2x *bp = netdev_priv(netdev);
1525 DP(NETIF_MSG_LINK, "state = %d\n", bp->dcb_state);
1526 return bp->dcb_state;
1527}
1528
1529static u8 bnx2x_dcbnl_set_state(struct net_device *netdev, u8 state)
1530{
1531 struct bnx2x *bp = netdev_priv(netdev);
1532 DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
1533
1534 bnx2x_dcbx_set_state(bp, (state ? true : false), bp->dcbx_enabled);
1535 return 0;
1536}
1537
1538static void bnx2x_dcbnl_get_perm_hw_addr(struct net_device *netdev,
1539 u8 *perm_addr)
1540{
1541 struct bnx2x *bp = netdev_priv(netdev);
1542 DP(NETIF_MSG_LINK, "GET-PERM-ADDR\n");
1543
1544 /* first the HW mac address */
1545 memcpy(perm_addr, netdev->dev_addr, netdev->addr_len);
1546
1547#ifdef BCM_CNIC
1548 /* second SAN address */
1549 memcpy(perm_addr+netdev->addr_len, bp->fip_mac, netdev->addr_len);
1550#endif
1551}
1552
1553static void bnx2x_dcbnl_set_pg_tccfg_tx(struct net_device *netdev, int prio,
1554 u8 prio_type, u8 pgid, u8 bw_pct,
1555 u8 up_map)
1556{
1557 struct bnx2x *bp = netdev_priv(netdev);
1558
1559 DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, pgid);
1560 if (!bnx2x_dcbnl_set_valid(bp) || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
1561 return;
1562
1563 /**
1564 * bw_pct ignored - bandwidth percentage division between user
1565 * priorities within the same group is not
1566 * standard and hence not supported
1567 *
1568 * prio_type ignored - priority levels within the same group are not
1569 * standard and hence are not supported. According
1570 * to the standard, pgid 15 is dedicated to strict
1571 * priority traffic (on the port level).
1572 *
1573 * up_map ignored
1574 */
1575
1576 bp->dcbx_config_params.admin_configuration_ets_pg[prio] = pgid;
1577 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
1578}
1579
1580static void bnx2x_dcbnl_set_pg_bwgcfg_tx(struct net_device *netdev,
1581 int pgid, u8 bw_pct)
1582{
1583 struct bnx2x *bp = netdev_priv(netdev);
1584 DP(NETIF_MSG_LINK, "pgid[%d] = %d\n", pgid, bw_pct);
1585
1586 if (!bnx2x_dcbnl_set_valid(bp) || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
1587 return;
1588
1589 bp->dcbx_config_params.admin_configuration_bw_precentage[pgid] = bw_pct;
1590 bp->dcbx_config_params.admin_ets_configuration_tx_enable = 1;
1591}
1592
1593static void bnx2x_dcbnl_set_pg_tccfg_rx(struct net_device *netdev, int prio,
1594 u8 prio_type, u8 pgid, u8 bw_pct,
1595 u8 up_map)
1596{
1597 struct bnx2x *bp = netdev_priv(netdev);
1598 DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
1599}
1600
1601static void bnx2x_dcbnl_set_pg_bwgcfg_rx(struct net_device *netdev,
1602 int pgid, u8 bw_pct)
1603{
1604 struct bnx2x *bp = netdev_priv(netdev);
1605 DP(NETIF_MSG_LINK, "Nothing to set; No RX support\n");
1606}
1607
1608static void bnx2x_dcbnl_get_pg_tccfg_tx(struct net_device *netdev, int prio,
1609 u8 *prio_type, u8 *pgid, u8 *bw_pct,
1610 u8 *up_map)
1611{
1612 struct bnx2x *bp = netdev_priv(netdev);
1613 DP(NETIF_MSG_LINK, "prio = %d\n", prio);
1614
1615 /**
1616 * bw_pct ignored - bandwidth percentage division between user
1617 * priorities within the same group is not
1618 * standard and hence not supported
1619 *
1620 * prio_type ignored - priority levels within the same group are not
1621 * standard and hence are not supported. According
1622 * to the standard, pgid 15 is dedicated to strict
1623 * priority traffic (on the port level).
1624 *
1625 * up_map ignored
1626 */
1627 *up_map = *bw_pct = *prio_type = *pgid = 0;
1628
1629 if (!bp->dcb_state || prio >= DCBX_MAX_NUM_PRI_PG_ENTRIES)
1630 return;
1631
1632 *pgid = DCBX_PRI_PG_GET(bp->dcbx_local_feat.ets.pri_pg_tbl, prio);
1633}
1634
1635static void bnx2x_dcbnl_get_pg_bwgcfg_tx(struct net_device *netdev,
1636 int pgid, u8 *bw_pct)
1637{
1638 struct bnx2x *bp = netdev_priv(netdev);
1639 DP(NETIF_MSG_LINK, "pgid = %d\n", pgid);
1640
1641 *bw_pct = 0;
1642
1643 if (!bp->dcb_state || pgid >= DCBX_MAX_NUM_PG_BW_ENTRIES)
1644 return;
1645
1646 *bw_pct = DCBX_PG_BW_GET(bp->dcbx_local_feat.ets.pg_bw_tbl, pgid);
1647}
1648
1649static void bnx2x_dcbnl_get_pg_tccfg_rx(struct net_device *netdev, int prio,
1650 u8 *prio_type, u8 *pgid, u8 *bw_pct,
1651 u8 *up_map)
1652{
1653 struct bnx2x *bp = netdev_priv(netdev);
1654 DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
1655
1656 *prio_type = *pgid = *bw_pct = *up_map = 0;
1657}
1658
1659static void bnx2x_dcbnl_get_pg_bwgcfg_rx(struct net_device *netdev,
1660 int pgid, u8 *bw_pct)
1661{
1662 struct bnx2x *bp = netdev_priv(netdev);
1663 DP(NETIF_MSG_LINK, "Nothing to get; No RX support\n");
1664
1665 *bw_pct = 0;
1666}
1667
1668static void bnx2x_dcbnl_set_pfc_cfg(struct net_device *netdev, int prio,
1669 u8 setting)
1670{
1671 struct bnx2x *bp = netdev_priv(netdev);
1672 DP(NETIF_MSG_LINK, "prio[%d] = %d\n", prio, setting);
1673
1674 if (!bnx2x_dcbnl_set_valid(bp) || prio >= MAX_PFC_PRIORITIES)
1675 return;
1676
1677 bp->dcbx_config_params.admin_pfc_bitmap |= ((setting ? 1 : 0) << prio);
1678
1679 if (setting)
1680 bp->dcbx_config_params.admin_pfc_tx_enable = 1;
1681}
1682
1683static void bnx2x_dcbnl_get_pfc_cfg(struct net_device *netdev, int prio,
1684 u8 *setting)
1685{
1686 struct bnx2x *bp = netdev_priv(netdev);
1687 DP(NETIF_MSG_LINK, "prio = %d\n", prio);
1688
1689 *setting = 0;
1690
1691 if (!bp->dcb_state || prio >= MAX_PFC_PRIORITIES)
1692 return;
1693
1694 *setting = (bp->dcbx_local_feat.pfc.pri_en_bitmap >> prio) & 0x1;
1695}
1696
1697static u8 bnx2x_dcbnl_set_all(struct net_device *netdev)
1698{
1699 struct bnx2x *bp = netdev_priv(netdev);
1700 int rc = 0;
1701
1702 DP(NETIF_MSG_LINK, "SET-ALL\n");
1703
1704 if (!bnx2x_dcbnl_set_valid(bp))
1705 return 1;
1706
1707 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
1708 netdev_err(bp->dev, "Handling parity error recovery. "
1709 "Try again later\n");
1710 return 1;
1711 }
1712 if (netif_running(bp->dev)) {
1713 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
1714 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
1715 }
1716 DP(NETIF_MSG_LINK, "set_dcbx_params done (%d)\n", rc);
1717 if (rc)
1718 return 1;
1719
1720 return 0;
1721}
1722
1723static u8 bnx2x_dcbnl_get_cap(struct net_device *netdev, int capid, u8 *cap)
1724{
1725 struct bnx2x *bp = netdev_priv(netdev);
1726 u8 rval = 0;
1727
1728 if (bp->dcb_state) {
1729 switch (capid) {
1730 case DCB_CAP_ATTR_PG:
1731 *cap = true;
1732 break;
1733 case DCB_CAP_ATTR_PFC:
1734 *cap = true;
1735 break;
1736 case DCB_CAP_ATTR_UP2TC:
1737 *cap = false;
1738 break;
1739 case DCB_CAP_ATTR_PG_TCS:
1740 *cap = 0x80; /* 8 priorities for PGs */
1741 break;
1742 case DCB_CAP_ATTR_PFC_TCS:
1743 *cap = 0x80; /* 8 priorities for PFC */
1744 break;
1745 case DCB_CAP_ATTR_GSP:
1746 *cap = true;
1747 break;
1748 case DCB_CAP_ATTR_BCN:
1749 *cap = false;
1750 break;
1751 case DCB_CAP_ATTR_DCBX:
1752 *cap = BNX2X_DCBX_CAPS; break;
1753 default:
1754 rval = -EINVAL;
1755 break;
1756 }
1757 } else
1758 rval = -EINVAL;
1759
1760 DP(NETIF_MSG_LINK, "capid %d:%x\n", capid, *cap);
1761 return rval;
1762}
1763
1764static u8 bnx2x_dcbnl_get_numtcs(struct net_device *netdev, int tcid, u8 *num)
1765{
1766 struct bnx2x *bp = netdev_priv(netdev);
1767 u8 rval = 0;
1768
1769 DP(NETIF_MSG_LINK, "tcid %d\n", tcid);
1770
1771 if (bp->dcb_state) {
1772 switch (tcid) {
1773 case DCB_NUMTCS_ATTR_PG:
1774 *num = E2_NUM_OF_COS;
1775 break;
1776 case DCB_NUMTCS_ATTR_PFC:
1777 *num = E2_NUM_OF_COS;
1778 break;
1779 default:
1780 rval = -EINVAL;
1781 break;
1782 }
1783 } else
1784 rval = -EINVAL;
1785
1786 return rval;
1787}
1788
1789static u8 bnx2x_dcbnl_set_numtcs(struct net_device *netdev, int tcid, u8 num)
1790{
1791 struct bnx2x *bp = netdev_priv(netdev);
1792 DP(NETIF_MSG_LINK, "num tcs = %d; Not supported\n", num);
1793 return -EINVAL;
1794}
1795
1796static u8 bnx2x_dcbnl_get_pfc_state(struct net_device *netdev)
1797{
1798 struct bnx2x *bp = netdev_priv(netdev);
1799 DP(NETIF_MSG_LINK, "state = %d\n", bp->dcbx_local_feat.pfc.enabled);
1800
1801 if (!bp->dcb_state)
1802 return 0;
1803
1804 return bp->dcbx_local_feat.pfc.enabled;
1805}
1806
1807static void bnx2x_dcbnl_set_pfc_state(struct net_device *netdev, u8 state)
1808{
1809 struct bnx2x *bp = netdev_priv(netdev);
1810 DP(NETIF_MSG_LINK, "state = %s\n", state ? "on" : "off");
1811
1812 if (!bnx2x_dcbnl_set_valid(bp))
1813 return;
1814
1815 bp->dcbx_config_params.admin_pfc_tx_enable =
1816 bp->dcbx_config_params.admin_pfc_enable = (state ? 1 : 0);
1817}
1818
1819static bool bnx2x_app_is_equal(struct dcbx_app_priority_entry *app_ent,
1820 u8 idtype, u16 idval)
1821{
1822 if (!(app_ent->appBitfield & DCBX_APP_ENTRY_VALID))
1823 return false;
1824
1825 switch (idtype) {
1826 case DCB_APP_IDTYPE_ETHTYPE:
1827 if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
1828 DCBX_APP_SF_ETH_TYPE)
1829 return false;
1830 break;
1831 case DCB_APP_IDTYPE_PORTNUM:
1832 if ((app_ent->appBitfield & DCBX_APP_ENTRY_SF_MASK) !=
1833 DCBX_APP_SF_PORT)
1834 return false;
1835 break;
1836 default:
1837 return false;
1838 }
1839 if (app_ent->app_id != idval)
1840 return false;
1841
1842 return true;
1843}
1844
1845static void bnx2x_admin_app_set_ent(
1846 struct bnx2x_admin_priority_app_table *app_ent,
1847 u8 idtype, u16 idval, u8 up)
1848{
1849 app_ent->valid = 1;
1850
1851 switch (idtype) {
1852 case DCB_APP_IDTYPE_ETHTYPE:
1853 app_ent->traffic_type = TRAFFIC_TYPE_ETH;
1854 break;
1855 case DCB_APP_IDTYPE_PORTNUM:
1856 app_ent->traffic_type = TRAFFIC_TYPE_PORT;
1857 break;
1858 default:
1859 break; /* never gets here */
1860 }
1861 app_ent->app_id = idval;
1862 app_ent->priority = up;
1863}
1864
1865static bool bnx2x_admin_app_is_equal(
1866 struct bnx2x_admin_priority_app_table *app_ent,
1867 u8 idtype, u16 idval)
1868{
1869 if (!app_ent->valid)
1870 return false;
1871
1872 switch (idtype) {
1873 case DCB_APP_IDTYPE_ETHTYPE:
1874 if (app_ent->traffic_type != TRAFFIC_TYPE_ETH)
1875 return false;
1876 break;
1877 case DCB_APP_IDTYPE_PORTNUM:
1878 if (app_ent->traffic_type != TRAFFIC_TYPE_PORT)
1879 return false;
1880 break;
1881 default:
1882 return false;
1883 }
1884 if (app_ent->app_id != idval)
1885 return false;
1886
1887 return true;
1888}
1889
1890static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
1891{
1892 int i, ff;
1893
1894 /* iterate over the app entries looking for idtype and idval */
1895 for (i = 0, ff = -1; i < 4; i++) {
1896 struct bnx2x_admin_priority_app_table *app_ent =
1897 &bp->dcbx_config_params.admin_priority_app_table[i];
1898 if (bnx2x_admin_app_is_equal(app_ent, idtype, idval))
1899 break;
1900
1901 if (ff < 0 && !app_ent->valid)
1902 ff = i;
1903 }
1904 if (i < 4)
1905 /* if found overwrite up */
1906 bp->dcbx_config_params.
1907 admin_priority_app_table[i].priority = up;
1908 else if (ff >= 0)
1909 /* not found use first-free */
1910 bnx2x_admin_app_set_ent(
1911 &bp->dcbx_config_params.admin_priority_app_table[ff],
1912 idtype, idval, up);
1913 else
1914 /* app table is full */
1915 return -EBUSY;
1916
1917 /* up configured, if not 0 make sure feature is enabled */
1918 if (up)
1919 bp->dcbx_config_params.admin_application_priority_tx_enable = 1;
1920
1921 return 0;
1922}
1923
1924static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
1925 u16 idval, u8 up)
1926{
1927 struct bnx2x *bp = netdev_priv(netdev);
1928
1929 DP(NETIF_MSG_LINK, "app_type %d, app_id %x, prio bitmap %d\n",
1930 idtype, idval, up);
1931
1932 if (!bnx2x_dcbnl_set_valid(bp))
1933 return -EINVAL;
1934
1935 /* verify idtype */
1936 switch (idtype) {
1937 case DCB_APP_IDTYPE_ETHTYPE:
1938 case DCB_APP_IDTYPE_PORTNUM:
1939 break;
1940 default:
1941 return -EINVAL;
1942 }
1943 return bnx2x_set_admin_app_up(bp, idtype, idval, up);
1944}
1945
1946static u8 bnx2x_dcbnl_get_app_up(struct net_device *netdev, u8 idtype,
1947 u16 idval)
1948{
1949 int i;
1950 u8 up = 0;
1951
1952 struct bnx2x *bp = netdev_priv(netdev);
1953 DP(NETIF_MSG_LINK, "app_type %d, app_id 0x%x\n", idtype, idval);
1954
1955 /* iterate over the app entries looking for idtype and idval */
1956 for (i = 0; i < DCBX_MAX_APP_PROTOCOL; i++)
1957 if (bnx2x_app_is_equal(&bp->dcbx_local_feat.app.app_pri_tbl[i],
1958 idtype, idval))
1959 break;
1960
1961 if (i < DCBX_MAX_APP_PROTOCOL)
1962 /* if found return up */
1963 up = bp->dcbx_local_feat.app.app_pri_tbl[i].pri_bitmap;
1964 else
1965 DP(NETIF_MSG_LINK, "app not found\n");
1966
1967 return up;
1968}
1969
1970static u8 bnx2x_dcbnl_get_dcbx(struct net_device *netdev)
1971{
1972 struct bnx2x *bp = netdev_priv(netdev);
1973 u8 state;
1974
1975 state = DCB_CAP_DCBX_LLD_MANAGED | DCB_CAP_DCBX_VER_CEE;
1976
1977 if (bp->dcbx_enabled == BNX2X_DCBX_ENABLED_ON_NEG_OFF)
1978 state |= DCB_CAP_DCBX_STATIC;
1979
1980 return state;
1981}
1982
1983static u8 bnx2x_dcbnl_set_dcbx(struct net_device *netdev, u8 state)
1984{
1985 struct bnx2x *bp = netdev_priv(netdev);
1986 DP(NETIF_MSG_LINK, "state = %02x\n", state);
1987
1988 /* set dcbx mode */
1989
1990 if ((state & BNX2X_DCBX_CAPS) != state) {
1991 BNX2X_ERR("Requested DCBX mode %x is beyond advertised "
1992 "capabilities\n", state);
1993 return 1;
1994 }
1995
1996 if (bp->dcb_state != BNX2X_DCB_STATE_ON) {
1997 BNX2X_ERR("DCB turned off, DCBX configuration is invalid\n");
1998 return 1;
1999 }
2000
2001 if (state & DCB_CAP_DCBX_STATIC)
2002 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_OFF;
2003 else
2004 bp->dcbx_enabled = BNX2X_DCBX_ENABLED_ON_NEG_ON;
2005
2006 bp->dcbx_mode_uset = true;
2007 return 0;
2008}
2009
2010
2011static u8 bnx2x_dcbnl_get_featcfg(struct net_device *netdev, int featid,
2012 u8 *flags)
2013{
2014 struct bnx2x *bp = netdev_priv(netdev);
2015 u8 rval = 0;
2016
2017 DP(NETIF_MSG_LINK, "featid %d\n", featid);
2018
2019 if (bp->dcb_state) {
2020 *flags = 0;
2021 switch (featid) {
2022 case DCB_FEATCFG_ATTR_PG:
2023 if (bp->dcbx_local_feat.ets.enabled)
2024 *flags |= DCB_FEATCFG_ENABLE;
2025 if (bp->dcbx_error & DCBX_LOCAL_ETS_ERROR)
2026 *flags |= DCB_FEATCFG_ERROR;
2027 break;
2028 case DCB_FEATCFG_ATTR_PFC:
2029 if (bp->dcbx_local_feat.pfc.enabled)
2030 *flags |= DCB_FEATCFG_ENABLE;
2031 if (bp->dcbx_error & (DCBX_LOCAL_PFC_ERROR |
2032 DCBX_LOCAL_PFC_MISMATCH))
2033 *flags |= DCB_FEATCFG_ERROR;
2034 break;
2035 case DCB_FEATCFG_ATTR_APP:
2036 if (bp->dcbx_local_feat.app.enabled)
2037 *flags |= DCB_FEATCFG_ENABLE;
2038 if (bp->dcbx_error & (DCBX_LOCAL_APP_ERROR |
2039 DCBX_LOCAL_APP_MISMATCH))
2040 *flags |= DCB_FEATCFG_ERROR;
2041 break;
2042 default:
2043 rval = -EINVAL;
2044 break;
2045 }
2046 } else
2047 rval = -EINVAL;
2048
2049 return rval;
2050}
2051
2052static u8 bnx2x_dcbnl_set_featcfg(struct net_device *netdev, int featid,
2053 u8 flags)
2054{
2055 struct bnx2x *bp = netdev_priv(netdev);
2056 u8 rval = 0;
2057
2058 DP(NETIF_MSG_LINK, "featid = %d flags = %02x\n", featid, flags);
2059
2060 /* ignore the 'advertise' flag */
2061 if (bnx2x_dcbnl_set_valid(bp)) {
2062 switch (featid) {
2063 case DCB_FEATCFG_ATTR_PG:
2064 bp->dcbx_config_params.admin_ets_enable =
2065 flags & DCB_FEATCFG_ENABLE ? 1 : 0;
2066 bp->dcbx_config_params.admin_ets_willing =
2067 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2068 break;
2069 case DCB_FEATCFG_ATTR_PFC:
2070 bp->dcbx_config_params.admin_pfc_enable =
2071 flags & DCB_FEATCFG_ENABLE ? 1 : 0;
2072 bp->dcbx_config_params.admin_pfc_willing =
2073 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2074 break;
2075 case DCB_FEATCFG_ATTR_APP:
2076 /* ignore enable, always enabled */
2077 bp->dcbx_config_params.admin_app_priority_willing =
2078 flags & DCB_FEATCFG_WILLING ? 1 : 0;
2079 break;
2080 default:
2081 rval = -EINVAL;
2082 break;
2083 }
2084 } else
2085 rval = -EINVAL;
2086
2087 return rval;
2088}
2089
2090const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops = {
2091 .getstate = bnx2x_dcbnl_get_state,
2092 .setstate = bnx2x_dcbnl_set_state,
2093 .getpermhwaddr = bnx2x_dcbnl_get_perm_hw_addr,
2094 .setpgtccfgtx = bnx2x_dcbnl_set_pg_tccfg_tx,
2095 .setpgbwgcfgtx = bnx2x_dcbnl_set_pg_bwgcfg_tx,
2096 .setpgtccfgrx = bnx2x_dcbnl_set_pg_tccfg_rx,
2097 .setpgbwgcfgrx = bnx2x_dcbnl_set_pg_bwgcfg_rx,
2098 .getpgtccfgtx = bnx2x_dcbnl_get_pg_tccfg_tx,
2099 .getpgbwgcfgtx = bnx2x_dcbnl_get_pg_bwgcfg_tx,
2100 .getpgtccfgrx = bnx2x_dcbnl_get_pg_tccfg_rx,
2101 .getpgbwgcfgrx = bnx2x_dcbnl_get_pg_bwgcfg_rx,
2102 .setpfccfg = bnx2x_dcbnl_set_pfc_cfg,
2103 .getpfccfg = bnx2x_dcbnl_get_pfc_cfg,
2104 .setall = bnx2x_dcbnl_set_all,
2105 .getcap = bnx2x_dcbnl_get_cap,
2106 .getnumtcs = bnx2x_dcbnl_get_numtcs,
2107 .setnumtcs = bnx2x_dcbnl_set_numtcs,
2108 .getpfcstate = bnx2x_dcbnl_get_pfc_state,
2109 .setpfcstate = bnx2x_dcbnl_set_pfc_state,
2110 .getapp = bnx2x_dcbnl_get_app_up,
2111 .setapp = bnx2x_dcbnl_set_app_up,
2112 .getdcbx = bnx2x_dcbnl_get_dcbx,
2113 .setdcbx = bnx2x_dcbnl_set_dcbx,
2114 .getfeatcfg = bnx2x_dcbnl_get_featcfg,
2115 .setfeatcfg = bnx2x_dcbnl_set_featcfg,
2116};
2117
2118#endif /* BCM_DCB */
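The exported bnx2x_dcbnl_ops table above is what the driver hangs off its net_device so the kernel dcbnl core can dispatch lldpad/dcbtool requests to these callbacks; the hookup itself is done elsewhere in the patch (presumably in bnx2x_main.c, not shown here). A minimal sketch of that attachment, using a hypothetical helper name for illustration only:

#include <linux/netdevice.h>
#include <net/dcbnl.h>

#ifdef BCM_DCB
/* Illustration only, not part of this patch: point the net_device at the
 * ops table so the dcbnl core can reach the callbacks defined above.
 * The real driver performs this assignment during device initialization. */
static void example_attach_dcbnl_ops(struct net_device *dev)
{
	dev->dcbnl_ops = &bnx2x_dcbnl_ops;
}
#endif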
diff --git a/drivers/net/bnx2x/bnx2x_dcb.h b/drivers/net/bnx2x/bnx2x_dcb.h
new file mode 100644
index 000000000000..f650f98e4092
--- /dev/null
+++ b/drivers/net/bnx2x/bnx2x_dcb.h
@@ -0,0 +1,196 @@
1/* bnx2x_dcb.h: Broadcom Everest network driver.
2 *
3 * Copyright 2009-2010 Broadcom Corporation
4 *
5 * Unless you and Broadcom execute a separate written software license
6 * agreement governing use of this software, this software is licensed to you
7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
14 *
15 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
16 * Written by: Dmitry Kravkov
17 *
18 */
19#ifndef BNX2X_DCB_H
20#define BNX2X_DCB_H
21
22#include "bnx2x_hsi.h"
23
24#define LLFC_DRIVER_TRAFFIC_TYPE_MAX 3 /* NW, iSCSI, FCoE */
25struct bnx2x_dcbx_app_params {
26 u32 enabled;
27 u32 traffic_type_priority[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
28};
29
30#define E2_NUM_OF_COS 2
31#define BNX2X_DCBX_COS_NOT_STRICT 0
32#define BNX2X_DCBX_COS_LOW_STRICT 1
33#define BNX2X_DCBX_COS_HIGH_STRICT 2
34
35struct bnx2x_dcbx_cos_params {
36 u32 bw_tbl;
37 u32 pri_bitmask;
38 u8 strict;
39 u8 pauseable;
40};
41
42struct bnx2x_dcbx_pg_params {
43 u32 enabled;
44 u8 num_of_cos; /* valid COS entries */
45 struct bnx2x_dcbx_cos_params cos_params[E2_NUM_OF_COS];
46};
47
48struct bnx2x_dcbx_pfc_params {
49 u32 enabled;
50 u32 priority_non_pauseable_mask;
51};
52
53struct bnx2x_dcbx_port_params {
54 struct bnx2x_dcbx_pfc_params pfc;
55 struct bnx2x_dcbx_pg_params ets;
56 struct bnx2x_dcbx_app_params app;
57};
58
59#define BNX2X_DCBX_CONFIG_INV_VALUE (0xFFFFFFFF)
60#define BNX2X_DCBX_OVERWRITE_SETTINGS_DISABLE 0
61#define BNX2X_DCBX_OVERWRITE_SETTINGS_ENABLE 1
62#define BNX2X_DCBX_OVERWRITE_SETTINGS_INVALID (BNX2X_DCBX_CONFIG_INV_VALUE)
63
64/*******************************************************************************
65 * LLDP protocol configuration parameters.
66 ******************************************************************************/
67struct bnx2x_config_lldp_params {
68 u32 overwrite_settings;
69 u32 msg_tx_hold;
70 u32 msg_fast_tx;
71 u32 tx_credit_max;
72 u32 msg_tx_interval;
73 u32 tx_fast;
74};
75
76struct bnx2x_admin_priority_app_table {
77 u32 valid;
78 u32 priority;
79#define INVALID_TRAFFIC_TYPE_PRIORITY (0xFFFFFFFF)
80 u32 traffic_type;
81#define TRAFFIC_TYPE_ETH 0
82#define TRAFFIC_TYPE_PORT 1
83 u32 app_id;
84};
85
86/*******************************************************************************
87 * DCBX protocol configuration parameters.
88 ******************************************************************************/
89struct bnx2x_config_dcbx_params {
90 u32 overwrite_settings;
91 u32 admin_dcbx_version;
92 u32 admin_ets_enable;
93 u32 admin_pfc_enable;
94 u32 admin_tc_supported_tx_enable;
95 u32 admin_ets_configuration_tx_enable;
96 u32 admin_ets_recommendation_tx_enable;
97 u32 admin_pfc_tx_enable;
98 u32 admin_application_priority_tx_enable;
99 u32 admin_ets_willing;
100 u32 admin_ets_reco_valid;
101 u32 admin_pfc_willing;
102 u32 admin_app_priority_willing;
103 u32 admin_configuration_bw_precentage[8];
104 u32 admin_configuration_ets_pg[8];
105 u32 admin_recommendation_bw_precentage[8];
106 u32 admin_recommendation_ets_pg[8];
107 u32 admin_pfc_bitmap;
108 struct bnx2x_admin_priority_app_table admin_priority_app_table[4];
109 u32 admin_default_priority;
110};
111
112#define GET_FLAGS(flags, bits) ((flags) & (bits))
113#define SET_FLAGS(flags, bits) ((flags) |= (bits))
114#define RESET_FLAGS(flags, bits) ((flags) &= ~(bits))
115
116enum {
117 DCBX_READ_LOCAL_MIB,
118 DCBX_READ_REMOTE_MIB
119};
120
121#define ETH_TYPE_FCOE (0x8906)
122#define TCP_PORT_ISCSI (0xCBC)
123
124#define PFC_VALUE_FRAME_SIZE (512)
125#define PFC_QUANTA_IN_NANOSEC_FROM_SPEED_MEGA(mega_speed) \
126 ((1000 * PFC_VALUE_FRAME_SIZE)/(mega_speed))
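/* Worked example, assuming the 512 above is the IEEE 512-bit PFC pause
 * quantum: at 10G (mega_speed = 10000) this gives (1000 * 512) / 10000 =
 * 51 ns per quantum (integer truncation of 51.2). */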
127
128#define PFC_BRB1_REG_HIGH_LLFC_LOW_THRESHOLD 130
129#define PFC_BRB1_REG_HIGH_LLFC_HIGH_THRESHOLD 170
130
131
132
133struct cos_entry_help_data {
134 u32 pri_join_mask;
135 u32 cos_bw;
136 u8 strict;
137 bool pausable;
138};
139
140struct cos_help_data {
141 struct cos_entry_help_data data[E2_NUM_OF_COS];
142 u8 num_of_cos;
143};
144
145#define DCBX_ILLEGAL_PG (0xFF)
146#define DCBX_PFC_PRI_MASK (0xFF)
147#define DCBX_STRICT_PRIORITY (15)
148#define DCBX_INVALID_COS_BW (0xFFFFFFFF)
149#define DCBX_PFC_PRI_NON_PAUSE_MASK(bp) \
150 ((bp)->dcbx_port_params.pfc.priority_non_pauseable_mask)
151#define DCBX_PFC_PRI_PAUSE_MASK(bp) \
152 ((u8)~DCBX_PFC_PRI_NON_PAUSE_MASK(bp))
153#define DCBX_PFC_PRI_GET_PAUSE(bp, pg_pri) \
154 ((pg_pri) & (DCBX_PFC_PRI_PAUSE_MASK(bp)))
155#define DCBX_PFC_PRI_GET_NON_PAUSE(bp, pg_pri) \
156 (DCBX_PFC_PRI_NON_PAUSE_MASK(bp) & (pg_pri))
157#define IS_DCBX_PFC_PRI_ONLY_PAUSE(bp, pg_pri) \
158 (pg_pri == DCBX_PFC_PRI_GET_PAUSE((bp), (pg_pri)))
159#define IS_DCBX_PFC_PRI_ONLY_NON_PAUSE(bp, pg_pri)\
160 ((pg_pri) == DCBX_PFC_PRI_GET_NON_PAUSE((bp), (pg_pri)))
161#define IS_DCBX_PFC_PRI_MIX_PAUSE(bp, pg_pri) \
162 (!(IS_DCBX_PFC_PRI_ONLY_NON_PAUSE((bp), (pg_pri)) || \
163 IS_DCBX_PFC_PRI_ONLY_PAUSE((bp), (pg_pri))))
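/* Worked example of the masks above: with priority_non_pauseable_mask =
 * 0x03 the pause mask is 0xfc, so pg_pri = 0x0c is "only pause",
 * pg_pri = 0x01 is "only non-pause" and pg_pri = 0x05 is a mix. */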
164
165
166struct pg_entry_help_data {
167 u8 num_of_dif_pri;
168 u8 pg;
169 u32 pg_priority;
170};
171
172struct pg_help_data {
173 struct pg_entry_help_data data[LLFC_DRIVER_TRAFFIC_TYPE_MAX];
174 u8 num_of_pg;
175};
176
177/* forward DCB/PFC related declarations */
178struct bnx2x;
179void bnx2x_dcb_init_intmem_pfc(struct bnx2x *bp);
180void bnx2x_dcbx_update(struct work_struct *work);
181void bnx2x_dcbx_init_params(struct bnx2x *bp);
182void bnx2x_dcbx_set_state(struct bnx2x *bp, bool dcb_on, u32 dcbx_enabled);
183
184enum {
185 BNX2X_DCBX_STATE_NEG_RECEIVED = 0x1,
186 BNX2X_DCBX_STATE_TX_PAUSED = 0x2,
187 BNX2X_DCBX_STATE_TX_RELEASED = 0x4
188};
189void bnx2x_dcbx_set_params(struct bnx2x *bp, u32 state);
190
191/* DCB netlink */
192#ifdef BCM_DCB
193extern const struct dcbnl_rtnl_ops bnx2x_dcbnl_ops;
194#endif /* BCM_DCB */
195
196#endif /* BNX2X_DCB_H */
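As an illustration of how the structures above fit together, a hypothetical admin application-priority entry could advertise FCoE (ETH_TYPE_FCOE) at an assumed priority; the helper and the chosen priority are not part of the patch itself:

/* Illustration only: fill one admin app-table slot for FCoE using the
 * constants defined in this header. */
static void example_fcoe_app_entry(struct bnx2x_config_dcbx_params *dp)
{
	struct bnx2x_admin_priority_app_table *ent =
		&dp->admin_priority_app_table[0];

	ent->valid        = 1;
	ent->priority     = 3;                /* assumed priority, for illustration */
	ent->traffic_type = TRAFFIC_TYPE_ETH; /* match by ethertype ... */
	ent->app_id       = ETH_TYPE_FCOE;    /* ... 0x8906, FCoE */
}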
diff --git a/drivers/net/bnx2x/bnx2x_dump.h b/drivers/net/bnx2x/bnx2x_dump.h
index dc18c25ca9e5..fb3ff7c4d7ca 100644
--- a/drivers/net/bnx2x/bnx2x_dump.h
+++ b/drivers/net/bnx2x/bnx2x_dump.h
@@ -1,10 +1,16 @@
1/* bnx2x_dump.h: Broadcom Everest network driver. 1/* bnx2x_dump.h: Broadcom Everest network driver.
2 * 2 *
3 * Copyright (c) 2009 Broadcom Corporation 3 * Copyright (c) 2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * Unless you and Broadcom execute a separate written software license
6 * it under the terms of the GNU General Public License as published by 6 * agreement governing use of this software, this software is licensed to you
7 * the Free Software Foundation. 7 * under the terms of the GNU General Public License version 2, available
8 * at http://www.gnu.org/licenses/old-licenses/gpl-2.0.html (the "GPL").
9 *
10 * Notwithstanding the above, under no circumstances may you combine this
11 * software in any way with any other Broadcom software provided under a
12 * license other than the GPL, without Broadcom's express prior written
13 * consent.
8 */ 14 */
9 15
10 16
@@ -17,53 +23,53 @@
17#define BNX2X_DUMP_H 23#define BNX2X_DUMP_H
18 24
19 25
20struct dump_sign {
21 u32 time_stamp;
22 u32 diag_ver;
23 u32 grc_dump_ver;
24};
25 26
26#define TSTORM_WAITP_ADDR 0x1b8a80 27/*definitions */
27#define CSTORM_WAITP_ADDR 0x238a80 28#define XSTORM_WAITP_ADDR 0x2b8a80
28#define XSTORM_WAITP_ADDR 0x2b8a80 29#define TSTORM_WAITP_ADDR 0x1b8a80
29#define USTORM_WAITP_ADDR 0x338a80 30#define USTORM_WAITP_ADDR 0x338a80
30#define TSTORM_CAM_MODE 0x1b1440 31#define CSTORM_WAITP_ADDR 0x238a80
32#define TSTORM_CAM_MODE 0x1B1440
31 33
32#define RI_E1 0x1 34#define MAX_TIMER_PENDING 200
33#define RI_E1H 0x2 35#define TIMER_SCAN_DONT_CARE 0xFF
36#define RI_E1 0x1
37#define RI_E1H 0x2
34#define RI_E2 0x4 38#define RI_E2 0x4
35#define RI_ONLINE 0x100 39#define RI_ONLINE 0x100
36#define RI_PATH0_DUMP 0x200 40#define RI_PATH0_DUMP 0x200
37#define RI_PATH1_DUMP 0x400 41#define RI_PATH1_DUMP 0x400
38#define RI_E1_OFFLINE (RI_E1) 42#define RI_E1_OFFLINE (RI_E1)
39#define RI_E1_ONLINE (RI_E1 | RI_ONLINE) 43#define RI_E1_ONLINE (RI_E1 | RI_ONLINE)
40#define RI_E1H_OFFLINE (RI_E1H) 44#define RI_E1H_OFFLINE (RI_E1H)
41#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE) 45#define RI_E1H_ONLINE (RI_E1H | RI_ONLINE)
42#define RI_E2_OFFLINE (RI_E2) 46#define RI_E2_OFFLINE (RI_E2)
43#define RI_E2_ONLINE (RI_E2 | RI_ONLINE) 47#define RI_E2_ONLINE (RI_E2 | RI_ONLINE)
44#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H) 48#define RI_E1E1H_OFFLINE (RI_E1 | RI_E1H)
45#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE) 49#define RI_E1E1H_ONLINE (RI_E1 | RI_E1H | RI_ONLINE)
46#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H) 50#define RI_E1HE2_OFFLINE (RI_E2 | RI_E1H)
47#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE) 51#define RI_E1HE2_ONLINE (RI_E2 | RI_E1H | RI_ONLINE)
48#define RI_E1E2_OFFLINE (RI_E2 | RI_E1) 52#define RI_E1E2_OFFLINE (RI_E2 | RI_E1)
49#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE) 53#define RI_E1E2_ONLINE (RI_E2 | RI_E1 | RI_ONLINE)
50#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2) 54#define RI_ALL_OFFLINE (RI_E1 | RI_E1H | RI_E2)
51#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE) 55#define RI_ALL_ONLINE (RI_E1 | RI_E1H | RI_E2 | RI_ONLINE)
52
53#define MAX_TIMER_PENDING 200
54#define TIMER_SCAN_DONT_CARE 0xFF
55 56
57struct dump_sign {
58 u32 time_stamp;
59 u32 diag_ver;
60 u32 grc_dump_ver;
61};
56 62
57struct dump_hdr { 63struct dump_hdr {
58 u32 hdr_size; /* in dwords, excluding this field */ 64 u32 hdr_size; /* in dwords, excluding this field */
59 struct dump_sign dump_sign; 65 struct dump_sign dump_sign;
60 u32 xstorm_waitp; 66 u32 xstorm_waitp;
61 u32 tstorm_waitp; 67 u32 tstorm_waitp;
62 u32 ustorm_waitp; 68 u32 ustorm_waitp;
63 u32 cstorm_waitp; 69 u32 cstorm_waitp;
64 u16 info; 70 u16 info;
65 u8 idle_chk; 71 u8 idle_chk;
66 u8 reserved; 72 u8 reserved;
67}; 73};
68 74
69struct reg_addr { 75struct reg_addr {
@@ -80,202 +86,185 @@ struct wreg_addr {
80 u16 info; 86 u16 info;
81}; 87};
82 88
83 89#define REGS_COUNT 834
84#define REGS_COUNT 558
85static const struct reg_addr reg_addrs[REGS_COUNT] = { 90static const struct reg_addr reg_addrs[REGS_COUNT] = {
86 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE }, 91 { 0x2000, 341, RI_ALL_ONLINE }, { 0x2800, 103, RI_ALL_ONLINE },
87 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE }, 92 { 0x3000, 287, RI_ALL_ONLINE }, { 0x3800, 331, RI_ALL_ONLINE },
88 { 0x8800, 6, RI_E1_ONLINE }, { 0xa000, 223, RI_ALL_ONLINE }, 93 { 0x8800, 6, RI_ALL_ONLINE }, { 0x8818, 1, RI_E1HE2_ONLINE },
89 { 0xa388, 1, RI_ALL_ONLINE }, { 0xa398, 1, RI_ALL_ONLINE }, 94 { 0x9000, 164, RI_E2_ONLINE }, { 0x9400, 33, RI_E2_ONLINE },
90 { 0xa39c, 7, RI_E1H_ONLINE }, { 0xa3c0, 3, RI_E1H_ONLINE }, 95 { 0xa000, 27, RI_ALL_ONLINE }, { 0xa06c, 1, RI_E1E1H_ONLINE },
91 { 0xa3d0, 1, RI_E1H_ONLINE }, { 0xa3d8, 1, RI_E1H_ONLINE }, 96 { 0xa070, 71, RI_ALL_ONLINE }, { 0xa18c, 4, RI_E1E1H_ONLINE },
92 { 0xa3e0, 1, RI_E1H_ONLINE }, { 0xa3e8, 1, RI_E1H_ONLINE }, 97 { 0xa19c, 62, RI_ALL_ONLINE }, { 0xa294, 2, RI_E1E1H_ONLINE },
93 { 0xa3f0, 1, RI_E1H_ONLINE }, { 0xa3f8, 1, RI_E1H_ONLINE }, 98 { 0xa29c, 56, RI_ALL_ONLINE }, { 0xa39c, 7, RI_E1HE2_ONLINE },
94 { 0xa400, 69, RI_ALL_ONLINE }, { 0xa518, 1, RI_ALL_ONLINE }, 99 { 0xa3c0, 3, RI_E1HE2_ONLINE }, { 0xa3d0, 1, RI_E1HE2_ONLINE },
95 { 0xa520, 1, RI_ALL_ONLINE }, { 0xa528, 1, RI_ALL_ONLINE }, 100 { 0xa3d8, 1, RI_E1HE2_ONLINE }, { 0xa3e0, 1, RI_E1HE2_ONLINE },
96 { 0xa530, 1, RI_ALL_ONLINE }, { 0xa538, 1, RI_ALL_ONLINE }, 101 { 0xa3e8, 1, RI_E1HE2_ONLINE }, { 0xa3f0, 1, RI_E1HE2_ONLINE },
97 { 0xa540, 1, RI_ALL_ONLINE }, { 0xa548, 1, RI_ALL_ONLINE }, 102 { 0xa3f8, 1, RI_E1HE2_ONLINE }, { 0xa400, 43, RI_ALL_ONLINE },
98 { 0xa550, 1, RI_ALL_ONLINE }, { 0xa558, 1, RI_ALL_ONLINE }, 103 { 0xa4ac, 2, RI_E1E1H_ONLINE }, { 0xa4b4, 1, RI_ALL_ONLINE },
99 { 0xa560, 1, RI_ALL_ONLINE }, { 0xa568, 1, RI_ALL_ONLINE }, 104 { 0xa4b8, 2, RI_E1E1H_ONLINE }, { 0xa4c0, 3, RI_ALL_ONLINE },
100 { 0xa570, 1, RI_ALL_ONLINE }, { 0xa580, 1, RI_ALL_ONLINE }, 105 { 0xa4cc, 5, RI_E1E1H_ONLINE }, { 0xa4e0, 9, RI_ALL_ONLINE },
101 { 0xa590, 1, RI_ALL_ONLINE }, { 0xa5a0, 1, RI_ALL_ONLINE }, 106 { 0xa504, 1, RI_E1E1H_ONLINE }, { 0xa508, 3, RI_ALL_ONLINE },
102 { 0xa5c0, 1, RI_ALL_ONLINE }, { 0xa5e0, 1, RI_E1H_ONLINE }, 107 { 0xa518, 1, RI_ALL_ONLINE }, { 0xa520, 1, RI_ALL_ONLINE },
103 { 0xa5e8, 1, RI_E1H_ONLINE }, { 0xa5f0, 1, RI_E1H_ONLINE }, 108 { 0xa528, 1, RI_ALL_ONLINE }, { 0xa530, 1, RI_ALL_ONLINE },
104 { 0xa5f8, 10, RI_E1H_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE }, 109 { 0xa538, 1, RI_ALL_ONLINE }, { 0xa540, 1, RI_ALL_ONLINE },
105 { 0x103bc, 1, RI_ALL_ONLINE }, { 0x103cc, 1, RI_ALL_ONLINE }, 110 { 0xa548, 1, RI_E1E1H_ONLINE }, { 0xa550, 1, RI_E1E1H_ONLINE },
106 { 0x103dc, 1, RI_ALL_ONLINE }, { 0x10400, 57, RI_ALL_ONLINE }, 111 { 0xa558, 1, RI_E1E1H_ONLINE }, { 0xa560, 1, RI_E1E1H_ONLINE },
107 { 0x104e8, 2, RI_ALL_ONLINE }, { 0x104f4, 2, RI_ALL_ONLINE }, 112 { 0xa568, 1, RI_E1E1H_ONLINE }, { 0xa570, 1, RI_ALL_ONLINE },
108 { 0x10500, 146, RI_ALL_ONLINE }, { 0x10750, 2, RI_ALL_ONLINE }, 113 { 0xa580, 1, RI_ALL_ONLINE }, { 0xa590, 1, RI_ALL_ONLINE },
109 { 0x10760, 2, RI_ALL_ONLINE }, { 0x10770, 2, RI_ALL_ONLINE }, 114 { 0xa5a0, 1, RI_ALL_ONLINE }, { 0xa5c0, 1, RI_ALL_ONLINE },
110 { 0x10780, 2, RI_ALL_ONLINE }, { 0x10790, 2, RI_ALL_ONLINE }, 115 { 0xa5e0, 1, RI_E1HE2_ONLINE }, { 0xa5e8, 1, RI_E1HE2_ONLINE },
111 { 0x107a0, 2, RI_ALL_ONLINE }, { 0x107b0, 2, RI_ALL_ONLINE }, 116 { 0xa5f0, 1, RI_E1HE2_ONLINE }, { 0xa5f8, 10, RI_E1HE2_ONLINE },
112 { 0x107c0, 2, RI_ALL_ONLINE }, { 0x107d0, 2, RI_ALL_ONLINE }, 117 { 0xa620, 111, RI_E2_ONLINE }, { 0xa800, 51, RI_E2_ONLINE },
113 { 0x107e0, 2, RI_ALL_ONLINE }, { 0x10880, 2, RI_ALL_ONLINE }, 118 { 0xa8d4, 4, RI_E2_ONLINE }, { 0xa8e8, 1, RI_E2_ONLINE },
114 { 0x10900, 2, RI_ALL_ONLINE }, { 0x12000, 1, RI_ALL_ONLINE }, 119 { 0xa8f0, 1, RI_E2_ONLINE }, { 0x10000, 236, RI_ALL_ONLINE },
115 { 0x14000, 1, RI_ALL_ONLINE }, { 0x16000, 26, RI_E1H_ONLINE }, 120 { 0x10400, 57, RI_ALL_ONLINE }, { 0x104e8, 2, RI_ALL_ONLINE },
116 { 0x16070, 18, RI_E1H_ONLINE }, { 0x160c0, 27, RI_E1H_ONLINE }, 121 { 0x104f4, 2, RI_ALL_ONLINE }, { 0x10500, 146, RI_ALL_ONLINE },
117 { 0x16140, 1, RI_E1H_ONLINE }, { 0x16160, 1, RI_E1H_ONLINE }, 122 { 0x10750, 2, RI_ALL_ONLINE }, { 0x10760, 2, RI_ALL_ONLINE },
118 { 0x16180, 2, RI_E1H_ONLINE }, { 0x161c0, 2, RI_E1H_ONLINE }, 123 { 0x10770, 2, RI_ALL_ONLINE }, { 0x10780, 2, RI_ALL_ONLINE },
119 { 0x16204, 5, RI_E1H_ONLINE }, { 0x18000, 1, RI_E1H_ONLINE }, 124 { 0x10790, 2, RI_ALL_ONLINE }, { 0x107a0, 2, RI_ALL_ONLINE },
120 { 0x18008, 1, RI_E1H_ONLINE }, { 0x20000, 24, RI_ALL_ONLINE }, 125 { 0x107b0, 2, RI_ALL_ONLINE }, { 0x107c0, 2, RI_ALL_ONLINE },
121 { 0x20060, 8, RI_ALL_ONLINE }, { 0x20080, 138, RI_ALL_ONLINE }, 126 { 0x107d0, 2, RI_ALL_ONLINE }, { 0x107e0, 2, RI_ALL_ONLINE },
122 { 0x202b4, 1, RI_ALL_ONLINE }, { 0x202c4, 1, RI_ALL_ONLINE }, 127 { 0x10880, 2, RI_ALL_ONLINE }, { 0x10900, 2, RI_ALL_ONLINE },
123 { 0x20400, 2, RI_ALL_ONLINE }, { 0x2040c, 8, RI_ALL_ONLINE }, 128 { 0x16000, 26, RI_E1HE2_ONLINE }, { 0x16070, 18, RI_E1HE2_ONLINE },
124 { 0x2042c, 18, RI_E1H_ONLINE }, { 0x20480, 1, RI_ALL_ONLINE }, 129 { 0x160c0, 27, RI_E1HE2_ONLINE }, { 0x16140, 1, RI_E1HE2_ONLINE },
125 { 0x20500, 1, RI_ALL_ONLINE }, { 0x20600, 1, RI_ALL_ONLINE }, 130 { 0x16160, 1, RI_E1HE2_ONLINE }, { 0x16180, 2, RI_E1HE2_ONLINE },
126 { 0x28000, 1, RI_ALL_ONLINE }, { 0x28004, 8191, RI_ALL_OFFLINE }, 131 { 0x161c0, 2, RI_E1HE2_ONLINE }, { 0x16204, 5, RI_E1HE2_ONLINE },
127 { 0x30000, 1, RI_ALL_ONLINE }, { 0x30004, 16383, RI_ALL_OFFLINE }, 132 { 0x18000, 1, RI_E1HE2_ONLINE }, { 0x18008, 1, RI_E1HE2_ONLINE },
128 { 0x40000, 98, RI_ALL_ONLINE }, { 0x40194, 1, RI_ALL_ONLINE }, 133 { 0x18010, 35, RI_E2_ONLINE }, { 0x180a4, 2, RI_E2_ONLINE },
129 { 0x401a4, 1, RI_ALL_ONLINE }, { 0x401a8, 11, RI_E1H_ONLINE }, 134 { 0x180c0, 191, RI_E2_ONLINE }, { 0x18440, 1, RI_E2_ONLINE },
130 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40400, 43, RI_ALL_ONLINE }, 135 { 0x18460, 1, RI_E2_ONLINE }, { 0x18480, 2, RI_E2_ONLINE },
131 { 0x404b8, 1, RI_ALL_ONLINE }, { 0x404c8, 1, RI_ALL_ONLINE }, 136 { 0x184c0, 2, RI_E2_ONLINE }, { 0x18500, 15, RI_E2_ONLINE },
132 { 0x404cc, 3, RI_E1H_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE }, 137 { 0x20000, 24, RI_ALL_ONLINE }, { 0x20060, 8, RI_ALL_ONLINE },
138 { 0x20080, 94, RI_ALL_ONLINE }, { 0x201f8, 1, RI_E1E1H_ONLINE },
139 { 0x201fc, 1, RI_ALL_ONLINE }, { 0x20200, 1, RI_E1E1H_ONLINE },
140 { 0x20204, 1, RI_ALL_ONLINE }, { 0x20208, 1, RI_E1E1H_ONLINE },
141 { 0x2020c, 39, RI_ALL_ONLINE }, { 0x202c8, 1, RI_E2_ONLINE },
142 { 0x202d8, 4, RI_E2_ONLINE }, { 0x20400, 2, RI_ALL_ONLINE },
143 { 0x2040c, 8, RI_ALL_ONLINE }, { 0x2042c, 18, RI_E1HE2_ONLINE },
144 { 0x20480, 1, RI_ALL_ONLINE }, { 0x20500, 1, RI_ALL_ONLINE },
145 { 0x20600, 1, RI_ALL_ONLINE }, { 0x28000, 1, RI_ALL_ONLINE },
146 { 0x28004, 8191, RI_ALL_OFFLINE }, { 0x30000, 1, RI_ALL_ONLINE },
147 { 0x30004, 16383, RI_ALL_OFFLINE }, { 0x40000, 98, RI_ALL_ONLINE },
148 { 0x401a8, 8, RI_E1HE2_ONLINE }, { 0x401c8, 1, RI_E1H_ONLINE },
149 { 0x401cc, 2, RI_E1HE2_ONLINE }, { 0x401d4, 2, RI_E2_ONLINE },
150 { 0x40200, 4, RI_ALL_ONLINE }, { 0x40220, 18, RI_E2_ONLINE },
151 { 0x40400, 43, RI_ALL_ONLINE }, { 0x404cc, 3, RI_E1HE2_ONLINE },
152 { 0x404e0, 1, RI_E2_ONLINE }, { 0x40500, 2, RI_ALL_ONLINE },
133 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE }, 153 { 0x40510, 2, RI_ALL_ONLINE }, { 0x40520, 2, RI_ALL_ONLINE },
134 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE }, 154 { 0x40530, 2, RI_ALL_ONLINE }, { 0x40540, 2, RI_ALL_ONLINE },
135 { 0x42000, 164, RI_ALL_ONLINE }, { 0x4229c, 1, RI_ALL_ONLINE }, 155 { 0x40550, 10, RI_E2_ONLINE }, { 0x40610, 2, RI_E2_ONLINE },
136 { 0x422ac, 1, RI_ALL_ONLINE }, { 0x422bc, 1, RI_ALL_ONLINE }, 156 { 0x42000, 164, RI_ALL_ONLINE }, { 0x422c0, 4, RI_E2_ONLINE },
137 { 0x422d4, 5, RI_E1H_ONLINE }, { 0x42400, 49, RI_ALL_ONLINE }, 157 { 0x422d4, 5, RI_E1HE2_ONLINE }, { 0x422e8, 1, RI_E2_ONLINE },
138 { 0x424c8, 38, RI_ALL_ONLINE }, { 0x42568, 2, RI_ALL_ONLINE }, 158 { 0x42400, 49, RI_ALL_ONLINE }, { 0x424c8, 38, RI_ALL_ONLINE },
139 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 20, RI_ALL_ONLINE }, 159 { 0x42568, 2, RI_ALL_ONLINE }, { 0x42640, 5, RI_E2_ONLINE },
140 { 0x50050, 8, RI_ALL_ONLINE }, { 0x50070, 88, RI_ALL_ONLINE }, 160 { 0x42800, 1, RI_ALL_ONLINE }, { 0x50000, 1, RI_ALL_ONLINE },
141 { 0x501dc, 1, RI_ALL_ONLINE }, { 0x501ec, 1, RI_ALL_ONLINE }, 161 { 0x50004, 19, RI_ALL_ONLINE }, { 0x50050, 8, RI_ALL_ONLINE },
142 { 0x501f0, 4, RI_E1H_ONLINE }, { 0x50200, 2, RI_ALL_ONLINE }, 162 { 0x50070, 88, RI_ALL_ONLINE }, { 0x501f0, 4, RI_E1HE2_ONLINE },
143 { 0x5020c, 7, RI_ALL_ONLINE }, { 0x50228, 6, RI_E1H_ONLINE }, 163 { 0x50200, 2, RI_ALL_ONLINE }, { 0x5020c, 7, RI_ALL_ONLINE },
144 { 0x50240, 1, RI_ALL_ONLINE }, { 0x50280, 1, RI_ALL_ONLINE }, 164 { 0x50228, 6, RI_E1HE2_ONLINE }, { 0x50240, 1, RI_ALL_ONLINE },
165 { 0x50280, 1, RI_ALL_ONLINE }, { 0x50300, 1, RI_E2_ONLINE },
166 { 0x5030c, 1, RI_E2_ONLINE }, { 0x50318, 1, RI_E2_ONLINE },
167 { 0x5031c, 1, RI_E2_ONLINE }, { 0x50320, 2, RI_E2_ONLINE },
145 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE }, 168 { 0x52000, 1, RI_ALL_ONLINE }, { 0x54000, 1, RI_ALL_ONLINE },
146 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE }, 169 { 0x54004, 3327, RI_ALL_OFFLINE }, { 0x58000, 1, RI_ALL_ONLINE },
147 { 0x58004, 8191, RI_ALL_OFFLINE }, { 0x60000, 71, RI_ALL_ONLINE }, 170 { 0x58004, 8191, RI_E1E1H_OFFLINE }, { 0x60000, 26, RI_ALL_ONLINE },
148 { 0x60128, 1, RI_ALL_ONLINE }, { 0x60138, 1, RI_ALL_ONLINE }, 171 { 0x60068, 8, RI_E1E1H_ONLINE }, { 0x60088, 12, RI_ALL_ONLINE },
149 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE }, 172 { 0x600b8, 9, RI_E1E1H_ONLINE }, { 0x600dc, 1, RI_ALL_ONLINE },
173 { 0x600e0, 5, RI_E1E1H_ONLINE }, { 0x600f4, 1, RI_ALL_ONLINE },
174 { 0x600f8, 1, RI_E1E1H_ONLINE }, { 0x600fc, 8, RI_ALL_ONLINE },
175 { 0x6013c, 24, RI_E1H_ONLINE }, { 0x6019c, 2, RI_E2_ONLINE },
176 { 0x601ac, 18, RI_E2_ONLINE }, { 0x60200, 1, RI_ALL_ONLINE },
177 { 0x60204, 2, RI_ALL_OFFLINE }, { 0x60210, 13, RI_E2_ONLINE },
150 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE }, 178 { 0x61000, 1, RI_ALL_ONLINE }, { 0x61004, 511, RI_ALL_OFFLINE },
151 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 21496, RI_ALL_OFFLINE }, 179 { 0x70000, 8, RI_ALL_ONLINE }, { 0x70020, 8184, RI_ALL_OFFLINE },
152 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8500c, 4, RI_ALL_OFFLINE }, 180 { 0x85000, 3, RI_ALL_ONLINE }, { 0x8501c, 7, RI_ALL_ONLINE },
153 { 0x8501c, 7, RI_ALL_ONLINE }, { 0x85038, 4, RI_ALL_OFFLINE }, 181 { 0x85048, 1, RI_ALL_ONLINE }, { 0x85200, 32, RI_ALL_ONLINE },
154 { 0x85048, 1, RI_ALL_ONLINE }, { 0x8504c, 109, RI_ALL_OFFLINE }, 182 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc103c, 2, RI_E2_ONLINE },
155 { 0x85200, 32, RI_ALL_ONLINE }, { 0x85280, 11104, RI_ALL_OFFLINE }, 183 { 0xc1800, 2, RI_ALL_ONLINE }, { 0xc2000, 164, RI_ALL_ONLINE },
156 { 0xa0000, 16384, RI_ALL_ONLINE }, { 0xb0000, 16384, RI_E1H_ONLINE }, 184 { 0xc22c0, 5, RI_E2_ONLINE }, { 0xc22d8, 4, RI_E2_ONLINE },
157 { 0xc1000, 7, RI_ALL_ONLINE }, { 0xc1028, 1, RI_ALL_ONLINE },
158 { 0xc1038, 1, RI_ALL_ONLINE }, { 0xc1800, 2, RI_ALL_ONLINE },
159 { 0xc2000, 164, RI_ALL_ONLINE }, { 0xc229c, 1, RI_ALL_ONLINE },
160 { 0xc22ac, 1, RI_ALL_ONLINE }, { 0xc22bc, 1, RI_ALL_ONLINE },
161 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE }, 185 { 0xc2400, 49, RI_ALL_ONLINE }, { 0xc24c8, 38, RI_ALL_ONLINE },
162 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE }, 186 { 0xc2568, 2, RI_ALL_ONLINE }, { 0xc2600, 1, RI_ALL_ONLINE },
163 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42a0, 1, RI_ALL_ONLINE }, 187 { 0xc4000, 165, RI_ALL_ONLINE }, { 0xc42d8, 2, RI_E2_ONLINE },
164 { 0xc42b0, 1, RI_ALL_ONLINE }, { 0xc42c0, 1, RI_ALL_ONLINE }, 188 { 0xc42e0, 7, RI_E1HE2_ONLINE }, { 0xc42fc, 1, RI_E2_ONLINE },
165 { 0xc42e0, 7, RI_E1H_ONLINE }, { 0xc4400, 51, RI_ALL_ONLINE }, 189 { 0xc4400, 51, RI_ALL_ONLINE }, { 0xc44d0, 38, RI_ALL_ONLINE },
166 { 0xc44d0, 38, RI_ALL_ONLINE }, { 0xc4570, 2, RI_ALL_ONLINE }, 190 { 0xc4570, 2, RI_ALL_ONLINE }, { 0xc4578, 5, RI_E2_ONLINE },
167 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE }, 191 { 0xc4600, 1, RI_ALL_ONLINE }, { 0xd0000, 19, RI_ALL_ONLINE },
168 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE }, 192 { 0xd004c, 8, RI_ALL_ONLINE }, { 0xd006c, 91, RI_ALL_ONLINE },
169 { 0xd01e4, 1, RI_ALL_ONLINE }, { 0xd01f4, 1, RI_ALL_ONLINE }, 193 { 0xd01fc, 1, RI_E2_ONLINE }, { 0xd0200, 2, RI_ALL_ONLINE },
170 { 0xd0200, 2, RI_ALL_ONLINE }, { 0xd020c, 7, RI_ALL_ONLINE }, 194 { 0xd020c, 7, RI_ALL_ONLINE }, { 0xd0228, 18, RI_E1HE2_ONLINE },
171 { 0xd0228, 18, RI_E1H_ONLINE }, { 0xd0280, 1, RI_ALL_ONLINE }, 195 { 0xd0280, 1, RI_ALL_ONLINE }, { 0xd0300, 1, RI_ALL_ONLINE },
172 { 0xd0300, 1, RI_ALL_ONLINE }, { 0xd0400, 1, RI_ALL_ONLINE }, 196 { 0xd0400, 1, RI_ALL_ONLINE }, { 0xd4000, 1, RI_ALL_ONLINE },
173 { 0xd4000, 1, RI_ALL_ONLINE }, { 0xd4004, 2559, RI_ALL_OFFLINE }, 197 { 0xd4004, 2559, RI_ALL_OFFLINE }, { 0xd8000, 1, RI_ALL_ONLINE },
174 { 0xd8000, 1, RI_ALL_ONLINE }, { 0xd8004, 8191, RI_ALL_OFFLINE }, 198 { 0xd8004, 8191, RI_ALL_OFFLINE }, { 0xe0000, 21, RI_ALL_ONLINE },
175 { 0xe0000, 21, RI_ALL_ONLINE }, { 0xe0054, 8, RI_ALL_ONLINE }, 199 { 0xe0054, 8, RI_ALL_ONLINE }, { 0xe0074, 49, RI_ALL_ONLINE },
176 { 0xe0074, 85, RI_ALL_ONLINE }, { 0xe01d4, 1, RI_ALL_ONLINE }, 200 { 0xe0138, 1, RI_E1E1H_ONLINE }, { 0xe013c, 35, RI_ALL_ONLINE },
177 { 0xe01e4, 1, RI_ALL_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE }, 201 { 0xe01f4, 2, RI_E2_ONLINE }, { 0xe0200, 2, RI_ALL_ONLINE },
178 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1H_ONLINE }, 202 { 0xe020c, 8, RI_ALL_ONLINE }, { 0xe022c, 18, RI_E1HE2_ONLINE },
179 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE }, 203 { 0xe0280, 1, RI_ALL_ONLINE }, { 0xe0300, 1, RI_ALL_ONLINE },
180 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE }, 204 { 0xe1000, 1, RI_ALL_ONLINE }, { 0xe2000, 1, RI_ALL_ONLINE },
181 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE }, 205 { 0xe2004, 2047, RI_ALL_OFFLINE }, { 0xf0000, 1, RI_ALL_ONLINE },
182 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE }, 206 { 0xf0004, 16383, RI_ALL_OFFLINE }, { 0x101000, 12, RI_ALL_ONLINE },
183 { 0x10103c, 1, RI_ALL_ONLINE }, { 0x10104c, 1, RI_ALL_ONLINE }, 207 { 0x101050, 1, RI_E1HE2_ONLINE }, { 0x101054, 3, RI_E2_ONLINE },
184 { 0x101050, 1, RI_E1H_ONLINE }, { 0x101100, 1, RI_ALL_ONLINE }, 208 { 0x101100, 1, RI_ALL_ONLINE }, { 0x101800, 8, RI_ALL_ONLINE },
185 { 0x101800, 8, RI_ALL_ONLINE }, { 0x102000, 18, RI_ALL_ONLINE }, 209 { 0x102000, 18, RI_ALL_ONLINE }, { 0x102068, 6, RI_E2_ONLINE },
186 { 0x102054, 1, RI_ALL_ONLINE }, { 0x102064, 1, RI_ALL_ONLINE },
187 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE }, 210 { 0x102080, 17, RI_ALL_ONLINE }, { 0x1020c8, 8, RI_E1H_ONLINE },
188 { 0x102400, 1, RI_ALL_ONLINE }, { 0x103000, 26, RI_ALL_ONLINE }, 211 { 0x1020e8, 9, RI_E2_ONLINE }, { 0x102400, 1, RI_ALL_ONLINE },
189 { 0x103074, 1, RI_ALL_ONLINE }, { 0x103084, 1, RI_ALL_ONLINE }, 212 { 0x103000, 26, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1HE2_ONLINE },
190 { 0x103094, 1, RI_ALL_ONLINE }, { 0x103098, 5, RI_E1H_ONLINE }, 213 { 0x1030ac, 10, RI_E2_ONLINE }, { 0x1030d8, 8, RI_E2_ONLINE },
214 { 0x103400, 1, RI_E2_ONLINE }, { 0x103404, 135, RI_E2_OFFLINE },
191 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE }, 215 { 0x103800, 8, RI_ALL_ONLINE }, { 0x104000, 63, RI_ALL_ONLINE },
192 { 0x104108, 1, RI_ALL_ONLINE }, { 0x104118, 1, RI_ALL_ONLINE }, 216 { 0x10411c, 16, RI_E2_ONLINE }, { 0x104200, 17, RI_ALL_ONLINE },
193 { 0x104200, 17, RI_ALL_ONLINE }, { 0x104400, 64, RI_ALL_ONLINE }, 217 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104500, 192, RI_ALL_OFFLINE },
194 { 0x104500, 192, RI_ALL_OFFLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 218 { 0x104800, 64, RI_ALL_ONLINE }, { 0x104900, 192, RI_ALL_OFFLINE },
195 { 0x104900, 192, RI_ALL_OFFLINE }, { 0x105000, 7, RI_ALL_ONLINE }, 219 { 0x105000, 256, RI_ALL_ONLINE }, { 0x105400, 768, RI_ALL_OFFLINE },
196 { 0x10501c, 1, RI_ALL_OFFLINE }, { 0x105020, 3, RI_ALL_ONLINE }, 220 { 0x107000, 7, RI_E2_ONLINE }, { 0x108000, 33, RI_E1E1H_ONLINE },
197 { 0x10502c, 1, RI_ALL_OFFLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 221 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_E1E1H_ONLINE },
198 { 0x10503c, 1, RI_ALL_OFFLINE }, { 0x105040, 3, RI_ALL_ONLINE }, 222 { 0x108120, 5, RI_E1E1H_ONLINE }, { 0x108200, 74, RI_E1E1H_ONLINE },
199 { 0x10504c, 1, RI_ALL_OFFLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 223 { 0x108400, 74, RI_E1E1H_ONLINE }, { 0x108800, 152, RI_E1E1H_ONLINE },
200 { 0x10505c, 1, RI_ALL_OFFLINE }, { 0x105060, 3, RI_ALL_ONLINE }, 224 { 0x110000, 111, RI_E2_ONLINE }, { 0x110200, 4, RI_E2_ONLINE },
201 { 0x10506c, 1, RI_ALL_OFFLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 225 { 0x120000, 2, RI_ALL_ONLINE }, { 0x120008, 4, RI_ALL_ONLINE },
202 { 0x10507c, 1, RI_ALL_OFFLINE }, { 0x105080, 3, RI_ALL_ONLINE }, 226 { 0x120018, 3, RI_ALL_ONLINE }, { 0x120024, 4, RI_ALL_ONLINE },
203 { 0x10508c, 1, RI_ALL_OFFLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 227 { 0x120034, 3, RI_ALL_ONLINE }, { 0x120040, 4, RI_ALL_ONLINE },
204 { 0x10509c, 1, RI_ALL_OFFLINE }, { 0x1050a0, 3, RI_ALL_ONLINE }, 228 { 0x120050, 3, RI_ALL_ONLINE }, { 0x12005c, 4, RI_ALL_ONLINE },
205 { 0x1050ac, 1, RI_ALL_OFFLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 229 { 0x12006c, 3, RI_ALL_ONLINE }, { 0x120078, 4, RI_ALL_ONLINE },
206 { 0x1050bc, 1, RI_ALL_OFFLINE }, { 0x1050c0, 3, RI_ALL_ONLINE }, 230 { 0x120088, 3, RI_ALL_ONLINE }, { 0x120094, 4, RI_ALL_ONLINE },
207 { 0x1050cc, 1, RI_ALL_OFFLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 231 { 0x1200a4, 3, RI_ALL_ONLINE }, { 0x1200b0, 4, RI_ALL_ONLINE },
208 { 0x1050dc, 1, RI_ALL_OFFLINE }, { 0x1050e0, 3, RI_ALL_ONLINE }, 232 { 0x1200c0, 3, RI_ALL_ONLINE }, { 0x1200cc, 4, RI_ALL_ONLINE },
209 { 0x1050ec, 1, RI_ALL_OFFLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 233 { 0x1200dc, 3, RI_ALL_ONLINE }, { 0x1200e8, 4, RI_ALL_ONLINE },
210 { 0x1050fc, 1, RI_ALL_OFFLINE }, { 0x105100, 3, RI_ALL_ONLINE }, 234 { 0x1200f8, 3, RI_ALL_ONLINE }, { 0x120104, 4, RI_ALL_ONLINE },
211 { 0x10510c, 1, RI_ALL_OFFLINE }, { 0x105110, 3, RI_ALL_ONLINE }, 235 { 0x120114, 1, RI_ALL_ONLINE }, { 0x120118, 22, RI_ALL_ONLINE },
212 { 0x10511c, 1, RI_ALL_OFFLINE }, { 0x105120, 3, RI_ALL_ONLINE }, 236 { 0x120170, 2, RI_E1E1H_ONLINE }, { 0x120178, 243, RI_ALL_ONLINE },
213 { 0x10512c, 1, RI_ALL_OFFLINE }, { 0x105130, 3, RI_ALL_ONLINE }, 237 { 0x120544, 4, RI_E1E1H_ONLINE }, { 0x120554, 7, RI_ALL_ONLINE },
214 { 0x10513c, 1, RI_ALL_OFFLINE }, { 0x105140, 3, RI_ALL_ONLINE }, 238 { 0x12059c, 6, RI_E1HE2_ONLINE }, { 0x1205b4, 1, RI_E1HE2_ONLINE },
215 { 0x10514c, 1, RI_ALL_OFFLINE }, { 0x105150, 3, RI_ALL_ONLINE }, 239 { 0x1205b8, 16, RI_E1HE2_ONLINE }, { 0x1205f8, 4, RI_E2_ONLINE },
216 { 0x10515c, 1, RI_ALL_OFFLINE }, { 0x105160, 3, RI_ALL_ONLINE }, 240 { 0x120618, 1, RI_E2_ONLINE }, { 0x12061c, 20, RI_E1HE2_ONLINE },
217 { 0x10516c, 1, RI_ALL_OFFLINE }, { 0x105170, 3, RI_ALL_ONLINE }, 241 { 0x12066c, 11, RI_E1HE2_ONLINE }, { 0x120698, 5, RI_E2_ONLINE },
218 { 0x10517c, 1, RI_ALL_OFFLINE }, { 0x105180, 3, RI_ALL_ONLINE }, 242 { 0x1206b0, 76, RI_E2_ONLINE }, { 0x1207fc, 1, RI_E2_ONLINE },
219 { 0x10518c, 1, RI_ALL_OFFLINE }, { 0x105190, 3, RI_ALL_ONLINE }, 243 { 0x120808, 66, RI_ALL_ONLINE }, { 0x120910, 7, RI_E2_ONLINE },
220 { 0x10519c, 1, RI_ALL_OFFLINE }, { 0x1051a0, 3, RI_ALL_ONLINE }, 244 { 0x120930, 9, RI_E2_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
221 { 0x1051ac, 1, RI_ALL_OFFLINE }, { 0x1051b0, 3, RI_ALL_ONLINE }, 245 { 0x122000, 2, RI_ALL_ONLINE }, { 0x122008, 2046, RI_E1_OFFLINE },
222 { 0x1051bc, 1, RI_ALL_OFFLINE }, { 0x1051c0, 3, RI_ALL_ONLINE }, 246 { 0x128000, 2, RI_E1HE2_ONLINE }, { 0x128008, 6142, RI_E1HE2_OFFLINE },
223 { 0x1051cc, 1, RI_ALL_OFFLINE }, { 0x1051d0, 3, RI_ALL_ONLINE }, 247 { 0x130000, 35, RI_E2_ONLINE }, { 0x130100, 29, RI_E2_ONLINE },
224 { 0x1051dc, 1, RI_ALL_OFFLINE }, { 0x1051e0, 3, RI_ALL_ONLINE }, 248 { 0x130180, 1, RI_E2_ONLINE }, { 0x130200, 1, RI_E2_ONLINE },
225 { 0x1051ec, 1, RI_ALL_OFFLINE }, { 0x1051f0, 3, RI_ALL_ONLINE }, 249 { 0x130280, 1, RI_E2_ONLINE }, { 0x130300, 5, RI_E2_ONLINE },
226 { 0x1051fc, 1, RI_ALL_OFFLINE }, { 0x105200, 3, RI_ALL_ONLINE }, 250 { 0x130380, 1, RI_E2_ONLINE }, { 0x130400, 1, RI_E2_ONLINE },
227 { 0x10520c, 1, RI_ALL_OFFLINE }, { 0x105210, 3, RI_ALL_ONLINE }, 251 { 0x130480, 5, RI_E2_ONLINE }, { 0x130800, 72, RI_E2_ONLINE },
228 { 0x10521c, 1, RI_ALL_OFFLINE }, { 0x105220, 3, RI_ALL_ONLINE }, 252 { 0x131000, 136, RI_E2_ONLINE }, { 0x132000, 148, RI_E2_ONLINE },
229 { 0x10522c, 1, RI_ALL_OFFLINE }, { 0x105230, 3, RI_ALL_ONLINE }, 253 { 0x134000, 544, RI_E2_ONLINE }, { 0x140000, 64, RI_ALL_ONLINE },
230 { 0x10523c, 1, RI_ALL_OFFLINE }, { 0x105240, 3, RI_ALL_ONLINE }, 254 { 0x140100, 5, RI_E1E1H_ONLINE }, { 0x140114, 45, RI_ALL_ONLINE },
231 { 0x10524c, 1, RI_ALL_OFFLINE }, { 0x105250, 3, RI_ALL_ONLINE }, 255 { 0x140200, 6, RI_ALL_ONLINE }, { 0x140220, 4, RI_E2_ONLINE },
232 { 0x10525c, 1, RI_ALL_OFFLINE }, { 0x105260, 3, RI_ALL_ONLINE }, 256 { 0x140240, 4, RI_E2_ONLINE }, { 0x140260, 4, RI_E2_ONLINE },
233 { 0x10526c, 1, RI_ALL_OFFLINE }, { 0x105270, 3, RI_ALL_ONLINE }, 257 { 0x140280, 4, RI_E2_ONLINE }, { 0x1402a0, 4, RI_E2_ONLINE },
234 { 0x10527c, 1, RI_ALL_OFFLINE }, { 0x105280, 3, RI_ALL_ONLINE }, 258 { 0x1402c0, 4, RI_E2_ONLINE }, { 0x1402e0, 13, RI_E2_ONLINE },
235 { 0x10528c, 1, RI_ALL_OFFLINE }, { 0x105290, 3, RI_ALL_ONLINE }, 259 { 0x144000, 4, RI_E1E1H_ONLINE }, { 0x148000, 4, RI_E1E1H_ONLINE },
236 { 0x10529c, 1, RI_ALL_OFFLINE }, { 0x1052a0, 3, RI_ALL_ONLINE }, 260 { 0x14c000, 4, RI_E1E1H_ONLINE }, { 0x150000, 4, RI_E1E1H_ONLINE },
237 { 0x1052ac, 1, RI_ALL_OFFLINE }, { 0x1052b0, 3, RI_ALL_ONLINE }, 261 { 0x154000, 4, RI_E1E1H_ONLINE }, { 0x158000, 4, RI_E1E1H_ONLINE },
238 { 0x1052bc, 1, RI_ALL_OFFLINE }, { 0x1052c0, 3, RI_ALL_ONLINE }, 262 { 0x15c000, 2, RI_E1HE2_ONLINE }, { 0x15c008, 5, RI_E1H_ONLINE },
239 { 0x1052cc, 1, RI_ALL_OFFLINE }, { 0x1052d0, 3, RI_ALL_ONLINE }, 263 { 0x15c020, 27, RI_E2_ONLINE }, { 0x15c090, 13, RI_E2_ONLINE },
240 { 0x1052dc, 1, RI_ALL_OFFLINE }, { 0x1052e0, 3, RI_ALL_ONLINE }, 264 { 0x15c0c8, 34, RI_E2_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
241 { 0x1052ec, 1, RI_ALL_OFFLINE }, { 0x1052f0, 3, RI_ALL_ONLINE }, 265 { 0x16103c, 2, RI_E2_ONLINE }, { 0x161800, 2, RI_ALL_ONLINE },
242 { 0x1052fc, 1, RI_ALL_OFFLINE }, { 0x105300, 3, RI_ALL_ONLINE }, 266 { 0x164000, 60, RI_ALL_ONLINE }, { 0x164110, 2, RI_E1HE2_ONLINE },
243 { 0x10530c, 1, RI_ALL_OFFLINE }, { 0x105310, 3, RI_ALL_ONLINE }, 267 { 0x164118, 15, RI_E2_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
244 { 0x10531c, 1, RI_ALL_OFFLINE }, { 0x105320, 3, RI_ALL_ONLINE },
245 { 0x10532c, 1, RI_ALL_OFFLINE }, { 0x105330, 3, RI_ALL_ONLINE },
246 { 0x10533c, 1, RI_ALL_OFFLINE }, { 0x105340, 3, RI_ALL_ONLINE },
247 { 0x10534c, 1, RI_ALL_OFFLINE }, { 0x105350, 3, RI_ALL_ONLINE },
248 { 0x10535c, 1, RI_ALL_OFFLINE }, { 0x105360, 3, RI_ALL_ONLINE },
249 { 0x10536c, 1, RI_ALL_OFFLINE }, { 0x105370, 3, RI_ALL_ONLINE },
250 { 0x10537c, 1, RI_ALL_OFFLINE }, { 0x105380, 3, RI_ALL_ONLINE },
251 { 0x10538c, 1, RI_ALL_OFFLINE }, { 0x105390, 3, RI_ALL_ONLINE },
252 { 0x10539c, 1, RI_ALL_OFFLINE }, { 0x1053a0, 3, RI_ALL_ONLINE },
253 { 0x1053ac, 1, RI_ALL_OFFLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
254 { 0x1053bc, 1, RI_ALL_OFFLINE }, { 0x1053c0, 3, RI_ALL_ONLINE },
255 { 0x1053cc, 1, RI_ALL_OFFLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
256 { 0x1053dc, 1, RI_ALL_OFFLINE }, { 0x1053e0, 3, RI_ALL_ONLINE },
257 { 0x1053ec, 1, RI_ALL_OFFLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
258 { 0x1053fc, 769, RI_ALL_OFFLINE }, { 0x108000, 33, RI_ALL_ONLINE },
259 { 0x108090, 1, RI_ALL_ONLINE }, { 0x1080a0, 1, RI_ALL_ONLINE },
260 { 0x1080ac, 5, RI_E1H_ONLINE }, { 0x108100, 5, RI_ALL_ONLINE },
261 { 0x108120, 5, RI_ALL_ONLINE }, { 0x108200, 74, RI_ALL_ONLINE },
262 { 0x108400, 74, RI_ALL_ONLINE }, { 0x108800, 152, RI_ALL_ONLINE },
263 { 0x109000, 1, RI_ALL_ONLINE }, { 0x120000, 347, RI_ALL_ONLINE },
264 { 0x120578, 1, RI_ALL_ONLINE }, { 0x120588, 1, RI_ALL_ONLINE },
265 { 0x120598, 1, RI_ALL_ONLINE }, { 0x12059c, 23, RI_E1H_ONLINE },
266 { 0x120614, 1, RI_E1H_ONLINE }, { 0x12061c, 30, RI_E1H_ONLINE },
267 { 0x12080c, 65, RI_ALL_ONLINE }, { 0x120a00, 2, RI_ALL_ONLINE },
268 { 0x122000, 2, RI_ALL_ONLINE }, { 0x128000, 2, RI_E1H_ONLINE },
269 { 0x140000, 114, RI_ALL_ONLINE }, { 0x1401d4, 1, RI_ALL_ONLINE },
270 { 0x1401e4, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE },
271 { 0x144000, 4, RI_ALL_ONLINE }, { 0x148000, 4, RI_ALL_ONLINE },
272 { 0x14c000, 4, RI_ALL_ONLINE }, { 0x150000, 4, RI_ALL_ONLINE },
273 { 0x154000, 4, RI_ALL_ONLINE }, { 0x158000, 4, RI_ALL_ONLINE },
274 { 0x15c000, 7, RI_E1H_ONLINE }, { 0x161000, 7, RI_ALL_ONLINE },
275 { 0x161028, 1, RI_ALL_ONLINE }, { 0x161038, 1, RI_ALL_ONLINE },
276 { 0x161800, 2, RI_ALL_ONLINE }, { 0x164000, 60, RI_ALL_ONLINE },
277 { 0x1640fc, 1, RI_ALL_ONLINE }, { 0x16410c, 1, RI_ALL_ONLINE },
278 { 0x164110, 2, RI_E1H_ONLINE }, { 0x164200, 1, RI_ALL_ONLINE },
279 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE }, 268 { 0x164208, 1, RI_ALL_ONLINE }, { 0x164210, 1, RI_ALL_ONLINE },
280 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE }, 269 { 0x164218, 1, RI_ALL_ONLINE }, { 0x164220, 1, RI_ALL_ONLINE },
281 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE }, 270 { 0x164228, 1, RI_ALL_ONLINE }, { 0x164230, 1, RI_ALL_ONLINE },
@@ -284,169 +273,298 @@ static const struct reg_addr reg_addrs[REGS_COUNT] = {
284 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE }, 273 { 0x164258, 1, RI_ALL_ONLINE }, { 0x164260, 1, RI_ALL_ONLINE },
285 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE }, 274 { 0x164270, 2, RI_ALL_ONLINE }, { 0x164280, 2, RI_ALL_ONLINE },
286 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE }, 275 { 0x164800, 2, RI_ALL_ONLINE }, { 0x165000, 2, RI_ALL_ONLINE },
287 { 0x166000, 164, RI_ALL_ONLINE }, { 0x16629c, 1, RI_ALL_ONLINE }, 276 { 0x166000, 164, RI_ALL_ONLINE }, { 0x1662cc, 7, RI_E2_ONLINE },
288 { 0x1662ac, 1, RI_ALL_ONLINE }, { 0x1662bc, 1, RI_ALL_ONLINE },
289 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE }, 277 { 0x166400, 49, RI_ALL_ONLINE }, { 0x1664c8, 38, RI_ALL_ONLINE },
290 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166800, 1, RI_ALL_ONLINE }, 278 { 0x166568, 2, RI_ALL_ONLINE }, { 0x166570, 5, RI_E2_ONLINE },
291 { 0x168000, 270, RI_ALL_ONLINE }, { 0x168444, 1, RI_ALL_ONLINE }, 279 { 0x166800, 1, RI_ALL_ONLINE }, { 0x168000, 137, RI_ALL_ONLINE },
292 { 0x168454, 1, RI_ALL_ONLINE }, { 0x168800, 19, RI_ALL_ONLINE }, 280 { 0x168224, 2, RI_E1E1H_ONLINE }, { 0x16822c, 29, RI_ALL_ONLINE },
293 { 0x168900, 1, RI_ALL_ONLINE }, { 0x168a00, 128, RI_ALL_ONLINE }, 281 { 0x1682a0, 12, RI_E1E1H_ONLINE }, { 0x1682d0, 12, RI_ALL_ONLINE },
294 { 0x16a000, 1, RI_ALL_ONLINE }, { 0x16a004, 1535, RI_ALL_OFFLINE }, 282 { 0x168300, 2, RI_E1E1H_ONLINE }, { 0x168308, 68, RI_ALL_ONLINE },
295 { 0x16c000, 1, RI_ALL_ONLINE }, { 0x16c004, 1535, RI_ALL_OFFLINE }, 283 { 0x168418, 2, RI_E1E1H_ONLINE }, { 0x168420, 6, RI_ALL_ONLINE },
296 { 0x16e000, 16, RI_E1H_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE }, 284 { 0x168800, 19, RI_ALL_ONLINE }, { 0x168900, 1, RI_ALL_ONLINE },
297 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 183, RI_E1H_ONLINE }, 285 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16a000, 1, RI_ALL_ONLINE },
298 { 0x170000, 93, RI_ALL_ONLINE }, { 0x170180, 1, RI_ALL_ONLINE }, 286 { 0x16a004, 1535, RI_ALL_OFFLINE }, { 0x16c000, 1, RI_ALL_ONLINE },
299 { 0x170190, 1, RI_ALL_ONLINE }, { 0x170200, 4, RI_ALL_ONLINE }, 287 { 0x16c004, 1535, RI_ALL_OFFLINE }, { 0x16e000, 16, RI_E1H_ONLINE },
300 { 0x170214, 1, RI_ALL_ONLINE }, { 0x178000, 1, RI_ALL_ONLINE }, 288 { 0x16e040, 8, RI_E2_ONLINE }, { 0x16e100, 1, RI_E1H_ONLINE },
301 { 0x180000, 61, RI_ALL_ONLINE }, { 0x180100, 1, RI_ALL_ONLINE }, 289 { 0x16e200, 2, RI_E1H_ONLINE }, { 0x16e400, 161, RI_E1H_ONLINE },
302 { 0x180110, 1, RI_ALL_ONLINE }, { 0x180120, 1, RI_ALL_ONLINE }, 290 { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 12, RI_E1H_ONLINE },
303 { 0x180130, 1, RI_ALL_ONLINE }, { 0x18013c, 2, RI_E1H_ONLINE }, 291 { 0x16e6bc, 4, RI_E1HE2_ONLINE }, { 0x16e6cc, 4, RI_E1H_ONLINE },
304 { 0x180200, 58, RI_ALL_ONLINE }, { 0x180340, 4, RI_ALL_ONLINE }, 292 { 0x16e6e0, 12, RI_E2_ONLINE }, { 0x16e768, 17, RI_E2_ONLINE },
305 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_ALL_OFFLINE }, 293 { 0x170000, 24, RI_ALL_ONLINE }, { 0x170060, 4, RI_E1E1H_ONLINE },
294 { 0x170070, 65, RI_ALL_ONLINE }, { 0x170194, 11, RI_E2_ONLINE },
295 { 0x1701c4, 1, RI_E2_ONLINE }, { 0x1701cc, 7, RI_E2_ONLINE },
296 { 0x1701ec, 1, RI_E2_ONLINE }, { 0x1701f4, 1, RI_E2_ONLINE },
297 { 0x170200, 4, RI_ALL_ONLINE }, { 0x170214, 1, RI_ALL_ONLINE },
298 { 0x170218, 77, RI_E2_ONLINE }, { 0x170400, 64, RI_E2_ONLINE },
299 { 0x178000, 1, RI_ALL_ONLINE }, { 0x180000, 61, RI_ALL_ONLINE },
300 { 0x18013c, 2, RI_E1HE2_ONLINE }, { 0x180200, 58, RI_ALL_ONLINE },
301 { 0x180340, 4, RI_ALL_ONLINE }, { 0x180380, 1, RI_E2_ONLINE },
302 { 0x180388, 1, RI_E2_ONLINE }, { 0x180390, 1, RI_E2_ONLINE },
303 { 0x180398, 1, RI_E2_ONLINE }, { 0x1803a0, 5, RI_E2_ONLINE },
304 { 0x180400, 1, RI_ALL_ONLINE }, { 0x180404, 255, RI_E1E1H_OFFLINE },
306 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE }, 305 { 0x181000, 4, RI_ALL_ONLINE }, { 0x181010, 1020, RI_ALL_OFFLINE },
307 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 1023, RI_ALL_OFFLINE }, 306 { 0x1a0000, 1, RI_ALL_ONLINE }, { 0x1a0004, 5631, RI_ALL_OFFLINE },
308 { 0x1a1000, 1, RI_ALL_ONLINE }, { 0x1a1004, 4607, RI_ALL_OFFLINE }, 307 { 0x1a5800, 2560, RI_E1HE2_OFFLINE }, { 0x1a8000, 1, RI_ALL_ONLINE },
309 { 0x1a5800, 2560, RI_E1H_OFFLINE }, { 0x1a8000, 64, RI_ALL_OFFLINE }, 308 { 0x1a8004, 8191, RI_E1HE2_OFFLINE }, { 0x1b0000, 1, RI_ALL_ONLINE },
310 { 0x1a8100, 1984, RI_E1H_OFFLINE }, { 0x1aa000, 1, RI_E1H_ONLINE }, 309 { 0x1b0004, 15, RI_E1H_OFFLINE }, { 0x1b0040, 1, RI_E1HE2_ONLINE },
311 { 0x1aa004, 6655, RI_E1H_OFFLINE }, { 0x1b1800, 128, RI_ALL_OFFLINE }, 310 { 0x1b0044, 239, RI_E1H_OFFLINE }, { 0x1b0400, 1, RI_ALL_ONLINE },
312 { 0x1b1c00, 128, RI_ALL_OFFLINE }, { 0x1b2000, 1, RI_ALL_OFFLINE }, 311 { 0x1b0404, 255, RI_E1H_OFFLINE }, { 0x1b0800, 1, RI_ALL_ONLINE },
313 { 0x1b2400, 64, RI_E1H_OFFLINE }, { 0x1b8200, 1, RI_ALL_ONLINE }, 312 { 0x1b0840, 1, RI_E1HE2_ONLINE }, { 0x1b0c00, 1, RI_ALL_ONLINE },
313 { 0x1b1000, 1, RI_ALL_ONLINE }, { 0x1b1040, 1, RI_E1HE2_ONLINE },
314 { 0x1b1400, 1, RI_ALL_ONLINE }, { 0x1b1440, 1, RI_E1HE2_ONLINE },
315 { 0x1b1480, 1, RI_E1HE2_ONLINE }, { 0x1b14c0, 1, RI_E1HE2_ONLINE },
316 { 0x1b1800, 128, RI_ALL_OFFLINE }, { 0x1b1c00, 128, RI_ALL_OFFLINE },
317 { 0x1b2000, 1, RI_ALL_ONLINE }, { 0x1b2400, 1, RI_E1HE2_ONLINE },
318 { 0x1b2404, 5631, RI_E2_OFFLINE }, { 0x1b8000, 1, RI_ALL_ONLINE },
319 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE },
320 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x1b8100, 1, RI_ALL_ONLINE },
321 { 0x1b8140, 1, RI_ALL_ONLINE }, { 0x1b8180, 1, RI_ALL_ONLINE },
322 { 0x1b81c0, 1, RI_ALL_ONLINE }, { 0x1b8200, 1, RI_ALL_ONLINE },
314 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE }, 323 { 0x1b8240, 1, RI_ALL_ONLINE }, { 0x1b8280, 1, RI_ALL_ONLINE },
315 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE }, 324 { 0x1b82c0, 1, RI_ALL_ONLINE }, { 0x1b8300, 1, RI_ALL_ONLINE },
316 { 0x1b8a80, 1, RI_ALL_ONLINE }, { 0x1c0000, 2, RI_ALL_ONLINE }, 325 { 0x1b8340, 1, RI_ALL_ONLINE }, { 0x1b8380, 1, RI_ALL_ONLINE },
317 { 0x200000, 65, RI_ALL_ONLINE }, { 0x200110, 1, RI_ALL_ONLINE }, 326 { 0x1b83c0, 1, RI_ALL_ONLINE }, { 0x1b8400, 1, RI_ALL_ONLINE },
318 { 0x200120, 1, RI_ALL_ONLINE }, { 0x200130, 1, RI_ALL_ONLINE }, 327 { 0x1b8440, 1, RI_ALL_ONLINE }, { 0x1b8480, 1, RI_ALL_ONLINE },
319 { 0x200140, 1, RI_ALL_ONLINE }, { 0x20014c, 2, RI_E1H_ONLINE }, 328 { 0x1b84c0, 1, RI_ALL_ONLINE }, { 0x1b8500, 1, RI_ALL_ONLINE },
320 { 0x200200, 58, RI_ALL_ONLINE }, { 0x200340, 4, RI_ALL_ONLINE }, 329 { 0x1b8540, 1, RI_ALL_ONLINE }, { 0x1b8580, 1, RI_ALL_ONLINE },
321 { 0x200400, 1, RI_ALL_ONLINE }, { 0x200404, 255, RI_ALL_OFFLINE }, 330 { 0x1b85c0, 19, RI_E2_ONLINE }, { 0x1b8800, 1, RI_ALL_ONLINE },
322 { 0x202000, 4, RI_ALL_ONLINE }, { 0x202010, 2044, RI_ALL_OFFLINE }, 331 { 0x1b8840, 1, RI_ALL_ONLINE }, { 0x1b8880, 1, RI_ALL_ONLINE },
323 { 0x220000, 1, RI_ALL_ONLINE }, { 0x220004, 1023, RI_ALL_OFFLINE }, 332 { 0x1b88c0, 1, RI_ALL_ONLINE }, { 0x1b8900, 1, RI_ALL_ONLINE },
324 { 0x221000, 1, RI_ALL_ONLINE }, { 0x221004, 4607, RI_ALL_OFFLINE }, 333 { 0x1b8940, 1, RI_ALL_ONLINE }, { 0x1b8980, 1, RI_ALL_ONLINE },
325 { 0x225800, 1536, RI_E1H_OFFLINE }, { 0x227000, 1, RI_E1H_ONLINE }, 334 { 0x1b89c0, 1, RI_ALL_ONLINE }, { 0x1b8a00, 1, RI_ALL_ONLINE },
326 { 0x227004, 1023, RI_E1H_OFFLINE }, { 0x228000, 64, RI_ALL_OFFLINE }, 335 { 0x1b8a40, 1, RI_ALL_ONLINE }, { 0x1b8a80, 1, RI_ALL_ONLINE },
327 { 0x228100, 8640, RI_E1H_OFFLINE }, { 0x231800, 128, RI_ALL_OFFLINE }, 336 { 0x1b8ac0, 1, RI_ALL_ONLINE }, { 0x1b8b00, 1, RI_ALL_ONLINE },
328 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_OFFLINE }, 337 { 0x1b8b40, 1, RI_ALL_ONLINE }, { 0x1b8b80, 1, RI_ALL_ONLINE },
329 { 0x232400, 64, RI_E1H_OFFLINE }, { 0x238200, 1, RI_ALL_ONLINE }, 338 { 0x1b8bc0, 1, RI_ALL_ONLINE }, { 0x1b8c00, 1, RI_ALL_ONLINE },
330 { 0x238240, 1, RI_ALL_ONLINE }, { 0x238280, 1, RI_ALL_ONLINE }, 339 { 0x1b8c40, 1, RI_ALL_ONLINE }, { 0x1b8c80, 1, RI_ALL_ONLINE },
331 { 0x2382c0, 1, RI_ALL_ONLINE }, { 0x238a00, 1, RI_ALL_ONLINE }, 340 { 0x1b8cc0, 1, RI_ALL_ONLINE }, { 0x1b8cc4, 1, RI_E2_ONLINE },
332 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x240000, 2, RI_ALL_ONLINE }, 341 { 0x1b8d00, 1, RI_ALL_ONLINE }, { 0x1b8d40, 1, RI_ALL_ONLINE },
333 { 0x280000, 65, RI_ALL_ONLINE }, { 0x280110, 1, RI_ALL_ONLINE }, 342 { 0x1b8d80, 1, RI_ALL_ONLINE }, { 0x1b8dc0, 1, RI_ALL_ONLINE },
334 { 0x280120, 1, RI_ALL_ONLINE }, { 0x280130, 1, RI_ALL_ONLINE }, 343 { 0x1b8e00, 1, RI_ALL_ONLINE }, { 0x1b8e40, 1, RI_ALL_ONLINE },
335 { 0x280140, 1, RI_ALL_ONLINE }, { 0x28014c, 2, RI_E1H_ONLINE }, 344 { 0x1b8e80, 1, RI_ALL_ONLINE }, { 0x1b8e84, 1, RI_E2_ONLINE },
336 { 0x280200, 58, RI_ALL_ONLINE }, { 0x280340, 4, RI_ALL_ONLINE }, 345 { 0x1b8ec0, 1, RI_E1HE2_ONLINE }, { 0x1b8f00, 1, RI_E1HE2_ONLINE },
337 { 0x280400, 1, RI_ALL_ONLINE }, { 0x280404, 255, RI_ALL_OFFLINE }, 346 { 0x1b8f40, 1, RI_E1HE2_ONLINE }, { 0x1b8f80, 1, RI_E1HE2_ONLINE },
338 { 0x282000, 4, RI_ALL_ONLINE }, { 0x282010, 2044, RI_ALL_OFFLINE }, 347 { 0x1b8fc0, 1, RI_E1HE2_ONLINE }, { 0x1b8fc4, 2, RI_E2_ONLINE },
339 { 0x2a0000, 1, RI_ALL_ONLINE }, { 0x2a0004, 1023, RI_ALL_OFFLINE }, 348 { 0x1b8fd0, 6, RI_E2_ONLINE }, { 0x1b9000, 1, RI_E2_ONLINE },
340 { 0x2a1000, 1, RI_ALL_ONLINE }, { 0x2a1004, 4607, RI_ALL_OFFLINE }, 349 { 0x1b9040, 3, RI_E2_ONLINE }, { 0x1b9400, 14, RI_E2_ONLINE },
341 { 0x2a5800, 2560, RI_E1H_OFFLINE }, { 0x2a8000, 64, RI_ALL_OFFLINE }, 350 { 0x1b943c, 19, RI_E2_ONLINE }, { 0x1b9490, 10, RI_E2_ONLINE },
342 { 0x2a8100, 960, RI_E1H_OFFLINE }, { 0x2a9000, 1, RI_E1H_ONLINE }, 351 { 0x1c0000, 2, RI_ALL_ONLINE }, { 0x200000, 65, RI_ALL_ONLINE },
343 { 0x2a9004, 7679, RI_E1H_OFFLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE }, 352 { 0x20014c, 2, RI_E1HE2_ONLINE }, { 0x200200, 58, RI_ALL_ONLINE },
344 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_OFFLINE }, 353 { 0x200340, 4, RI_ALL_ONLINE }, { 0x200380, 1, RI_E2_ONLINE },
345 { 0x2b2400, 64, RI_E1H_OFFLINE }, { 0x2b8200, 1, RI_ALL_ONLINE }, 354 { 0x200388, 1, RI_E2_ONLINE }, { 0x200390, 1, RI_E2_ONLINE },
346 { 0x2b8240, 1, RI_ALL_ONLINE }, { 0x2b8280, 1, RI_ALL_ONLINE }, 355 { 0x200398, 1, RI_E2_ONLINE }, { 0x2003a0, 1, RI_E2_ONLINE },
347 { 0x2b82c0, 1, RI_ALL_ONLINE }, { 0x2b8a00, 1, RI_ALL_ONLINE }, 356 { 0x2003a8, 2, RI_E2_ONLINE }, { 0x200400, 1, RI_ALL_ONLINE },
348 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE }, 357 { 0x200404, 255, RI_E1E1H_OFFLINE }, { 0x202000, 4, RI_ALL_ONLINE },
349 { 0x300000, 65, RI_ALL_ONLINE }, { 0x300110, 1, RI_ALL_ONLINE }, 358 { 0x202010, 2044, RI_ALL_OFFLINE }, { 0x220000, 1, RI_ALL_ONLINE },
350 { 0x300120, 1, RI_ALL_ONLINE }, { 0x300130, 1, RI_ALL_ONLINE }, 359 { 0x220004, 5631, RI_ALL_OFFLINE }, { 0x225800, 2560, RI_E1HE2_OFFLINE},
351 { 0x300140, 1, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1H_ONLINE }, 360 { 0x228000, 1, RI_ALL_ONLINE }, { 0x228004, 8191, RI_E1HE2_OFFLINE },
361 { 0x230000, 1, RI_ALL_ONLINE }, { 0x230004, 15, RI_E1H_OFFLINE },
362 { 0x230040, 1, RI_E1HE2_ONLINE }, { 0x230044, 239, RI_E1H_OFFLINE },
363 { 0x230400, 1, RI_ALL_ONLINE }, { 0x230404, 255, RI_E1H_OFFLINE },
364 { 0x230800, 1, RI_ALL_ONLINE }, { 0x230840, 1, RI_E1HE2_ONLINE },
365 { 0x230c00, 1, RI_ALL_ONLINE }, { 0x231000, 1, RI_ALL_ONLINE },
366 { 0x231040, 1, RI_E1HE2_ONLINE }, { 0x231400, 1, RI_ALL_ONLINE },
367 { 0x231440, 1, RI_E1HE2_ONLINE }, { 0x231480, 1, RI_E1HE2_ONLINE },
368 { 0x2314c0, 1, RI_E1HE2_ONLINE }, { 0x231800, 128, RI_ALL_OFFLINE },
369 { 0x231c00, 128, RI_ALL_OFFLINE }, { 0x232000, 1, RI_ALL_ONLINE },
370 { 0x232400, 1, RI_E1HE2_ONLINE }, { 0x232404, 5631, RI_E2_OFFLINE },
371 { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
372 { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
373 { 0x238100, 1, RI_ALL_ONLINE }, { 0x238140, 1, RI_ALL_ONLINE },
374 { 0x238180, 1, RI_ALL_ONLINE }, { 0x2381c0, 1, RI_ALL_ONLINE },
375 { 0x238200, 1, RI_ALL_ONLINE }, { 0x238240, 1, RI_ALL_ONLINE },
376 { 0x238280, 1, RI_ALL_ONLINE }, { 0x2382c0, 1, RI_ALL_ONLINE },
377 { 0x238300, 1, RI_ALL_ONLINE }, { 0x238340, 1, RI_ALL_ONLINE },
378 { 0x238380, 1, RI_ALL_ONLINE }, { 0x2383c0, 1, RI_ALL_ONLINE },
379 { 0x238400, 1, RI_ALL_ONLINE }, { 0x238440, 1, RI_ALL_ONLINE },
380 { 0x238480, 1, RI_ALL_ONLINE }, { 0x2384c0, 1, RI_ALL_ONLINE },
381 { 0x238500, 1, RI_ALL_ONLINE }, { 0x238540, 1, RI_ALL_ONLINE },
382 { 0x238580, 1, RI_ALL_ONLINE }, { 0x2385c0, 19, RI_E2_ONLINE },
383 { 0x238800, 1, RI_ALL_ONLINE }, { 0x238840, 1, RI_ALL_ONLINE },
384 { 0x238880, 1, RI_ALL_ONLINE }, { 0x2388c0, 1, RI_ALL_ONLINE },
385 { 0x238900, 1, RI_ALL_ONLINE }, { 0x238940, 1, RI_ALL_ONLINE },
386 { 0x238980, 1, RI_ALL_ONLINE }, { 0x2389c0, 1, RI_ALL_ONLINE },
387 { 0x238a00, 1, RI_ALL_ONLINE }, { 0x238a40, 1, RI_ALL_ONLINE },
388 { 0x238a80, 1, RI_ALL_ONLINE }, { 0x238ac0, 1, RI_ALL_ONLINE },
389 { 0x238b00, 1, RI_ALL_ONLINE }, { 0x238b40, 1, RI_ALL_ONLINE },
390 { 0x238b80, 1, RI_ALL_ONLINE }, { 0x238bc0, 1, RI_ALL_ONLINE },
391 { 0x238c00, 1, RI_ALL_ONLINE }, { 0x238c40, 1, RI_ALL_ONLINE },
392 { 0x238c80, 1, RI_ALL_ONLINE }, { 0x238cc0, 1, RI_ALL_ONLINE },
393 { 0x238cc4, 1, RI_E2_ONLINE }, { 0x238d00, 1, RI_ALL_ONLINE },
394 { 0x238d40, 1, RI_ALL_ONLINE }, { 0x238d80, 1, RI_ALL_ONLINE },
395 { 0x238dc0, 1, RI_ALL_ONLINE }, { 0x238e00, 1, RI_ALL_ONLINE },
396 { 0x238e40, 1, RI_ALL_ONLINE }, { 0x238e80, 1, RI_ALL_ONLINE },
397 { 0x238e84, 1, RI_E2_ONLINE }, { 0x238ec0, 1, RI_E1HE2_ONLINE },
398 { 0x238f00, 1, RI_E1HE2_ONLINE }, { 0x238f40, 1, RI_E1HE2_ONLINE },
399 { 0x238f80, 1, RI_E1HE2_ONLINE }, { 0x238fc0, 1, RI_E1HE2_ONLINE },
400 { 0x238fc4, 2, RI_E2_ONLINE }, { 0x238fd0, 6, RI_E2_ONLINE },
401 { 0x239000, 1, RI_E2_ONLINE }, { 0x239040, 3, RI_E2_ONLINE },
402 { 0x240000, 2, RI_ALL_ONLINE }, { 0x280000, 65, RI_ALL_ONLINE },
403 { 0x28014c, 2, RI_E1HE2_ONLINE }, { 0x280200, 58, RI_ALL_ONLINE },
404 { 0x280340, 4, RI_ALL_ONLINE }, { 0x280380, 1, RI_E2_ONLINE },
405 { 0x280388, 1, RI_E2_ONLINE }, { 0x280390, 1, RI_E2_ONLINE },
406 { 0x280398, 1, RI_E2_ONLINE }, { 0x2803a0, 1, RI_E2_ONLINE },
407 { 0x2803a8, 2, RI_E2_ONLINE }, { 0x280400, 1, RI_ALL_ONLINE },
408 { 0x280404, 255, RI_E1E1H_OFFLINE }, { 0x282000, 4, RI_ALL_ONLINE },
409 { 0x282010, 2044, RI_ALL_OFFLINE }, { 0x2a0000, 1, RI_ALL_ONLINE },
410 { 0x2a0004, 5631, RI_ALL_OFFLINE }, { 0x2a5800, 2560, RI_E1HE2_OFFLINE},
411 { 0x2a8000, 1, RI_ALL_ONLINE }, { 0x2a8004, 8191, RI_E1HE2_OFFLINE },
412 { 0x2b0000, 1, RI_ALL_ONLINE }, { 0x2b0004, 15, RI_E1H_OFFLINE },
413 { 0x2b0040, 1, RI_E1HE2_ONLINE }, { 0x2b0044, 239, RI_E1H_OFFLINE },
414 { 0x2b0400, 1, RI_ALL_ONLINE }, { 0x2b0404, 255, RI_E1H_OFFLINE },
415 { 0x2b0800, 1, RI_ALL_ONLINE }, { 0x2b0840, 1, RI_E1HE2_ONLINE },
416 { 0x2b0c00, 1, RI_ALL_ONLINE }, { 0x2b1000, 1, RI_ALL_ONLINE },
417 { 0x2b1040, 1, RI_E1HE2_ONLINE }, { 0x2b1400, 1, RI_ALL_ONLINE },
418 { 0x2b1440, 1, RI_E1HE2_ONLINE }, { 0x2b1480, 1, RI_E1HE2_ONLINE },
419 { 0x2b14c0, 1, RI_E1HE2_ONLINE }, { 0x2b1800, 128, RI_ALL_OFFLINE },
420 { 0x2b1c00, 128, RI_ALL_OFFLINE }, { 0x2b2000, 1, RI_ALL_ONLINE },
421 { 0x2b2400, 1, RI_E1HE2_ONLINE }, { 0x2b2404, 5631, RI_E2_OFFLINE },
422 { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
423 { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x2b80c0, 1, RI_ALL_ONLINE },
424 { 0x2b8100, 1, RI_ALL_ONLINE }, { 0x2b8140, 1, RI_ALL_ONLINE },
425 { 0x2b8180, 1, RI_ALL_ONLINE }, { 0x2b81c0, 1, RI_ALL_ONLINE },
426 { 0x2b8200, 1, RI_ALL_ONLINE }, { 0x2b8240, 1, RI_ALL_ONLINE },
427 { 0x2b8280, 1, RI_ALL_ONLINE }, { 0x2b82c0, 1, RI_ALL_ONLINE },
428 { 0x2b8300, 1, RI_ALL_ONLINE }, { 0x2b8340, 1, RI_ALL_ONLINE },
429 { 0x2b8380, 1, RI_ALL_ONLINE }, { 0x2b83c0, 1, RI_ALL_ONLINE },
430 { 0x2b8400, 1, RI_ALL_ONLINE }, { 0x2b8440, 1, RI_ALL_ONLINE },
431 { 0x2b8480, 1, RI_ALL_ONLINE }, { 0x2b84c0, 1, RI_ALL_ONLINE },
432 { 0x2b8500, 1, RI_ALL_ONLINE }, { 0x2b8540, 1, RI_ALL_ONLINE },
433 { 0x2b8580, 1, RI_ALL_ONLINE }, { 0x2b85c0, 19, RI_E2_ONLINE },
434 { 0x2b8800, 1, RI_ALL_ONLINE }, { 0x2b8840, 1, RI_ALL_ONLINE },
435 { 0x2b8880, 1, RI_ALL_ONLINE }, { 0x2b88c0, 1, RI_ALL_ONLINE },
436 { 0x2b8900, 1, RI_ALL_ONLINE }, { 0x2b8940, 1, RI_ALL_ONLINE },
437 { 0x2b8980, 1, RI_ALL_ONLINE }, { 0x2b89c0, 1, RI_ALL_ONLINE },
438 { 0x2b8a00, 1, RI_ALL_ONLINE }, { 0x2b8a40, 1, RI_ALL_ONLINE },
439 { 0x2b8a80, 1, RI_ALL_ONLINE }, { 0x2b8ac0, 1, RI_ALL_ONLINE },
440 { 0x2b8b00, 1, RI_ALL_ONLINE }, { 0x2b8b40, 1, RI_ALL_ONLINE },
441 { 0x2b8b80, 1, RI_ALL_ONLINE }, { 0x2b8bc0, 1, RI_ALL_ONLINE },
442 { 0x2b8c00, 1, RI_ALL_ONLINE }, { 0x2b8c40, 1, RI_ALL_ONLINE },
443 { 0x2b8c80, 1, RI_ALL_ONLINE }, { 0x2b8cc0, 1, RI_ALL_ONLINE },
444 { 0x2b8cc4, 1, RI_E2_ONLINE }, { 0x2b8d00, 1, RI_ALL_ONLINE },
445 { 0x2b8d40, 1, RI_ALL_ONLINE }, { 0x2b8d80, 1, RI_ALL_ONLINE },
446 { 0x2b8dc0, 1, RI_ALL_ONLINE }, { 0x2b8e00, 1, RI_ALL_ONLINE },
447 { 0x2b8e40, 1, RI_ALL_ONLINE }, { 0x2b8e80, 1, RI_ALL_ONLINE },
448 { 0x2b8e84, 1, RI_E2_ONLINE }, { 0x2b8ec0, 1, RI_E1HE2_ONLINE },
449 { 0x2b8f00, 1, RI_E1HE2_ONLINE }, { 0x2b8f40, 1, RI_E1HE2_ONLINE },
450 { 0x2b8f80, 1, RI_E1HE2_ONLINE }, { 0x2b8fc0, 1, RI_E1HE2_ONLINE },
451 { 0x2b8fc4, 2, RI_E2_ONLINE }, { 0x2b8fd0, 6, RI_E2_ONLINE },
452 { 0x2b9000, 1, RI_E2_ONLINE }, { 0x2b9040, 3, RI_E2_ONLINE },
453 { 0x2b9400, 14, RI_E2_ONLINE }, { 0x2b943c, 19, RI_E2_ONLINE },
454 { 0x2b9490, 10, RI_E2_ONLINE }, { 0x2c0000, 2, RI_ALL_ONLINE },
455 { 0x300000, 65, RI_ALL_ONLINE }, { 0x30014c, 2, RI_E1HE2_ONLINE },
352 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE }, 456 { 0x300200, 58, RI_ALL_ONLINE }, { 0x300340, 4, RI_ALL_ONLINE },
353 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_ALL_OFFLINE }, 457 { 0x300380, 1, RI_E2_ONLINE }, { 0x300388, 1, RI_E2_ONLINE },
458 { 0x300390, 1, RI_E2_ONLINE }, { 0x300398, 1, RI_E2_ONLINE },
459 { 0x3003a0, 1, RI_E2_ONLINE }, { 0x3003a8, 2, RI_E2_ONLINE },
460 { 0x300400, 1, RI_ALL_ONLINE }, { 0x300404, 255, RI_E1E1H_OFFLINE },
354 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE }, 461 { 0x302000, 4, RI_ALL_ONLINE }, { 0x302010, 2044, RI_ALL_OFFLINE },
355 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 1023, RI_ALL_OFFLINE }, 462 { 0x320000, 1, RI_ALL_ONLINE }, { 0x320004, 5631, RI_ALL_OFFLINE },
356 { 0x321000, 1, RI_ALL_ONLINE }, { 0x321004, 4607, RI_ALL_OFFLINE }, 463 { 0x325800, 2560, RI_E1HE2_OFFLINE }, { 0x328000, 1, RI_ALL_ONLINE },
357 { 0x325800, 2560, RI_E1H_OFFLINE }, { 0x328000, 64, RI_ALL_OFFLINE }, 464 { 0x328004, 8191, RI_E1HE2_OFFLINE }, { 0x330000, 1, RI_ALL_ONLINE },
358 { 0x328100, 536, RI_E1H_OFFLINE }, { 0x328960, 1, RI_E1H_ONLINE }, 465 { 0x330004, 15, RI_E1H_OFFLINE }, { 0x330040, 1, RI_E1HE2_ONLINE },
359 { 0x328964, 8103, RI_E1H_OFFLINE }, { 0x331800, 128, RI_ALL_OFFLINE }, 466 { 0x330044, 239, RI_E1H_OFFLINE }, { 0x330400, 1, RI_ALL_ONLINE },
360 { 0x331c00, 128, RI_ALL_OFFLINE }, { 0x332000, 1, RI_ALL_OFFLINE }, 467 { 0x330404, 255, RI_E1H_OFFLINE }, { 0x330800, 1, RI_ALL_ONLINE },
361 { 0x332400, 64, RI_E1H_OFFLINE }, { 0x338200, 1, RI_ALL_ONLINE }, 468 { 0x330840, 1, RI_E1HE2_ONLINE }, { 0x330c00, 1, RI_ALL_ONLINE },
469 { 0x331000, 1, RI_ALL_ONLINE }, { 0x331040, 1, RI_E1HE2_ONLINE },
470 { 0x331400, 1, RI_ALL_ONLINE }, { 0x331440, 1, RI_E1HE2_ONLINE },
471 { 0x331480, 1, RI_E1HE2_ONLINE }, { 0x3314c0, 1, RI_E1HE2_ONLINE },
472 { 0x331800, 128, RI_ALL_OFFLINE }, { 0x331c00, 128, RI_ALL_OFFLINE },
473 { 0x332000, 1, RI_ALL_ONLINE }, { 0x332400, 1, RI_E1HE2_ONLINE },
474 { 0x332404, 5631, RI_E2_OFFLINE }, { 0x338000, 1, RI_ALL_ONLINE },
475 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
476 { 0x3380c0, 1, RI_ALL_ONLINE }, { 0x338100, 1, RI_ALL_ONLINE },
477 { 0x338140, 1, RI_ALL_ONLINE }, { 0x338180, 1, RI_ALL_ONLINE },
478 { 0x3381c0, 1, RI_ALL_ONLINE }, { 0x338200, 1, RI_ALL_ONLINE },
362 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE }, 479 { 0x338240, 1, RI_ALL_ONLINE }, { 0x338280, 1, RI_ALL_ONLINE },
363 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE }, 480 { 0x3382c0, 1, RI_ALL_ONLINE }, { 0x338300, 1, RI_ALL_ONLINE },
364 { 0x338a80, 1, RI_ALL_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE } 481 { 0x338340, 1, RI_ALL_ONLINE }, { 0x338380, 1, RI_ALL_ONLINE },
482 { 0x3383c0, 1, RI_ALL_ONLINE }, { 0x338400, 1, RI_ALL_ONLINE },
483 { 0x338440, 1, RI_ALL_ONLINE }, { 0x338480, 1, RI_ALL_ONLINE },
484 { 0x3384c0, 1, RI_ALL_ONLINE }, { 0x338500, 1, RI_ALL_ONLINE },
485 { 0x338540, 1, RI_ALL_ONLINE }, { 0x338580, 1, RI_ALL_ONLINE },
486 { 0x3385c0, 19, RI_E2_ONLINE }, { 0x338800, 1, RI_ALL_ONLINE },
487 { 0x338840, 1, RI_ALL_ONLINE }, { 0x338880, 1, RI_ALL_ONLINE },
488 { 0x3388c0, 1, RI_ALL_ONLINE }, { 0x338900, 1, RI_ALL_ONLINE },
489 { 0x338940, 1, RI_ALL_ONLINE }, { 0x338980, 1, RI_ALL_ONLINE },
490 { 0x3389c0, 1, RI_ALL_ONLINE }, { 0x338a00, 1, RI_ALL_ONLINE },
491 { 0x338a40, 1, RI_ALL_ONLINE }, { 0x338a80, 1, RI_ALL_ONLINE },
492 { 0x338ac0, 1, RI_ALL_ONLINE }, { 0x338b00, 1, RI_ALL_ONLINE },
493 { 0x338b40, 1, RI_ALL_ONLINE }, { 0x338b80, 1, RI_ALL_ONLINE },
494 { 0x338bc0, 1, RI_ALL_ONLINE }, { 0x338c00, 1, RI_ALL_ONLINE },
495 { 0x338c40, 1, RI_ALL_ONLINE }, { 0x338c80, 1, RI_ALL_ONLINE },
496 { 0x338cc0, 1, RI_ALL_ONLINE }, { 0x338cc4, 1, RI_E2_ONLINE },
497 { 0x338d00, 1, RI_ALL_ONLINE }, { 0x338d40, 1, RI_ALL_ONLINE },
498 { 0x338d80, 1, RI_ALL_ONLINE }, { 0x338dc0, 1, RI_ALL_ONLINE },
499 { 0x338e00, 1, RI_ALL_ONLINE }, { 0x338e40, 1, RI_ALL_ONLINE },
500 { 0x338e80, 1, RI_ALL_ONLINE }, { 0x338e84, 1, RI_E2_ONLINE },
501 { 0x338ec0, 1, RI_E1HE2_ONLINE }, { 0x338f00, 1, RI_E1HE2_ONLINE },
502 { 0x338f40, 1, RI_E1HE2_ONLINE }, { 0x338f80, 1, RI_E1HE2_ONLINE },
503 { 0x338fc0, 1, RI_E1HE2_ONLINE }, { 0x338fc4, 2, RI_E2_ONLINE },
504 { 0x338fd0, 6, RI_E2_ONLINE }, { 0x339000, 1, RI_E2_ONLINE },
505 { 0x339040, 3, RI_E2_ONLINE }, { 0x340000, 2, RI_ALL_ONLINE },
365}; 506};
366 507
367 508#define IDLE_REGS_COUNT 237
368#define IDLE_REGS_COUNT 277
369static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = { 509static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
370 { 0x2114, 1, RI_ALL_ONLINE }, { 0x2120, 1, RI_ALL_ONLINE }, 510 { 0x2104, 1, RI_ALL_ONLINE }, { 0x2110, 2, RI_ALL_ONLINE },
371 { 0x212c, 4, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE }, 511 { 0x211c, 8, RI_ALL_ONLINE }, { 0x2814, 1, RI_ALL_ONLINE },
372 { 0x281c, 2, RI_ALL_ONLINE }, { 0xa38c, 1, RI_ALL_ONLINE }, 512 { 0x281c, 2, RI_ALL_ONLINE }, { 0x2854, 1, RI_ALL_ONLINE },
513 { 0x285c, 1, RI_ALL_ONLINE }, { 0x9010, 7, RI_E2_ONLINE },
514 { 0x9030, 1, RI_E2_ONLINE }, { 0x9068, 16, RI_E2_ONLINE },
515 { 0x9230, 2, RI_E2_ONLINE }, { 0x9244, 1, RI_E2_ONLINE },
516 { 0x9298, 1, RI_E2_ONLINE }, { 0x92a8, 1, RI_E2_ONLINE },
517 { 0xa38c, 1, RI_ALL_ONLINE }, { 0xa3c4, 1, RI_E1HE2_ONLINE },
373 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE }, 518 { 0xa408, 1, RI_ALL_ONLINE }, { 0xa42c, 12, RI_ALL_ONLINE },
374 { 0xa600, 5, RI_E1H_ONLINE }, { 0xa618, 1, RI_E1H_ONLINE }, 519 { 0xa600, 5, RI_E1HE2_ONLINE }, { 0xa618, 1, RI_E1HE2_ONLINE },
375 { 0xc09c, 1, RI_ALL_ONLINE }, { 0x103b0, 1, RI_ALL_ONLINE }, 520 { 0xa714, 1, RI_E2_ONLINE }, { 0xa720, 1, RI_E2_ONLINE },
376 { 0x103c0, 1, RI_ALL_ONLINE }, { 0x103d0, 1, RI_E1H_ONLINE }, 521 { 0xa750, 1, RI_E2_ONLINE }, { 0xc09c, 1, RI_E1E1H_ONLINE },
377 { 0x2021c, 11, RI_ALL_ONLINE }, { 0x202a8, 1, RI_ALL_ONLINE }, 522 { 0x103b0, 1, RI_ALL_ONLINE }, { 0x103c0, 1, RI_ALL_ONLINE },
378 { 0x202b8, 1, RI_ALL_ONLINE }, { 0x20404, 1, RI_ALL_ONLINE }, 523 { 0x103d0, 1, RI_E1H_ONLINE }, { 0x183bc, 1, RI_E2_ONLINE },
379 { 0x2040c, 2, RI_ALL_ONLINE }, { 0x2041c, 2, RI_ALL_ONLINE }, 524 { 0x183cc, 1, RI_E2_ONLINE }, { 0x2021c, 11, RI_ALL_ONLINE },
380 { 0x40154, 14, RI_ALL_ONLINE }, { 0x40198, 1, RI_ALL_ONLINE }, 525 { 0x202a8, 1, RI_ALL_ONLINE }, { 0x202b8, 1, RI_ALL_ONLINE },
381 { 0x404ac, 1, RI_ALL_ONLINE }, { 0x404bc, 1, RI_ALL_ONLINE }, 526 { 0x20404, 1, RI_ALL_ONLINE }, { 0x2040c, 2, RI_ALL_ONLINE },
382 { 0x42290, 1, RI_ALL_ONLINE }, { 0x422a0, 1, RI_ALL_ONLINE }, 527 { 0x2041c, 2, RI_ALL_ONLINE }, { 0x40154, 14, RI_ALL_ONLINE },
383 { 0x422b0, 1, RI_ALL_ONLINE }, { 0x42548, 1, RI_ALL_ONLINE }, 528 { 0x40198, 1, RI_ALL_ONLINE }, { 0x404ac, 1, RI_ALL_ONLINE },
384 { 0x42550, 1, RI_ALL_ONLINE }, { 0x42558, 1, RI_ALL_ONLINE }, 529 { 0x404bc, 1, RI_ALL_ONLINE }, { 0x42290, 1, RI_ALL_ONLINE },
385 { 0x50160, 8, RI_ALL_ONLINE }, { 0x501d0, 1, RI_ALL_ONLINE }, 530 { 0x422a0, 1, RI_ALL_ONLINE }, { 0x422b0, 1, RI_ALL_ONLINE },
386 { 0x501e0, 1, RI_ALL_ONLINE }, { 0x50204, 1, RI_ALL_ONLINE }, 531 { 0x42548, 1, RI_ALL_ONLINE }, { 0x42550, 1, RI_ALL_ONLINE },
387 { 0x5020c, 2, RI_ALL_ONLINE }, { 0x5021c, 1, RI_ALL_ONLINE }, 532 { 0x42558, 1, RI_ALL_ONLINE }, { 0x50160, 8, RI_ALL_ONLINE },
388 { 0x60090, 1, RI_ALL_ONLINE }, { 0x6011c, 1, RI_ALL_ONLINE }, 533 { 0x501d0, 1, RI_ALL_ONLINE }, { 0x501e0, 1, RI_ALL_ONLINE },
389 { 0x6012c, 1, RI_ALL_ONLINE }, { 0xc101c, 1, RI_ALL_ONLINE }, 534 { 0x50204, 1, RI_ALL_ONLINE }, { 0x5020c, 2, RI_ALL_ONLINE },
390 { 0xc102c, 1, RI_ALL_ONLINE }, { 0xc2290, 1, RI_ALL_ONLINE }, 535 { 0x5021c, 1, RI_ALL_ONLINE }, { 0x60090, 1, RI_ALL_ONLINE },
391 { 0xc22a0, 1, RI_ALL_ONLINE }, { 0xc22b0, 1, RI_ALL_ONLINE }, 536 { 0x6011c, 1, RI_ALL_ONLINE }, { 0x6012c, 1, RI_ALL_ONLINE },
392 { 0xc2548, 1, RI_ALL_ONLINE }, { 0xc2550, 1, RI_ALL_ONLINE }, 537 { 0xc101c, 1, RI_ALL_ONLINE }, { 0xc102c, 1, RI_ALL_ONLINE },
393 { 0xc2558, 1, RI_ALL_ONLINE }, { 0xc4294, 1, RI_ALL_ONLINE }, 538 { 0xc2290, 1, RI_ALL_ONLINE }, { 0xc22a0, 1, RI_ALL_ONLINE },
394 { 0xc42a4, 1, RI_ALL_ONLINE }, { 0xc42b4, 1, RI_ALL_ONLINE }, 539 { 0xc22b0, 1, RI_ALL_ONLINE }, { 0xc2548, 1, RI_ALL_ONLINE },
395 { 0xc4550, 1, RI_ALL_ONLINE }, { 0xc4558, 1, RI_ALL_ONLINE }, 540 { 0xc2550, 1, RI_ALL_ONLINE }, { 0xc2558, 1, RI_ALL_ONLINE },
396 { 0xc4560, 1, RI_ALL_ONLINE }, { 0xd016c, 8, RI_ALL_ONLINE }, 541 { 0xc4294, 1, RI_ALL_ONLINE }, { 0xc42a4, 1, RI_ALL_ONLINE },
397 { 0xd01d8, 1, RI_ALL_ONLINE }, { 0xd01e8, 1, RI_ALL_ONLINE }, 542 { 0xc42b4, 1, RI_ALL_ONLINE }, { 0xc4550, 1, RI_ALL_ONLINE },
398 { 0xd0204, 1, RI_ALL_ONLINE }, { 0xd020c, 3, RI_ALL_ONLINE }, 543 { 0xc4558, 1, RI_ALL_ONLINE }, { 0xc4560, 1, RI_ALL_ONLINE },
399 { 0xe0154, 8, RI_ALL_ONLINE }, { 0xe01c8, 1, RI_ALL_ONLINE }, 544 { 0xd016c, 8, RI_ALL_ONLINE }, { 0xd01d8, 1, RI_ALL_ONLINE },
400 { 0xe01d8, 1, RI_ALL_ONLINE }, { 0xe0204, 1, RI_ALL_ONLINE }, 545 { 0xd01e8, 1, RI_ALL_ONLINE }, { 0xd0204, 1, RI_ALL_ONLINE },
401 { 0xe020c, 2, RI_ALL_ONLINE }, { 0xe021c, 2, RI_ALL_ONLINE }, 546 { 0xd020c, 3, RI_ALL_ONLINE }, { 0xe0154, 8, RI_ALL_ONLINE },
402 { 0x101014, 1, RI_ALL_ONLINE }, { 0x101030, 1, RI_ALL_ONLINE }, 547 { 0xe01c8, 1, RI_ALL_ONLINE }, { 0xe01d8, 1, RI_ALL_ONLINE },
403 { 0x101040, 1, RI_ALL_ONLINE }, { 0x102058, 1, RI_ALL_ONLINE }, 548 { 0xe0204, 1, RI_ALL_ONLINE }, { 0xe020c, 2, RI_ALL_ONLINE },
404 { 0x102080, 16, RI_ALL_ONLINE }, { 0x103004, 2, RI_ALL_ONLINE }, 549 { 0xe021c, 2, RI_ALL_ONLINE }, { 0x101014, 1, RI_ALL_ONLINE },
405 { 0x103068, 1, RI_ALL_ONLINE }, { 0x103078, 1, RI_ALL_ONLINE }, 550 { 0x101030, 1, RI_ALL_ONLINE }, { 0x101040, 1, RI_ALL_ONLINE },
406 { 0x103088, 1, RI_ALL_ONLINE }, { 0x10309c, 2, RI_E1H_ONLINE }, 551 { 0x102058, 1, RI_ALL_ONLINE }, { 0x102080, 16, RI_ALL_ONLINE },
552 { 0x103004, 2, RI_ALL_ONLINE }, { 0x103068, 1, RI_ALL_ONLINE },
553 { 0x103078, 1, RI_ALL_ONLINE }, { 0x103088, 1, RI_ALL_ONLINE },
554 { 0x10309c, 2, RI_E1HE2_ONLINE }, { 0x1030b8, 2, RI_E2_ONLINE },
555 { 0x1030cc, 1, RI_E2_ONLINE }, { 0x1030e0, 1, RI_E2_ONLINE },
407 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE }, 556 { 0x104004, 1, RI_ALL_ONLINE }, { 0x104018, 1, RI_ALL_ONLINE },
408 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE }, 557 { 0x104020, 1, RI_ALL_ONLINE }, { 0x10403c, 1, RI_ALL_ONLINE },
409 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE }, 558 { 0x1040fc, 1, RI_ALL_ONLINE }, { 0x10410c, 1, RI_ALL_ONLINE },
410 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE }, 559 { 0x104400, 64, RI_ALL_ONLINE }, { 0x104800, 64, RI_ALL_ONLINE },
411 { 0x105000, 3, RI_ALL_ONLINE }, { 0x105010, 3, RI_ALL_ONLINE }, 560 { 0x105000, 256, RI_ALL_ONLINE }, { 0x108094, 1, RI_E1E1H_ONLINE },
412 { 0x105020, 3, RI_ALL_ONLINE }, { 0x105030, 3, RI_ALL_ONLINE }, 561 { 0x1201b0, 2, RI_ALL_ONLINE }, { 0x12032c, 1, RI_ALL_ONLINE },
413 { 0x105040, 3, RI_ALL_ONLINE }, { 0x105050, 3, RI_ALL_ONLINE }, 562 { 0x12036c, 3, RI_ALL_ONLINE }, { 0x120408, 2, RI_ALL_ONLINE },
414 { 0x105060, 3, RI_ALL_ONLINE }, { 0x105070, 3, RI_ALL_ONLINE }, 563 { 0x120414, 15, RI_ALL_ONLINE }, { 0x120478, 2, RI_ALL_ONLINE },
415 { 0x105080, 3, RI_ALL_ONLINE }, { 0x105090, 3, RI_ALL_ONLINE }, 564 { 0x12052c, 1, RI_ALL_ONLINE }, { 0x120564, 3, RI_ALL_ONLINE },
416 { 0x1050a0, 3, RI_ALL_ONLINE }, { 0x1050b0, 3, RI_ALL_ONLINE }, 565 { 0x12057c, 1, RI_ALL_ONLINE }, { 0x12058c, 1, RI_ALL_ONLINE },
417 { 0x1050c0, 3, RI_ALL_ONLINE }, { 0x1050d0, 3, RI_ALL_ONLINE }, 566 { 0x120608, 1, RI_E1HE2_ONLINE }, { 0x120738, 1, RI_E2_ONLINE },
418 { 0x1050e0, 3, RI_ALL_ONLINE }, { 0x1050f0, 3, RI_ALL_ONLINE }, 567 { 0x120778, 2, RI_E2_ONLINE }, { 0x120808, 3, RI_ALL_ONLINE },
419 { 0x105100, 3, RI_ALL_ONLINE }, { 0x105110, 3, RI_ALL_ONLINE },
420 { 0x105120, 3, RI_ALL_ONLINE }, { 0x105130, 3, RI_ALL_ONLINE },
421 { 0x105140, 3, RI_ALL_ONLINE }, { 0x105150, 3, RI_ALL_ONLINE },
422 { 0x105160, 3, RI_ALL_ONLINE }, { 0x105170, 3, RI_ALL_ONLINE },
423 { 0x105180, 3, RI_ALL_ONLINE }, { 0x105190, 3, RI_ALL_ONLINE },
424 { 0x1051a0, 3, RI_ALL_ONLINE }, { 0x1051b0, 3, RI_ALL_ONLINE },
425 { 0x1051c0, 3, RI_ALL_ONLINE }, { 0x1051d0, 3, RI_ALL_ONLINE },
426 { 0x1051e0, 3, RI_ALL_ONLINE }, { 0x1051f0, 3, RI_ALL_ONLINE },
427 { 0x105200, 3, RI_ALL_ONLINE }, { 0x105210, 3, RI_ALL_ONLINE },
428 { 0x105220, 3, RI_ALL_ONLINE }, { 0x105230, 3, RI_ALL_ONLINE },
429 { 0x105240, 3, RI_ALL_ONLINE }, { 0x105250, 3, RI_ALL_ONLINE },
430 { 0x105260, 3, RI_ALL_ONLINE }, { 0x105270, 3, RI_ALL_ONLINE },
431 { 0x105280, 3, RI_ALL_ONLINE }, { 0x105290, 3, RI_ALL_ONLINE },
432 { 0x1052a0, 3, RI_ALL_ONLINE }, { 0x1052b0, 3, RI_ALL_ONLINE },
433 { 0x1052c0, 3, RI_ALL_ONLINE }, { 0x1052d0, 3, RI_ALL_ONLINE },
434 { 0x1052e0, 3, RI_ALL_ONLINE }, { 0x1052f0, 3, RI_ALL_ONLINE },
435 { 0x105300, 3, RI_ALL_ONLINE }, { 0x105310, 3, RI_ALL_ONLINE },
436 { 0x105320, 3, RI_ALL_ONLINE }, { 0x105330, 3, RI_ALL_ONLINE },
437 { 0x105340, 3, RI_ALL_ONLINE }, { 0x105350, 3, RI_ALL_ONLINE },
438 { 0x105360, 3, RI_ALL_ONLINE }, { 0x105370, 3, RI_ALL_ONLINE },
439 { 0x105380, 3, RI_ALL_ONLINE }, { 0x105390, 3, RI_ALL_ONLINE },
440 { 0x1053a0, 3, RI_ALL_ONLINE }, { 0x1053b0, 3, RI_ALL_ONLINE },
441 { 0x1053c0, 3, RI_ALL_ONLINE }, { 0x1053d0, 3, RI_ALL_ONLINE },
442 { 0x1053e0, 3, RI_ALL_ONLINE }, { 0x1053f0, 3, RI_ALL_ONLINE },
443 { 0x108094, 1, RI_ALL_ONLINE }, { 0x1201b0, 2, RI_ALL_ONLINE },
444 { 0x12032c, 1, RI_ALL_ONLINE }, { 0x12036c, 3, RI_ALL_ONLINE },
445 { 0x120408, 2, RI_ALL_ONLINE }, { 0x120414, 15, RI_ALL_ONLINE },
446 { 0x120478, 2, RI_ALL_ONLINE }, { 0x12052c, 1, RI_ALL_ONLINE },
447 { 0x120564, 3, RI_ALL_ONLINE }, { 0x12057c, 1, RI_ALL_ONLINE },
448 { 0x12058c, 1, RI_ALL_ONLINE }, { 0x120608, 1, RI_E1H_ONLINE },
449 { 0x120808, 1, RI_E1_ONLINE }, { 0x12080c, 2, RI_ALL_ONLINE },
450 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE }, 568 { 0x120818, 1, RI_ALL_ONLINE }, { 0x120820, 1, RI_ALL_ONLINE },
451 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE }, 569 { 0x120828, 1, RI_ALL_ONLINE }, { 0x120830, 1, RI_ALL_ONLINE },
452 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE }, 570 { 0x120838, 1, RI_ALL_ONLINE }, { 0x120840, 1, RI_ALL_ONLINE },
@@ -462,48 +580,50 @@ static const struct reg_addr idle_addrs[IDLE_REGS_COUNT] = {
462 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE }, 580 { 0x1208d8, 1, RI_ALL_ONLINE }, { 0x1208e0, 1, RI_ALL_ONLINE },
463 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE }, 581 { 0x1208e8, 1, RI_ALL_ONLINE }, { 0x1208f0, 1, RI_ALL_ONLINE },
464 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE }, 582 { 0x1208f8, 1, RI_ALL_ONLINE }, { 0x120900, 1, RI_ALL_ONLINE },
465 { 0x120908, 1, RI_ALL_ONLINE }, { 0x14005c, 2, RI_ALL_ONLINE }, 583 { 0x120908, 1, RI_ALL_ONLINE }, { 0x120940, 5, RI_E2_ONLINE },
466 { 0x1400d0, 2, RI_ALL_ONLINE }, { 0x1400e0, 1, RI_ALL_ONLINE }, 584 { 0x130030, 1, RI_E2_ONLINE }, { 0x13004c, 3, RI_E2_ONLINE },
467 { 0x1401c8, 1, RI_ALL_ONLINE }, { 0x140200, 6, RI_ALL_ONLINE }, 585 { 0x130064, 2, RI_E2_ONLINE }, { 0x13009c, 1, RI_E2_ONLINE },
468 { 0x16101c, 1, RI_ALL_ONLINE }, { 0x16102c, 1, RI_ALL_ONLINE }, 586 { 0x130130, 1, RI_E2_ONLINE }, { 0x13016c, 1, RI_E2_ONLINE },
469 { 0x164014, 2, RI_ALL_ONLINE }, { 0x1640f0, 1, RI_ALL_ONLINE }, 587 { 0x130300, 1, RI_E2_ONLINE }, { 0x130480, 1, RI_E2_ONLINE },
470 { 0x166290, 1, RI_ALL_ONLINE }, { 0x1662a0, 1, RI_ALL_ONLINE }, 588 { 0x14005c, 2, RI_ALL_ONLINE }, { 0x1400d0, 2, RI_ALL_ONLINE },
471 { 0x1662b0, 1, RI_ALL_ONLINE }, { 0x166548, 1, RI_ALL_ONLINE }, 589 { 0x1400e0, 1, RI_ALL_ONLINE }, { 0x1401c8, 1, RI_ALL_ONLINE },
472 { 0x166550, 1, RI_ALL_ONLINE }, { 0x166558, 1, RI_ALL_ONLINE }, 590 { 0x140200, 6, RI_ALL_ONLINE }, { 0x16101c, 1, RI_ALL_ONLINE },
473 { 0x168000, 1, RI_ALL_ONLINE }, { 0x168008, 1, RI_ALL_ONLINE }, 591 { 0x16102c, 1, RI_ALL_ONLINE }, { 0x164014, 2, RI_ALL_ONLINE },
474 { 0x168010, 1, RI_ALL_ONLINE }, { 0x168018, 1, RI_ALL_ONLINE }, 592 { 0x1640f0, 1, RI_ALL_ONLINE }, { 0x166290, 1, RI_ALL_ONLINE },
475 { 0x168028, 2, RI_ALL_ONLINE }, { 0x168058, 4, RI_ALL_ONLINE }, 593 { 0x1662a0, 1, RI_ALL_ONLINE }, { 0x1662b0, 1, RI_ALL_ONLINE },
476 { 0x168070, 1, RI_ALL_ONLINE }, { 0x168238, 1, RI_ALL_ONLINE }, 594 { 0x166548, 1, RI_ALL_ONLINE }, { 0x166550, 1, RI_ALL_ONLINE },
477 { 0x1682d0, 2, RI_ALL_ONLINE }, { 0x1682e0, 1, RI_ALL_ONLINE }, 595 { 0x166558, 1, RI_ALL_ONLINE }, { 0x168000, 1, RI_ALL_ONLINE },
478 { 0x168300, 67, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE }, 596 { 0x168008, 1, RI_ALL_ONLINE }, { 0x168010, 1, RI_ALL_ONLINE },
597 { 0x168018, 1, RI_ALL_ONLINE }, { 0x168028, 2, RI_ALL_ONLINE },
598 { 0x168058, 4, RI_ALL_ONLINE }, { 0x168070, 1, RI_ALL_ONLINE },
599 { 0x168238, 1, RI_ALL_ONLINE }, { 0x1682d0, 2, RI_ALL_ONLINE },
600 { 0x1682e0, 1, RI_ALL_ONLINE }, { 0x168300, 2, RI_E1E1H_ONLINE },
601 { 0x168308, 65, RI_ALL_ONLINE }, { 0x168410, 2, RI_ALL_ONLINE },
479 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE }, 602 { 0x168438, 1, RI_ALL_ONLINE }, { 0x168448, 1, RI_ALL_ONLINE },
480 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE }, 603 { 0x168a00, 128, RI_ALL_ONLINE }, { 0x16e200, 128, RI_E1H_ONLINE },
481 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 70, RI_E1H_ONLINE }, 604 { 0x16e404, 2, RI_E1H_ONLINE }, { 0x16e584, 64, RI_E1H_ONLINE },
482 { 0x1700a4, 1, RI_ALL_ONLINE }, { 0x1700ac, 2, RI_ALL_ONLINE }, 605 { 0x16e684, 2, RI_E1HE2_ONLINE }, { 0x16e68c, 4, RI_E1H_ONLINE },
483 { 0x1700c0, 1, RI_ALL_ONLINE }, { 0x170174, 1, RI_ALL_ONLINE }, 606 { 0x16e6fc, 4, RI_E2_ONLINE }, { 0x1700a4, 1, RI_ALL_ONLINE },
484 { 0x170184, 1, RI_ALL_ONLINE }, { 0x1800f4, 1, RI_ALL_ONLINE }, 607 { 0x1700ac, 2, RI_ALL_ONLINE }, { 0x1700c0, 1, RI_ALL_ONLINE },
485 { 0x180104, 1, RI_ALL_ONLINE }, { 0x180114, 1, RI_ALL_ONLINE }, 608 { 0x170174, 1, RI_ALL_ONLINE }, { 0x170184, 1, RI_ALL_ONLINE },
486 { 0x180124, 1, RI_ALL_ONLINE }, { 0x18026c, 1, RI_ALL_ONLINE }, 609 { 0x1800f4, 1, RI_ALL_ONLINE }, { 0x180104, 1, RI_ALL_ONLINE },
487 { 0x1802a0, 1, RI_ALL_ONLINE }, { 0x1a1000, 1, RI_ALL_ONLINE }, 610 { 0x180114, 1, RI_ALL_ONLINE }, { 0x180124, 1, RI_ALL_ONLINE },
488 { 0x1aa000, 1, RI_E1H_ONLINE }, { 0x1b8000, 1, RI_ALL_ONLINE }, 611 { 0x18026c, 1, RI_ALL_ONLINE }, { 0x1802a0, 1, RI_ALL_ONLINE },
489 { 0x1b8040, 1, RI_ALL_ONLINE }, { 0x1b8080, 1, RI_ALL_ONLINE }, 612 { 0x1b8000, 1, RI_ALL_ONLINE }, { 0x1b8040, 1, RI_ALL_ONLINE },
490 { 0x1b80c0, 1, RI_ALL_ONLINE }, { 0x200104, 1, RI_ALL_ONLINE }, 613 { 0x1b8080, 1, RI_ALL_ONLINE }, { 0x1b80c0, 1, RI_ALL_ONLINE },
491 { 0x200114, 1, RI_ALL_ONLINE }, { 0x200124, 1, RI_ALL_ONLINE }, 614 { 0x200104, 1, RI_ALL_ONLINE }, { 0x200114, 1, RI_ALL_ONLINE },
492 { 0x200134, 1, RI_ALL_ONLINE }, { 0x20026c, 1, RI_ALL_ONLINE }, 615 { 0x200124, 1, RI_ALL_ONLINE }, { 0x200134, 1, RI_ALL_ONLINE },
493 { 0x2002a0, 1, RI_ALL_ONLINE }, { 0x221000, 1, RI_ALL_ONLINE }, 616 { 0x20026c, 1, RI_ALL_ONLINE }, { 0x2002a0, 1, RI_ALL_ONLINE },
494 { 0x227000, 1, RI_E1H_ONLINE }, { 0x238000, 1, RI_ALL_ONLINE }, 617 { 0x238000, 1, RI_ALL_ONLINE }, { 0x238040, 1, RI_ALL_ONLINE },
495 { 0x238040, 1, RI_ALL_ONLINE }, { 0x238080, 1, RI_ALL_ONLINE }, 618 { 0x238080, 1, RI_ALL_ONLINE }, { 0x2380c0, 1, RI_ALL_ONLINE },
496 { 0x2380c0, 1, RI_ALL_ONLINE }, { 0x280104, 1, RI_ALL_ONLINE }, 619 { 0x280104, 1, RI_ALL_ONLINE }, { 0x280114, 1, RI_ALL_ONLINE },
497 { 0x280114, 1, RI_ALL_ONLINE }, { 0x280124, 1, RI_ALL_ONLINE }, 620 { 0x280124, 1, RI_ALL_ONLINE }, { 0x280134, 1, RI_ALL_ONLINE },
498 { 0x280134, 1, RI_ALL_ONLINE }, { 0x28026c, 1, RI_ALL_ONLINE }, 621 { 0x28026c, 1, RI_ALL_ONLINE }, { 0x2802a0, 1, RI_ALL_ONLINE },
499 { 0x2802a0, 1, RI_ALL_ONLINE }, { 0x2a1000, 1, RI_ALL_ONLINE }, 622 { 0x2b8000, 1, RI_ALL_ONLINE }, { 0x2b8040, 1, RI_ALL_ONLINE },
500 { 0x2a9000, 1, RI_E1H_ONLINE }, { 0x2b8000, 1, RI_ALL_ONLINE }, 623 { 0x2b8080, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
501 { 0x2b8040, 1, RI_ALL_ONLINE }, { 0x2b8080, 1, RI_ALL_ONLINE },
502 { 0x2b80c0, 1, RI_ALL_ONLINE }, { 0x300104, 1, RI_ALL_ONLINE },
503 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE }, 624 { 0x300114, 1, RI_ALL_ONLINE }, { 0x300124, 1, RI_ALL_ONLINE },
504 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE }, 625 { 0x300134, 1, RI_ALL_ONLINE }, { 0x30026c, 1, RI_ALL_ONLINE },
505 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x321000, 1, RI_ALL_ONLINE }, 626 { 0x3002a0, 1, RI_ALL_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
506 { 0x328960, 1, RI_E1H_ONLINE }, { 0x338000, 1, RI_ALL_ONLINE },
507 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE }, 627 { 0x338040, 1, RI_ALL_ONLINE }, { 0x338080, 1, RI_ALL_ONLINE },
508 { 0x3380c0, 1, RI_ALL_ONLINE } 628 { 0x3380c0, 1, RI_ALL_ONLINE }
509}; 629};
@@ -515,7 +635,6 @@ static const struct wreg_addr wreg_addrs_e1[WREGS_COUNT_E1] = {
515 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE } 635 { 0x1b0c00, 192, 1, read_reg_e1_0, RI_E1_OFFLINE }
516}; 636};
517 637
518
519#define WREGS_COUNT_E1H 1 638#define WREGS_COUNT_E1H 1
520static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 }; 639static const u32 read_reg_e1h_0[] = { 0x1b1040, 0x1b1000 };
521 640
@@ -530,22 +649,53 @@ static const struct wreg_addr wreg_addrs_e2[WREGS_COUNT_E2] = {
530 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE } 649 { 0x1b0c00, 128, 2, read_reg_e2_0, RI_E2_OFFLINE }
531}; 650};
532 651
533static const struct dump_sign dump_sign_all = { 0x49aa93ee, 0x40835, 0x22 }; 652static const struct dump_sign dump_sign_all = { 0x4d18b0a4, 0x60010, 0x3a };
534
535 653
536#define TIMER_REGS_COUNT_E1 2 654#define TIMER_REGS_COUNT_E1 2
537static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] =
538 { 0x164014, 0x164018 };
539static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] =
540 { 0x1640d0, 0x1640d4 };
541 655
656static const u32 timer_status_regs_e1[TIMER_REGS_COUNT_E1] = {
657 0x164014, 0x164018 };
658static const u32 timer_scan_regs_e1[TIMER_REGS_COUNT_E1] = {
659 0x1640d0, 0x1640d4 };
542 660
543#define TIMER_REGS_COUNT_E1H 2 661#define TIMER_REGS_COUNT_E1H 2
544static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] =
545 { 0x164014, 0x164018 };
546static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] =
547 { 0x1640d0, 0x1640d4 };
548 662
663static const u32 timer_status_regs_e1h[TIMER_REGS_COUNT_E1H] = {
664 0x164014, 0x164018 };
665static const u32 timer_scan_regs_e1h[TIMER_REGS_COUNT_E1H] = {
666 0x1640d0, 0x1640d4 };
667
668#define TIMER_REGS_COUNT_E2 2
669
670static const u32 timer_status_regs_e2[TIMER_REGS_COUNT_E2] = {
671 0x164014, 0x164018 };
672static const u32 timer_scan_regs_e2[TIMER_REGS_COUNT_E2] = {
673 0x1640d0, 0x1640d4 };
674
675#define PAGE_MODE_VALUES_E1 0
676
677#define PAGE_READ_REGS_E1 0
678
679#define PAGE_WRITE_REGS_E1 0
680
681static const u32 page_vals_e1[] = { 0 };
682
683static const u32 page_write_regs_e1[] = { 0 };
684
685static const struct reg_addr page_read_regs_e1[] = { { 0x0, 0, RI_E1_ONLINE } };
686
687#define PAGE_MODE_VALUES_E1H 0
688
689#define PAGE_READ_REGS_E1H 0
690
691#define PAGE_WRITE_REGS_E1H 0
692
693static const u32 page_vals_e1h[] = { 0 };
694
695static const u32 page_write_regs_e1h[] = { 0 };
696
697static const struct reg_addr page_read_regs_e1h[] = {
698 { 0x0, 0, RI_E1H_ONLINE } };
549 699
550#define PAGE_MODE_VALUES_E2 2 700#define PAGE_MODE_VALUES_E2 2
551 701
diff --git a/drivers/net/bnx2x/bnx2x_ethtool.c b/drivers/net/bnx2x/bnx2x_ethtool.c
index d02ffbdc9f0e..5b44a8b48509 100644
--- a/drivers/net/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/bnx2x/bnx2x_ethtool.c
@@ -24,6 +24,144 @@
24#include "bnx2x.h" 24#include "bnx2x.h"
25#include "bnx2x_cmn.h" 25#include "bnx2x_cmn.h"
26#include "bnx2x_dump.h" 26#include "bnx2x_dump.h"
27#include "bnx2x_init.h"
28
 29/* Note: in the format strings below %s is replaced by the queue name, which is
 30 * either its index or 'fcoe' for the FCoE queue. Make sure the format string
 31 * length does not exceed ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2.
 32 */
33#define MAX_QUEUE_NAME_LEN 4
34static const struct {
35 long offset;
36 int size;
37 char string[ETH_GSTRING_LEN];
38} bnx2x_q_stats_arr[] = {
39/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%s]: rx_bytes" },
40 { Q_STATS_OFFSET32(error_bytes_received_hi),
41 8, "[%s]: rx_error_bytes" },
42 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
43 8, "[%s]: rx_ucast_packets" },
44 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
45 8, "[%s]: rx_mcast_packets" },
46 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
47 8, "[%s]: rx_bcast_packets" },
48 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%s]: rx_discards" },
49 { Q_STATS_OFFSET32(rx_err_discard_pkt),
50 4, "[%s]: rx_phy_ip_err_discards"},
51 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
52 4, "[%s]: rx_skb_alloc_discard" },
53 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%s]: rx_csum_offload_errors" },
54
55/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%s]: tx_bytes" },
56 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
57 8, "[%s]: tx_ucast_packets" },
58 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
59 8, "[%s]: tx_mcast_packets" },
60 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
61 8, "[%s]: tx_bcast_packets" }
62};
63
64#define BNX2X_NUM_Q_STATS ARRAY_SIZE(bnx2x_q_stats_arr)
65
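A quick way to sanity-check the format-string constraint from the note above: the longest entry plus the longest queue name (at most MAX_QUEUE_NAME_LEN characters, e.g. "fcoe") must still fit in ETH_GSTRING_LEN once "%s" is substituted. The stand-alone sketch below is illustrative only; it copies the two constants and the longest format string, and is not part of the driver code.

#include <stdio.h>
#include <string.h>

#define ETH_GSTRING_LEN    32	/* as defined in <linux/ethtool.h> */
#define MAX_QUEUE_NAME_LEN 4	/* queue index or "fcoe" */

int main(void)
{
	/* longest format string in bnx2x_q_stats_arr above */
	const char *fmt = "[%s]: rx_csum_offload_errors";
	char buf[ETH_GSTRING_LEN];

	/* "%s" (2 chars) is replaced by at most MAX_QUEUE_NAME_LEN chars,
	 * hence the ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2 limit */
	if (strlen(fmt) > ETH_GSTRING_LEN - MAX_QUEUE_NAME_LEN + 2)
		printf("format string too long\n");

	snprintf(buf, sizeof(buf), fmt, "fcoe");
	printf("%s\n", buf);	/* "[fcoe]: rx_csum_offload_errors" */
	return 0;
}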
66static const struct {
67 long offset;
68 int size;
69 u32 flags;
70#define STATS_FLAGS_PORT 1
71#define STATS_FLAGS_FUNC 2
72#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
73 char string[ETH_GSTRING_LEN];
74} bnx2x_stats_arr[] = {
75/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
76 8, STATS_FLAGS_BOTH, "rx_bytes" },
77 { STATS_OFFSET32(error_bytes_received_hi),
78 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
79 { STATS_OFFSET32(total_unicast_packets_received_hi),
80 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
81 { STATS_OFFSET32(total_multicast_packets_received_hi),
82 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
83 { STATS_OFFSET32(total_broadcast_packets_received_hi),
84 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
85 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
86 8, STATS_FLAGS_PORT, "rx_crc_errors" },
87 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
88 8, STATS_FLAGS_PORT, "rx_align_errors" },
89 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
90 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
91 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
92 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
93/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
94 8, STATS_FLAGS_PORT, "rx_fragments" },
95 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
96 8, STATS_FLAGS_PORT, "rx_jabbers" },
97 { STATS_OFFSET32(no_buff_discard_hi),
98 8, STATS_FLAGS_BOTH, "rx_discards" },
99 { STATS_OFFSET32(mac_filter_discard),
100 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
101 { STATS_OFFSET32(xxoverflow_discard),
102 4, STATS_FLAGS_PORT, "rx_fw_discards" },
103 { STATS_OFFSET32(brb_drop_hi),
104 8, STATS_FLAGS_PORT, "rx_brb_discard" },
105 { STATS_OFFSET32(brb_truncate_hi),
106 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
107 { STATS_OFFSET32(pause_frames_received_hi),
108 8, STATS_FLAGS_PORT, "rx_pause_frames" },
109 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
110 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
111 { STATS_OFFSET32(nig_timer_max),
112 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
113/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
114 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
115 { STATS_OFFSET32(rx_skb_alloc_failed),
116 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
117 { STATS_OFFSET32(hw_csum_err),
118 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
119
120 { STATS_OFFSET32(total_bytes_transmitted_hi),
121 8, STATS_FLAGS_BOTH, "tx_bytes" },
122 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
123 8, STATS_FLAGS_PORT, "tx_error_bytes" },
124 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
125 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
126 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
127 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
128 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
129 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
130 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
131 8, STATS_FLAGS_PORT, "tx_mac_errors" },
132 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
133 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
134/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
135 8, STATS_FLAGS_PORT, "tx_single_collisions" },
136 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
137 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
138 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
139 8, STATS_FLAGS_PORT, "tx_deferred" },
140 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
141 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
142 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
143 8, STATS_FLAGS_PORT, "tx_late_collisions" },
144 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
145 8, STATS_FLAGS_PORT, "tx_total_collisions" },
146 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
147 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
148 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
149 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
150 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
151 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
152 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
153 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
154/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
155 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
156 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
157 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
158 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
159 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
160 { STATS_OFFSET32(pause_frames_sent_hi),
161 8, STATS_FLAGS_PORT, "tx_pause_frames" }
162};
163
164#define BNX2X_NUM_STATS ARRAY_SIZE(bnx2x_stats_arr)
27 165
28static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) 166static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
29{ 167{
@@ -45,14 +183,9 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
45 cmd->speed = bp->link_params.req_line_speed[cfg_idx]; 183 cmd->speed = bp->link_params.req_line_speed[cfg_idx];
46 cmd->duplex = bp->link_params.req_duplex[cfg_idx]; 184 cmd->duplex = bp->link_params.req_duplex[cfg_idx];
47 } 185 }
48 if (IS_MF(bp)) {
49 u16 vn_max_rate = ((bp->mf_config[BP_VN(bp)] &
50 FUNC_MF_CFG_MAX_BW_MASK) >> FUNC_MF_CFG_MAX_BW_SHIFT) *
51 100;
52 186
53 if (vn_max_rate < cmd->speed) 187 if (IS_MF(bp))
54 cmd->speed = vn_max_rate; 188 cmd->speed = bnx2x_get_mf_speed(bp);
55 }
56 189
57 if (bp->port.supported[cfg_idx] & SUPPORTED_TP) 190 if (bp->port.supported[cfg_idx] & SUPPORTED_TP)
58 cmd->port = PORT_TP; 191 cmd->port = PORT_TP;
@@ -87,18 +220,57 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
87{ 220{
88 struct bnx2x *bp = netdev_priv(dev); 221 struct bnx2x *bp = netdev_priv(dev);
89 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config; 222 u32 advertising, cfg_idx, old_multi_phy_config, new_multi_phy_config;
223 u32 speed;
90 224
91 if (IS_MF(bp)) 225 if (IS_MF_SD(bp))
92 return 0; 226 return 0;
93 227
94 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n" 228 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
95 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n" 229 " supported 0x%x advertising 0x%x speed %d speed_hi %d\n"
96 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n" 230 " duplex %d port %d phy_address %d transceiver %d\n"
97 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n", 231 " autoneg %d maxtxpkt %d maxrxpkt %d\n",
98 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed, 232 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
233 cmd->speed_hi,
99 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver, 234 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
100 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt); 235 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
101 236
237 speed = cmd->speed;
238 speed |= (cmd->speed_hi << 16);
239
240 if (IS_MF_SI(bp)) {
241 u32 param = 0;
242 u32 line_speed = bp->link_vars.line_speed;
243
244 /* use 10G if no link detected */
245 if (!line_speed)
246 line_speed = 10000;
247
248 if (bp->common.bc_ver < REQ_BC_VER_4_SET_MF_BW) {
 249				BNX2X_DEV_INFO("To set speed, BC %X or higher "
 250					       "is required; please upgrade BC\n",
 251					       REQ_BC_VER_4_SET_MF_BW);
252 return -EINVAL;
253 }
254 if (line_speed < speed) {
 255				BNX2X_DEV_INFO("New speed should be less than or "
 256					       "equal to the actual line speed\n");
257 return -EINVAL;
258 }
259 /* load old values */
260 param = bp->mf_config[BP_VN(bp)];
261
262 /* leave only MIN value */
263 param &= FUNC_MF_CFG_MIN_BW_MASK;
264
265 /* set new MAX value */
266 param |= (((speed * 100) / line_speed)
267 << FUNC_MF_CFG_MAX_BW_SHIFT)
268 & FUNC_MF_CFG_MAX_BW_MASK;
269
270 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW, param);
271 return 0;
272 }
273
102 cfg_idx = bnx2x_get_link_cfg_idx(bp); 274 cfg_idx = bnx2x_get_link_cfg_idx(bp);
103 old_multi_phy_config = bp->link_params.multi_phy_config; 275 old_multi_phy_config = bp->link_params.multi_phy_config;
104 switch (cmd->port) { 276 switch (cmd->port) {
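For the IS_MF_SI path above, the new maximum bandwidth is encoded as the ratio of the requested speed to the actual line speed, scaled by 100, and placed in the MAX_BW field of the mailbox parameter while the existing MIN_BW bits are preserved. Below is a stand-alone sketch of that arithmetic; the FUNC_MF_CFG_* values are assumptions copied in for illustration and the concrete numbers are made up, not taken from this patch.

#include <stdio.h>
#include <stdint.h>

/* assumed field layout for the sketch (the driver takes these from bnx2x_hsi.h) */
#define FUNC_MF_CFG_MIN_BW_MASK		0x00ff0000
#define FUNC_MF_CFG_MAX_BW_MASK		0xff000000
#define FUNC_MF_CFG_MAX_BW_SHIFT	24

int main(void)
{
	uint32_t line_speed = 10000;		/* actual line speed, Mb/s */
	uint32_t speed      = 2500;		/* requested speed, Mb/s */
	uint32_t param      = 0x00640000;	/* old config, MIN_BW bits set */

	/* keep only the MIN value, then fold in the new MAX value,
	 * mirroring the IS_MF_SI branch of bnx2x_set_settings() */
	param &= FUNC_MF_CFG_MIN_BW_MASK;
	param |= (((speed * 100) / line_speed) << FUNC_MF_CFG_MAX_BW_SHIFT)
		 & FUNC_MF_CFG_MAX_BW_MASK;

	/* prints 0x19640000: the MAX_BW field now holds 25 (2500 * 100 / 10000) */
	printf("mailbox param = 0x%08x\n", param);
	return 0;
}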
@@ -168,8 +340,6 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
168 340
169 } else { /* forced speed */ 341 } else { /* forced speed */
170 /* advertise the requested speed and duplex if supported */ 342 /* advertise the requested speed and duplex if supported */
171 u32 speed = cmd->speed;
172 speed |= (cmd->speed_hi << 16);
173 switch (speed) { 343 switch (speed) {
174 case SPEED_10: 344 case SPEED_10:
175 if (cmd->duplex == DUPLEX_FULL) { 345 if (cmd->duplex == DUPLEX_FULL) {
@@ -303,7 +473,7 @@ static int bnx2x_get_regs_len(struct net_device *dev)
303{ 473{
304 struct bnx2x *bp = netdev_priv(dev); 474 struct bnx2x *bp = netdev_priv(dev);
305 int regdump_len = 0; 475 int regdump_len = 0;
306 int i; 476 int i, j, k;
307 477
308 if (CHIP_IS_E1(bp)) { 478 if (CHIP_IS_E1(bp)) {
309 for (i = 0; i < REGS_COUNT; i++) 479 for (i = 0; i < REGS_COUNT; i++)
@@ -333,6 +503,15 @@ static int bnx2x_get_regs_len(struct net_device *dev)
333 if (IS_E2_ONLINE(wreg_addrs_e2[i].info)) 503 if (IS_E2_ONLINE(wreg_addrs_e2[i].info))
334 regdump_len += wreg_addrs_e2[i].size * 504 regdump_len += wreg_addrs_e2[i].size *
335 (1 + wreg_addrs_e2[i].read_regs_count); 505 (1 + wreg_addrs_e2[i].read_regs_count);
506
507 for (i = 0; i < PAGE_MODE_VALUES_E2; i++)
508 for (j = 0; j < PAGE_WRITE_REGS_E2; j++) {
509 for (k = 0; k < PAGE_READ_REGS_E2; k++)
510 if (IS_E2_ONLINE(page_read_regs_e2[k].
511 info))
512 regdump_len +=
513 page_read_regs_e2[k].size;
514 }
336 } 515 }
337 regdump_len *= 4; 516 regdump_len *= 4;
338 regdump_len += sizeof(struct dump_hdr); 517 regdump_len += sizeof(struct dump_hdr);
@@ -370,6 +549,12 @@ static void bnx2x_get_regs(struct net_device *dev,
370 if (!netif_running(bp->dev)) 549 if (!netif_running(bp->dev))
371 return; 550 return;
372 551
 552	/* Disable parity attentions while the dump runs, as reading
 553	 * never-written registers may cause false alarms. We will
 554	 * re-enable parity attentions right after the dump.
 555	 */
556 bnx2x_disable_blocks_parity(bp);
557
373 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1; 558 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
374 dump_hdr.dump_sign = dump_sign_all; 559 dump_hdr.dump_sign = dump_sign_all;
375 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR); 560 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
@@ -411,6 +596,10 @@ static void bnx2x_get_regs(struct net_device *dev,
411 596
412 bnx2x_read_pages_regs_e2(bp, p); 597 bnx2x_read_pages_regs_e2(bp, p);
413 } 598 }
599 /* Re-enable parity attentions */
600 bnx2x_clear_blocks_parity(bp);
601 if (CHIP_PARITY_ENABLED(bp))
602 bnx2x_enable_blocks_parity(bp);
414} 603}
415 604
416#define PHY_FW_VER_LEN 20 605#define PHY_FW_VER_LEN 20
@@ -1286,7 +1475,7 @@ static int bnx2x_test_registers(struct bnx2x *bp)
1286 1475
1287 save_val = REG_RD(bp, offset); 1476 save_val = REG_RD(bp, offset);
1288 1477
1289 REG_WR(bp, offset, (wr_val & mask)); 1478 REG_WR(bp, offset, wr_val & mask);
1290 1479
1291 val = REG_RD(bp, offset); 1480 val = REG_RD(bp, offset);
1292 1481
@@ -1499,8 +1688,15 @@ static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
1499 * updates that have been performed while interrupts were 1688 * updates that have been performed while interrupts were
1500 * disabled. 1689 * disabled.
1501 */ 1690 */
1502 if (bp->common.int_block == INT_BLOCK_IGU) 1691 if (bp->common.int_block == INT_BLOCK_IGU) {
 1692		/* Disable local BHs to prevent a deadlock between
 1693		 * sch_direct_xmit() and bnx2x_run_loopback() (calling
 1694		 * bnx2x_tx_int()), as both take netif_tx_lock().
 1695		 */
1696 local_bh_disable();
1503 bnx2x_tx_int(fp_tx); 1697 bnx2x_tx_int(fp_tx);
1698 local_bh_enable();
1699 }
1504 1700
1505 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb); 1701 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
1506 if (rx_idx != rx_start_idx + num_pkts) 1702 if (rx_idx != rx_start_idx + num_pkts)
@@ -1650,7 +1846,7 @@ static int bnx2x_test_intr(struct bnx2x *bp)
1650 config->hdr.client_id = bp->fp->cl_id; 1846 config->hdr.client_id = bp->fp->cl_id;
1651 config->hdr.reserved1 = 0; 1847 config->hdr.reserved1 = 0;
1652 1848
1653 bp->set_mac_pending++; 1849 bp->set_mac_pending = 1;
1654 smp_wmb(); 1850 smp_wmb();
1655 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0, 1851 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
1656 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 1852 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
@@ -1748,134 +1944,6 @@ static void bnx2x_self_test(struct net_device *dev,
1748#endif 1944#endif
1749} 1945}
1750 1946
1751static const struct {
1752 long offset;
1753 int size;
1754 u8 string[ETH_GSTRING_LEN];
1755} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
1756/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
1757 { Q_STATS_OFFSET32(error_bytes_received_hi),
1758 8, "[%d]: rx_error_bytes" },
1759 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
1760 8, "[%d]: rx_ucast_packets" },
1761 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
1762 8, "[%d]: rx_mcast_packets" },
1763 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
1764 8, "[%d]: rx_bcast_packets" },
1765 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
1766 { Q_STATS_OFFSET32(rx_err_discard_pkt),
1767 4, "[%d]: rx_phy_ip_err_discards"},
1768 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
1769 4, "[%d]: rx_skb_alloc_discard" },
1770 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
1771
1772/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
1773 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
1774 8, "[%d]: tx_ucast_packets" },
1775 { Q_STATS_OFFSET32(total_multicast_packets_transmitted_hi),
1776 8, "[%d]: tx_mcast_packets" },
1777 { Q_STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
1778 8, "[%d]: tx_bcast_packets" }
1779};
1780
1781static const struct {
1782 long offset;
1783 int size;
1784 u32 flags;
1785#define STATS_FLAGS_PORT 1
1786#define STATS_FLAGS_FUNC 2
1787#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
1788 u8 string[ETH_GSTRING_LEN];
1789} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
1790/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
1791 8, STATS_FLAGS_BOTH, "rx_bytes" },
1792 { STATS_OFFSET32(error_bytes_received_hi),
1793 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
1794 { STATS_OFFSET32(total_unicast_packets_received_hi),
1795 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
1796 { STATS_OFFSET32(total_multicast_packets_received_hi),
1797 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
1798 { STATS_OFFSET32(total_broadcast_packets_received_hi),
1799 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
1800 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
1801 8, STATS_FLAGS_PORT, "rx_crc_errors" },
1802 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
1803 8, STATS_FLAGS_PORT, "rx_align_errors" },
1804 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
1805 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
1806 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
1807 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
1808/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
1809 8, STATS_FLAGS_PORT, "rx_fragments" },
1810 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
1811 8, STATS_FLAGS_PORT, "rx_jabbers" },
1812 { STATS_OFFSET32(no_buff_discard_hi),
1813 8, STATS_FLAGS_BOTH, "rx_discards" },
1814 { STATS_OFFSET32(mac_filter_discard),
1815 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
1816 { STATS_OFFSET32(xxoverflow_discard),
1817 4, STATS_FLAGS_PORT, "rx_fw_discards" },
1818 { STATS_OFFSET32(brb_drop_hi),
1819 8, STATS_FLAGS_PORT, "rx_brb_discard" },
1820 { STATS_OFFSET32(brb_truncate_hi),
1821 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
1822 { STATS_OFFSET32(pause_frames_received_hi),
1823 8, STATS_FLAGS_PORT, "rx_pause_frames" },
1824 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
1825 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
1826 { STATS_OFFSET32(nig_timer_max),
1827 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
1828/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
1829 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
1830 { STATS_OFFSET32(rx_skb_alloc_failed),
1831 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
1832 { STATS_OFFSET32(hw_csum_err),
1833 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
1834
1835 { STATS_OFFSET32(total_bytes_transmitted_hi),
1836 8, STATS_FLAGS_BOTH, "tx_bytes" },
1837 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
1838 8, STATS_FLAGS_PORT, "tx_error_bytes" },
1839 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
1840 8, STATS_FLAGS_BOTH, "tx_ucast_packets" },
1841 { STATS_OFFSET32(total_multicast_packets_transmitted_hi),
1842 8, STATS_FLAGS_BOTH, "tx_mcast_packets" },
1843 { STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
1844 8, STATS_FLAGS_BOTH, "tx_bcast_packets" },
1845 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
1846 8, STATS_FLAGS_PORT, "tx_mac_errors" },
1847 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
1848 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
1849/* 30 */{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
1850 8, STATS_FLAGS_PORT, "tx_single_collisions" },
1851 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
1852 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
1853 { STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
1854 8, STATS_FLAGS_PORT, "tx_deferred" },
1855 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
1856 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
1857 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
1858 8, STATS_FLAGS_PORT, "tx_late_collisions" },
1859 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
1860 8, STATS_FLAGS_PORT, "tx_total_collisions" },
1861 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
1862 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
1863 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
1864 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
1865 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
1866 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
1867 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
1868 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
1869/* 40 */{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
1870 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
1871 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
1872 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
1873 { STATS_OFFSET32(etherstatspktsover1522octets_hi),
1874 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
1875 { STATS_OFFSET32(pause_frames_sent_hi),
1876 8, STATS_FLAGS_PORT, "tx_pause_frames" }
1877};
1878
1879#define IS_PORT_STAT(i) \ 1947#define IS_PORT_STAT(i) \
1880 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT) 1948 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
1881#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC) 1949#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
@@ -1890,7 +1958,8 @@ static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
1890 switch (stringset) { 1958 switch (stringset) {
1891 case ETH_SS_STATS: 1959 case ETH_SS_STATS:
1892 if (is_multi(bp)) { 1960 if (is_multi(bp)) {
1893 num_stats = BNX2X_NUM_Q_STATS * bp->num_queues; 1961 num_stats = BNX2X_NUM_STAT_QUEUES(bp) *
1962 BNX2X_NUM_Q_STATS;
1894 if (!IS_MF_MODE_STAT(bp)) 1963 if (!IS_MF_MODE_STAT(bp))
1895 num_stats += BNX2X_NUM_STATS; 1964 num_stats += BNX2X_NUM_STATS;
1896 } else { 1965 } else {
@@ -1916,15 +1985,25 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
1916{ 1985{
1917 struct bnx2x *bp = netdev_priv(dev); 1986 struct bnx2x *bp = netdev_priv(dev);
1918 int i, j, k; 1987 int i, j, k;
1988 char queue_name[MAX_QUEUE_NAME_LEN+1];
1919 1989
1920 switch (stringset) { 1990 switch (stringset) {
1921 case ETH_SS_STATS: 1991 case ETH_SS_STATS:
1922 if (is_multi(bp)) { 1992 if (is_multi(bp)) {
1923 k = 0; 1993 k = 0;
1924 for_each_queue(bp, i) { 1994 for_each_napi_queue(bp, i) {
1995 memset(queue_name, 0, sizeof(queue_name));
1996
1997 if (IS_FCOE_IDX(i))
1998 sprintf(queue_name, "fcoe");
1999 else
2000 sprintf(queue_name, "%d", i);
2001
1925 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) 2002 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
1926 sprintf(buf + (k + j)*ETH_GSTRING_LEN, 2003 snprintf(buf + (k + j)*ETH_GSTRING_LEN,
1927 bnx2x_q_stats_arr[j].string, i); 2004 ETH_GSTRING_LEN,
2005 bnx2x_q_stats_arr[j].string,
2006 queue_name);
1928 k += BNX2X_NUM_Q_STATS; 2007 k += BNX2X_NUM_Q_STATS;
1929 } 2008 }
1930 if (IS_MF_MODE_STAT(bp)) 2009 if (IS_MF_MODE_STAT(bp))
@@ -1958,7 +2037,7 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
1958 2037
1959 if (is_multi(bp)) { 2038 if (is_multi(bp)) {
1960 k = 0; 2039 k = 0;
1961 for_each_queue(bp, i) { 2040 for_each_napi_queue(bp, i) {
1962 hw_stats = (u32 *)&bp->fp[i].eth_q_stats; 2041 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
1963 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) { 2042 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
1964 if (bnx2x_q_stats_arr[j].size == 0) { 2043 if (bnx2x_q_stats_arr[j].size == 0) {
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 4cfd4e9b5586..6238d4f63989 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -434,7 +434,12 @@ struct shared_feat_cfg { /* NVRAM Offset */
434#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000 434#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_DISABLED 0x00000000
435#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002 435#define SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED 0x00000002
436 436
437#define SHARED_FEATURE_MF_MODE_DISABLED 0x00000100 437#define SHARED_FEAT_CFG_FORCE_SF_MODE_MASK 0x00000700
438#define SHARED_FEAT_CFG_FORCE_SF_MODE_SHIFT 8
439#define SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED 0x00000000
440#define SHARED_FEAT_CFG_FORCE_SF_MODE_FORCED_SF 0x00000100
441#define SHARED_FEAT_CFG_FORCE_SF_MODE_SPIO4 0x00000200
442#define SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT 0x00000300
438 443
439}; 444};
440 445
@@ -679,7 +684,7 @@ struct shm_dev_info { /* size */
679#define E1VN_MAX 1 684#define E1VN_MAX 1
680#define E1HVN_MAX 4 685#define E1HVN_MAX 4
681 686
682 687#define E2_VF_MAX 64
683/* This value (in milliseconds) determines the frequency of the driver 688/* This value (in milliseconds) determines the frequency of the driver
684 * issuing the PULSE message code. The firmware monitors this periodic 689 * issuing the PULSE message code. The firmware monitors this periodic
685 * pulse to determine when to switch to an OS-absent mode. */ 690 * pulse to determine when to switch to an OS-absent mode. */
@@ -815,6 +820,11 @@ struct drv_func_mb {
815#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000 820#define DRV_MSG_CODE_VRFY_SPECIFIC_PHY_OPT_MDL 0xa1000000
816#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234 821#define REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL 0x00050234
817 822
823#define DRV_MSG_CODE_DCBX_ADMIN_PMF_MSG 0xb0000000
824#define DRV_MSG_CODE_DCBX_PMF_DRV_OK 0xb2000000
825#define DRV_MSG_CODE_SET_MF_BW 0xe0000000
826#define REQ_BC_VER_4_SET_MF_BW 0x00060202
827#define DRV_MSG_CODE_SET_MF_BW_ACK 0xe1000000
818#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000 828#define BIOS_MSG_CODE_LIC_CHALLENGE 0xff010000
819#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000 829#define BIOS_MSG_CODE_LIC_RESPONSE 0xff020000
820#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000 830#define BIOS_MSG_CODE_VIRT_MAC_PRIM 0xff030000
@@ -888,6 +898,7 @@ struct drv_func_mb {
888 898
889 u32 drv_status; 899 u32 drv_status;
890#define DRV_STATUS_PMF 0x00000001 900#define DRV_STATUS_PMF 0x00000001
901#define DRV_STATUS_SET_MF_BW 0x00000004
891 902
892#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00 903#define DRV_STATUS_DCC_EVENT_MASK 0x0000ff00
893#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100 904#define DRV_STATUS_DCC_DISABLE_ENABLE_PF 0x00000100
@@ -896,6 +907,8 @@ struct drv_func_mb {
896#define DRV_STATUS_DCC_RESERVED1 0x00000800 907#define DRV_STATUS_DCC_RESERVED1 0x00000800
897#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000 908#define DRV_STATUS_DCC_SET_PROTOCOL 0x00001000
898#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000 909#define DRV_STATUS_DCC_SET_PRIORITY 0x00002000
910#define DRV_STATUS_DCBX_EVENT_MASK 0x000f0000
911#define DRV_STATUS_DCBX_NEGOTIATION_RESULTS 0x00010000
899 912
900 u32 virt_mac_upper; 913 u32 virt_mac_upper;
901#define VIRT_MAC_SIGN_MASK 0xffff0000 914#define VIRT_MAC_SIGN_MASK 0xffff0000
@@ -988,12 +1001,43 @@ struct func_mf_cfg {
988 1001
989}; 1002};
990 1003
1004/* This structure is not applicable and should not be accessed on 57711 */
1005struct func_ext_cfg {
1006 u32 func_cfg;
1007#define MACP_FUNC_CFG_FLAGS_MASK 0x000000FF
1008#define MACP_FUNC_CFG_FLAGS_SHIFT 0
1009#define MACP_FUNC_CFG_FLAGS_ENABLED 0x00000001
1010#define MACP_FUNC_CFG_FLAGS_ETHERNET 0x00000002
1011#define MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD 0x00000004
1012#define MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD 0x00000008
1013
1014 u32 iscsi_mac_addr_upper;
1015 u32 iscsi_mac_addr_lower;
1016
1017 u32 fcoe_mac_addr_upper;
1018 u32 fcoe_mac_addr_lower;
1019
1020 u32 fcoe_wwn_port_name_upper;
1021 u32 fcoe_wwn_port_name_lower;
1022
1023 u32 fcoe_wwn_node_name_upper;
1024 u32 fcoe_wwn_node_name_lower;
1025
1026 u32 preserve_data;
1027#define MF_FUNC_CFG_PRESERVE_L2_MAC (1<<0)
1028#define MF_FUNC_CFG_PRESERVE_ISCSI_MAC (1<<1)
1029#define MF_FUNC_CFG_PRESERVE_FCOE_MAC (1<<2)
1030#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_P (1<<3)
1031#define MF_FUNC_CFG_PRESERVE_FCOE_WWN_N (1<<4)
1032};
1033
991struct mf_cfg { 1034struct mf_cfg {
992 1035
993 struct shared_mf_cfg shared_mf_config; 1036 struct shared_mf_cfg shared_mf_config;
994 struct port_mf_cfg port_mf_config[PORT_MAX]; 1037 struct port_mf_cfg port_mf_config[PORT_MAX];
995 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX]; 1038 struct func_mf_cfg func_mf_config[E1H_FUNC_MAX];
996 1039
1040 struct func_ext_cfg func_ext_config[E1H_FUNC_MAX];
997}; 1041};
998 1042
999 1043
@@ -1049,6 +1093,251 @@ struct fw_flr_mb {
1049 struct fw_flr_ack ack; 1093 struct fw_flr_ack ack;
1050}; 1094};
1051 1095
 1096/**** SUPPORT FOR SHMEM ARRAYS ***
 1097 * The SHMEM HSI is aligned on 32 bit boundaries, which makes it difficult to
 1098 * define arrays with storage types smaller than unsigned dwords.
 1099 * The macros below add generic support for SHMEM arrays with numeric elements
 1100 * that can span 2, 4, 8 or 16 bits. The array's underlying type is a 32 bit dword
 1101 * array with individual bit-field elements accessed using shifts and masks.
1102 *
1103 */
1104
1105/* eb is the bitwidth of a single element */
1106#define SHMEM_ARRAY_MASK(eb) ((1<<(eb))-1)
1107#define SHMEM_ARRAY_ENTRY(i, eb) ((i)/(32/(eb)))
1108
 1109/* the bit-position macro allows the user to flip the order of the array's
 1110 * elements on a per byte or word boundary.
 1111 *
 1112 * example: an array with 8 entries, each 4 bits wide. This array will fit into
 1113 * a single dword. The diagrams below show the array order of the nibbles.
 1114 *
 1115 * SHMEM_ARRAY_BITPOS(i, 4, 4) defines the standard ordering:
1116 *
1117 * | | | |
1118 * 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 |
1119 * | | | |
1120 *
1121 * SHMEM_ARRAY_BITPOS(i, 4, 8) defines a flip ordering per byte:
1122 *
1123 * | | | |
1124 * 1 | 0 | 3 | 2 | 5 | 4 | 7 | 6 |
1125 * | | | |
1126 *
1127 * SHMEM_ARRAY_BITPOS(i, 4, 16) defines a flip ordering per word:
1128 *
1129 * | | | |
1130 * 3 | 2 | 1 | 0 | 7 | 6 | 5 | 4 |
1131 * | | | |
1132 */
1133#define SHMEM_ARRAY_BITPOS(i, eb, fb) \
1134 ((((32/(fb)) - 1 - ((i)/((fb)/(eb))) % (32/(fb))) * (fb)) + \
1135 (((i)%((fb)/(eb))) * (eb)))
1136
1137#define SHMEM_ARRAY_GET(a, i, eb, fb) \
1138 ((a[SHMEM_ARRAY_ENTRY(i, eb)] >> SHMEM_ARRAY_BITPOS(i, eb, fb)) & \
1139 SHMEM_ARRAY_MASK(eb))
1140
1141#define SHMEM_ARRAY_SET(a, i, eb, fb, val) \
1142do { \
1143 a[SHMEM_ARRAY_ENTRY(i, eb)] &= ~(SHMEM_ARRAY_MASK(eb) << \
1144 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
1145 a[SHMEM_ARRAY_ENTRY(i, eb)] |= (((val) & SHMEM_ARRAY_MASK(eb)) << \
1146 SHMEM_ARRAY_BITPOS(i, eb, fb)); \
1147} while (0)
1148
1149
1150/****START OF DCBX STRUCTURES DECLARATIONS****/
1151#define DCBX_MAX_NUM_PRI_PG_ENTRIES 8
1152#define DCBX_PRI_PG_BITWIDTH 4
1153#define DCBX_PRI_PG_FBITS 8
1154#define DCBX_PRI_PG_GET(a, i) \
1155 SHMEM_ARRAY_GET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS)
1156#define DCBX_PRI_PG_SET(a, i, val) \
1157 SHMEM_ARRAY_SET(a, i, DCBX_PRI_PG_BITWIDTH, DCBX_PRI_PG_FBITS, val)
1158#define DCBX_MAX_NUM_PG_BW_ENTRIES 8
1159#define DCBX_BW_PG_BITWIDTH 8
1160#define DCBX_PG_BW_GET(a, i) \
1161 SHMEM_ARRAY_GET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH)
1162#define DCBX_PG_BW_SET(a, i, val) \
1163 SHMEM_ARRAY_SET(a, i, DCBX_BW_PG_BITWIDTH, DCBX_BW_PG_BITWIDTH, val)
1164#define DCBX_STRICT_PRI_PG 15
1165#define DCBX_MAX_APP_PROTOCOL 16
1166#define FCOE_APP_IDX 0
1167#define ISCSI_APP_IDX 1
1168#define PREDEFINED_APP_IDX_MAX 2
1169
1170struct dcbx_ets_feature {
1171 u32 enabled;
1172 u32 pg_bw_tbl[2];
1173 u32 pri_pg_tbl[1];
1174};
1175
1176struct dcbx_pfc_feature {
1177#ifdef __BIG_ENDIAN
1178 u8 pri_en_bitmap;
1179#define DCBX_PFC_PRI_0 0x01
1180#define DCBX_PFC_PRI_1 0x02
1181#define DCBX_PFC_PRI_2 0x04
1182#define DCBX_PFC_PRI_3 0x08
1183#define DCBX_PFC_PRI_4 0x10
1184#define DCBX_PFC_PRI_5 0x20
1185#define DCBX_PFC_PRI_6 0x40
1186#define DCBX_PFC_PRI_7 0x80
1187 u8 pfc_caps;
1188 u8 reserved;
1189 u8 enabled;
1190#elif defined(__LITTLE_ENDIAN)
1191 u8 enabled;
1192 u8 reserved;
1193 u8 pfc_caps;
1194 u8 pri_en_bitmap;
1195#define DCBX_PFC_PRI_0 0x01
1196#define DCBX_PFC_PRI_1 0x02
1197#define DCBX_PFC_PRI_2 0x04
1198#define DCBX_PFC_PRI_3 0x08
1199#define DCBX_PFC_PRI_4 0x10
1200#define DCBX_PFC_PRI_5 0x20
1201#define DCBX_PFC_PRI_6 0x40
1202#define DCBX_PFC_PRI_7 0x80
1203#endif
1204};
1205
1206struct dcbx_app_priority_entry {
1207#ifdef __BIG_ENDIAN
1208 u16 app_id;
1209 u8 pri_bitmap;
1210 u8 appBitfield;
1211#define DCBX_APP_ENTRY_VALID 0x01
1212#define DCBX_APP_ENTRY_SF_MASK 0x30
1213#define DCBX_APP_ENTRY_SF_SHIFT 4
1214#define DCBX_APP_SF_ETH_TYPE 0x10
1215#define DCBX_APP_SF_PORT 0x20
1216#elif defined(__LITTLE_ENDIAN)
1217 u8 appBitfield;
1218#define DCBX_APP_ENTRY_VALID 0x01
1219#define DCBX_APP_ENTRY_SF_MASK 0x30
1220#define DCBX_APP_ENTRY_SF_SHIFT 4
1221#define DCBX_APP_SF_ETH_TYPE 0x10
1222#define DCBX_APP_SF_PORT 0x20
1223 u8 pri_bitmap;
1224 u16 app_id;
1225#endif
1226};
1227
1228struct dcbx_app_priority_feature {
1229#ifdef __BIG_ENDIAN
1230 u8 reserved;
1231 u8 default_pri;
1232 u8 tc_supported;
1233 u8 enabled;
1234#elif defined(__LITTLE_ENDIAN)
1235 u8 enabled;
1236 u8 tc_supported;
1237 u8 default_pri;
1238 u8 reserved;
1239#endif
1240 struct dcbx_app_priority_entry app_pri_tbl[DCBX_MAX_APP_PROTOCOL];
1241};
1242
1243struct dcbx_features {
1244 struct dcbx_ets_feature ets;
1245 struct dcbx_pfc_feature pfc;
1246 struct dcbx_app_priority_feature app;
1247};
1248
1249struct lldp_params {
1250#ifdef __BIG_ENDIAN
1251 u8 msg_fast_tx_interval;
1252 u8 msg_tx_hold;
1253 u8 msg_tx_interval;
1254 u8 admin_status;
1255#define LLDP_TX_ONLY 0x01
1256#define LLDP_RX_ONLY 0x02
1257#define LLDP_TX_RX 0x03
1258#define LLDP_DISABLED 0x04
1259 u8 reserved1;
1260 u8 tx_fast;
1261 u8 tx_crd_max;
1262 u8 tx_crd;
1263#elif defined(__LITTLE_ENDIAN)
1264 u8 admin_status;
1265#define LLDP_TX_ONLY 0x01
1266#define LLDP_RX_ONLY 0x02
1267#define LLDP_TX_RX 0x03
1268#define LLDP_DISABLED 0x04
1269 u8 msg_tx_interval;
1270 u8 msg_tx_hold;
1271 u8 msg_fast_tx_interval;
1272 u8 tx_crd;
1273 u8 tx_crd_max;
1274 u8 tx_fast;
1275 u8 reserved1;
1276#endif
1277#define REM_CHASSIS_ID_STAT_LEN 4
1278#define REM_PORT_ID_STAT_LEN 4
1279 u32 peer_chassis_id[REM_CHASSIS_ID_STAT_LEN];
1280 u32 peer_port_id[REM_PORT_ID_STAT_LEN];
1281};
1282
1283struct lldp_dcbx_stat {
1284#define LOCAL_CHASSIS_ID_STAT_LEN 2
1285#define LOCAL_PORT_ID_STAT_LEN 2
1286 u32 local_chassis_id[LOCAL_CHASSIS_ID_STAT_LEN];
1287 u32 local_port_id[LOCAL_PORT_ID_STAT_LEN];
1288 u32 num_tx_dcbx_pkts;
1289 u32 num_rx_dcbx_pkts;
1290};
1291
1292struct lldp_admin_mib {
1293 u32 ver_cfg_flags;
1294#define DCBX_ETS_CONFIG_TX_ENABLED 0x00000001
1295#define DCBX_PFC_CONFIG_TX_ENABLED 0x00000002
1296#define DCBX_APP_CONFIG_TX_ENABLED 0x00000004
1297#define DCBX_ETS_RECO_TX_ENABLED 0x00000008
1298#define DCBX_ETS_RECO_VALID 0x00000010
1299#define DCBX_ETS_WILLING 0x00000020
1300#define DCBX_PFC_WILLING 0x00000040
1301#define DCBX_APP_WILLING 0x00000080
1302#define DCBX_VERSION_CEE 0x00000100
1303#define DCBX_VERSION_IEEE 0x00000200
1304#define DCBX_DCBX_ENABLED 0x00000400
1305#define DCBX_CEE_VERSION_MASK 0x0000f000
1306#define DCBX_CEE_VERSION_SHIFT 12
1307#define DCBX_CEE_MAX_VERSION_MASK 0x000f0000
1308#define DCBX_CEE_MAX_VERSION_SHIFT 16
1309 struct dcbx_features features;
1310};
1311
1312struct lldp_remote_mib {
1313 u32 prefix_seq_num;
1314 u32 flags;
1315#define DCBX_ETS_TLV_RX 0x00000001
1316#define DCBX_PFC_TLV_RX 0x00000002
1317#define DCBX_APP_TLV_RX 0x00000004
1318#define DCBX_ETS_RX_ERROR 0x00000010
1319#define DCBX_PFC_RX_ERROR 0x00000020
1320#define DCBX_APP_RX_ERROR 0x00000040
1321#define DCBX_ETS_REM_WILLING 0x00000100
1322#define DCBX_PFC_REM_WILLING 0x00000200
1323#define DCBX_APP_REM_WILLING 0x00000400
1324#define DCBX_REMOTE_ETS_RECO_VALID 0x00001000
1325 struct dcbx_features features;
1326 u32 suffix_seq_num;
1327};
1328
1329struct lldp_local_mib {
1330 u32 prefix_seq_num;
1331 u32 error;
1332#define DCBX_LOCAL_ETS_ERROR 0x00000001
1333#define DCBX_LOCAL_PFC_ERROR 0x00000002
1334#define DCBX_LOCAL_APP_ERROR 0x00000004
1335#define DCBX_LOCAL_PFC_MISMATCH 0x00000010
1336#define DCBX_LOCAL_APP_MISMATCH 0x00000020
1337 struct dcbx_features features;
1338 u32 suffix_seq_num;
1339};
1340/***END OF DCBX STRUCTURES DECLARATIONS***/
1052 1341
1053struct shmem2_region { 1342struct shmem2_region {
1054 1343
@@ -1072,7 +1361,12 @@ struct shmem2_region {
1072#define SHMEM_MF_CFG_ADDR_NONE 0x00000000 1361#define SHMEM_MF_CFG_ADDR_NONE 0x00000000
1073 1362
1074 struct fw_flr_mb flr_mb; 1363 struct fw_flr_mb flr_mb;
1075 u32 reserved[3]; 1364 u32 dcbx_lldp_params_offset;
1365#define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000
1366 u32 dcbx_neg_res_offset;
1367#define SHMEM_DCBX_NEG_RES_NONE 0x00000000
1368 u32 dcbx_remote_mib_offset;
1369#define SHMEM_DCBX_REMOTE_MIB_NONE 0x00000000
1076 /* 1370 /*
1077 * The other shmemX_base_addr holds the other path's shmem address 1371 * The other shmemX_base_addr holds the other path's shmem address
1078 * required for example in case of common phy init, or for path1 to know 1372 * required for example in case of common phy init, or for path1 to know
@@ -1081,6 +1375,10 @@ struct shmem2_region {
1081 */ 1375 */
1082 u32 other_shmem_base_addr; 1376 u32 other_shmem_base_addr;
1083 u32 other_shmem2_base_addr; 1377 u32 other_shmem2_base_addr;
1378 u32 reserved1[E2_VF_MAX / 32];
1379 u32 reserved2[E2_FUNC_MAX][E2_VF_MAX / 32];
1380 u32 dcbx_lldp_dcbx_stat_offset;
1381#define SHMEM_LLDP_DCBX_STAT_NONE 0x00000000
1084}; 1382};
1085 1383
1086 1384
@@ -1534,8 +1832,8 @@ struct host_func_stats {
1534 1832
1535 1833
1536#define BCM_5710_FW_MAJOR_VERSION 6 1834#define BCM_5710_FW_MAJOR_VERSION 6
1537#define BCM_5710_FW_MINOR_VERSION 0 1835#define BCM_5710_FW_MINOR_VERSION 2
1538#define BCM_5710_FW_REVISION_VERSION 34 1836#define BCM_5710_FW_REVISION_VERSION 5
1539#define BCM_5710_FW_ENGINEERING_VERSION 0 1837#define BCM_5710_FW_ENGINEERING_VERSION 0
1540#define BCM_5710_FW_COMPILE_FLAGS 1 1838#define BCM_5710_FW_COMPILE_FLAGS 1
1541 1839
@@ -2983,6 +3281,25 @@ struct fairness_vars_per_vn {
2983 3281
2984 3282
2985/* 3283/*
3284 * The data for flow control configuration
3285 */
3286struct flow_control_configuration {
3287 struct priority_cos
3288 traffic_type_to_priority_cos[MAX_PFC_TRAFFIC_TYPES];
3289#if defined(__BIG_ENDIAN)
3290 u16 reserved1;
3291 u8 dcb_version;
3292 u8 dcb_enabled;
3293#elif defined(__LITTLE_ENDIAN)
3294 u8 dcb_enabled;
3295 u8 dcb_version;
3296 u16 reserved1;
3297#endif
3298 u32 reserved2;
3299};
3300
3301
3302/*
2986 * FW version stored in the Xstorm RAM 3303 * FW version stored in the Xstorm RAM
2987 */ 3304 */
2988struct fw_version { 3305struct fw_version {
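
The new shmem2_region fields publish the LLDP/DCBX blocks as offsets, with the *_NONE value (0x00000000) meaning the MCP did not provide that block. A minimal standalone sketch of the intended presence check (the helper name and the base+offset arithmetic are assumptions for illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

#define SHMEM_LLDP_DCBX_PARAMS_NONE 0x00000000

/* Hypothetical helper: resolve the LLDP/DCBX parameter block advertised in
 * shmem2, treating the *_NONE sentinel as "not provided". */
static uint32_t dcbx_lldp_params_addr(uint32_t shmem2_base,
				      uint32_t dcbx_lldp_params_offset)
{
	if (dcbx_lldp_params_offset == SHMEM_LLDP_DCBX_PARAMS_NONE)
		return 0;	/* MCP did not publish DCBX data - skip it */
	return shmem2_base + dcbx_lldp_params_offset;
}

int main(void)
{
	/* made-up values, for illustration only */
	printf("0x%x\n", (unsigned)dcbx_lldp_params_addr(0xaf900, 0x160));
	printf("0x%x\n", (unsigned)dcbx_lldp_params_addr(0xaf900,
					SHMEM_LLDP_DCBX_PARAMS_NONE));
	return 0;
}
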
diff --git a/drivers/net/bnx2x/bnx2x_init.h b/drivers/net/bnx2x/bnx2x_init.h
index a9d54874a559..5a268e9a0895 100644
--- a/drivers/net/bnx2x/bnx2x_init.h
+++ b/drivers/net/bnx2x/bnx2x_init.h
@@ -192,5 +192,225 @@ struct src_ent {
192 u64 next; 192 u64 next;
193}; 193};
194 194
195/****************************************************************************
196* Parity configuration
197****************************************************************************/
198#define BLOCK_PRTY_INFO(block, en_mask, m1, m1h, m2) \
199{ \
200 block##_REG_##block##_PRTY_MASK, \
201 block##_REG_##block##_PRTY_STS_CLR, \
202 en_mask, {m1, m1h, m2}, #block \
203}
204
205#define BLOCK_PRTY_INFO_0(block, en_mask, m1, m1h, m2) \
206{ \
207 block##_REG_##block##_PRTY_MASK_0, \
208 block##_REG_##block##_PRTY_STS_CLR_0, \
209 en_mask, {m1, m1h, m2}, #block"_0" \
210}
211
212#define BLOCK_PRTY_INFO_1(block, en_mask, m1, m1h, m2) \
213{ \
214 block##_REG_##block##_PRTY_MASK_1, \
215 block##_REG_##block##_PRTY_STS_CLR_1, \
216 en_mask, {m1, m1h, m2}, #block"_1" \
217}
218
219static const struct {
220 u32 mask_addr;
221 u32 sts_clr_addr;
222 u32 en_mask; /* Mask to enable parity attentions */
223 struct {
224 u32 e1; /* 57710 */
225 u32 e1h; /* 57711 */
226 u32 e2; /* 57712 */
227 } reg_mask; /* Register mask (all valid bits) */
228 char name[7]; /* Block's longest name is 6 characters long
229 * (name + suffix)
230 */
231} bnx2x_blocks_parity_data[] = {
232 /* bit 19 masked */
233 /* REG_WR(bp, PXP_REG_PXP_PRTY_MASK, 0x80000); */
234 /* bit 5,18,20-31 */
235 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_0, 0xfff40020); */
236 /* bit 5 */
237 /* REG_WR(bp, PXP2_REG_PXP2_PRTY_MASK_1, 0x20); */
238 /* REG_WR(bp, HC_REG_HC_PRTY_MASK, 0x0); */
239 /* REG_WR(bp, MISC_REG_MISC_PRTY_MASK, 0x0); */
240
241 /* Block IGU, MISC, PXP and PXP2 parity errors as long as we don't
242 * want to handle "system kill" flow at the moment.
243 */
244 BLOCK_PRTY_INFO(PXP, 0x3ffffff, 0x3ffffff, 0x3ffffff, 0x3ffffff),
245 BLOCK_PRTY_INFO_0(PXP2, 0xffffffff, 0xffffffff, 0xffffffff, 0xffffffff),
246 BLOCK_PRTY_INFO_1(PXP2, 0x7ff, 0x7f, 0x7f, 0x7ff),
247 BLOCK_PRTY_INFO(HC, 0x7, 0x7, 0x7, 0),
248 BLOCK_PRTY_INFO(IGU, 0x7ff, 0, 0, 0x7ff),
249 BLOCK_PRTY_INFO(MISC, 0x1, 0x1, 0x1, 0x1),
250 BLOCK_PRTY_INFO(QM, 0, 0x1ff, 0xfff, 0xfff),
251 BLOCK_PRTY_INFO(DORQ, 0, 0x3, 0x3, 0x3),
252 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK,
253 GRCBASE_UPB + PB_REG_PB_PRTY_STS_CLR, 0,
254 {0xf, 0xf, 0xf}, "UPB"},
255 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK,
256 GRCBASE_XPB + PB_REG_PB_PRTY_STS_CLR, 0,
257 {0xf, 0xf, 0xf}, "XPB"},
258 BLOCK_PRTY_INFO(SRC, 0x4, 0x7, 0x7, 0x7),
259 BLOCK_PRTY_INFO(CDU, 0, 0x1f, 0x1f, 0x1f),
260 BLOCK_PRTY_INFO(CFC, 0, 0xf, 0xf, 0xf),
261 BLOCK_PRTY_INFO(DBG, 0, 0x1, 0x1, 0x1),
262 BLOCK_PRTY_INFO(DMAE, 0, 0xf, 0xf, 0xf),
263 BLOCK_PRTY_INFO(BRB1, 0, 0xf, 0xf, 0xf),
264 BLOCK_PRTY_INFO(PRS, (1<<6), 0xff, 0xff, 0xff),
265 BLOCK_PRTY_INFO(TSDM, 0x18, 0x7ff, 0x7ff, 0x7ff),
266 BLOCK_PRTY_INFO(CSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
267 BLOCK_PRTY_INFO(USDM, 0x38, 0x7ff, 0x7ff, 0x7ff),
268 BLOCK_PRTY_INFO(XSDM, 0x8, 0x7ff, 0x7ff, 0x7ff),
269 BLOCK_PRTY_INFO_0(TSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
270 BLOCK_PRTY_INFO_1(TSEM, 0, 0x3, 0x1f, 0x3f),
271 BLOCK_PRTY_INFO_0(USEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
272 BLOCK_PRTY_INFO_1(USEM, 0, 0x3, 0x1f, 0x1f),
273 BLOCK_PRTY_INFO_0(CSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
274 BLOCK_PRTY_INFO_1(CSEM, 0, 0x3, 0x1f, 0x1f),
275 BLOCK_PRTY_INFO_0(XSEM, 0, 0xffffffff, 0xffffffff, 0xffffffff),
276 BLOCK_PRTY_INFO_1(XSEM, 0, 0x3, 0x1f, 0x3f),
277};
278
279
280/* [28] MCP Latched rom_parity
281 * [29] MCP Latched ump_rx_parity
282 * [30] MCP Latched ump_tx_parity
283 * [31] MCP Latched scpad_parity
284 */
285#define MISC_AEU_ENABLE_MCP_PRTY_BITS \
286 (AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY | \
287 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY | \
288 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY | \
289 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY)
290
291/* Below registers control the MCP parity attention output. When
292 * MISC_AEU_ENABLE_MCP_PRTY_BITS are set - attentions are
293 * enabled, when cleared - disabled.
294 */
295static const u32 mcp_attn_ctl_regs[] = {
296 MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0,
297 MISC_REG_AEU_ENABLE4_NIG_0,
298 MISC_REG_AEU_ENABLE4_PXP_0,
299 MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0,
300 MISC_REG_AEU_ENABLE4_NIG_1,
301 MISC_REG_AEU_ENABLE4_PXP_1
302};
303
304static inline void bnx2x_set_mcp_parity(struct bnx2x *bp, u8 enable)
305{
306 int i;
307 u32 reg_val;
308
309 for (i = 0; i < ARRAY_SIZE(mcp_attn_ctl_regs); i++) {
310 reg_val = REG_RD(bp, mcp_attn_ctl_regs[i]);
311
312 if (enable)
313 reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS;
314 else
315 reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS;
316
317 REG_WR(bp, mcp_attn_ctl_regs[i], reg_val);
318 }
319}
320
321static inline u32 bnx2x_parity_reg_mask(struct bnx2x *bp, int idx)
322{
323 if (CHIP_IS_E1(bp))
324 return bnx2x_blocks_parity_data[idx].reg_mask.e1;
325 else if (CHIP_IS_E1H(bp))
326 return bnx2x_blocks_parity_data[idx].reg_mask.e1h;
327 else
328 return bnx2x_blocks_parity_data[idx].reg_mask.e2;
329}
330
331static inline void bnx2x_disable_blocks_parity(struct bnx2x *bp)
332{
333 int i;
334
335 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
336 u32 dis_mask = bnx2x_parity_reg_mask(bp, i);
337
338 if (dis_mask) {
339 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
340 dis_mask);
341 DP(NETIF_MSG_HW, "Setting parity mask "
342 "for %s to\t\t0x%x\n",
343 bnx2x_blocks_parity_data[i].name, dis_mask);
344 }
345 }
346
347 /* Disable MCP parity attentions */
348 bnx2x_set_mcp_parity(bp, false);
349}
350
351/**
352 * Clear the parity error status registers.
353 */
354static inline void bnx2x_clear_blocks_parity(struct bnx2x *bp)
355{
356 int i;
357 u32 reg_val, mcp_aeu_bits =
358 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY |
359 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY |
360 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY |
361 AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY;
362
363 /* Clear SEM_FAST parities */
364 REG_WR(bp, XSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
365 REG_WR(bp, TSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
366 REG_WR(bp, USEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
367 REG_WR(bp, CSEM_REG_FAST_MEMORY + SEM_FAST_REG_PARITY_RST, 0x1);
368
369 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
370 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
371
372 if (reg_mask) {
373 reg_val = REG_RD(bp, bnx2x_blocks_parity_data[i].
374 sts_clr_addr);
375 if (reg_val & reg_mask)
376 DP(NETIF_MSG_HW,
377 "Parity errors in %s: 0x%x\n",
378 bnx2x_blocks_parity_data[i].name,
379 reg_val & reg_mask);
380 }
381 }
382
383 /* Check if there were parity attentions in MCP */
384 reg_val = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_MCP);
385 if (reg_val & mcp_aeu_bits)
386 DP(NETIF_MSG_HW, "Parity error in MCP: 0x%x\n",
387 reg_val & mcp_aeu_bits);
388
389 /* Clear parity attentions in MCP:
390 * [7] clears Latched rom_parity
391 * [8] clears Latched ump_rx_parity
392 * [9] clears Latched ump_tx_parity
393 * [10] clears Latched scpad_parity (both ports)
394 */
395 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x780);
396}
397
398static inline void bnx2x_enable_blocks_parity(struct bnx2x *bp)
399{
400 int i;
401
402 for (i = 0; i < ARRAY_SIZE(bnx2x_blocks_parity_data); i++) {
403 u32 reg_mask = bnx2x_parity_reg_mask(bp, i);
404
405 if (reg_mask)
406 REG_WR(bp, bnx2x_blocks_parity_data[i].mask_addr,
407 bnx2x_blocks_parity_data[i].en_mask & reg_mask);
408 }
409
410 /* Enable MCP parity attentions */
411 bnx2x_set_mcp_parity(bp, true);
412}
413
414
195#endif /* BNX2X_INIT_H */ 415#endif /* BNX2X_INIT_H */
196 416
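
The parity helpers above all key off the same table: each block entry carries three "valid bits" masks and the chip revision (E1/E1H/E2) selects which one applies, a zero mask meaning the block is skipped. A standalone model of that lookup, assuming only what the struct in this hunk declares (the enum and helper names are illustrative, not kernel code):

#include <stdint.h>
#include <stdio.h>

enum chip { E1, E1H, E2 };	/* 57710 / 57711 / 57712 */

struct block_parity {
	const char *name;
	uint32_t e1, e1h, e2;	/* valid parity bits per chip */
};

/* Mirrors bnx2x_parity_reg_mask(): pick the mask for the running chip. */
static uint32_t parity_reg_mask(const struct block_parity *b, enum chip c)
{
	switch (c) {
	case E1:  return b->e1;
	case E1H: return b->e1h;
	default:  return b->e2;
	}
}

int main(void)
{
	/* values copied from the QM row of bnx2x_blocks_parity_data[] */
	struct block_parity qm = { "QM", 0x1ff, 0xfff, 0xfff };
	int c;

	for (c = E1; c <= E2; c++)
		printf("%s mask on chip %d: 0x%x\n", qm.name, c,
		       (unsigned)parity_reg_mask(&qm, (enum chip)c));
	return 0;
}
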
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 580919619252..43b0de24f391 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -164,7 +164,8 @@
164#define EDC_MODE_PASSIVE_DAC 0x0055 164#define EDC_MODE_PASSIVE_DAC 0x0055
165 165
166 166
167 167#define ETS_BW_LIMIT_CREDIT_UPPER_BOUND (0x5000)
168#define ETS_BW_LIMIT_CREDIT_WEIGHT (0x5000)
168/**********************************************************/ 169/**********************************************************/
169/* INTERFACE */ 170/* INTERFACE */
170/**********************************************************/ 171/**********************************************************/
@@ -205,6 +206,270 @@ static u32 bnx2x_bits_dis(struct bnx2x *bp, u32 reg, u32 bits)
205 return val; 206 return val;
206} 207}
207 208
209/******************************************************************/
210/* ETS section */
211/******************************************************************/
212void bnx2x_ets_disabled(struct link_params *params)
213{
214 /* ETS disabled configuration*/
215 struct bnx2x *bp = params->bp;
216
217 DP(NETIF_MSG_LINK, "ETS disabled configuration\n");
218
219 /**
 220 * mapping from entry priority to client number (0,1,2 - debug and
 221 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
222 * 3bits client num.
223 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
224 * cos1-100 cos0-011 dbg1-010 dbg0-001 MCP-000
225 */
226
227 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, 0x4688);
228 /**
229 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
230 * as strict. Bits 0,1,2 - debug and management entries, 3 -
231 * COS0 entry, 4 - COS1 entry.
232 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
233 * bit4 bit3 bit2 bit1 bit0
234 * MCP and debug are strict
235 */
236
237 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
238 /* defines which entries (clients) are subjected to WFQ arbitration */
239 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0);
240 /**
241 * For strict priority entries defines the number of consecutive
242 * slots for the highest priority.
243 */
244 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
245 /**
246 * mapping between the CREDIT_WEIGHT registers and actual client
247 * numbers
248 */
249 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0);
250 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, 0);
251 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, 0);
252
253 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0, 0);
254 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1, 0);
255 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, 0);
256 /* ETS mode disable */
257 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
258 /**
259 * If ETS mode is enabled (there is no strict priority) defines a WFQ
260 * weight for COS0/COS1.
261 */
262 REG_WR(bp, PBF_REG_COS0_WEIGHT, 0x2710);
263 REG_WR(bp, PBF_REG_COS1_WEIGHT, 0x2710);
264 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter */
265 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x989680);
266 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x989680);
267 /* Defines the number of consecutive slots for the strict priority */
268 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
269}
270
271void bnx2x_ets_bw_limit_common(const struct link_params *params)
272{
 273 /* ETS enabled BW limit configuration */
274 struct bnx2x *bp = params->bp;
275 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
276 /**
277 * defines which entries (clients) are subjected to WFQ arbitration
278 * COS0 0x8
279 * COS1 0x10
280 */
281 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_SUBJECT2WFQ, 0x18);
282 /**
283 * mapping between the ARB_CREDIT_WEIGHT registers and actual
284 * client numbers (WEIGHT_0 does not actually have to represent
285 * client 0)
286 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
287 * cos1-001 cos0-000 dbg1-100 dbg0-011 MCP-010
288 */
289 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_CREDIT_MAP, 0x111A);
290
291 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_0,
292 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
293 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_UPPER_BOUND_1,
294 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
295
296 /* ETS mode enabled*/
297 REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
298
299 /* Defines the number of consecutive slots for the strict priority */
300 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);
301 /**
302 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
303 * as strict. Bits 0,1,2 - debug and management entries, 3 - COS0
304 * entry, 4 - COS1 entry.
 305 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
306 * bit4 bit3 bit2 bit1 bit0
307 * MCP and debug are strict
308 */
309 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x7);
310
311 /* Upper bound that COS0_WEIGHT can reach in the WFQ arbiter.*/
312 REG_WR(bp, PBF_REG_COS0_UPPER_BOUND,
313 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
314 REG_WR(bp, PBF_REG_COS1_UPPER_BOUND,
315 ETS_BW_LIMIT_CREDIT_UPPER_BOUND);
316}
317
318void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
319 const u32 cos1_bw)
320{
321 /* ETS disabled configuration*/
322 struct bnx2x *bp = params->bp;
323 const u32 total_bw = cos0_bw + cos1_bw;
324 u32 cos0_credit_weight = 0;
325 u32 cos1_credit_weight = 0;
326
327 DP(NETIF_MSG_LINK, "ETS enabled BW limit configuration\n");
328
329 if ((0 == total_bw) ||
330 (0 == cos0_bw) ||
331 (0 == cos1_bw)) {
332 DP(NETIF_MSG_LINK,
333 "bnx2x_ets_bw_limit: Total BW can't be zero\n");
334 return;
335 }
336
337 cos0_credit_weight = (cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
338 total_bw;
339 cos1_credit_weight = (cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT)/
340 total_bw;
341
342 bnx2x_ets_bw_limit_common(params);
343
344 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_0, cos0_credit_weight);
345 REG_WR(bp, NIG_REG_P0_TX_ARB_CREDIT_WEIGHT_1, cos1_credit_weight);
346
347 REG_WR(bp, PBF_REG_COS0_WEIGHT, cos0_credit_weight);
348 REG_WR(bp, PBF_REG_COS1_WEIGHT, cos1_credit_weight);
349}
350
351u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos)
352{
353 /* ETS disabled configuration*/
354 struct bnx2x *bp = params->bp;
355 u32 val = 0;
356
357 DP(NETIF_MSG_LINK, "ETS enabled strict configuration\n");
358 /**
359 * Bitmap of 5bits length. Each bit specifies whether the entry behaves
360 * as strict. Bits 0,1,2 - debug and management entries,
361 * 3 - COS0 entry, 4 - COS1 entry.
 362 * COS1 | COS0 | DEBUG1 | DEBUG0 | MGMT
363 * bit4 bit3 bit2 bit1 bit0
364 * MCP and debug are strict
365 */
366 REG_WR(bp, NIG_REG_P0_TX_ARB_CLIENT_IS_STRICT, 0x1F);
367 /**
368 * For strict priority entries defines the number of consecutive slots
369 * for the highest priority.
370 */
371 REG_WR(bp, NIG_REG_P0_TX_ARB_NUM_STRICT_ARB_SLOTS, 0x100);
372 /* ETS mode disable */
373 REG_WR(bp, PBF_REG_ETS_ENABLED, 0);
374 /* Defines the number of consecutive slots for the strict priority */
375 REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0x100);
376
377 /* Defines the number of consecutive slots for the strict priority */
378 REG_WR(bp, PBF_REG_HIGH_PRIORITY_COS_NUM, strict_cos);
379
380 /**
 381 * mapping from entry priority to client number (0,1,2 - debug and
 382 * management clients, 3 - COS0 client, 4 - COS1 client)(HIGHEST)
383 * 3bits client num.
384 * PRI4 | PRI3 | PRI2 | PRI1 | PRI0
385 * dbg0-010 dbg1-001 cos1-100 cos0-011 MCP-000
386 * dbg0-010 dbg1-001 cos0-011 cos1-100 MCP-000
387 */
388 val = (0 == strict_cos) ? 0x2318 : 0x22E0;
389 REG_WR(bp, NIG_REG_P0_TX_ARB_PRIORITY_CLIENT, val);
390
391 return 0;
392}
393/******************************************************************/
394/* ETS section */
395/******************************************************************/
396
397static void bnx2x_bmac2_get_pfc_stat(struct link_params *params,
398 u32 pfc_frames_sent[2],
399 u32 pfc_frames_received[2])
400{
401 /* Read pfc statistic */
402 struct bnx2x *bp = params->bp;
403 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
404 NIG_REG_INGRESS_BMAC0_MEM;
405
406 DP(NETIF_MSG_LINK, "pfc statistic read from BMAC\n");
407
408 REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_STAT_GTPP,
409 pfc_frames_sent, 2);
410
411 REG_RD_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_RX_STAT_GRPP,
412 pfc_frames_received, 2);
413
414}
415static void bnx2x_emac_get_pfc_stat(struct link_params *params,
416 u32 pfc_frames_sent[2],
417 u32 pfc_frames_received[2])
418{
419 /* Read pfc statistic */
420 struct bnx2x *bp = params->bp;
421 u32 emac_base = params->port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
422 u32 val_xon = 0;
423 u32 val_xoff = 0;
424
425 DP(NETIF_MSG_LINK, "pfc statistic read from EMAC\n");
426
427 /* PFC received frames */
428 val_xoff = REG_RD(bp, emac_base +
429 EMAC_REG_RX_PFC_STATS_XOFF_RCVD);
430 val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT;
431 val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_RCVD);
432 val_xon &= EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT;
433
434 pfc_frames_received[0] = val_xon + val_xoff;
435
 436 /* PFC frames sent */
437 val_xoff = REG_RD(bp, emac_base +
438 EMAC_REG_RX_PFC_STATS_XOFF_SENT);
439 val_xoff &= EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT;
440 val_xon = REG_RD(bp, emac_base + EMAC_REG_RX_PFC_STATS_XON_SENT);
441 val_xon &= EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT;
442
443 pfc_frames_sent[0] = val_xon + val_xoff;
444}
445
446void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
447 u32 pfc_frames_sent[2],
448 u32 pfc_frames_received[2])
449{
450 /* Read pfc statistic */
451 struct bnx2x *bp = params->bp;
452 u32 val = 0;
453 DP(NETIF_MSG_LINK, "pfc statistic\n");
454
455 if (!vars->link_up)
456 return;
457
458 val = REG_RD(bp, MISC_REG_RESET_REG_2);
459 if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
460 == 0) {
461 DP(NETIF_MSG_LINK, "About to read stats from EMAC\n");
462 bnx2x_emac_get_pfc_stat(params, pfc_frames_sent,
463 pfc_frames_received);
464 } else {
465 DP(NETIF_MSG_LINK, "About to read stats from BMAC\n");
466 bnx2x_bmac2_get_pfc_stat(params, pfc_frames_sent,
467 pfc_frames_received);
468 }
469}
470/******************************************************************/
471/* MAC/PBF section */
472/******************************************************************/
208static void bnx2x_emac_init(struct link_params *params, 473static void bnx2x_emac_init(struct link_params *params,
209 struct link_vars *vars) 474 struct link_vars *vars)
210{ 475{
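
The credit split in bnx2x_ets_bw_limit() above is plain proportional arithmetic against the 0x5000 (ETS_BW_LIMIT_CREDIT_WEIGHT) budget, guarded against zero bandwidths. A self-contained sketch of just that calculation (standalone C, not driver code):

#include <stdint.h>
#include <stdio.h>

#define ETS_BW_LIMIT_CREDIT_WEIGHT 0x5000

/* Each COS gets a share of the 0x5000 credit budget proportional to its
 * requested bandwidth; zero total or zero per-COS bandwidth is rejected,
 * matching the guard in the driver. */
static int ets_split(uint32_t cos0_bw, uint32_t cos1_bw,
		     uint32_t *w0, uint32_t *w1)
{
	uint32_t total = cos0_bw + cos1_bw;

	if (!total || !cos0_bw || !cos1_bw)
		return -1;
	*w0 = cos0_bw * ETS_BW_LIMIT_CREDIT_WEIGHT / total;
	*w1 = cos1_bw * ETS_BW_LIMIT_CREDIT_WEIGHT / total;
	return 0;
}

int main(void)
{
	uint32_t w0, w1;

	/* a 40/60 split yields credit weights 0x2000 and 0x3000 */
	if (!ets_split(40, 60, &w0, &w1))
		printf("cos0=0x%x cos1=0x%x\n", (unsigned)w0, (unsigned)w1);
	return 0;
}
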
@@ -315,24 +580,55 @@ static u8 bnx2x_emac_enable(struct link_params *params,
315 /* pause enable/disable */ 580 /* pause enable/disable */
316 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE, 581 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_RX_MODE,
317 EMAC_RX_MODE_FLOW_EN); 582 EMAC_RX_MODE_FLOW_EN);
318 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
319 bnx2x_bits_en(bp, emac_base +
320 EMAC_REG_EMAC_RX_MODE,
321 EMAC_RX_MODE_FLOW_EN);
322 583
323 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE, 584 bnx2x_bits_dis(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
324 (EMAC_TX_MODE_EXT_PAUSE_EN | 585 (EMAC_TX_MODE_EXT_PAUSE_EN |
325 EMAC_TX_MODE_FLOW_EN)); 586 EMAC_TX_MODE_FLOW_EN));
326 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 587 if (!(params->feature_config_flags &
327 bnx2x_bits_en(bp, emac_base + 588 FEATURE_CONFIG_PFC_ENABLED)) {
328 EMAC_REG_EMAC_TX_MODE, 589 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
329 (EMAC_TX_MODE_EXT_PAUSE_EN | 590 bnx2x_bits_en(bp, emac_base +
330 EMAC_TX_MODE_FLOW_EN)); 591 EMAC_REG_EMAC_RX_MODE,
592 EMAC_RX_MODE_FLOW_EN);
593
594 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
595 bnx2x_bits_en(bp, emac_base +
596 EMAC_REG_EMAC_TX_MODE,
597 (EMAC_TX_MODE_EXT_PAUSE_EN |
598 EMAC_TX_MODE_FLOW_EN));
599 } else
600 bnx2x_bits_en(bp, emac_base + EMAC_REG_EMAC_TX_MODE,
601 EMAC_TX_MODE_FLOW_EN);
331 } 602 }
332 603
333 /* KEEP_VLAN_TAG, promiscuous */ 604 /* KEEP_VLAN_TAG, promiscuous */
334 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE); 605 val = REG_RD(bp, emac_base + EMAC_REG_EMAC_RX_MODE);
335 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS; 606 val |= EMAC_RX_MODE_KEEP_VLAN_TAG | EMAC_RX_MODE_PROMISCUOUS;
607
608 /**
609 * Setting this bit causes MAC control frames (except for pause
 610 * frames) to be passed on for processing. This setting has no
 611 * effect on the operation of the pause frames. This bit affects
612 * all packets regardless of RX Parser packet sorting logic.
613 * Turn the PFC off to make sure we are in Xon state before
614 * enabling it.
615 */
616 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE, 0);
617 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
618 DP(NETIF_MSG_LINK, "PFC is enabled\n");
619 /* Enable PFC again */
620 EMAC_WR(bp, EMAC_REG_RX_PFC_MODE,
621 EMAC_REG_RX_PFC_MODE_RX_EN |
622 EMAC_REG_RX_PFC_MODE_TX_EN |
623 EMAC_REG_RX_PFC_MODE_PRIORITIES);
624
625 EMAC_WR(bp, EMAC_REG_RX_PFC_PARAM,
626 ((0x0101 <<
627 EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT) |
628 (0x00ff <<
629 EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT)));
630 val |= EMAC_RX_MODE_KEEP_MAC_CONTROL;
631 }
336 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val); 632 EMAC_WR(bp, EMAC_REG_EMAC_RX_MODE, val);
337 633
338 /* Set Loopback */ 634 /* Set Loopback */
@@ -362,7 +658,9 @@ static u8 bnx2x_emac_enable(struct link_params *params,
362 /* enable the NIG in/out to the emac */ 658 /* enable the NIG in/out to the emac */
363 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1); 659 REG_WR(bp, NIG_REG_EMAC0_IN_EN + port*4, 0x1);
364 val = 0; 660 val = 0;
365 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 661 if ((params->feature_config_flags &
662 FEATURE_CONFIG_PFC_ENABLED) ||
663 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
366 val = 1; 664 val = 1;
367 665
368 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val); 666 REG_WR(bp, NIG_REG_EMAC0_PAUSE_OUT_EN + port*4, val);
@@ -383,9 +681,38 @@ static u8 bnx2x_emac_enable(struct link_params *params,
383 return 0; 681 return 0;
384} 682}
385 683
386static void bnx2x_update_bmac2(struct link_params *params, 684static void bnx2x_update_pfc_bmac1(struct link_params *params,
387 struct link_vars *vars, 685 struct link_vars *vars)
388 u8 is_lb) 686{
687 u32 wb_data[2];
688 struct bnx2x *bp = params->bp;
689 u32 bmac_addr = params->port ? NIG_REG_INGRESS_BMAC1_MEM :
690 NIG_REG_INGRESS_BMAC0_MEM;
691
692 u32 val = 0x14;
693 if ((!(params->feature_config_flags &
694 FEATURE_CONFIG_PFC_ENABLED)) &&
695 (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
696 /* Enable BigMAC to react on received Pause packets */
697 val |= (1<<5);
698 wb_data[0] = val;
699 wb_data[1] = 0;
700 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL, wb_data, 2);
701
702 /* tx control */
703 val = 0xc0;
704 if (!(params->feature_config_flags &
705 FEATURE_CONFIG_PFC_ENABLED) &&
706 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
707 val |= 0x800000;
708 wb_data[0] = val;
709 wb_data[1] = 0;
710 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL, wb_data, 2);
711}
712
713static void bnx2x_update_pfc_bmac2(struct link_params *params,
714 struct link_vars *vars,
715 u8 is_lb)
389{ 716{
390 /* 717 /*
391 * Set rx control: Strip CRC and enable BigMAC to relay 718 * Set rx control: Strip CRC and enable BigMAC to relay
@@ -397,7 +724,9 @@ static void bnx2x_update_bmac2(struct link_params *params,
397 NIG_REG_INGRESS_BMAC0_MEM; 724 NIG_REG_INGRESS_BMAC0_MEM;
398 u32 val = 0x14; 725 u32 val = 0x14;
399 726
400 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX) 727 if ((!(params->feature_config_flags &
728 FEATURE_CONFIG_PFC_ENABLED)) &&
729 (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX))
401 /* Enable BigMAC to react on received Pause packets */ 730 /* Enable BigMAC to react on received Pause packets */
402 val |= (1<<5); 731 val |= (1<<5);
403 wb_data[0] = val; 732 wb_data[0] = val;
@@ -408,14 +737,47 @@ static void bnx2x_update_bmac2(struct link_params *params,
408 737
409 /* Tx control */ 738 /* Tx control */
410 val = 0xc0; 739 val = 0xc0;
411 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 740 if (!(params->feature_config_flags &
741 FEATURE_CONFIG_PFC_ENABLED) &&
742 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
412 val |= 0x800000; 743 val |= 0x800000;
413 wb_data[0] = val; 744 wb_data[0] = val;
414 wb_data[1] = 0; 745 wb_data[1] = 0;
415 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, 746 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_CONTROL, wb_data, 2);
416 wb_data, 2); 747
748 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED) {
749 DP(NETIF_MSG_LINK, "PFC is enabled\n");
750 /* Enable PFC RX & TX & STATS and set 8 COS */
751 wb_data[0] = 0x0;
752 wb_data[0] |= (1<<0); /* RX */
753 wb_data[0] |= (1<<1); /* TX */
754 wb_data[0] |= (1<<2); /* Force initial Xon */
755 wb_data[0] |= (1<<3); /* 8 cos */
756 wb_data[0] |= (1<<5); /* STATS */
757 wb_data[1] = 0;
758 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL,
759 wb_data, 2);
760 /* Clear the force Xon */
761 wb_data[0] &= ~(1<<2);
762 } else {
763 DP(NETIF_MSG_LINK, "PFC is disabled\n");
764 /* disable PFC RX & TX & STATS and set 8 COS */
765 wb_data[0] = 0x8;
766 wb_data[1] = 0;
767 }
768
769 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_PFC_CONTROL, wb_data, 2);
417 770
771 /**
772 * Set Time (based unit is 512 bit time) between automatic
 773 * re-sending of PP packets and enable automatic re-send of
 774 * Per-Priority packets as long as pp_gen is asserted and
775 * pp_disable is low.
776 */
418 val = 0x8000; 777 val = 0x8000;
778 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
779 val |= (1<<16); /* enable automatic re-send */
780
419 wb_data[0] = val; 781 wb_data[0] = val;
420 wb_data[1] = 0; 782 wb_data[1] = 0;
421 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL, 783 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_TX_PAUSE_CONTROL,
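
For reference, the BIGMAC2_REGISTER_PFC_CONTROL word assembled above can be modelled on its own; the bit meanings come from the inline comments in this hunk, everything else here is illustrative:

#include <stdint.h>
#include <stdio.h>

/* Build the PFC control word as bnx2x_update_pfc_bmac2() does: enable
 * RX/TX/STATS with a one-shot "force Xon" when PFC is on, otherwise keep
 * only the 8-COS bit. */
static uint32_t bmac2_pfc_control(int pfc_enabled)
{
	uint32_t val;

	if (pfc_enabled) {
		val  = 1u << 0;		/* RX */
		val |= 1u << 1;		/* TX */
		val |= 1u << 2;		/* force initial Xon (cleared later) */
		val |= 1u << 3;		/* 8 COS */
		val |= 1u << 5;		/* STATS */
	} else {
		val = 0x8;		/* PFC off, still 8 COS */
	}
	return val;
}

int main(void)
{
	printf("PFC on : 0x%x\n", (unsigned)bmac2_pfc_control(1));	/* 0x2f */
	printf("PFC off: 0x%x\n", (unsigned)bmac2_pfc_control(0));	/* 0x8 */
	return 0;
}
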
@@ -427,6 +789,9 @@ static void bnx2x_update_bmac2(struct link_params *params,
427 val |= 0x4; /* Local loopback */ 789 val |= 0x4; /* Local loopback */
428 DP(NETIF_MSG_LINK, "enable bmac loopback\n"); 790 DP(NETIF_MSG_LINK, "enable bmac loopback\n");
429 } 791 }
792 /* When PFC enabled, Pass pause frames towards the NIG. */
793 if (params->feature_config_flags & FEATURE_CONFIG_PFC_ENABLED)
794 val |= ((1<<6)|(1<<5));
430 795
431 wb_data[0] = val; 796 wb_data[0] = val;
432 wb_data[1] = 0; 797 wb_data[1] = 0;
@@ -434,6 +799,239 @@ static void bnx2x_update_bmac2(struct link_params *params,
434 wb_data, 2); 799 wb_data, 2);
435} 800}
436 801
802static void bnx2x_update_pfc_brb(struct link_params *params,
803 struct link_vars *vars,
804 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
805{
806 struct bnx2x *bp = params->bp;
807 int set_pfc = params->feature_config_flags &
808 FEATURE_CONFIG_PFC_ENABLED;
809
810 /* default - pause configuration */
811 u32 pause_xoff_th = PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
812 u32 pause_xon_th = PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
813 u32 full_xoff_th = PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
814 u32 full_xon_th = PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
815
816 if (set_pfc && pfc_params)
817 /* First COS */
818 if (!pfc_params->cos0_pauseable) {
819 pause_xoff_th =
820 PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
821 pause_xon_th =
822 PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
823 full_xoff_th =
824 PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
825 full_xon_th =
826 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
827 }
828 /* The number of free blocks below which the pause signal to class 0
829 of MAC #n is asserted. n=0,1 */
830 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 , pause_xoff_th);
831 /* The number of free blocks above which the pause signal to class 0
832 of MAC #n is de-asserted. n=0,1 */
833 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 , pause_xon_th);
834 /* The number of free blocks below which the full signal to class 0
835 of MAC #n is asserted. n=0,1 */
836 REG_WR(bp, BRB1_REG_FULL_0_XOFF_THRESHOLD_0 , full_xoff_th);
837 /* The number of free blocks above which the full signal to class 0
838 of MAC #n is de-asserted. n=0,1 */
839 REG_WR(bp, BRB1_REG_FULL_0_XON_THRESHOLD_0 , full_xon_th);
840
841 if (set_pfc && pfc_params) {
842 /* Second COS */
843 if (pfc_params->cos1_pauseable) {
844 pause_xoff_th =
845 PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE;
846 pause_xon_th =
847 PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE;
848 full_xoff_th =
849 PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE;
850 full_xon_th =
851 PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE;
852 } else {
853 pause_xoff_th =
854 PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE;
855 pause_xon_th =
856 PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE;
857 full_xoff_th =
858 PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE;
859 full_xon_th =
860 PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE;
861 }
862 /**
863 * The number of free blocks below which the pause signal to
864 * class 1 of MAC #n is asserted. n=0,1
865 **/
866 REG_WR(bp, BRB1_REG_PAUSE_1_XOFF_THRESHOLD_0, pause_xoff_th);
867 /**
868 * The number of free blocks above which the pause signal to
869 * class 1 of MAC #n is de-asserted. n=0,1
870 **/
871 REG_WR(bp, BRB1_REG_PAUSE_1_XON_THRESHOLD_0, pause_xon_th);
872 /**
873 * The number of free blocks below which the full signal to
874 * class 1 of MAC #n is asserted. n=0,1
875 **/
876 REG_WR(bp, BRB1_REG_FULL_1_XOFF_THRESHOLD_0, full_xoff_th);
877 /**
878 * The number of free blocks above which the full signal to
879 * class 1 of MAC #n is de-asserted. n=0,1
880 **/
881 REG_WR(bp, BRB1_REG_FULL_1_XON_THRESHOLD_0, full_xon_th);
882 }
883}
884
885static void bnx2x_update_pfc_nig(struct link_params *params,
886 struct link_vars *vars,
887 struct bnx2x_nig_brb_pfc_port_params *nig_params)
888{
889 u32 xcm_mask = 0, ppp_enable = 0, pause_enable = 0, llfc_out_en = 0;
890 u32 llfc_enable = 0, xcm0_out_en = 0, p0_hwpfc_enable = 0;
891 u32 pkt_priority_to_cos = 0;
892 u32 val;
893 struct bnx2x *bp = params->bp;
894 int port = params->port;
895 int set_pfc = params->feature_config_flags &
896 FEATURE_CONFIG_PFC_ENABLED;
897 DP(NETIF_MSG_LINK, "updating pfc nig parameters\n");
898
899 /**
900 * When NIG_LLH0_XCM_MASK_REG_LLHX_XCM_MASK_BCN bit is set
901 * MAC control frames (that are not pause packets)
902 * will be forwarded to the XCM.
903 */
904 xcm_mask = REG_RD(bp,
905 port ? NIG_REG_LLH1_XCM_MASK :
906 NIG_REG_LLH0_XCM_MASK);
907 /**
908 * nig params will override non PFC params, since it's possible to
909 * do transition from PFC to SAFC
910 */
911 if (set_pfc) {
912 pause_enable = 0;
913 llfc_out_en = 0;
914 llfc_enable = 0;
915 ppp_enable = 1;
916 xcm_mask &= ~(port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
917 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
918 xcm0_out_en = 0;
919 p0_hwpfc_enable = 1;
920 } else {
921 if (nig_params) {
922 llfc_out_en = nig_params->llfc_out_en;
923 llfc_enable = nig_params->llfc_enable;
924 pause_enable = nig_params->pause_enable;
 925 } else /* default non-PFC mode - PAUSE */
926 pause_enable = 1;
927
928 xcm_mask |= (port ? NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN :
929 NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN);
930 xcm0_out_en = 1;
931 }
932
933 REG_WR(bp, port ? NIG_REG_LLFC_OUT_EN_1 :
934 NIG_REG_LLFC_OUT_EN_0, llfc_out_en);
935 REG_WR(bp, port ? NIG_REG_LLFC_ENABLE_1 :
936 NIG_REG_LLFC_ENABLE_0, llfc_enable);
937 REG_WR(bp, port ? NIG_REG_PAUSE_ENABLE_1 :
938 NIG_REG_PAUSE_ENABLE_0, pause_enable);
939
940 REG_WR(bp, port ? NIG_REG_PPP_ENABLE_1 :
941 NIG_REG_PPP_ENABLE_0, ppp_enable);
942
943 REG_WR(bp, port ? NIG_REG_LLH1_XCM_MASK :
944 NIG_REG_LLH0_XCM_MASK, xcm_mask);
945
946 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0, 0x7);
947
948 /* output enable for RX_XCM # IF */
949 REG_WR(bp, NIG_REG_XCM0_OUT_EN, xcm0_out_en);
950
951 /* HW PFC TX enable */
952 REG_WR(bp, NIG_REG_P0_HWPFC_ENABLE, p0_hwpfc_enable);
953
954 /* 0x2 = BMAC, 0x1= EMAC */
955 switch (vars->mac_type) {
956 case MAC_TYPE_EMAC:
957 val = 1;
958 break;
959 case MAC_TYPE_BMAC:
960 val = 0;
961 break;
962 default:
963 val = 0;
964 break;
965 }
966 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT, val);
967
968 if (nig_params) {
969 pkt_priority_to_cos = nig_params->pkt_priority_to_cos;
970
971 REG_WR(bp, port ? NIG_REG_P1_RX_COS0_PRIORITY_MASK :
972 NIG_REG_P0_RX_COS0_PRIORITY_MASK,
973 nig_params->rx_cos0_priority_mask);
974
975 REG_WR(bp, port ? NIG_REG_P1_RX_COS1_PRIORITY_MASK :
976 NIG_REG_P0_RX_COS1_PRIORITY_MASK,
977 nig_params->rx_cos1_priority_mask);
978
979 REG_WR(bp, port ? NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 :
980 NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0,
981 nig_params->llfc_high_priority_classes);
982
983 REG_WR(bp, port ? NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 :
984 NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0,
985 nig_params->llfc_low_priority_classes);
986 }
987 REG_WR(bp, port ? NIG_REG_P1_PKT_PRIORITY_TO_COS :
988 NIG_REG_P0_PKT_PRIORITY_TO_COS,
989 pkt_priority_to_cos);
990}
991
992
993void bnx2x_update_pfc(struct link_params *params,
994 struct link_vars *vars,
995 struct bnx2x_nig_brb_pfc_port_params *pfc_params)
996{
997 /**
 998 * The PFC and pause are orthogonal to one another, meaning that when
 999 * PFC is enabled, pause is disabled, and when PFC is
 1000 * disabled, pause is set according to the pause result.
1001 */
1002 u32 val;
1003 struct bnx2x *bp = params->bp;
1004
1005 /* update NIG params */
1006 bnx2x_update_pfc_nig(params, vars, pfc_params);
1007
1008 /* update BRB params */
1009 bnx2x_update_pfc_brb(params, vars, pfc_params);
1010
1011 if (!vars->link_up)
1012 return;
1013
1014 val = REG_RD(bp, MISC_REG_RESET_REG_2);
1015 if ((val & (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << params->port))
1016 == 0) {
1017 DP(NETIF_MSG_LINK, "About to update PFC in EMAC\n");
1018 bnx2x_emac_enable(params, vars, 0);
1019 return;
1020 }
1021
1022 DP(NETIF_MSG_LINK, "About to update PFC in BMAC\n");
1023 if (CHIP_IS_E2(bp))
1024 bnx2x_update_pfc_bmac2(params, vars, 0);
1025 else
1026 bnx2x_update_pfc_bmac1(params, vars);
1027
1028 val = 0;
1029 if ((params->feature_config_flags &
1030 FEATURE_CONFIG_PFC_ENABLED) ||
1031 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
1032 val = 1;
1033 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + params->port*4, val);
1034}
437 1035
438static u8 bnx2x_bmac1_enable(struct link_params *params, 1036static u8 bnx2x_bmac1_enable(struct link_params *params,
439 struct link_vars *vars, 1037 struct link_vars *vars,
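
The per-COS branch in bnx2x_update_pfc_brb() above reduces to picking one of two threshold sets depending on whether the class is pauseable. A standalone sketch of that selection, using the PAUSE threshold values added to bnx2x_link.h further down (the struct and helper names are illustrative only):

#include <stdint.h>
#include <stdio.h>

#define XOFF_TH_PAUSEABLE	170	/* PFC_BRB_MAC_PAUSE_XOFF_..._PAUSEABLE */
#define XOFF_TH_NON_PAUSEABLE	0
#define XON_TH_PAUSEABLE	250	/* PFC_BRB_MAC_PAUSE_XON_..._PAUSEABLE */
#define XON_TH_NON_PAUSEABLE	0

struct cos_thresholds {
	uint32_t xoff;	/* free blocks below which pause is asserted */
	uint32_t xon;	/* free blocks above which pause is de-asserted */
};

/* A pauseable class keeps the regular pause thresholds; a non-pauseable
 * class gets zeroes so the BRB never asserts pause for it. */
static struct cos_thresholds brb_pause_thresholds(int cos_pauseable)
{
	struct cos_thresholds t;

	t.xoff = cos_pauseable ? XOFF_TH_PAUSEABLE : XOFF_TH_NON_PAUSEABLE;
	t.xon  = cos_pauseable ? XON_TH_PAUSEABLE  : XON_TH_NON_PAUSEABLE;
	return t;
}

int main(void)
{
	struct cos_thresholds t = brb_pause_thresholds(0);

	printf("non-pauseable COS: xoff=%u xon=%u\n",
	       (unsigned)t.xoff, (unsigned)t.xon);
	return 0;
}
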
@@ -465,15 +1063,6 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
465 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR, 1063 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_SOURCE_ADDR,
466 wb_data, 2); 1064 wb_data, 2);
467 1065
468 /* tx control */
469 val = 0xc0;
470 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX)
471 val |= 0x800000;
472 wb_data[0] = val;
473 wb_data[1] = 0;
474 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_TX_CONTROL,
475 wb_data, 2);
476
477 /* mac control */ 1066 /* mac control */
478 val = 0x3; 1067 val = 0x3;
479 if (is_lb) { 1068 if (is_lb) {
@@ -491,14 +1080,7 @@ static u8 bnx2x_bmac1_enable(struct link_params *params,
491 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE, 1080 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_MAX_SIZE,
492 wb_data, 2); 1081 wb_data, 2);
493 1082
494 /* rx control set to don't strip crc */ 1083 bnx2x_update_pfc_bmac1(params, vars);
495 val = 0x14;
496 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_RX)
497 val |= 0x20;
498 wb_data[0] = val;
499 wb_data[1] = 0;
500 REG_WR_DMAE(bp, bmac_addr + BIGMAC_REGISTER_RX_CONTROL,
501 wb_data, 2);
502 1084
503 /* set tx mtu */ 1085 /* set tx mtu */
504 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD; 1086 wb_data[0] = ETH_MAX_JUMBO_PACKET_SIZE + ETH_OVREHEAD;
@@ -595,7 +1177,7 @@ static u8 bnx2x_bmac2_enable(struct link_params *params,
595 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE, 1177 REG_WR_DMAE(bp, bmac_addr + BIGMAC2_REGISTER_CNT_MAX_SIZE,
596 wb_data, 2); 1178 wb_data, 2);
597 udelay(30); 1179 udelay(30);
598 bnx2x_update_bmac2(params, vars, is_lb); 1180 bnx2x_update_pfc_bmac2(params, vars, is_lb);
599 1181
600 return 0; 1182 return 0;
601} 1183}
@@ -627,7 +1209,9 @@ static u8 bnx2x_bmac_enable(struct link_params *params,
627 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0); 1209 REG_WR(bp, NIG_REG_XGXS_LANE_SEL_P0 + port*4, 0x0);
628 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0); 1210 REG_WR(bp, NIG_REG_EGRESS_EMAC0_PORT + port*4, 0x0);
629 val = 0; 1211 val = 0;
630 if (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX) 1212 if ((params->feature_config_flags &
1213 FEATURE_CONFIG_PFC_ENABLED) ||
1214 (vars->flow_ctrl & BNX2X_FLOW_CTRL_TX))
631 val = 1; 1215 val = 1;
632 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val); 1216 REG_WR(bp, NIG_REG_BMAC0_PAUSE_OUT_EN + port*4, val);
633 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0); 1217 REG_WR(bp, NIG_REG_EGRESS_EMAC0_OUT_EN + port*4, 0x0);
@@ -3904,7 +4488,7 @@ static u8 bnx2x_8726_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3904 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4488 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
3905 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4489 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
3906 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4490 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
3907 return 0;; 4491 return 0;
3908 msleep(1); 4492 msleep(1);
3909 } 4493 }
3910 return -EINVAL; 4494 return -EINVAL;
@@ -3988,7 +4572,7 @@ static u8 bnx2x_8727_read_sfp_module_eeprom(struct bnx2x_phy *phy,
3988 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val); 4572 MDIO_PMA_REG_SFP_TWO_WIRE_CTRL, &val);
3989 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) == 4573 if ((val & MDIO_PMA_REG_SFP_TWO_WIRE_CTRL_STATUS_MASK) ==
3990 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE) 4574 MDIO_PMA_REG_SFP_TWO_WIRE_STATUS_IDLE)
3991 return 0;; 4575 return 0;
3992 msleep(1); 4576 msleep(1);
3993 } 4577 }
3994 4578
diff --git a/drivers/net/bnx2x/bnx2x_link.h b/drivers/net/bnx2x/bnx2x_link.h
index 171abf8097ee..bedab1a942c4 100644
--- a/drivers/net/bnx2x/bnx2x_link.h
+++ b/drivers/net/bnx2x/bnx2x_link.h
@@ -65,6 +65,22 @@
65#define FW_PARAM_MDIO_CTRL_OFFSET 16 65#define FW_PARAM_MDIO_CTRL_OFFSET 16
66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \ 66#define FW_PARAM_SET(phy_addr, phy_type, mdio_access) \
67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET) 67 (phy_addr | phy_type | mdio_access << FW_PARAM_MDIO_CTRL_OFFSET)
68
69#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_PAUSEABLE 170
70#define PFC_BRB_MAC_PAUSE_XOFF_THRESHOLD_NON_PAUSEABLE 0
71
72#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_PAUSEABLE 250
73#define PFC_BRB_MAC_PAUSE_XON_THRESHOLD_NON_PAUSEABLE 0
74
75#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_PAUSEABLE 10
76#define PFC_BRB_MAC_FULL_XOFF_THRESHOLD_NON_PAUSEABLE 90
77
78#define PFC_BRB_MAC_FULL_XON_THRESHOLD_PAUSEABLE 50
79#define PFC_BRB_MAC_FULL_XON_THRESHOLD_NON_PAUSEABLE 250
80
81#define PFC_BRB_FULL_LB_XOFF_THRESHOLD 170
82#define PFC_BRB_FULL_LB_XON_THRESHOLD 250
83
68/***********************************************************/ 84/***********************************************************/
69/* Structs */ 85/* Structs */
70/***********************************************************/ 86/***********************************************************/
@@ -216,6 +232,7 @@ struct link_params {
216 232
217 u32 feature_config_flags; 233 u32 feature_config_flags;
218#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0) 234#define FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED (1<<0)
235#define FEATURE_CONFIG_PFC_ENABLED (1<<1)
219#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2) 236#define FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY (1<<2)
220#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3) 237#define FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY (1<<3)
221 /* Will be populated during common init */ 238 /* Will be populated during common init */
@@ -332,4 +349,43 @@ u8 bnx2x_phy_probe(struct link_params *params);
332u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base, 349u8 bnx2x_fan_failure_det_req(struct bnx2x *bp, u32 shmem_base,
333 u32 shmem2_base, u8 port); 350 u32 shmem2_base, u8 port);
334 351
352/* PFC port configuration params */
353struct bnx2x_nig_brb_pfc_port_params {
354 /* NIG */
355 u32 pause_enable;
356 u32 llfc_out_en;
357 u32 llfc_enable;
358 u32 pkt_priority_to_cos;
359 u32 rx_cos0_priority_mask;
360 u32 rx_cos1_priority_mask;
361 u32 llfc_high_priority_classes;
362 u32 llfc_low_priority_classes;
363 /* BRB */
364 u32 cos0_pauseable;
365 u32 cos1_pauseable;
366};
367
368/**
369 * Used to update the PFC attributes in EMAC, BMAC, NIG and BRB
370 * when link is already up
371 */
372void bnx2x_update_pfc(struct link_params *params,
373 struct link_vars *vars,
374 struct bnx2x_nig_brb_pfc_port_params *pfc_params);
375
376
377/* Used to configure the ETS to disable */
378void bnx2x_ets_disabled(struct link_params *params);
379
380/* Used to configure the ETS to BW limited */
381void bnx2x_ets_bw_limit(const struct link_params *params, const u32 cos0_bw,
382 const u32 cos1_bw);
383
384/* Used to configure the ETS to strict */
385u8 bnx2x_ets_strict(const struct link_params *params, const u8 strict_cos);
386
387/* Read pfc statistic*/
388void bnx2x_pfc_statistic(struct link_params *params, struct link_vars *vars,
389 u32 pfc_frames_sent[2],
390 u32 pfc_frames_received[2]);
335#endif /* BNX2X_LINK_H */ 391#endif /* BNX2X_LINK_H */
diff --git a/drivers/net/bnx2x/bnx2x_main.c b/drivers/net/bnx2x/bnx2x_main.c
index be52edcf8346..8cdcf5b39d1e 100644
--- a/drivers/net/bnx2x/bnx2x_main.c
+++ b/drivers/net/bnx2x/bnx2x_main.c
@@ -55,6 +55,7 @@
55#include "bnx2x_init.h" 55#include "bnx2x_init.h"
56#include "bnx2x_init_ops.h" 56#include "bnx2x_init_ops.h"
57#include "bnx2x_cmn.h" 57#include "bnx2x_cmn.h"
58#include "bnx2x_dcb.h"
58 59
59#include <linux/firmware.h> 60#include <linux/firmware.h>
60#include "bnx2x_fw_file_hdr.h" 61#include "bnx2x_fw_file_hdr.h"
@@ -121,6 +122,10 @@ MODULE_PARM_DESC(debug, " Default debug msglevel");
121 122
122static struct workqueue_struct *bnx2x_wq; 123static struct workqueue_struct *bnx2x_wq;
123 124
125#ifdef BCM_CNIC
126static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
127#endif
128
124enum bnx2x_board_type { 129enum bnx2x_board_type {
125 BCM57710 = 0, 130 BCM57710 = 0,
126 BCM57711 = 1, 131 BCM57711 = 1,
@@ -921,7 +926,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
921 sp_sb_data.p_func.vf_valid); 926 sp_sb_data.p_func.vf_valid);
922 927
923 928
924 for_each_queue(bp, i) { 929 for_each_eth_queue(bp, i) {
925 struct bnx2x_fastpath *fp = &bp->fp[i]; 930 struct bnx2x_fastpath *fp = &bp->fp[i];
926 int loop; 931 int loop;
927 struct hc_status_block_data_e2 sb_data_e2; 932 struct hc_status_block_data_e2 sb_data_e2;
@@ -961,6 +966,10 @@ void bnx2x_panic_dump(struct bnx2x *bp)
961 966
962 /* host sb data */ 967 /* host sb data */
963 968
969#ifdef BCM_CNIC
970 if (IS_FCOE_FP(fp))
971 continue;
972#endif
964 BNX2X_ERR(" run indexes ("); 973 BNX2X_ERR(" run indexes (");
965 for (j = 0; j < HC_SB_MAX_SM; j++) 974 for (j = 0; j < HC_SB_MAX_SM; j++)
966 pr_cont("0x%x%s", 975 pr_cont("0x%x%s",
@@ -1029,7 +1038,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
1029#ifdef BNX2X_STOP_ON_ERROR 1038#ifdef BNX2X_STOP_ON_ERROR
1030 /* Rings */ 1039 /* Rings */
1031 /* Rx */ 1040 /* Rx */
1032 for_each_queue(bp, i) { 1041 for_each_rx_queue(bp, i) {
1033 struct bnx2x_fastpath *fp = &bp->fp[i]; 1042 struct bnx2x_fastpath *fp = &bp->fp[i];
1034 1043
1035 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10); 1044 start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
@@ -1063,7 +1072,7 @@ void bnx2x_panic_dump(struct bnx2x *bp)
1063 } 1072 }
1064 1073
1065 /* Tx */ 1074 /* Tx */
1066 for_each_queue(bp, i) { 1075 for_each_tx_queue(bp, i) {
1067 struct bnx2x_fastpath *fp = &bp->fp[i]; 1076 struct bnx2x_fastpath *fp = &bp->fp[i];
1068 1077
1069 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10); 1078 start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
@@ -1298,7 +1307,7 @@ void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
1298#ifdef BCM_CNIC 1307#ifdef BCM_CNIC
1299 offset++; 1308 offset++;
1300#endif 1309#endif
1301 for_each_queue(bp, i) 1310 for_each_eth_queue(bp, i)
1302 synchronize_irq(bp->msix_table[i + offset].vector); 1311 synchronize_irq(bp->msix_table[i + offset].vector);
1303 } else 1312 } else
1304 synchronize_irq(bp->pdev->irq); 1313 synchronize_irq(bp->pdev->irq);
@@ -1420,7 +1429,7 @@ irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1420 return IRQ_HANDLED; 1429 return IRQ_HANDLED;
1421#endif 1430#endif
1422 1431
1423 for_each_queue(bp, i) { 1432 for_each_eth_queue(bp, i) {
1424 struct bnx2x_fastpath *fp = &bp->fp[i]; 1433 struct bnx2x_fastpath *fp = &bp->fp[i];
1425 1434
1426 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE); 1435 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
@@ -2026,13 +2035,28 @@ static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2026 2035
2027static void bnx2x_read_mf_cfg(struct bnx2x *bp) 2036static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2028{ 2037{
2029 int vn; 2038 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
2030 2039
2031 if (BP_NOMCP(bp)) 2040 if (BP_NOMCP(bp))
 2032 return; /* what should be the default value in this case */ 2041 return; /* what should be the default value in this case */
2033 2042
2043 /* For 2 port configuration the absolute function number formula
2044 * is:
2045 * abs_func = 2 * vn + BP_PORT + BP_PATH
2046 *
2047 * and there are 4 functions per port
2048 *
2049 * For 4 port configuration it is
2050 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2051 *
2052 * and there are 2 functions per port
2053 */
2034 for (vn = VN_0; vn < E1HVN_MAX; vn++) { 2054 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2035 int /*abs*/func = 2*vn + BP_PORT(bp); 2055 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2056
2057 if (func >= E1H_FUNC_MAX)
2058 break;
2059
2036 bp->mf_config[vn] = 2060 bp->mf_config[vn] =
2037 MF_CFG_RD(bp, func_mf_config[func].config); 2061 MF_CFG_RD(bp, func_mf_config[func].config);
2038 } 2062 }
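
The new comment documents the absolute-function formula used by the loop above; a quick standalone check of that arithmetic (illustrative only, not driver code):

#include <stdio.h>

/* n is the per-port multiplier: 1 on 2-port chips, 2 on 4-port chips, so
 * abs_func = n * (2 * vn + port) + path, matching the comment above. */
static int abs_func(int four_port, int vn, int port, int path)
{
	int n = four_port ? 2 : 1;

	return n * (2 * vn + port) + path;
}

int main(void)
{
	/* 2-port mode, vn=1, port=1, path=0 -> function 3 */
	printf("%d\n", abs_func(0, 1, 1, 0));
	/* 4-port mode, vn=1, port=1, path=0 -> function 6 */
	printf("%d\n", abs_func(1, 1, 1, 0));
	return 0;
}
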
@@ -2238,6 +2262,15 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
2238 return rc; 2262 return rc;
2239} 2263}
2240 2264
2265static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2266{
2267#ifdef BCM_CNIC
2268 if (IS_FCOE_FP(fp) && IS_MF(bp))
2269 return false;
2270#endif
2271 return true;
2272}
2273
2241/* must be called under rtnl_lock */ 2274/* must be called under rtnl_lock */
2242static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters) 2275static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2243{ 2276{
@@ -2248,10 +2281,21 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2248 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0; 2281 u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
2249 u8 unmatched_unicast = 0; 2282 u8 unmatched_unicast = 0;
2250 2283
2284 if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
2285 unmatched_unicast = 1;
2286
2251 if (filters & BNX2X_PROMISCUOUS_MODE) { 2287 if (filters & BNX2X_PROMISCUOUS_MODE) {
 2252 /* promiscuous - accept all, drop none */ 2288 /* promiscuous - accept all, drop none */
2253 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0; 2289 drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
2254 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1; 2290 accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
2291 if (IS_MF_SI(bp)) {
2292 /*
2293 * SI mode defines to accept in promiscuos mode
2294 * only unmatched packets
2295 */
2296 unmatched_unicast = 1;
2297 accp_all_ucast = 0;
2298 }
2255 } 2299 }
2256 if (filters & BNX2X_ACCEPT_UNICAST) { 2300 if (filters & BNX2X_ACCEPT_UNICAST) {
2257 /* accept matched ucast */ 2301 /* accept matched ucast */
@@ -2260,6 +2304,11 @@ static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
2260 if (filters & BNX2X_ACCEPT_MULTICAST) { 2304 if (filters & BNX2X_ACCEPT_MULTICAST) {
2261 /* accept matched mcast */ 2305 /* accept matched mcast */
2262 drop_all_mcast = 0; 2306 drop_all_mcast = 0;
2307 if (IS_MF_SI(bp))
2308 /* since mcast addresses won't arrive with ovlan,
2309 * fw needs to accept all of them in
2310 * switch-independent mode */
2311 accp_all_mcast = 1;
2263 } 2312 }
2264 if (filters & BNX2X_ACCEPT_ALL_UNICAST) { 2313 if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
2265 /* accept all mcast */ 2314 /* accept all mcast */
@@ -2372,7 +2421,7 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2372 /* calculate queue flags */ 2421 /* calculate queue flags */
2373 flags |= QUEUE_FLG_CACHE_ALIGN; 2422 flags |= QUEUE_FLG_CACHE_ALIGN;
2374 flags |= QUEUE_FLG_HC; 2423 flags |= QUEUE_FLG_HC;
2375 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0; 2424 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
2376 2425
2377 flags |= QUEUE_FLG_VLAN; 2426 flags |= QUEUE_FLG_VLAN;
2378 DP(NETIF_MSG_IFUP, "vlan removal enabled\n"); 2427 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
@@ -2380,7 +2429,8 @@ static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2380 if (!fp->disable_tpa) 2429 if (!fp->disable_tpa)
2381 flags |= QUEUE_FLG_TPA; 2430 flags |= QUEUE_FLG_TPA;
2382 2431
2383 flags |= QUEUE_FLG_STATS; 2432 flags = stat_counter_valid(bp, fp) ?
2433 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
2384 2434
2385 return flags; 2435 return flags;
2386} 2436}
@@ -2440,7 +2490,10 @@ static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2440 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT; 2490 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2441 rxq_init->fw_sb_id = fp->fw_sb_id; 2491 rxq_init->fw_sb_id = fp->fw_sb_id;
2442 2492
2443 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX; 2493 if (IS_FCOE_FP(fp))
2494 rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
2495 else
2496 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2444 2497
2445 rxq_init->cid = HW_CID(bp, fp->cid); 2498 rxq_init->cid = HW_CID(bp, fp->cid);
2446 2499
@@ -2460,6 +2513,12 @@ static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2460 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX; 2513 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2461 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW; 2514 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2462 txq_init->fw_sb_id = fp->fw_sb_id; 2515 txq_init->fw_sb_id = fp->fw_sb_id;
2516
2517 if (IS_FCOE_FP(fp)) {
2518 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2519 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2520 }
2521
2463 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0; 2522 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2464} 2523}
2465 2524
@@ -2573,6 +2632,26 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
2573 */ 2632 */
2574} 2633}
2575 2634
2635/* called due to MCP event (on pmf):
2636 * reread new bandwidth configuration
2637 * configure FW
2638 * notify others function about the change
2639 */
2640static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
2641{
2642 if (bp->link_vars.link_up) {
2643 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2644 bnx2x_link_sync_notify(bp);
2645 }
2646 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2647}
2648
2649static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
2650{
2651 bnx2x_config_mf_bw(bp);
2652 bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
2653}
2654
2576static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 2655static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2577{ 2656{
2578 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 2657 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
@@ -2598,10 +2677,7 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2598 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF; 2677 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2599 } 2678 }
2600 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) { 2679 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2601 2680 bnx2x_config_mf_bw(bp);
2602 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2603 bnx2x_link_sync_notify(bp);
2604 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2605 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION; 2681 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2606 } 2682 }
2607 2683
@@ -3022,10 +3098,20 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3022 if (val & DRV_STATUS_DCC_EVENT_MASK) 3098 if (val & DRV_STATUS_DCC_EVENT_MASK)
3023 bnx2x_dcc_event(bp, 3099 bnx2x_dcc_event(bp,
3024 (val & DRV_STATUS_DCC_EVENT_MASK)); 3100 (val & DRV_STATUS_DCC_EVENT_MASK));
3101
3102 if (val & DRV_STATUS_SET_MF_BW)
3103 bnx2x_set_mf_bw(bp);
3104
3025 bnx2x__link_status_update(bp); 3105 bnx2x__link_status_update(bp);
3026 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF)) 3106 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
3027 bnx2x_pmf_update(bp); 3107 bnx2x_pmf_update(bp);
3028 3108
3109 if (bp->port.pmf &&
3110 (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) &&
3111 bp->dcbx_enabled > 0)
3112 /* start dcbx state machine */
3113 bnx2x_dcbx_set_params(bp,
3114 BNX2X_DCBX_STATE_NEG_RECEIVED);
3029 } else if (attn & BNX2X_MC_ASSERT_BITS) { 3115 } else if (attn & BNX2X_MC_ASSERT_BITS) {
3030 3116
3031 BNX2X_ERR("MC assert!\n"); 3117 BNX2X_ERR("MC assert!\n");
@@ -3066,7 +3152,6 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3066#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1) 3152#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3067#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK) 3153#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3068#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS 3154#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3069#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
3070 3155
3071/* 3156/*
3072 * should be run under rtnl lock 3157 * should be run under rtnl lock
@@ -3441,7 +3526,7 @@ static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3441 try to handle this event */ 3526 try to handle this event */
3442 bnx2x_acquire_alr(bp); 3527 bnx2x_acquire_alr(bp);
3443 3528
3444 if (bnx2x_chk_parity_attn(bp)) { 3529 if (CHIP_PARITY_ENABLED(bp) && bnx2x_chk_parity_attn(bp)) {
3445 bp->recovery_state = BNX2X_RECOVERY_INIT; 3530 bp->recovery_state = BNX2X_RECOVERY_INIT;
3446 bnx2x_set_reset_in_progress(bp); 3531 bnx2x_set_reset_in_progress(bp);
3447 schedule_delayed_work(&bp->reset_task, 0); 3532 schedule_delayed_work(&bp->reset_task, 0);
@@ -3637,11 +3722,23 @@ static void bnx2x_eq_int(struct bnx2x *bp)
3637#ifdef BCM_CNIC 3722#ifdef BCM_CNIC
3638 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem)) 3723 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3639 goto next_spqe; 3724 goto next_spqe;
3725 if (cid == BNX2X_FCOE_ETH_CID)
3726 bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
3727 else
3640#endif 3728#endif
3641 bnx2x_fp(bp, cid, state) = 3729 bnx2x_fp(bp, cid, state) =
3642 BNX2X_FP_STATE_CLOSED; 3730 BNX2X_FP_STATE_CLOSED;
3643 3731
3644 goto next_spqe; 3732 goto next_spqe;
3733
3734 case EVENT_RING_OPCODE_STOP_TRAFFIC:
3735 DP(NETIF_MSG_IFUP, "got STOP TRAFFIC\n");
3736 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_PAUSED);
3737 goto next_spqe;
3738 case EVENT_RING_OPCODE_START_TRAFFIC:
3739 DP(NETIF_MSG_IFUP, "got START TRAFFIC\n");
3740 bnx2x_dcbx_set_params(bp, BNX2X_DCBX_STATE_TX_RELEASED);
3741 goto next_spqe;
3645 } 3742 }
3646 3743
3647 switch (opcode | bp->state) { 3744 switch (opcode | bp->state) {
@@ -3714,7 +3811,13 @@ static void bnx2x_sp_task(struct work_struct *work)
3714 3811
3715 /* SP events: STAT_QUERY and others */ 3812 /* SP events: STAT_QUERY and others */
3716 if (status & BNX2X_DEF_SB_IDX) { 3813 if (status & BNX2X_DEF_SB_IDX) {
3814#ifdef BCM_CNIC
3815 struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);
3717 3816
3817 if ((!NO_FCOE(bp)) &&
3818 (bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
3819 napi_schedule(&bnx2x_fcoe(bp, napi));
3820#endif
3718 /* Handle EQ completions */ 3821 /* Handle EQ completions */
3719 bnx2x_eq_int(bp); 3822 bnx2x_eq_int(bp);
3720 3823
@@ -4097,7 +4200,7 @@ void bnx2x_update_coalesce(struct bnx2x *bp)
4097{ 4200{
4098 int i; 4201 int i;
4099 4202
4100 for_each_queue(bp, i) 4203 for_each_eth_queue(bp, i)
4101 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id, 4204 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4102 bp->rx_ticks, bp->tx_ticks); 4205 bp->rx_ticks, bp->tx_ticks);
4103} 4206}
@@ -4145,13 +4248,16 @@ static void bnx2x_init_ind_table(struct bnx2x *bp)
4145 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++) 4248 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4146 REG_WR8(bp, BAR_TSTRORM_INTMEM + 4249 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4147 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i, 4250 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4148 bp->fp->cl_id + (i % bp->num_queues)); 4251 bp->fp->cl_id + (i % (bp->num_queues -
4252 NONE_ETH_CONTEXT_USE)));
4149} 4253}
4150 4254
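As a quick illustration of the new modulo above — the counts here are assumed for the example (four ETH queues plus one non-ETH context), not taken from this patch — the indirection table now cycles over ETH client ids only:

#include <stdio.h>

int main(void)
{
	/* assumed example values: 4 ETH queues + 1 FCoE/non-ETH context */
	const int num_queues = 5, none_eth_context_use = 1, base_cl_id = 0;
	int i;

	for (i = 0; i < 8; i++)	/* first few indirection-table entries */
		printf("entry %d -> client %d\n", i,
		       base_cl_id + (i % (num_queues - none_eth_context_use)));
	return 0;	/* clients cycle 0,1,2,3,0,1,2,3 - FCoE never targeted */
}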
4151void bnx2x_set_storm_rx_mode(struct bnx2x *bp) 4255void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4152{ 4256{
4153 int mode = bp->rx_mode; 4257 int mode = bp->rx_mode;
4258 int port = BP_PORT(bp);
4154 u16 cl_id; 4259 u16 cl_id;
4260 u32 def_q_filters = 0;
4155 4261
4156 /* All but management unicast packets should pass to the host as well */ 4262 /* All but management unicast packets should pass to the host as well */
4157 u32 llh_mask = 4263 u32 llh_mask =
@@ -4162,30 +4268,42 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4162 4268
4163 switch (mode) { 4269 switch (mode) {
4164 case BNX2X_RX_MODE_NONE: /* no Rx */ 4270 case BNX2X_RX_MODE_NONE: /* no Rx */
4165 cl_id = BP_L_ID(bp); 4271 def_q_filters = BNX2X_ACCEPT_NONE;
4166 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE); 4272#ifdef BCM_CNIC
4273 if (!NO_FCOE(bp)) {
4274 cl_id = bnx2x_fcoe(bp, cl_id);
4275 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
4276 }
4277#endif
4167 break; 4278 break;
4168 4279
4169 case BNX2X_RX_MODE_NORMAL: 4280 case BNX2X_RX_MODE_NORMAL:
4170 cl_id = BP_L_ID(bp); 4281 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4171 bnx2x_rxq_set_mac_filters(bp, cl_id, 4282 BNX2X_ACCEPT_MULTICAST;
4172 BNX2X_ACCEPT_UNICAST | 4283#ifdef BCM_CNIC
4173 BNX2X_ACCEPT_BROADCAST | 4284 cl_id = bnx2x_fcoe(bp, cl_id);
4174 BNX2X_ACCEPT_MULTICAST); 4285 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4286 BNX2X_ACCEPT_MULTICAST);
4287#endif
4175 break; 4288 break;
4176 4289
4177 case BNX2X_RX_MODE_ALLMULTI: 4290 case BNX2X_RX_MODE_ALLMULTI:
4178 cl_id = BP_L_ID(bp); 4291 def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
4179 bnx2x_rxq_set_mac_filters(bp, cl_id, 4292 BNX2X_ACCEPT_ALL_MULTICAST;
4180 BNX2X_ACCEPT_UNICAST | 4293#ifdef BCM_CNIC
4181 BNX2X_ACCEPT_BROADCAST | 4294 cl_id = bnx2x_fcoe(bp, cl_id);
4182 BNX2X_ACCEPT_ALL_MULTICAST); 4295 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4296 BNX2X_ACCEPT_MULTICAST);
4297#endif
4183 break; 4298 break;
4184 4299
4185 case BNX2X_RX_MODE_PROMISC: 4300 case BNX2X_RX_MODE_PROMISC:
4186 cl_id = BP_L_ID(bp); 4301 def_q_filters |= BNX2X_PROMISCUOUS_MODE;
4187 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE); 4302#ifdef BCM_CNIC
4188 4303 cl_id = bnx2x_fcoe(bp, cl_id);
4304 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
4305 BNX2X_ACCEPT_MULTICAST);
4306#endif
4189 /* pass management unicast packets as well */ 4307 /* pass management unicast packets as well */
4190 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST; 4308 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4191 break; 4309 break;
@@ -4195,20 +4313,24 @@ void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4195 break; 4313 break;
4196 } 4314 }
4197 4315
4316 cl_id = BP_L_ID(bp);
4317 bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);
4318
4198 REG_WR(bp, 4319 REG_WR(bp,
4199 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK : 4320 (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
4200 NIG_REG_LLH0_BRB1_DRV_MASK, 4321 NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);
4201 llh_mask);
4202 4322
4203 DP(NETIF_MSG_IFUP, "rx mode %d\n" 4323 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4204 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n" 4324 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4205 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode, 4325 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
4326 "unmatched_ucast 0x%x\n", mode,
4206 bp->mac_filters.ucast_drop_all, 4327 bp->mac_filters.ucast_drop_all,
4207 bp->mac_filters.mcast_drop_all, 4328 bp->mac_filters.mcast_drop_all,
4208 bp->mac_filters.bcast_drop_all, 4329 bp->mac_filters.bcast_drop_all,
4209 bp->mac_filters.ucast_accept_all, 4330 bp->mac_filters.ucast_accept_all,
4210 bp->mac_filters.mcast_accept_all, 4331 bp->mac_filters.mcast_accept_all,
4211 bp->mac_filters.bcast_accept_all 4332 bp->mac_filters.bcast_accept_all,
4333 bp->mac_filters.unmatched_unicast
4212 ); 4334 );
4213 4335
4214 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp)); 4336 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
@@ -4232,6 +4354,15 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
4232 bp->mf_mode); 4354 bp->mf_mode);
4233 } 4355 }
4234 4356
4357 if (IS_MF_SI(bp))
4358 /*
4359 * In switch independent mode, the TSTORM needs to accept
4360 * packets that failed classification, since approximate match
4361 * mac addresses aren't written to NIG LLH
4362 */
4363 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4364 TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);
4365
4235 /* Zero this manually as its initialization is 4366 /* Zero this manually as its initialization is
4236 currently missing in the initTool */ 4367 currently missing in the initTool */
4237 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++) 4368 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
@@ -4247,6 +4378,7 @@ static void bnx2x_init_internal_common(struct bnx2x *bp)
4247static void bnx2x_init_internal_port(struct bnx2x *bp) 4378static void bnx2x_init_internal_port(struct bnx2x *bp)
4248{ 4379{
4249 /* port */ 4380 /* port */
4381 bnx2x_dcb_init_intmem_pfc(bp);
4250} 4382}
4251 4383
4252static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code) 4384static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
@@ -4308,9 +4440,11 @@ void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
4308{ 4440{
4309 int i; 4441 int i;
4310 4442
4311 for_each_queue(bp, i) 4443 for_each_eth_queue(bp, i)
4312 bnx2x_init_fp_sb(bp, i); 4444 bnx2x_init_fp_sb(bp, i);
4313#ifdef BCM_CNIC 4445#ifdef BCM_CNIC
4446 if (!NO_FCOE(bp))
4447 bnx2x_init_fcoe_fp(bp);
4314 4448
4315 bnx2x_init_sb(bp, bp->cnic_sb_mapping, 4449 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4316 BNX2X_VF_ID_INVALID, false, 4450 BNX2X_VF_ID_INVALID, false,
@@ -4619,7 +4753,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
4619 return 0; /* OK */ 4753 return 0; /* OK */
4620} 4754}
4621 4755
4622static void enable_blocks_attention(struct bnx2x *bp) 4756static void bnx2x_enable_blocks_attention(struct bnx2x *bp)
4623{ 4757{
4624 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0); 4758 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
4625 if (CHIP_IS_E2(bp)) 4759 if (CHIP_IS_E2(bp))
@@ -4673,53 +4807,9 @@ static void enable_blocks_attention(struct bnx2x *bp)
4673 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0); 4807 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4674 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0); 4808 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
4675/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */ 4809/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4676 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */ 4810 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bit 3,4 masked */
4677}
4678
4679static const struct {
4680 u32 addr;
4681 u32 mask;
4682} bnx2x_parity_mask[] = {
4683 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4684 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4685 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4686 {HC_REG_HC_PRTY_MASK, 0x7},
4687 {MISC_REG_MISC_PRTY_MASK, 0x1},
4688 {QM_REG_QM_PRTY_MASK, 0x0},
4689 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
4690 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4691 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
4692 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4693 {CDU_REG_CDU_PRTY_MASK, 0x0},
4694 {CFC_REG_CFC_PRTY_MASK, 0x0},
4695 {DBG_REG_DBG_PRTY_MASK, 0x0},
4696 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4697 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4698 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4699 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4700 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4701 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4702 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4703 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4704 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4705 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4706 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4707 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4708 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4709 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4710 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
4711};
4712
4713static void enable_blocks_parity(struct bnx2x *bp)
4714{
4715 int i;
4716
4717 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
4718 REG_WR(bp, bnx2x_parity_mask[i].addr,
4719 bnx2x_parity_mask[i].mask);
4720} 4811}
4721 4812
4722
4723static void bnx2x_reset_common(struct bnx2x *bp) 4813static void bnx2x_reset_common(struct bnx2x *bp)
4724{ 4814{
4725 /* reset_common */ 4815 /* reset_common */
@@ -5048,12 +5138,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5048 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5138 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5049#endif 5139#endif
5050 if (!CHIP_IS_E1(bp)) 5140 if (!CHIP_IS_E1(bp))
5051 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp)); 5141 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
5052 5142
5053 if (CHIP_IS_E2(bp)) { 5143 if (CHIP_IS_E2(bp)) {
5054 /* Bit-map indicating which L2 hdrs may appear after the 5144 /* Bit-map indicating which L2 hdrs may appear after the
5055 basic Ethernet header */ 5145 basic Ethernet header */
5056 int has_ovlan = IS_MF(bp); 5146 int has_ovlan = IS_MF_SD(bp);
5057 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); 5147 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5058 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); 5148 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5059 } 5149 }
@@ -5087,7 +5177,7 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5087 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE); 5177 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5088 5178
5089 if (CHIP_IS_E2(bp)) { 5179 if (CHIP_IS_E2(bp)) {
5090 int has_ovlan = IS_MF(bp); 5180 int has_ovlan = IS_MF_SD(bp);
5091 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6)); 5181 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5092 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0)); 5182 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5093 } 5183 }
@@ -5164,12 +5254,12 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5164 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE); 5254 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5165 if (!CHIP_IS_E1(bp)) { 5255 if (!CHIP_IS_E1(bp)) {
5166 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp)); 5256 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5167 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp)); 5257 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
5168 } 5258 }
5169 if (CHIP_IS_E2(bp)) { 5259 if (CHIP_IS_E2(bp)) {
5170 /* Bit-map indicating which L2 hdrs may appear after the 5260 /* Bit-map indicating which L2 hdrs may appear after the
5171 basic Ethernet header */ 5261 basic Ethernet header */
5172 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6)); 5262 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
5173 } 5263 }
5174 5264
5175 if (CHIP_REV_IS_SLOW(bp)) 5265 if (CHIP_REV_IS_SLOW(bp))
@@ -5215,9 +5305,9 @@ static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
5215 /* clear PXP2 attentions */ 5305 /* clear PXP2 attentions */
5216 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0); 5306 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5217 5307
5218 enable_blocks_attention(bp); 5308 bnx2x_enable_blocks_attention(bp);
5219 if (CHIP_PARITY_SUPPORTED(bp)) 5309 if (CHIP_PARITY_ENABLED(bp))
5220 enable_blocks_parity(bp); 5310 bnx2x_enable_blocks_parity(bp);
5221 5311
5222 if (!BP_NOMCP(bp)) { 5312 if (!BP_NOMCP(bp)) {
5223 /* In E2 2-PORT mode, same ext phy is used for the two paths */ 5313 /* In E2 2-PORT mode, same ext phy is used for the two paths */
@@ -5370,8 +5460,10 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
5370 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use 5460 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5371 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF 5461 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5372 * bits 4-7 are used for "per vn group attention" */ 5462 * bits 4-7 are used for "per vn group attention" */
5373 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 5463 val = IS_MF(bp) ? 0xF7 : 0x7;
5374 (IS_MF(bp) ? 0xF7 : 0x7)); 5464 /* Enable DCBX attention for all but E1 */
5465 val |= CHIP_IS_E1(bp) ? 0 : 0x10;
5466 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, val);
5375 5467
5376 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage); 5468 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
5377 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage); 5469 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
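For reference, the attention-mask values the two lines above now produce (plain arithmetic on the constants shown; note that in MF mode bit 4 is already part of 0xF7, so the added DCBX bit only changes the SF case):

#include <stdio.h>

int main(void)
{
	int is_mf, is_e1;

	for (is_mf = 0; is_mf <= 1; is_mf++)
		for (is_e1 = 0; is_e1 <= 1; is_e1++) {
			unsigned int val = is_mf ? 0xF7 : 0x7;

			val |= is_e1 ? 0 : 0x10;	/* DCBX attention */
			printf("MF=%d E1=%d -> AEU attn mask 0x%02x\n",
			       is_mf, is_e1, val);
		}
	return 0;	/* 0x07/0x17 in SF, 0xf7 in MF regardless of chip */
}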
@@ -5386,7 +5478,7 @@ static int bnx2x_init_hw_port(struct bnx2x *bp)
5386 if (!CHIP_IS_E1(bp)) { 5478 if (!CHIP_IS_E1(bp)) {
5387 /* 0x2 disable mf_ov, 0x1 enable */ 5479 /* 0x2 disable mf_ov, 0x1 enable */
5388 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4, 5480 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5389 (IS_MF(bp) ? 0x1 : 0x2)); 5481 (IS_MF_SD(bp) ? 0x1 : 0x2));
5390 5482
5391 if (CHIP_IS_E2(bp)) { 5483 if (CHIP_IS_E2(bp)) {
5392 val = 0; 5484 val = 0;
@@ -5816,6 +5908,15 @@ void bnx2x_free_mem(struct bnx2x *bp)
5816 /* fastpath */ 5908 /* fastpath */
5817 /* Common */ 5909 /* Common */
5818 for_each_queue(bp, i) { 5910 for_each_queue(bp, i) {
5911#ifdef BCM_CNIC
5912 /* FCoE client uses default status block */
5913 if (IS_FCOE_IDX(i)) {
5914 union host_hc_status_block *sb =
5915 &bnx2x_fp(bp, i, status_blk);
5916 memset(sb, 0, sizeof(union host_hc_status_block));
5917 bnx2x_fp(bp, i, status_blk_mapping) = 0;
5918 } else {
5919#endif
5819 /* status blocks */ 5920 /* status blocks */
5820 if (CHIP_IS_E2(bp)) 5921 if (CHIP_IS_E2(bp))
5821 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb), 5922 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
@@ -5825,9 +5926,12 @@ void bnx2x_free_mem(struct bnx2x *bp)
5825 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb), 5926 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5826 bnx2x_fp(bp, i, status_blk_mapping), 5927 bnx2x_fp(bp, i, status_blk_mapping),
5827 sizeof(struct host_hc_status_block_e1x)); 5928 sizeof(struct host_hc_status_block_e1x));
5929#ifdef BCM_CNIC
5930 }
5931#endif
5828 } 5932 }
5829 /* Rx */ 5933 /* Rx */
5830 for_each_queue(bp, i) { 5934 for_each_rx_queue(bp, i) {
5831 5935
5832 /* fastpath rx rings: rx_buf rx_desc rx_comp */ 5936 /* fastpath rx rings: rx_buf rx_desc rx_comp */
5833 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring)); 5937 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
@@ -5847,7 +5951,7 @@ void bnx2x_free_mem(struct bnx2x *bp)
5847 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES); 5951 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5848 } 5952 }
5849 /* Tx */ 5953 /* Tx */
5850 for_each_queue(bp, i) { 5954 for_each_tx_queue(bp, i) {
5851 5955
5852 /* fastpath tx rings: tx_buf tx_desc */ 5956 /* fastpath tx rings: tx_buf tx_desc */
5853 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring)); 5957 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
@@ -5931,15 +6035,20 @@ int bnx2x_alloc_mem(struct bnx2x *bp)
5931 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk); 6035 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
5932 bnx2x_fp(bp, i, bp) = bp; 6036 bnx2x_fp(bp, i, bp) = bp;
5933 /* status blocks */ 6037 /* status blocks */
5934 if (CHIP_IS_E2(bp)) 6038#ifdef BCM_CNIC
5935 BNX2X_PCI_ALLOC(sb->e2_sb, 6039 if (!IS_FCOE_IDX(i)) {
5936 &bnx2x_fp(bp, i, status_blk_mapping), 6040#endif
5937 sizeof(struct host_hc_status_block_e2)); 6041 if (CHIP_IS_E2(bp))
5938 else 6042 BNX2X_PCI_ALLOC(sb->e2_sb,
5939 BNX2X_PCI_ALLOC(sb->e1x_sb, 6043 &bnx2x_fp(bp, i, status_blk_mapping),
5940 &bnx2x_fp(bp, i, status_blk_mapping), 6044 sizeof(struct host_hc_status_block_e2));
5941 sizeof(struct host_hc_status_block_e1x)); 6045 else
5942 6046 BNX2X_PCI_ALLOC(sb->e1x_sb,
6047 &bnx2x_fp(bp, i, status_blk_mapping),
6048 sizeof(struct host_hc_status_block_e1x));
6049#ifdef BCM_CNIC
6050 }
6051#endif
5943 set_sb_shortcuts(bp, i); 6052 set_sb_shortcuts(bp, i);
5944 } 6053 }
5945 /* Rx */ 6054 /* Rx */
@@ -6055,7 +6164,7 @@ static int bnx2x_func_stop(struct bnx2x *bp)
6055 * @param cam_offset offset in a CAM to use 6164 * @param cam_offset offset in a CAM to use
6056 * @param is_bcast is the set MAC a broadcast address (for E1 only) 6165 * @param is_bcast is the set MAC a broadcast address (for E1 only)
6057 */ 6166 */
6058static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac, 6167static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, const u8 *mac,
6059 u32 cl_bit_vec, u8 cam_offset, 6168 u32 cl_bit_vec, u8 cam_offset,
6060 u8 is_bcast) 6169 u8 is_bcast)
6061{ 6170{
@@ -6170,6 +6279,70 @@ static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
6170 return BP_VN(bp) * 32 + rel_offset; 6279 return BP_VN(bp) * 32 + rel_offset;
6171} 6280}
6172 6281
6282/**
6283 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6284 * relevant. In addition, current implementation is tuned for a
6285 * single ETH MAC.
6286 *
6287 * When a PF configuration with multiple unicast ETH MACs in
6288 * switch independent mode is required (NetQ, multiple netdev
6289 * MACs, etc.), consider better utilisation of the 16 per-function
6290 * MAC entries in the LLH memory.
6291 */
6292enum {
6293 LLH_CAM_ISCSI_ETH_LINE = 0,
6294 LLH_CAM_ETH_LINE,
6295 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6296};
6297
6298static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6299 int set,
6300 unsigned char *dev_addr,
6301 int index)
6302{
6303 u32 wb_data[2];
6304 u32 mem_offset, ena_offset, mem_index;
6305 /**
6306 * indexes mapping:
6307 * 0..7 - goes to MEM
6308 * 8..15 - goes to MEM2
6309 */
6310
6311 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6312 return;
6313
6314 /* calculate memory start offset according to the mapping
6315 * and index in the memory */
6316 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6317 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6318 NIG_REG_LLH0_FUNC_MEM;
6319 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6320 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6321 mem_index = index;
6322 } else {
6323 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6324 NIG_REG_P0_LLH_FUNC_MEM2;
6325 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6326 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6327 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6328 }
6329
6330 if (set) {
6331 /* LLH_FUNC_MEM is a u64 WB register */
6332 mem_offset += 8*mem_index;
6333
6334 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6335 (dev_addr[4] << 8) | dev_addr[5]);
6336 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6337
6338 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6339 }
6340
6341 /* enable/disable the entry */
6342 REG_WR(bp, ena_offset + 4*mem_index, set);
6343
6344}
6345
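A stand-alone sketch of the write-back word packing above, using a made-up MAC of 00:11:22:33:44:55; only the packing and offset arithmetic are reproduced here, the DMAE register write itself is driver-specific and omitted:

#include <stdio.h>

int main(void)
{
	const unsigned char dev_addr[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	const int mem_index = 1;	/* e.g. LLH_CAM_ETH_LINE */
	unsigned int wb_data[2];

	wb_data[0] = (dev_addr[2] << 24) | (dev_addr[3] << 16) |
		     (dev_addr[4] << 8) | dev_addr[5];
	wb_data[1] = (dev_addr[0] << 8) | dev_addr[1];

	/* indexes below NIG_LLH_FUNC_MEM_MAX_OFFSET land in the first LLH
	 * memory at an 8-byte (u64 wide-bus) stride */
	printf("wb_data[0]=0x%08x wb_data[1]=0x%08x byte offset=%d\n",
	       wb_data[0], wb_data[1], 8 * mem_index);
	return 0;	/* prints 0x22334455 0x00000011 8 */
}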
6173void bnx2x_set_eth_mac(struct bnx2x *bp, int set) 6346void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6174{ 6347{
6175 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) : 6348 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
@@ -6179,9 +6352,13 @@ void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
6179 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr, 6352 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6180 (1 << bp->fp->cl_id), cam_offset , 0); 6353 (1 << bp->fp->cl_id), cam_offset , 0);
6181 6354
6355 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6356
6182 if (CHIP_IS_E1(bp)) { 6357 if (CHIP_IS_E1(bp)) {
6183 /* broadcast MAC */ 6358 /* broadcast MAC */
6184 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 6359 static const u8 bcast[ETH_ALEN] = {
6360 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
6361 };
6185 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1); 6362 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6186 } 6363 }
6187} 6364}
@@ -6283,12 +6460,59 @@ static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
6283{ 6460{
6284 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) : 6461 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6285 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE)); 6462 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6286 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID; 6463 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
6464 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
6287 u32 cl_bit_vec = (1 << iscsi_l2_cl_id); 6465 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
6288 6466
6289 /* Send a SET_MAC ramrod */ 6467 /* Send a SET_MAC ramrod */
6290 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec, 6468 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6291 cam_offset, 0); 6469 cam_offset, 0);
6470
6471 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
6472
6473 return 0;
6474}
6475
6476/**
6477 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
6478 * ETH MAC(s). This function will wait until the ramrod
6479 * completion returns.
6480 *
6481 * @param bp driver handle
6482 * @param set set or clear the CAM entry
6483 *
6484 * @return 0 if success, -ENODEV if ramrod doesn't return.
6485 */
6486int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
6487{
6488 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6489 /**
6490 * CAM allocation for E1H
6491 * eth unicasts: by func number
6492 * iscsi: by func number
6493 * fip unicast: by func number
6494 * fip multicast: by func number
6495 */
6496 bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
6497 cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);
6498
6499 return 0;
6500}
6501
6502int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
6503{
6504 u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
6505
6506 /**
6507 * CAM allocation for E1H
6508 * eth unicasts: by func number
6509 * iscsi: by func number
6510 * fip unicast: by func number
6511 * fip multicast: by func number
6512 */
6513 bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
6514 bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);
6515
6292 return 0; 6516 return 0;
6293} 6517}
6294#endif 6518#endif
@@ -6306,6 +6530,8 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6306 data->general.statistics_counter_id = params->rxq_params.stat_id; 6530 data->general.statistics_counter_id = params->rxq_params.stat_id;
6307 data->general.statistics_en_flg = 6531 data->general.statistics_en_flg =
6308 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0; 6532 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6533 data->general.is_fcoe_flg =
6534 (params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
6309 data->general.activate_flg = activate; 6535 data->general.activate_flg = activate;
6310 data->general.sp_client_id = params->rxq_params.spcl_id; 6536 data->general.sp_client_id = params->rxq_params.spcl_id;
6311 6537
@@ -6374,7 +6600,9 @@ static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6374 data->fc.safc_group_num = params->txq_params.cos; 6600 data->fc.safc_group_num = params->txq_params.cos;
6375 data->fc.safc_group_en_flg = 6601 data->fc.safc_group_en_flg =
6376 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0; 6602 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6377 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW; 6603 data->fc.traffic_type =
6604 (params->ramrod_params.flags & CLIENT_IS_FCOE) ?
6605 LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
6378} 6606}
6379 6607
6380static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid) 6608static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
@@ -6473,7 +6701,7 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6473 bnx2x_enable_msi(bp); 6701 bnx2x_enable_msi(bp);
6474 /* falling through... */ 6702 /* falling through... */
6475 case INT_MODE_INTx: 6703 case INT_MODE_INTx:
6476 bp->num_queues = 1; 6704 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6477 DP(NETIF_MSG_IFUP, "set number of queues to 1\n"); 6705 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
6478 break; 6706 break;
6479 default: 6707 default:
@@ -6496,8 +6724,8 @@ static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
6496 "enable MSI-X (%d), " 6724 "enable MSI-X (%d), "
6497 "set number of queues to %d\n", 6725 "set number of queues to %d\n",
6498 bp->num_queues, 6726 bp->num_queues,
6499 1); 6727 1 + NONE_ETH_CONTEXT_USE);
6500 bp->num_queues = 1; 6728 bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
6501 6729
6502 if (!(bp->flags & DISABLE_MSI_FLAG)) 6730 if (!(bp->flags & DISABLE_MSI_FLAG))
6503 bnx2x_enable_msi(bp); 6731 bnx2x_enable_msi(bp);
@@ -6618,7 +6846,9 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6618 struct bnx2x_client_init_params params = { {0} }; 6846 struct bnx2x_client_init_params params = { {0} };
6619 int rc; 6847 int rc;
6620 6848
6621 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0, 6849 /* reset IGU state skip FCoE L2 queue */
6850 if (!IS_FCOE_FP(fp))
6851 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6622 IGU_INT_ENABLE, 0); 6852 IGU_INT_ENABLE, 0);
6623 6853
6624 params.ramrod_params.pstate = &fp->state; 6854 params.ramrod_params.pstate = &fp->state;
@@ -6626,6 +6856,12 @@ int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6626 params.ramrod_params.index = fp->index; 6856 params.ramrod_params.index = fp->index;
6627 params.ramrod_params.cid = fp->cid; 6857 params.ramrod_params.cid = fp->cid;
6628 6858
6859#ifdef BCM_CNIC
6860 if (IS_FCOE_FP(fp))
6861 params.ramrod_params.flags |= CLIENT_IS_FCOE;
6862
6863#endif
6864
6629 if (is_leading) 6865 if (is_leading)
6630 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS; 6866 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6631 6867
@@ -6710,7 +6946,7 @@ static void bnx2x_reset_func(struct bnx2x *bp)
6710 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0); 6946 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6711 6947
6712 /* FP SBs */ 6948 /* FP SBs */
6713 for_each_queue(bp, i) { 6949 for_each_eth_queue(bp, i) {
6714 struct bnx2x_fastpath *fp = &bp->fp[i]; 6950 struct bnx2x_fastpath *fp = &bp->fp[i];
6715 REG_WR8(bp, 6951 REG_WR8(bp,
6716 BAR_CSTRORM_INTMEM + 6952 BAR_CSTRORM_INTMEM +
@@ -6830,6 +7066,20 @@ static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6830 } 7066 }
6831} 7067}
6832 7068
7069#ifdef BCM_CNIC
7070static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7071{
7072 if (bp->flags & FCOE_MACS_SET) {
7073 if (!IS_MF_SD(bp))
7074 bnx2x_set_fip_eth_mac_addr(bp, 0);
7075
7076 bnx2x_set_all_enode_macs(bp, 0);
7077
7078 bp->flags &= ~FCOE_MACS_SET;
7079 }
7080}
7081#endif
7082
6833void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode) 7083void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6834{ 7084{
6835 int port = BP_PORT(bp); 7085 int port = BP_PORT(bp);
@@ -6837,7 +7087,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6837 int i, cnt, rc; 7087 int i, cnt, rc;
6838 7088
6839 /* Wait until tx fastpath tasks complete */ 7089 /* Wait until tx fastpath tasks complete */
6840 for_each_queue(bp, i) { 7090 for_each_tx_queue(bp, i) {
6841 struct bnx2x_fastpath *fp = &bp->fp[i]; 7091 struct bnx2x_fastpath *fp = &bp->fp[i];
6842 7092
6843 cnt = 1000; 7093 cnt = 1000;
@@ -6877,13 +7127,7 @@ void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
6877 } 7127 }
6878 7128
6879#ifdef BCM_CNIC 7129#ifdef BCM_CNIC
6880 /* Clear iSCSI L2 MAC */ 7130 bnx2x_del_fcoe_eth_macs(bp);
6881 mutex_lock(&bp->cnic_mutex);
6882 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6883 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6884 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6885 }
6886 mutex_unlock(&bp->cnic_mutex);
6887#endif 7131#endif
6888 7132
6889 if (unload_mode == UNLOAD_NORMAL) 7133 if (unload_mode == UNLOAD_NORMAL)
@@ -7736,7 +7980,7 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7736 bp->igu_sb_cnt = 0; 7980 bp->igu_sb_cnt = 0;
7737 if (CHIP_INT_MODE_IS_BC(bp)) { 7981 if (CHIP_INT_MODE_IS_BC(bp)) {
7738 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, 7982 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7739 bp->l2_cid_count); 7983 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
7740 7984
7741 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) * 7985 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7742 FP_SB_MAX_E1x; 7986 FP_SB_MAX_E1x;
@@ -7767,7 +8011,8 @@ static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7767 } 8011 }
7768 } 8012 }
7769 } 8013 }
7770 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count); 8014 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8015 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
7771 if (bp->igu_sb_cnt == 0) 8016 if (bp->igu_sb_cnt == 0)
7772 BNX2X_ERR("CAM configuration error\n"); 8017 BNX2X_ERR("CAM configuration error\n");
7773} 8018}
@@ -8076,9 +8321,8 @@ static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8076static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 8321static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8077{ 8322{
8078 int port = BP_PORT(bp); 8323 int port = BP_PORT(bp);
8079 u32 val, val2;
8080 u32 config; 8324 u32 config;
8081 u32 ext_phy_type, ext_phy_config;; 8325 u32 ext_phy_type, ext_phy_config;
8082 8326
8083 bp->link_params.bp = bp; 8327 bp->link_params.bp = bp;
8084 bp->link_params.port = port; 8328 bp->link_params.port = port;
@@ -8135,25 +8379,73 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8135 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN)) 8379 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8136 bp->mdio.prtad = 8380 bp->mdio.prtad =
8137 XGXS_EXT_PHY_ADDR(ext_phy_config); 8381 XGXS_EXT_PHY_ADDR(ext_phy_config);
8382}
8383
8384static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
8385{
8386 u32 val, val2;
8387 int func = BP_ABS_FUNC(bp);
8388 int port = BP_PORT(bp);
8389
8390 if (BP_NOMCP(bp)) {
8391 BNX2X_ERROR("warning: random MAC workaround active\n");
8392 random_ether_addr(bp->dev->dev_addr);
8393 } else if (IS_MF(bp)) {
8394 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8395 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8396 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8397 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
8398 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8399
8400#ifdef BCM_CNIC
8401 /* iSCSI NPAR MAC */
8402 if (IS_MF_SI(bp)) {
8403 u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
8404 if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
8405 val2 = MF_CFG_RD(bp, func_ext_config[func].
8406 iscsi_mac_addr_upper);
8407 val = MF_CFG_RD(bp, func_ext_config[func].
8408 iscsi_mac_addr_lower);
8409 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8410 }
8411 }
8412#endif
8413 } else {
8414 /* in SF read MACs from port configuration */
8415 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8416 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8417 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8418
8419#ifdef BCM_CNIC
8420 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
8421 iscsi_mac_upper);
8422 val = SHMEM_RD(bp, dev_info.port_hw_config[port].
8423 iscsi_mac_lower);
8424 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8425#endif
8426 }
8138 8427
8139 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8140 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8141 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8142 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 8428 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8143 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8429 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8144 8430
8145#ifdef BCM_CNIC 8431#ifdef BCM_CNIC
8146 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper); 8432 /* Inform the upper layers about FCoE MAC */
8147 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower); 8433 if (!CHIP_IS_E1x(bp)) {
8148 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2); 8434 if (IS_MF_SD(bp))
8435 memcpy(bp->fip_mac, bp->dev->dev_addr,
8436 sizeof(bp->fip_mac));
8437 else
8438 memcpy(bp->fip_mac, bp->iscsi_mac,
8439 sizeof(bp->fip_mac));
8440 }
8149#endif 8441#endif
8150} 8442}
8151 8443
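The mac_upper/mac_lower words read here are unpacked by bnx2x_set_mac_buf(); a stand-alone equivalent is sketched below, with the byte order taken from the open-coded version removed later in this patch:

#include <stdio.h>

/* sketch of bnx2x_set_mac_buf(): mac_hi carries bytes 0-1,
 * mac_lo carries bytes 2-5, most significant byte first */
static void set_mac_buf(unsigned char *mac, unsigned int mac_lo,
			unsigned short mac_hi)
{
	mac[0] = (mac_hi >> 8) & 0xff;
	mac[1] = mac_hi & 0xff;
	mac[2] = (mac_lo >> 24) & 0xff;
	mac[3] = (mac_lo >> 16) & 0xff;
	mac[4] = (mac_lo >> 8) & 0xff;
	mac[5] = mac_lo & 0xff;
}

int main(void)
{
	unsigned char mac[6];

	set_mac_buf(mac, 0x22334455, 0x0011);	/* sample shmem values */
	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;	/* prints 00:11:22:33:44:55 */
}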
8152static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8444static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8153{ 8445{
8154 int func = BP_ABS_FUNC(bp); 8446 int /*abs*/func = BP_ABS_FUNC(bp);
8155 int vn; 8447 int vn, port;
8156 u32 val, val2; 8448 u32 val = 0;
8157 int rc = 0; 8449 int rc = 0;
8158 8450
8159 bnx2x_get_common_hwinfo(bp); 8451 bnx2x_get_common_hwinfo(bp);
@@ -8163,7 +8455,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8163 8455
8164 bp->igu_dsb_id = DEF_SB_IGU_ID; 8456 bp->igu_dsb_id = DEF_SB_IGU_ID;
8165 bp->igu_base_sb = 0; 8457 bp->igu_base_sb = 0;
8166 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count); 8458 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
8459 NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
8167 } else { 8460 } else {
8168 bp->common.int_block = INT_BLOCK_IGU; 8461 bp->common.int_block = INT_BLOCK_IGU;
8169 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION); 8462 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
@@ -8186,44 +8479,99 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8186 bp->mf_ov = 0; 8479 bp->mf_ov = 0;
8187 bp->mf_mode = 0; 8480 bp->mf_mode = 0;
8188 vn = BP_E1HVN(bp); 8481 vn = BP_E1HVN(bp);
8482 port = BP_PORT(bp);
8483
8189 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) { 8484 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8485 DP(NETIF_MSG_PROBE,
8486 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8487 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8488 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
8190 if (SHMEM2_HAS(bp, mf_cfg_addr)) 8489 if (SHMEM2_HAS(bp, mf_cfg_addr))
8191 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr); 8490 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8192 else 8491 else
8193 bp->common.mf_cfg_base = bp->common.shmem_base + 8492 bp->common.mf_cfg_base = bp->common.shmem_base +
8194 offsetof(struct shmem_region, func_mb) + 8493 offsetof(struct shmem_region, func_mb) +
8195 E1H_FUNC_MAX * sizeof(struct drv_func_mb); 8494 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
8196 bp->mf_config[vn] = 8495 /*
8197 MF_CFG_RD(bp, func_mf_config[func].config); 8496 * get mf configuration:
8497 * 1. existence of MF configuration
8498 * 2. MAC address must be legal (check only upper bytes)
8499 * for Switch-Independent mode;
8500 * OVLAN must be legal for Switch-Dependent mode
8501 * 3. SF_MODE configures specific MF mode
8502 */
8503 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8504 /* get mf configuration */
8505 val = SHMEM_RD(bp,
8506 dev_info.shared_feature_config.config);
8507 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
8508
8509 switch (val) {
8510 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8511 val = MF_CFG_RD(bp, func_mf_config[func].
8512 mac_upper);
8513 /* check for legal mac (upper bytes)*/
8514 if (val != 0xffff) {
8515 bp->mf_mode = MULTI_FUNCTION_SI;
8516 bp->mf_config[vn] = MF_CFG_RD(bp,
8517 func_mf_config[func].config);
8518 } else
8519 DP(NETIF_MSG_PROBE, "illegal MAC "
8520 "address for SI\n");
8521 break;
8522 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8523 /* get OV configuration */
8524 val = MF_CFG_RD(bp,
8525 func_mf_config[FUNC_0].e1hov_tag);
8526 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8527
8528 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8529 bp->mf_mode = MULTI_FUNCTION_SD;
8530 bp->mf_config[vn] = MF_CFG_RD(bp,
8531 func_mf_config[func].config);
8532 } else
8533 DP(NETIF_MSG_PROBE, "illegal OV for "
8534 "SD\n");
8535 break;
8536 default:
8537 /* Unknown configuration: reset mf_config */
8538 bp->mf_config[vn] = 0;
8539 DP(NETIF_MSG_PROBE, "Unknown MF mode 0x%x\n",
8540 val);
8541 }
8542 }
8198 8543
8199 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
8200 FUNC_MF_CFG_E1HOV_TAG_MASK);
8201 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
8202 bp->mf_mode = 1;
8203 BNX2X_DEV_INFO("%s function mode\n", 8544 BNX2X_DEV_INFO("%s function mode\n",
8204 IS_MF(bp) ? "multi" : "single"); 8545 IS_MF(bp) ? "multi" : "single");
8205 8546
8206 if (IS_MF(bp)) { 8547 switch (bp->mf_mode) {
8207 val = (MF_CFG_RD(bp, func_mf_config[func]. 8548 case MULTI_FUNCTION_SD:
8208 e1hov_tag) & 8549 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8209 FUNC_MF_CFG_E1HOV_TAG_MASK); 8550 FUNC_MF_CFG_E1HOV_TAG_MASK;
8210 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 8551 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8211 bp->mf_ov = val; 8552 bp->mf_ov = val;
8212 BNX2X_DEV_INFO("MF OV for func %d is %d " 8553 BNX2X_DEV_INFO("MF OV for func %d is %d"
8213 "(0x%04x)\n", 8554 " (0x%04x)\n", func,
8214 func, bp->mf_ov, bp->mf_ov); 8555 bp->mf_ov, bp->mf_ov);
8215 } else { 8556 } else {
8216 BNX2X_ERROR("No valid MF OV for func %d," 8557 BNX2X_ERR("No valid MF OV for func %d,"
8217 " aborting\n", func); 8558 " aborting\n", func);
8218 rc = -EPERM; 8559 rc = -EPERM;
8219 } 8560 }
8220 } else { 8561 break;
8221 if (BP_VN(bp)) { 8562 case MULTI_FUNCTION_SI:
8222 BNX2X_ERROR("VN %d in single function mode," 8563 BNX2X_DEV_INFO("func %d is in MF "
8223 " aborting\n", BP_E1HVN(bp)); 8564 "switch-independent mode\n", func);
8565 break;
8566 default:
8567 if (vn) {
8568 BNX2X_ERR("VN %d in single function mode,"
8569 " aborting\n", vn);
8224 rc = -EPERM; 8570 rc = -EPERM;
8225 } 8571 }
8572 break;
8226 } 8573 }
8574
8227 } 8575 }
8228 8576
8229 /* adjust igu_sb_cnt to MF for E1x */ 8577 /* adjust igu_sb_cnt to MF for E1x */
@@ -8248,32 +8596,8 @@ static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8248 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq); 8596 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8249 } 8597 }
8250 8598
8251 if (IS_MF(bp)) { 8599 /* Get MAC addresses */
8252 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper); 8600 bnx2x_get_mac_hwinfo(bp);
8253 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
8254 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8255 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8256 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8257 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8258 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8259 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8260 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8261 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8262 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8263 ETH_ALEN);
8264 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8265 ETH_ALEN);
8266 }
8267
8268 return rc;
8269 }
8270
8271 if (BP_NOMCP(bp)) {
8272 /* only supposed to happen on emulation/FPGA */
8273 BNX2X_ERROR("warning: random MAC workaround active\n");
8274 random_ether_addr(bp->dev->dev_addr);
8275 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8276 }
8277 8601
8278 return rc; 8602 return rc;
8279} 8603}
@@ -8382,13 +8706,6 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8382 dev_err(&bp->pdev->dev, "MCP disabled, " 8706 dev_err(&bp->pdev->dev, "MCP disabled, "
8383 "must load devices in order!\n"); 8707 "must load devices in order!\n");
8384 8708
8385 /* Set multi queue mode */
8386 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8387 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8388 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8389 "requested is not MSI-X\n");
8390 multi_mode = ETH_RSS_MODE_DISABLED;
8391 }
8392 bp->multi_mode = multi_mode; 8709 bp->multi_mode = multi_mode;
8393 bp->int_mode = int_mode; 8710 bp->int_mode = int_mode;
8394 8711
@@ -8427,6 +8744,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8427 bp->timer.data = (unsigned long) bp; 8744 bp->timer.data = (unsigned long) bp;
8428 bp->timer.function = bnx2x_timer; 8745 bp->timer.function = bnx2x_timer;
8429 8746
8747 bnx2x_dcbx_set_state(bp, true, BNX2X_DCBX_ENABLED_ON_NEG_ON);
8748 bnx2x_dcbx_init_params(bp);
8749
8430 return rc; 8750 return rc;
8431} 8751}
8432 8752
@@ -8629,6 +8949,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
8629 .ndo_open = bnx2x_open, 8949 .ndo_open = bnx2x_open,
8630 .ndo_stop = bnx2x_close, 8950 .ndo_stop = bnx2x_close,
8631 .ndo_start_xmit = bnx2x_start_xmit, 8951 .ndo_start_xmit = bnx2x_start_xmit,
8952 .ndo_select_queue = bnx2x_select_queue,
8632 .ndo_set_multicast_list = bnx2x_set_rx_mode, 8953 .ndo_set_multicast_list = bnx2x_set_rx_mode,
8633 .ndo_set_mac_address = bnx2x_change_mac_addr, 8954 .ndo_set_mac_address = bnx2x_change_mac_addr,
8634 .ndo_validate_addr = eth_validate_addr, 8955 .ndo_validate_addr = eth_validate_addr,
@@ -8761,7 +9082,7 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8761 dev->netdev_ops = &bnx2x_netdev_ops; 9082 dev->netdev_ops = &bnx2x_netdev_ops;
8762 bnx2x_set_ethtool_ops(dev); 9083 bnx2x_set_ethtool_ops(dev);
8763 dev->features |= NETIF_F_SG; 9084 dev->features |= NETIF_F_SG;
8764 dev->features |= NETIF_F_HW_CSUM; 9085 dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
8765 if (bp->flags & USING_DAC_FLAG) 9086 if (bp->flags & USING_DAC_FLAG)
8766 dev->features |= NETIF_F_HIGHDMA; 9087 dev->features |= NETIF_F_HIGHDMA;
8767 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 9088 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
@@ -8769,12 +9090,16 @@ static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8769 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX); 9090 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
8770 9091
8771 dev->vlan_features |= NETIF_F_SG; 9092 dev->vlan_features |= NETIF_F_SG;
8772 dev->vlan_features |= NETIF_F_HW_CSUM; 9093 dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
8773 if (bp->flags & USING_DAC_FLAG) 9094 if (bp->flags & USING_DAC_FLAG)
8774 dev->vlan_features |= NETIF_F_HIGHDMA; 9095 dev->vlan_features |= NETIF_F_HIGHDMA;
8775 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN); 9096 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8776 dev->vlan_features |= NETIF_F_TSO6; 9097 dev->vlan_features |= NETIF_F_TSO6;
8777 9098
9099#ifdef BCM_DCB
9100 dev->dcbnl_ops = &bnx2x_dcbnl_ops;
9101#endif
9102
8778 /* get_port_hwinfo() will set prtad and mmds properly */ 9103 /* get_port_hwinfo() will set prtad and mmds properly */
8779 bp->mdio.prtad = MDIO_PRTAD_NONE; 9104 bp->mdio.prtad = MDIO_PRTAD_NONE;
8780 bp->mdio.mmds = 0; 9105 bp->mdio.mmds = 0;
@@ -9067,7 +9392,7 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9067 return -ENODEV; 9392 return -ENODEV;
9068 } 9393 }
9069 9394
9070 cid_count += CNIC_CONTEXT_USE; 9395 cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;
9071 9396
9072 /* dev zeroed in init_etherdev */ 9397 /* dev zeroed in init_etherdev */
9073 dev = alloc_etherdev_mq(sizeof(*bp), cid_count); 9398 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
@@ -9096,11 +9421,12 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9096 /* calc qm_cid_count */ 9421 /* calc qm_cid_count */
9097 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count); 9422 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9098 9423
9099 rc = register_netdev(dev); 9424#ifdef BCM_CNIC
9100 if (rc) { 9425 /* disable FCOE L2 queue for E1x*/
9101 dev_err(&pdev->dev, "Cannot register net device\n"); 9426 if (CHIP_IS_E1x(bp))
9102 goto init_one_exit; 9427 bp->flags |= NO_FCOE_FLAG;
9103 } 9428
9429#endif
9104 9430
9105 /* Configure interrupt mode: try to enable MSI-X/MSI if 9431
9106 * needed, set bp->num_queues appropriately. 9432 * needed, set bp->num_queues appropriately.
@@ -9110,6 +9436,21 @@ static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9110 /* Add all NAPI objects */ 9436 /* Add all NAPI objects */
9111 bnx2x_add_all_napi(bp); 9437 bnx2x_add_all_napi(bp);
9112 9438
9439 rc = register_netdev(dev);
9440 if (rc) {
9441 dev_err(&pdev->dev, "Cannot register net device\n");
9442 goto init_one_exit;
9443 }
9444
9445#ifdef BCM_CNIC
9446 if (!NO_FCOE(bp)) {
9447 /* Add storage MAC address */
9448 rtnl_lock();
9449 dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9450 rtnl_unlock();
9451 }
9452#endif
9453
9113 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed); 9454 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
9114 9455
9115 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx," 9456 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
@@ -9153,14 +9494,29 @@ static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9153 } 9494 }
9154 bp = netdev_priv(dev); 9495 bp = netdev_priv(dev);
9155 9496
9497#ifdef BCM_CNIC
9498 /* Delete storage MAC address */
9499 if (!NO_FCOE(bp)) {
9500 rtnl_lock();
9501 dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
9502 rtnl_unlock();
9503 }
9504#endif
9505
9156 unregister_netdev(dev); 9506 unregister_netdev(dev);
9157 9507
9158 /* Delete all NAPI objects */ 9508 /* Delete all NAPI objects */
9159 bnx2x_del_all_napi(bp); 9509 bnx2x_del_all_napi(bp);
9160 9510
9511 /* Power on: we can't let PCI layer write to us while we are in D3 */
9512 bnx2x_set_power_state(bp, PCI_D0);
9513
9161 /* Disable MSI/MSI-X */ 9514 /* Disable MSI/MSI-X */
9162 bnx2x_disable_msi(bp); 9515 bnx2x_disable_msi(bp);
9163 9516
9517 /* Power off */
9518 bnx2x_set_power_state(bp, PCI_D3hot);
9519
9164 /* Make sure RESET task is not scheduled before continuing */ 9520 /* Make sure RESET task is not scheduled before continuing */
9165 cancel_delayed_work_sync(&bp->reset_task); 9521 cancel_delayed_work_sync(&bp->reset_task);
9166 9522
@@ -9202,7 +9558,7 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9202 /* Free SKBs, SGEs, TPA pool and driver internals */ 9558 /* Free SKBs, SGEs, TPA pool and driver internals */
9203 bnx2x_free_skbs(bp); 9559 bnx2x_free_skbs(bp);
9204 9560
9205 for_each_queue(bp, i) 9561 for_each_rx_queue(bp, i)
9206 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE); 9562 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
9207 9563
9208 bnx2x_free_mem(bp); 9564 bnx2x_free_mem(bp);
@@ -9429,7 +9785,8 @@ static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
9429 break; 9785 break;
9430 else 9786 else
9431 atomic_dec(&bp->spq_left); 9787 atomic_dec(&bp->spq_left);
9432 } else if (type == ISCSI_CONNECTION_TYPE) { 9788 } else if ((type == ISCSI_CONNECTION_TYPE) ||
9789 (type == FCOE_CONNECTION_TYPE)) {
9433 if (bp->cnic_spq_pending >= 9790 if (bp->cnic_spq_pending >=
9434 bp->cnic_eth_dev.max_kwqe_pending) 9791 bp->cnic_eth_dev.max_kwqe_pending)
9435 break; 9792 break;
@@ -9576,6 +9933,9 @@ static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
9576 case DRV_CTL_START_L2_CMD: { 9933 case DRV_CTL_START_L2_CMD: {
9577 u32 cli = ctl->data.ring.client_id; 9934 u32 cli = ctl->data.ring.client_id;
9578 9935
9936 /* Clear FCoE FIP and ALL ENODE MACs addresses first */
9937 bnx2x_del_fcoe_eth_macs(bp);
9938
9579 /* Set iSCSI MAC address */ 9939 /* Set iSCSI MAC address */
9580 bnx2x_set_iscsi_eth_mac_addr(bp, 1); 9940 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
9581 9941
@@ -9697,10 +10057,6 @@ static int bnx2x_unregister_cnic(struct net_device *dev)
9697 struct cnic_eth_dev *cp = &bp->cnic_eth_dev; 10057 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9698 10058
9699 mutex_lock(&bp->cnic_mutex); 10059 mutex_lock(&bp->cnic_mutex);
9700 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
9701 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
9702 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
9703 }
9704 cp->drv_state = 0; 10060 cp->drv_state = 0;
9705 rcu_assign_pointer(bp->cnic_ops, NULL); 10061 rcu_assign_pointer(bp->cnic_ops, NULL);
9706 mutex_unlock(&bp->cnic_mutex); 10062 mutex_unlock(&bp->cnic_mutex);
@@ -9731,7 +10087,9 @@ struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9731 cp->drv_ctl = bnx2x_drv_ctl; 10087 cp->drv_ctl = bnx2x_drv_ctl;
9732 cp->drv_register_cnic = bnx2x_register_cnic; 10088 cp->drv_register_cnic = bnx2x_register_cnic;
9733 cp->drv_unregister_cnic = bnx2x_unregister_cnic; 10089 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
9734 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID; 10090 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10091 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10092 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
9735 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID; 10093 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
9736 10094
9737 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, " 10095 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
diff --git a/drivers/net/bnx2x/bnx2x_reg.h b/drivers/net/bnx2x/bnx2x_reg.h
index 245220af9feb..c939683e3d61 100644
--- a/drivers/net/bnx2x/bnx2x_reg.h
+++ b/drivers/net/bnx2x/bnx2x_reg.h
@@ -18,6 +18,8 @@
18 * WR - Write Clear (write 1 to clear the bit) 18 * WR - Write Clear (write 1 to clear the bit)
19 * 19 *
20 */ 20 */
21#ifndef BNX2X_REG_H
22#define BNX2X_REG_H
21 23
22#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 24#define ATC_ATC_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
23#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2) 25#define ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS (0x1<<2)
@@ -39,6 +41,8 @@
39#define BRB1_REG_BRB1_PRTY_MASK 0x60138 41#define BRB1_REG_BRB1_PRTY_MASK 0x60138
40/* [R 4] Parity register #0 read */ 42/* [R 4] Parity register #0 read */
41#define BRB1_REG_BRB1_PRTY_STS 0x6012c 43#define BRB1_REG_BRB1_PRTY_STS 0x6012c
44/* [RC 4] Parity register #0 read clear */
45#define BRB1_REG_BRB1_PRTY_STS_CLR 0x60130
42/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At 46/* [RW 10] At address BRB1_IND_FREE_LIST_PRS_CRDT initialize free head. At
43 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address 47 * address BRB1_IND_FREE_LIST_PRS_CRDT+1 initialize free tail. At address
44 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning - 48 * BRB1_IND_FREE_LIST_PRS_CRDT+2 initialize parser initial credit. Warning -
@@ -132,8 +136,12 @@
132#define CCM_REG_CCM_INT_MASK 0xd01e4 136#define CCM_REG_CCM_INT_MASK 0xd01e4
133/* [R 11] Interrupt register #0 read */ 137/* [R 11] Interrupt register #0 read */
134#define CCM_REG_CCM_INT_STS 0xd01d8 138#define CCM_REG_CCM_INT_STS 0xd01d8
139/* [RW 27] Parity mask register #0 read/write */
140#define CCM_REG_CCM_PRTY_MASK 0xd01f4
135/* [R 27] Parity register #0 read */ 141/* [R 27] Parity register #0 read */
136#define CCM_REG_CCM_PRTY_STS 0xd01e8 142#define CCM_REG_CCM_PRTY_STS 0xd01e8
143/* [RC 27] Parity register #0 read clear */
144#define CCM_REG_CCM_PRTY_STS_CLR 0xd01ec
137/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 145/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
138 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 146 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
139 Is used to determine the number of the AG context REG-pairs written back; 147 Is used to determine the number of the AG context REG-pairs written back;
@@ -350,6 +358,8 @@
350#define CDU_REG_CDU_PRTY_MASK 0x10104c 358#define CDU_REG_CDU_PRTY_MASK 0x10104c
351/* [R 5] Parity register #0 read */ 359/* [R 5] Parity register #0 read */
352#define CDU_REG_CDU_PRTY_STS 0x101040 360#define CDU_REG_CDU_PRTY_STS 0x101040
361/* [RC 5] Parity register #0 read clear */
362#define CDU_REG_CDU_PRTY_STS_CLR 0x101044
353/* [RC 32] logging of error data in case of a CDU load error: 363/* [RC 32] logging of error data in case of a CDU load error:
354 {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error; 364 {expected_cid[15:0]; xpected_type[2:0]; xpected_region[2:0]; ctive_error;
355 ype_error; ctual_active; ctual_compressed_context}; */ 365 ype_error; ctual_active; ctual_compressed_context}; */
@@ -381,6 +391,8 @@
381#define CFC_REG_CFC_PRTY_MASK 0x104118 391#define CFC_REG_CFC_PRTY_MASK 0x104118
382/* [R 4] Parity register #0 read */ 392/* [R 4] Parity register #0 read */
383#define CFC_REG_CFC_PRTY_STS 0x10410c 393#define CFC_REG_CFC_PRTY_STS 0x10410c
394/* [RC 4] Parity register #0 read clear */
395#define CFC_REG_CFC_PRTY_STS_CLR 0x104110
384/* [RW 21] CID cam access (21:1 - Data; alid - 0) */ 396/* [RW 21] CID cam access (21:1 - Data; alid - 0) */
385#define CFC_REG_CID_CAM 0x104800 397#define CFC_REG_CID_CAM 0x104800
386#define CFC_REG_CONTROL0 0x104028 398#define CFC_REG_CONTROL0 0x104028
@@ -466,6 +478,8 @@
466#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc 478#define CSDM_REG_CSDM_PRTY_MASK 0xc22bc
467/* [R 11] Parity register #0 read */ 479/* [R 11] Parity register #0 read */
468#define CSDM_REG_CSDM_PRTY_STS 0xc22b0 480#define CSDM_REG_CSDM_PRTY_STS 0xc22b0
481/* [RC 11] Parity register #0 read clear */
482#define CSDM_REG_CSDM_PRTY_STS_CLR 0xc22b4
469#define CSDM_REG_ENABLE_IN1 0xc2238 483#define CSDM_REG_ENABLE_IN1 0xc2238
470#define CSDM_REG_ENABLE_IN2 0xc223c 484#define CSDM_REG_ENABLE_IN2 0xc223c
471#define CSDM_REG_ENABLE_OUT1 0xc2240 485#define CSDM_REG_ENABLE_OUT1 0xc2240
@@ -556,6 +570,9 @@
556/* [R 32] Parity register #0 read */ 570/* [R 32] Parity register #0 read */
557#define CSEM_REG_CSEM_PRTY_STS_0 0x200124 571#define CSEM_REG_CSEM_PRTY_STS_0 0x200124
558#define CSEM_REG_CSEM_PRTY_STS_1 0x200134 572#define CSEM_REG_CSEM_PRTY_STS_1 0x200134
573/* [RC 32] Parity register #0 read clear */
574#define CSEM_REG_CSEM_PRTY_STS_CLR_0 0x200128
575#define CSEM_REG_CSEM_PRTY_STS_CLR_1 0x200138
559#define CSEM_REG_ENABLE_IN 0x2000a4 576#define CSEM_REG_ENABLE_IN 0x2000a4
560#define CSEM_REG_ENABLE_OUT 0x2000a8 577#define CSEM_REG_ENABLE_OUT 0x2000a8
561/* [RW 32] This address space contains all registers and memories that are 578/* [RW 32] This address space contains all registers and memories that are
@@ -648,6 +665,8 @@
648#define DBG_REG_DBG_PRTY_MASK 0xc0a8 665#define DBG_REG_DBG_PRTY_MASK 0xc0a8
649/* [R 1] Parity register #0 read */ 666/* [R 1] Parity register #0 read */
650#define DBG_REG_DBG_PRTY_STS 0xc09c 667#define DBG_REG_DBG_PRTY_STS 0xc09c
668/* [RC 1] Parity register #0 read clear */
669#define DBG_REG_DBG_PRTY_STS_CLR 0xc0a0
651/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The 670/* [RW 1] When set the DMAE will process the commands as in E1.5. 1.The
652 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0; 671 * function that is used is always SRC-PCI; 2.VF_Valid = 0; 3.VFID=0;
653 * 4.Completion function=0; 5.Error handling=0 */ 672 * 4.Completion function=0; 5.Error handling=0 */
@@ -668,6 +687,8 @@
668#define DMAE_REG_DMAE_PRTY_MASK 0x102064 687#define DMAE_REG_DMAE_PRTY_MASK 0x102064
669/* [R 4] Parity register #0 read */ 688/* [R 4] Parity register #0 read */
670#define DMAE_REG_DMAE_PRTY_STS 0x102058 689#define DMAE_REG_DMAE_PRTY_STS 0x102058
690/* [RC 4] Parity register #0 read clear */
691#define DMAE_REG_DMAE_PRTY_STS_CLR 0x10205c
671/* [RW 1] Command 0 go. */ 692/* [RW 1] Command 0 go. */
672#define DMAE_REG_GO_C0 0x102080 693#define DMAE_REG_GO_C0 0x102080
673/* [RW 1] Command 1 go. */ 694/* [RW 1] Command 1 go. */
@@ -734,6 +755,8 @@
734#define DORQ_REG_DORQ_PRTY_MASK 0x170190 755#define DORQ_REG_DORQ_PRTY_MASK 0x170190
735/* [R 2] Parity register #0 read */ 756/* [R 2] Parity register #0 read */
736#define DORQ_REG_DORQ_PRTY_STS 0x170184 757#define DORQ_REG_DORQ_PRTY_STS 0x170184
758/* [RC 2] Parity register #0 read clear */
759#define DORQ_REG_DORQ_PRTY_STS_CLR 0x170188
737/* [RW 8] The address to write the DPM CID to STORM. */ 760/* [RW 8] The address to write the DPM CID to STORM. */
738#define DORQ_REG_DPM_CID_ADDR 0x170044 761#define DORQ_REG_DPM_CID_ADDR 0x170044
739/* [RW 5] The DPM mode CID extraction offset. */ 762/* [RW 5] The DPM mode CID extraction offset. */
@@ -842,8 +865,12 @@
842/* [R 1] data available for error memory. If this bit is clear do not read 865
843 * from error_handling_memory. */ 866 * from error_handling_memory. */
844#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130 867#define IGU_REG_ERROR_HANDLING_DATA_VALID 0x130130
868/* [RW 11] Parity mask register #0 read/write */
869#define IGU_REG_IGU_PRTY_MASK 0x1300a8
845/* [R 11] Parity register #0 read */ 870/* [R 11] Parity register #0 read */
846#define IGU_REG_IGU_PRTY_STS 0x13009c 871#define IGU_REG_IGU_PRTY_STS 0x13009c
872/* [RC 11] Parity register #0 read clear */
873#define IGU_REG_IGU_PRTY_STS_CLR 0x1300a0
847/* [R 4] Debug: int_handle_fsm */ 874/* [R 4] Debug: int_handle_fsm */
848#define IGU_REG_INT_HANDLE_FSM 0x130050 875#define IGU_REG_INT_HANDLE_FSM 0x130050
849#define IGU_REG_LEADING_EDGE_LATCH 0x130134 876#define IGU_REG_LEADING_EDGE_LATCH 0x130134
@@ -1501,6 +1528,8 @@
1501#define MISC_REG_MISC_PRTY_MASK 0xa398 1528#define MISC_REG_MISC_PRTY_MASK 0xa398
1502/* [R 1] Parity register #0 read */ 1529/* [R 1] Parity register #0 read */
1503#define MISC_REG_MISC_PRTY_STS 0xa38c 1530#define MISC_REG_MISC_PRTY_STS 0xa38c
1531/* [RC 1] Parity register #0 read clear */
1532#define MISC_REG_MISC_PRTY_STS_CLR 0xa390
1504#define MISC_REG_NIG_WOL_P0 0xa270 1533#define MISC_REG_NIG_WOL_P0 0xa270
1505#define MISC_REG_NIG_WOL_P1 0xa274 1534#define MISC_REG_NIG_WOL_P1 0xa274
1506/* [R 1] If set indicate that the pcie_rst_b was asserted without perst 1535/* [R 1] If set indicate that the pcie_rst_b was asserted without perst
@@ -1615,6 +1644,8 @@
1615#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4) 1644#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN (0x1<<4)
1616#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2) 1645#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST (0x1<<2)
1617#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3) 1646#define NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN (0x1<<3)
1647#define NIG_LLH0_XCM_MASK_REG_LLH0_XCM_MASK_BCN (0x1<<0)
1648#define NIG_LLH1_XCM_MASK_REG_LLH1_XCM_MASK_BCN (0x1<<0)
1618#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0) 1649#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_EMAC0_MISC_MI_INT (0x1<<0)
1619#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9) 1650#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_SERDES0_LINK_STATUS (0x1<<9)
1620#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15) 1651#define NIG_MASK_INTERRUPT_PORT0_REG_MASK_XGXS0_LINK10G (0x1<<15)
@@ -1744,12 +1775,16 @@
1744 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same 1775 ~ppp_enable.ppp_enable = 0 and pause_enable.pause_enable =0 for the same
1745 port */ 1776 port */
1746#define NIG_REG_LLFC_ENABLE_0 0x16208 1777#define NIG_REG_LLFC_ENABLE_0 0x16208
1778#define NIG_REG_LLFC_ENABLE_1 0x1620c
1747/* [RW 16] classes are high-priority for port0 */ 1779/* [RW 16] classes are high-priority for port0 */
1748#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058 1780#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_0 0x16058
1781#define NIG_REG_LLFC_HIGH_PRIORITY_CLASSES_1 0x1605c
1749/* [RW 16] classes are low-priority for port0 */ 1782/* [RW 16] classes are low-priority for port0 */
1750#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060 1783#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_0 0x16060
1784#define NIG_REG_LLFC_LOW_PRIORITY_CLASSES_1 0x16064
1751/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */ 1785/* [RW 1] Output enable of message to LLFC BMAC IF for port0 */
1752#define NIG_REG_LLFC_OUT_EN_0 0x160c8 1786#define NIG_REG_LLFC_OUT_EN_0 0x160c8
1787#define NIG_REG_LLFC_OUT_EN_1 0x160cc
1753#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c 1788#define NIG_REG_LLH0_ACPI_PAT_0_CRC 0x1015c
1754#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154 1789#define NIG_REG_LLH0_ACPI_PAT_6_LEN 0x10154
1755#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244 1790#define NIG_REG_LLH0_BRB1_DRV_MASK 0x10244
@@ -1774,6 +1809,8 @@
1774/* [RW 8] event id for llh0 */ 1809/* [RW 8] event id for llh0 */
1775#define NIG_REG_LLH0_EVENT_ID 0x10084 1810#define NIG_REG_LLH0_EVENT_ID 0x10084
1776#define NIG_REG_LLH0_FUNC_EN 0x160fc 1811#define NIG_REG_LLH0_FUNC_EN 0x160fc
1812#define NIG_REG_LLH0_FUNC_MEM 0x16180
1813#define NIG_REG_LLH0_FUNC_MEM_ENABLE 0x16140
1777#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100 1814#define NIG_REG_LLH0_FUNC_VLAN_ID 0x16100
1778/* [RW 1] Determine the IP version to look for in 1815/* [RW 1] Determine the IP version to look for in
1779 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */ 1816 ~nig_registers_llh0_dest_ip_0.llh0_dest_ip_0. 0 - IPv6; 1-IPv4 */
@@ -1797,6 +1834,9 @@
1797#define NIG_REG_LLH1_ERROR_MASK 0x10090 1834#define NIG_REG_LLH1_ERROR_MASK 0x10090
1798/* [RW 8] event id for llh1 */ 1835/* [RW 8] event id for llh1 */
1799#define NIG_REG_LLH1_EVENT_ID 0x10088 1836#define NIG_REG_LLH1_EVENT_ID 0x10088
1837#define NIG_REG_LLH1_FUNC_MEM 0x161c0
1838#define NIG_REG_LLH1_FUNC_MEM_ENABLE 0x16160
1839#define NIG_REG_LLH1_FUNC_MEM_SIZE 16
1800/* [RW 8] init credit counter for port1 in LLH */ 1840/* [RW 8] init credit counter for port1 in LLH */
1801#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564 1841#define NIG_REG_LLH1_XCM_INIT_CREDIT 0x10564
1802#define NIG_REG_LLH1_XCM_MASK 0x10134 1842#define NIG_REG_LLH1_XCM_MASK 0x10134
@@ -1907,11 +1947,17 @@
1907 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same 1947 ~safc_enable.safc_enable = 0 and ppp_enable.ppp_enable =0 for the same
1908 port */ 1948 port */
1909#define NIG_REG_PAUSE_ENABLE_0 0x160c0 1949#define NIG_REG_PAUSE_ENABLE_0 0x160c0
1950#define NIG_REG_PAUSE_ENABLE_1 0x160c4
1910/* [RW 1] Input enable for RX PBF LP IF */ 1951/* [RW 1] Input enable for RX PBF LP IF */
1911#define NIG_REG_PBF_LB_IN_EN 0x100b4 1952#define NIG_REG_PBF_LB_IN_EN 0x100b4
1912/* [RW 1] Value of this register will be transmitted to port swap when 1953/* [RW 1] Value of this register will be transmitted to port swap when
1913 ~nig_registers_strap_override.strap_override =1 */ 1954 ~nig_registers_strap_override.strap_override =1 */
1914#define NIG_REG_PORT_SWAP 0x10394 1955#define NIG_REG_PORT_SWAP 0x10394
1956/* [RW 1] PPP enable for port0. This register may get 1 only when
1957 * ~safc_enable.safc_enable = 0 and pause_enable.pause_enable =0 for the
1958 * same port */
1959#define NIG_REG_PPP_ENABLE_0 0x160b0
1960#define NIG_REG_PPP_ENABLE_1 0x160b4
1915/* [RW 1] output enable for RX parser descriptor IF */ 1961/* [RW 1] output enable for RX parser descriptor IF */
1916#define NIG_REG_PRS_EOP_OUT_EN 0x10104 1962#define NIG_REG_PRS_EOP_OUT_EN 0x10104
1917/* [RW 1] Input enable for RX parser request IF */ 1963/* [RW 1] Input enable for RX parser request IF */
@@ -1978,6 +2024,14 @@
1978#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15) 2024#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK10G (0x1<<15)
1979#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18) 2025#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS (0xf<<18)
1980#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18 2026#define NIG_STATUS_INTERRUPT_PORT0_REG_STATUS_XGXS0_LINK_STATUS_SIZE 18
2027/* [RW 31] The upper bound of the weight of COS0 in the ETS command arbiter. */
2028#define PBF_REG_COS0_UPPER_BOUND 0x15c05c
2029/* [RW 31] The weight of COS0 in the ETS command arbiter. */
2030#define PBF_REG_COS0_WEIGHT 0x15c054
2031/* [RW 31] The upper bound of the weight of COS1 in the ETS command arbiter. */
2032#define PBF_REG_COS1_UPPER_BOUND 0x15c060
2033/* [RW 31] The weight of COS1 in the ETS command arbiter. */
2034#define PBF_REG_COS1_WEIGHT 0x15c058
1981/* [RW 1] Disable processing further tasks from port 0 (after ending the 2035/* [RW 1] Disable processing further tasks from port 0 (after ending the
1982 current task in process). */ 2036 current task in process). */
1983#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c 2037#define PBF_REG_DISABLE_NEW_TASK_PROC_P0 0x14005c
@@ -1988,9 +2042,16 @@
1988 current task in process). */ 2042 current task in process). */
1989#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c 2043#define PBF_REG_DISABLE_NEW_TASK_PROC_P4 0x14006c
1990#define PBF_REG_DISABLE_PF 0x1402e8 2044#define PBF_REG_DISABLE_PF 0x1402e8
2045/* [RW 1] Indicates that ETS is performed between the COSes in the command
2046 * arbiter. If reset, strict priority w/ anti-starvation will be performed
2047 * w/o WFQ. */
2048#define PBF_REG_ETS_ENABLED 0x15c050
1991/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic 2049/* [RW 6] Bit-map indicating which L2 hdrs may appear after the basic
1992 * Ethernet header. */ 2050 * Ethernet header. */
1993#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8 2051#define PBF_REG_HDRS_AFTER_BASIC 0x15c0a8
2052/* [RW 1] Indicates which COS is connected to the highest priority in the
2053 * command arbiter. */
2054#define PBF_REG_HIGH_PRIORITY_COS_NUM 0x15c04c
1994#define PBF_REG_IF_ENABLE_REG 0x140044 2055#define PBF_REG_IF_ENABLE_REG 0x140044
1995/* [RW 1] Init bit. When set the initial credits are copied to the credit 2056/* [RW 1] Init bit. When set the initial credits are copied to the credit
1996 registers (except the port credits). Should be set and then reset after 2057 registers (except the port credits). Should be set and then reset after
@@ -2016,6 +2077,10 @@
2016#define PBF_REG_MAC_LB_ENABLE 0x140040 2077#define PBF_REG_MAC_LB_ENABLE 0x140040
2017/* [RW 6] Bit-map indicating which headers must appear in the packet */ 2078/* [RW 6] Bit-map indicating which headers must appear in the packet */
2018#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4 2079#define PBF_REG_MUST_HAVE_HDRS 0x15c0c4
2080/* [RW 16] The number of strict priority arbitration slots between 2 RR
2081 * arbitration slots. A value of 0 means no strict priority cycles; i.e. the
2082 * strict-priority w/ anti-starvation arbiter is a RR arbiter. */
2083#define PBF_REG_NUM_STRICT_ARB_SLOTS 0x15c064
2019/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause 2084/* [RW 10] Port 0 threshold used by arbiter in 16 byte lines used when pause
2020 not supported. */ 2085
2021#define PBF_REG_P0_ARB_THRSH 0x1400e4 2086#define PBF_REG_P0_ARB_THRSH 0x1400e4
@@ -2046,6 +2111,10 @@
2046#define PBF_REG_PBF_INT_MASK 0x1401d4 2111#define PBF_REG_PBF_INT_MASK 0x1401d4
2047/* [R 5] Interrupt register #0 read */ 2112/* [R 5] Interrupt register #0 read */
2048#define PBF_REG_PBF_INT_STS 0x1401c8 2113#define PBF_REG_PBF_INT_STS 0x1401c8
2114/* [RW 20] Parity mask register #0 read/write */
2115#define PBF_REG_PBF_PRTY_MASK 0x1401e4
2116/* [RC 20] Parity register #0 read clear */
2117#define PBF_REG_PBF_PRTY_STS_CLR 0x1401dc
2049#define PB_REG_CONTROL 0 2118#define PB_REG_CONTROL 0
2050/* [RW 2] Interrupt mask register #0 read/write */ 2119/* [RW 2] Interrupt mask register #0 read/write */
2051#define PB_REG_PB_INT_MASK 0x28 2120#define PB_REG_PB_INT_MASK 0x28
@@ -2055,6 +2124,8 @@
2055#define PB_REG_PB_PRTY_MASK 0x38 2124#define PB_REG_PB_PRTY_MASK 0x38
2056/* [R 4] Parity register #0 read */ 2125/* [R 4] Parity register #0 read */
2057#define PB_REG_PB_PRTY_STS 0x2c 2126#define PB_REG_PB_PRTY_STS 0x2c
2127/* [RC 4] Parity register #0 read clear */
2128#define PB_REG_PB_PRTY_STS_CLR 0x30
2058#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0) 2129#define PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR (0x1<<0)
2059#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8) 2130#define PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW (0x1<<8)
2060#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1) 2131#define PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR (0x1<<1)
@@ -2410,6 +2481,8 @@
2410#define PRS_REG_PRS_PRTY_MASK 0x401a4 2481#define PRS_REG_PRS_PRTY_MASK 0x401a4
2411/* [R 8] Parity register #0 read */ 2482/* [R 8] Parity register #0 read */
2412#define PRS_REG_PRS_PRTY_STS 0x40198 2483#define PRS_REG_PRS_PRTY_STS 0x40198
2484/* [RC 8] Parity register #0 read clear */
2485#define PRS_REG_PRS_PRTY_STS_CLR 0x4019c
2413/* [RW 8] Context region for pure acknowledge packets. Used in CFC load 2486/* [RW 8] Context region for pure acknowledge packets. Used in CFC load
2414 request message */ 2487 request message */
2415#define PRS_REG_PURE_REGIONS 0x40024 2488#define PRS_REG_PURE_REGIONS 0x40024
@@ -2563,6 +2636,9 @@
2563/* [R 32] Parity register #0 read */ 2636/* [R 32] Parity register #0 read */
2564#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c 2637#define PXP2_REG_PXP2_PRTY_STS_0 0x12057c
2565#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c 2638#define PXP2_REG_PXP2_PRTY_STS_1 0x12058c
2639/* [RC 32] Parity register #0 read clear */
2640#define PXP2_REG_PXP2_PRTY_STS_CLR_0 0x120580
2641#define PXP2_REG_PXP2_PRTY_STS_CLR_1 0x120590
2566/* [R 1] Debug only: The 'almost full' indication from each fifo (gives 2642/* [R 1] Debug only: The 'almost full' indication from each fifo (gives
2567 indication about backpressure) */ 2643 indication about backpressure) */
2568#define PXP2_REG_RD_ALMOST_FULL_0 0x120424 2644#define PXP2_REG_RD_ALMOST_FULL_0 0x120424
@@ -2965,6 +3041,8 @@
2965#define PXP_REG_PXP_PRTY_MASK 0x103094 3041#define PXP_REG_PXP_PRTY_MASK 0x103094
2966/* [R 26] Parity register #0 read */ 3042/* [R 26] Parity register #0 read */
2967#define PXP_REG_PXP_PRTY_STS 0x103088 3043#define PXP_REG_PXP_PRTY_STS 0x103088
3044/* [RC 27] Parity register #0 read clear */
3045#define PXP_REG_PXP_PRTY_STS_CLR 0x10308c
2968/* [RW 4] The activity counter initial increment value sent in the load 3046/* [RW 4] The activity counter initial increment value sent in the load
2969 request */ 3047 request */
2970#define QM_REG_ACTCTRINITVAL_0 0x168040 3048#define QM_REG_ACTCTRINITVAL_0 0x168040
@@ -3121,6 +3199,8 @@
3121#define QM_REG_QM_PRTY_MASK 0x168454 3199#define QM_REG_QM_PRTY_MASK 0x168454
3122/* [R 12] Parity register #0 read */ 3200/* [R 12] Parity register #0 read */
3123#define QM_REG_QM_PRTY_STS 0x168448 3201#define QM_REG_QM_PRTY_STS 0x168448
3202/* [RC 12] Parity register #0 read clear */
3203#define QM_REG_QM_PRTY_STS_CLR 0x16844c
3124/* [R 32] Current queues in pipeline: Queues from 32 to 63 */ 3204/* [R 32] Current queues in pipeline: Queues from 32 to 63 */
3125#define QM_REG_QSTATUS_HIGH 0x16802c 3205#define QM_REG_QSTATUS_HIGH 0x16802c
3126/* [R 32] Current queues in pipeline: Queues from 96 to 127 */ 3206/* [R 32] Current queues in pipeline: Queues from 96 to 127 */
@@ -3406,6 +3486,8 @@
3406#define QM_REG_WRRWEIGHTS_9 0x168848 3486#define QM_REG_WRRWEIGHTS_9 0x168848
3407/* [R 6] Keep the fill level of the fifo from write client 1 */ 3487/* [R 6] Keep the fill level of the fifo from write client 1 */
3408#define QM_REG_XQM_WRC_FIFOLVL 0x168000 3488#define QM_REG_XQM_WRC_FIFOLVL 0x168000
3489/* [W 1] reset to parity interrupt */
3490#define SEM_FAST_REG_PARITY_RST 0x18840
3409#define SRC_REG_COUNTFREE0 0x40500 3491#define SRC_REG_COUNTFREE0 0x40500
3410/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two 3492/* [RW 1] If clr the searcher is compatible to E1 A0 - support only two
3411 ports. If set the searcher support 8 functions. */ 3493 ports. If set the searcher support 8 functions. */
@@ -3434,6 +3516,8 @@
3434#define SRC_REG_SRC_PRTY_MASK 0x404c8 3516#define SRC_REG_SRC_PRTY_MASK 0x404c8
3435/* [R 3] Parity register #0 read */ 3517/* [R 3] Parity register #0 read */
3436#define SRC_REG_SRC_PRTY_STS 0x404bc 3518#define SRC_REG_SRC_PRTY_STS 0x404bc
3519/* [RC 3] Parity register #0 read clear */
3520#define SRC_REG_SRC_PRTY_STS_CLR 0x404c0
3437/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */ 3521/* [R 4] Used to read the value of the XX protection CAM occupancy counter. */
3438#define TCM_REG_CAM_OCCUP 0x5017c 3522#define TCM_REG_CAM_OCCUP 0x5017c
3439/* [RW 1] CDU AG read Interface enable. If 0 - the request input is 3523/* [RW 1] CDU AG read Interface enable. If 0 - the request input is
@@ -3560,8 +3644,12 @@
3560#define TCM_REG_TCM_INT_MASK 0x501dc 3644#define TCM_REG_TCM_INT_MASK 0x501dc
3561/* [R 11] Interrupt register #0 read */ 3645/* [R 11] Interrupt register #0 read */
3562#define TCM_REG_TCM_INT_STS 0x501d0 3646#define TCM_REG_TCM_INT_STS 0x501d0
3647/* [RW 27] Parity mask register #0 read/write */
3648#define TCM_REG_TCM_PRTY_MASK 0x501ec
3563/* [R 27] Parity register #0 read */ 3649/* [R 27] Parity register #0 read */
3564#define TCM_REG_TCM_PRTY_STS 0x501e0 3650#define TCM_REG_TCM_PRTY_STS 0x501e0
3651/* [RC 27] Parity register #0 read clear */
3652#define TCM_REG_TCM_PRTY_STS_CLR 0x501e4
3565/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS 3653/* [RW 3] The size of AG context region 0 in REG-pairs. Designates the MS
3566 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 3654 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
3567 Is used to determine the number of the AG context REG-pairs written back; 3655 Is used to determine the number of the AG context REG-pairs written back;
@@ -3719,6 +3807,10 @@
3719#define TM_REG_TM_INT_MASK 0x1640fc 3807#define TM_REG_TM_INT_MASK 0x1640fc
3720/* [R 1] Interrupt register #0 read */ 3808/* [R 1] Interrupt register #0 read */
3721#define TM_REG_TM_INT_STS 0x1640f0 3809#define TM_REG_TM_INT_STS 0x1640f0
3810/* [RW 7] Parity mask register #0 read/write */
3811#define TM_REG_TM_PRTY_MASK 0x16410c
3812/* [RC 7] Parity register #0 read clear */
3813#define TM_REG_TM_PRTY_STS_CLR 0x164104
3722/* [RW 8] The event id for aggregated interrupt 0 */ 3814/* [RW 8] The event id for aggregated interrupt 0 */
3723#define TSDM_REG_AGG_INT_EVENT_0 0x42038 3815#define TSDM_REG_AGG_INT_EVENT_0 0x42038
3724#define TSDM_REG_AGG_INT_EVENT_1 0x4203c 3816#define TSDM_REG_AGG_INT_EVENT_1 0x4203c
@@ -3799,6 +3891,8 @@
3799#define TSDM_REG_TSDM_PRTY_MASK 0x422bc 3891#define TSDM_REG_TSDM_PRTY_MASK 0x422bc
3800/* [R 11] Parity register #0 read */ 3892/* [R 11] Parity register #0 read */
3801#define TSDM_REG_TSDM_PRTY_STS 0x422b0 3893#define TSDM_REG_TSDM_PRTY_STS 0x422b0
3894/* [RC 11] Parity register #0 read clear */
3895#define TSDM_REG_TSDM_PRTY_STS_CLR 0x422b4
3802/* [RW 5] The number of time_slots in the arbitration cycle */ 3896/* [RW 5] The number of time_slots in the arbitration cycle */
3803#define TSEM_REG_ARB_CYCLE_SIZE 0x180034 3897#define TSEM_REG_ARB_CYCLE_SIZE 0x180034
3804/* [RW 3] The source that is associated with arbitration element 0. Source 3898/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -3878,6 +3972,9 @@
3878#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0 3972#define TSEM_REG_SLOW_EXT_STORE_EMPTY 0x1802a0
3879/* [RW 8] List of free threads . There is a bit per thread. */ 3973/* [RW 8] List of free threads . There is a bit per thread. */
3880#define TSEM_REG_THREADS_LIST 0x1802e4 3974#define TSEM_REG_THREADS_LIST 0x1802e4
3975/* [RC 32] Parity register #0 read clear */
3976#define TSEM_REG_TSEM_PRTY_STS_CLR_0 0x180118
3977#define TSEM_REG_TSEM_PRTY_STS_CLR_1 0x180128
3881/* [RW 3] The arbitration scheme of time_slot 0 */ 3978/* [RW 3] The arbitration scheme of time_slot 0 */
3882#define TSEM_REG_TS_0_AS 0x180038 3979#define TSEM_REG_TS_0_AS 0x180038
3883/* [RW 3] The arbitration scheme of time_slot 10 */ 3980/* [RW 3] The arbitration scheme of time_slot 10 */
@@ -4080,6 +4177,8 @@
4080#define UCM_REG_UCM_INT_STS 0xe01c8 4177#define UCM_REG_UCM_INT_STS 0xe01c8
4081/* [R 27] Parity register #0 read */ 4178/* [R 27] Parity register #0 read */
4082#define UCM_REG_UCM_PRTY_STS 0xe01d8 4179#define UCM_REG_UCM_PRTY_STS 0xe01d8
4180/* [RC 27] Parity register #0 read clear */
4181#define UCM_REG_UCM_PRTY_STS_CLR 0xe01dc
4083/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS 4182/* [RW 2] The size of AG context region 0 in REG-pairs. Designates the MS
4084 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5). 4183 REG-pair number (e.g. if region 0 is 6 REG-pairs; the value should be 5).
4085 Is used to determine the number of the AG context REG-pairs written back; 4184 Is used to determine the number of the AG context REG-pairs written back;
@@ -4256,6 +4355,8 @@
4256#define USDM_REG_USDM_PRTY_MASK 0xc42c0 4355#define USDM_REG_USDM_PRTY_MASK 0xc42c0
4257/* [R 11] Parity register #0 read */ 4356/* [R 11] Parity register #0 read */
4258#define USDM_REG_USDM_PRTY_STS 0xc42b4 4357#define USDM_REG_USDM_PRTY_STS 0xc42b4
4358/* [RC 11] Parity register #0 read clear */
4359#define USDM_REG_USDM_PRTY_STS_CLR 0xc42b8
4259/* [RW 5] The number of time_slots in the arbitration cycle */ 4360/* [RW 5] The number of time_slots in the arbitration cycle */
4260#define USEM_REG_ARB_CYCLE_SIZE 0x300034 4361#define USEM_REG_ARB_CYCLE_SIZE 0x300034
4261/* [RW 3] The source that is associated with arbitration element 0. Source 4362/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4385,6 +4486,9 @@
4385/* [R 32] Parity register #0 read */ 4486/* [R 32] Parity register #0 read */
4386#define USEM_REG_USEM_PRTY_STS_0 0x300124 4487#define USEM_REG_USEM_PRTY_STS_0 0x300124
4387#define USEM_REG_USEM_PRTY_STS_1 0x300134 4488#define USEM_REG_USEM_PRTY_STS_1 0x300134
4489/* [RC 32] Parity register #0 read clear */
4490#define USEM_REG_USEM_PRTY_STS_CLR_0 0x300128
4491#define USEM_REG_USEM_PRTY_STS_CLR_1 0x300138
4388/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64 4492/* [W 7] VF or PF ID for reset error bit. Values 0-63 reset error bit for 64
4389 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */ 4493 * VF; values 64-67 reset error for 4 PF; values 68-127 are not valid. */
4390#define USEM_REG_VFPF_ERR_NUM 0x300380 4494#define USEM_REG_VFPF_ERR_NUM 0x300380
@@ -4761,6 +4865,8 @@
4761#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc 4865#define XSDM_REG_XSDM_PRTY_MASK 0x1662bc
4762/* [R 11] Parity register #0 read */ 4866/* [R 11] Parity register #0 read */
4763#define XSDM_REG_XSDM_PRTY_STS 0x1662b0 4867#define XSDM_REG_XSDM_PRTY_STS 0x1662b0
4868/* [RC 11] Parity register #0 read clear */
4869#define XSDM_REG_XSDM_PRTY_STS_CLR 0x1662b4
4764/* [RW 5] The number of time_slots in the arbitration cycle */ 4870/* [RW 5] The number of time_slots in the arbitration cycle */
4765#define XSEM_REG_ARB_CYCLE_SIZE 0x280034 4871#define XSEM_REG_ARB_CYCLE_SIZE 0x280034
4766/* [RW 3] The source that is associated with arbitration element 0. Source 4872/* [RW 3] The source that is associated with arbitration element 0. Source
@@ -4893,6 +4999,9 @@
4893/* [R 32] Parity register #0 read */ 4999/* [R 32] Parity register #0 read */
4894#define XSEM_REG_XSEM_PRTY_STS_0 0x280124 5000#define XSEM_REG_XSEM_PRTY_STS_0 0x280124
4895#define XSEM_REG_XSEM_PRTY_STS_1 0x280134 5001#define XSEM_REG_XSEM_PRTY_STS_1 0x280134
5002/* [RC 32] Parity register #0 read clear */
5003#define XSEM_REG_XSEM_PRTY_STS_CLR_0 0x280128
5004#define XSEM_REG_XSEM_PRTY_STS_CLR_1 0x280138
4896#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0) 5005#define MCPR_NVM_ACCESS_ENABLE_EN (1L<<0)
4897#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1) 5006#define MCPR_NVM_ACCESS_ENABLE_WR_EN (1L<<1)
4898#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0) 5007#define MCPR_NVM_ADDR_NVM_ADDR_VALUE (0xffffffL<<0)
@@ -4970,7 +5079,23 @@
4970#define EMAC_REG_EMAC_TX_MODE 0xbc 5079#define EMAC_REG_EMAC_TX_MODE 0xbc
4971#define EMAC_REG_EMAC_TX_STAT_AC 0x280 5080#define EMAC_REG_EMAC_TX_STAT_AC 0x280
4972#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22 5081#define EMAC_REG_EMAC_TX_STAT_AC_COUNT 22
5082#define EMAC_REG_RX_PFC_MODE 0x320
5083#define EMAC_REG_RX_PFC_MODE_PRIORITIES (1L<<2)
5084#define EMAC_REG_RX_PFC_MODE_RX_EN (1L<<1)
5085#define EMAC_REG_RX_PFC_MODE_TX_EN (1L<<0)
5086#define EMAC_REG_RX_PFC_PARAM 0x324
5087#define EMAC_REG_RX_PFC_PARAM_OPCODE_BITSHIFT 0
5088#define EMAC_REG_RX_PFC_PARAM_PRIORITY_EN_BITSHIFT 16
5089#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD 0x328
5090#define EMAC_REG_RX_PFC_STATS_XOFF_RCVD_COUNT (0xffff<<0)
5091#define EMAC_REG_RX_PFC_STATS_XOFF_SENT 0x330
5092#define EMAC_REG_RX_PFC_STATS_XOFF_SENT_COUNT (0xffff<<0)
5093#define EMAC_REG_RX_PFC_STATS_XON_RCVD 0x32c
5094#define EMAC_REG_RX_PFC_STATS_XON_RCVD_COUNT (0xffff<<0)
5095#define EMAC_REG_RX_PFC_STATS_XON_SENT 0x334
5096#define EMAC_REG_RX_PFC_STATS_XON_SENT_COUNT (0xffff<<0)
4973#define EMAC_RX_MODE_FLOW_EN (1L<<2) 5097#define EMAC_RX_MODE_FLOW_EN (1L<<2)
5098#define EMAC_RX_MODE_KEEP_MAC_CONTROL (1L<<3)
4974#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10) 5099#define EMAC_RX_MODE_KEEP_VLAN_TAG (1L<<10)
4975#define EMAC_RX_MODE_PROMISCUOUS (1L<<8) 5100#define EMAC_RX_MODE_PROMISCUOUS (1L<<8)
4976#define EMAC_RX_MODE_RESET (1L<<0) 5101#define EMAC_RX_MODE_RESET (1L<<0)
@@ -6264,3 +6389,4 @@ static inline u8 calc_crc8(u32 data, u8 crc)
6264} 6389}
6265 6390
6266 6391
6392#endif /* BNX2X_REG_H */
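The PBF additions above (PBF_REG_ETS_ENABLED, PBF_REG_COS0/1_WEIGHT, PBF_REG_COS0/1_UPPER_BOUND, PBF_REG_NUM_STRICT_ARB_SLOTS, PBF_REG_HIGH_PRIORITY_COS_NUM) are the knobs the new DCB/ETS code drives. As a minimal sketch only — assuming the usual bnx2x REG_WR() accessor and purely illustrative weight values, not taken from this patch — programming ETS between two COSes might look like this:

/*
 * Sketch, not part of this patch: enable ETS arbitration between COS0
 * and COS1 using the PBF registers defined above.
 */
static void example_pbf_ets_setup(struct bnx2x *bp)
{
	/* 0 strict-priority slots: the arbiter degenerates to round-robin */
	REG_WR(bp, PBF_REG_NUM_STRICT_ARB_SLOTS, 0);

	/* illustrative 2:1 bandwidth split between COS0 and COS1 */
	REG_WR(bp, PBF_REG_COS0_WEIGHT, 2);
	REG_WR(bp, PBF_REG_COS1_WEIGHT, 1);
	REG_WR(bp, PBF_REG_COS0_UPPER_BOUND, 0x80);
	REG_WR(bp, PBF_REG_COS1_UPPER_BOUND, 0x80);

	/* switch the command arbiter from strict priority to ETS */
	REG_WR(bp, PBF_REG_ETS_ENABLED, 1);
}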
diff --git a/drivers/net/bnx2x/bnx2x_stats.c b/drivers/net/bnx2x/bnx2x_stats.c
index 4733c835dad9..bda60d590fa8 100644
--- a/drivers/net/bnx2x/bnx2x_stats.c
+++ b/drivers/net/bnx2x/bnx2x_stats.c
@@ -158,9 +158,14 @@ static void bnx2x_storm_stats_post(struct bnx2x *bp)
158 158
159 spin_lock_bh(&bp->stats_lock); 159 spin_lock_bh(&bp->stats_lock);
160 160
161 if (bp->stats_pending) {
162 spin_unlock_bh(&bp->stats_lock);
163 return;
164 }
165
161 ramrod_data.drv_counter = bp->stats_counter++; 166 ramrod_data.drv_counter = bp->stats_counter++;
162 ramrod_data.collect_port = bp->port.pmf ? 1 : 0; 167 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
163 for_each_queue(bp, i) 168 for_each_eth_queue(bp, i)
164 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id); 169 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
165 170
166 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0, 171 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_STAT_QUERY, 0,
@@ -766,7 +771,7 @@ static int bnx2x_storm_stats_update(struct bnx2x *bp)
766 estats->no_buff_discard_hi = 0; 771 estats->no_buff_discard_hi = 0;
767 estats->no_buff_discard_lo = 0; 772 estats->no_buff_discard_lo = 0;
768 773
769 for_each_queue(bp, i) { 774 for_each_eth_queue(bp, i) {
770 struct bnx2x_fastpath *fp = &bp->fp[i]; 775 struct bnx2x_fastpath *fp = &bp->fp[i];
771 int cl_id = fp->cl_id; 776 int cl_id = fp->cl_id;
772 struct tstorm_per_client_stats *tclient = 777 struct tstorm_per_client_stats *tclient =
@@ -996,7 +1001,7 @@ static void bnx2x_net_stats_update(struct bnx2x *bp)
996 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi); 1001 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
997 1002
998 tmp = estats->mac_discard; 1003 tmp = estats->mac_discard;
999 for_each_queue(bp, i) 1004 for_each_rx_queue(bp, i)
1000 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard); 1005 tmp += le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
1001 nstats->rx_dropped = tmp; 1006 nstats->rx_dropped = tmp;
1002 1007
@@ -1087,7 +1092,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1087 bp->dev->name, 1092 bp->dev->name,
1088 estats->brb_drop_lo, estats->brb_truncate_lo); 1093 estats->brb_drop_lo, estats->brb_truncate_lo);
1089 1094
1090 for_each_queue(bp, i) { 1095 for_each_eth_queue(bp, i) {
1091 struct bnx2x_fastpath *fp = &bp->fp[i]; 1096 struct bnx2x_fastpath *fp = &bp->fp[i];
1092 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 1097 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1093 1098
@@ -1101,7 +1106,7 @@ static void bnx2x_stats_update(struct bnx2x *bp)
1101 fp->rx_calls, fp->rx_pkt); 1106 fp->rx_calls, fp->rx_pkt);
1102 } 1107 }
1103 1108
1104 for_each_queue(bp, i) { 1109 for_each_eth_queue(bp, i) {
1105 struct bnx2x_fastpath *fp = &bp->fp[i]; 1110 struct bnx2x_fastpath *fp = &bp->fp[i];
1106 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats; 1111 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
1107 struct netdev_queue *txq = 1112 struct netdev_queue *txq =
@@ -1381,7 +1386,8 @@ void bnx2x_stats_init(struct bnx2x *bp)
1381 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats)); 1386 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
1382 } 1387 }
1383 1388
1384 for_each_queue(bp, i) { 1389 /* FW stats are currently collected for ETH clients only */
1390 for_each_eth_queue(bp, i) {
1385 /* Set initial stats counter in the stats ramrod data to -1 */ 1391 /* Set initial stats counter in the stats ramrod data to -1 */
1386 int cl_id = bp->fp[i].cl_id; 1392 int cl_id = bp->fp[i].cl_id;
1387 1393
diff --git a/drivers/net/bnx2x/bnx2x_stats.h b/drivers/net/bnx2x/bnx2x_stats.h
index afd15efa429a..596798c47452 100644
--- a/drivers/net/bnx2x/bnx2x_stats.h
+++ b/drivers/net/bnx2x/bnx2x_stats.h
@@ -53,7 +53,6 @@ struct bnx2x_eth_q_stats {
53 u32 hw_csum_err; 53 u32 hw_csum_err;
54}; 54};
55 55
56#define BNX2X_NUM_Q_STATS 13
57#define Q_STATS_OFFSET32(stat_name) \ 56#define Q_STATS_OFFSET32(stat_name) \
58 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4) 57 (offsetof(struct bnx2x_eth_q_stats, stat_name) / 4)
59 58
@@ -225,7 +224,6 @@ struct bnx2x_eth_stats {
225 u32 nig_timer_max; 224 u32 nig_timer_max;
226}; 225};
227 226
228#define BNX2X_NUM_STATS 43
229#define STATS_OFFSET32(stat_name) \ 227#define STATS_OFFSET32(stat_name) \
230 (offsetof(struct bnx2x_eth_stats, stat_name) / 4) 228 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
231 229
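With the fixed BNX2X_NUM_Q_STATS/BNX2X_NUM_STATS counts removed, only the offset helpers remain; they turn a field of the stats structures into an index in 32-bit words. A small sketch, illustrative only, of how such an offset is typically consumed (mac_discard is a real field of struct bnx2x_eth_stats, the helper name is made up):

/*
 * Sketch only: read one statistics dword by its 32-bit-word offset,
 * e.g. example_stat_dword(estats, STATS_OFFSET32(mac_discard)).
 */
static u32 example_stat_dword(struct bnx2x_eth_stats *estats, int off32)
{
	u32 *words = (u32 *)estats;

	return words[off32];
}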
diff --git a/drivers/net/bonding/Makefile b/drivers/net/bonding/Makefile
index 6f9c6faef24c..0e2737eac8b7 100644
--- a/drivers/net/bonding/Makefile
+++ b/drivers/net/bonding/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_BONDING) += bonding.o 5obj-$(CONFIG_BONDING) += bonding.o
6 6
7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o 7bonding-objs := bond_main.o bond_3ad.o bond_alb.o bond_sysfs.o bond_debugfs.o
8 8
9ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o 9ipv6-$(subst m,y,$(CONFIG_IPV6)) += bond_ipv6.o
10bonding-objs += $(ipv6-y) 10bonding-objs += $(ipv6-y)
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index 106718c1be5b..171782e2bb39 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -2474,8 +2474,7 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2474 goto out; 2474 goto out;
2475 2475
2476 read_lock(&bond->lock); 2476 read_lock(&bond->lock);
2477 slave = bond_get_slave_by_dev((struct bonding *)netdev_priv(dev), 2477 slave = bond_get_slave_by_dev(netdev_priv(dev), orig_dev);
2478 orig_dev);
2479 if (!slave) 2478 if (!slave)
2480 goto out_unlock; 2479 goto out_unlock;
2481 2480
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 26bb118c4533..f4e638c65129 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -44,42 +44,6 @@
44#include "bond_alb.h" 44#include "bond_alb.h"
45 45
46 46
47#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
48#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
49 * Used for division - never set
50 * to zero !!!
51 */
52#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
53 * learning packets to the switch
54 */
55
56#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
57 * ALB_TIMER_TICKS_PER_SEC)
58
59#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
60 * ALB_TIMER_TICKS_PER_SEC)
61
62#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
63 * Note that this value MUST NOT be smaller
64 * because the key hash table is BYTE wide !
65 */
66
67
68#define TLB_NULL_INDEX 0xffffffff
69#define MAX_LP_BURST 3
70
71/* rlb defs */
72#define RLB_HASH_TABLE_SIZE 256
73#define RLB_NULL_INDEX 0xffffffff
74#define RLB_UPDATE_DELAY 2*ALB_TIMER_TICKS_PER_SEC /* 2 seconds */
75#define RLB_ARP_BURST_SIZE 2
76#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
77 * rebalance interval (5 min).
78 */
79/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
80 * promiscuous after failover
81 */
82#define RLB_PROMISC_TIMEOUT 10*ALB_TIMER_TICKS_PER_SEC
83 47
84#ifndef __long_aligned 48#ifndef __long_aligned
85#define __long_aligned __attribute__((aligned((sizeof(long))))) 49#define __long_aligned __attribute__((aligned((sizeof(long)))))
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 50968f8196cf..118c28aa471e 100644
--- a/drivers/net/bonding/bond_alb.h
+++ b/drivers/net/bonding/bond_alb.h
@@ -31,6 +31,44 @@ struct slave;
31#define BOND_ALB_INFO(bond) ((bond)->alb_info) 31#define BOND_ALB_INFO(bond) ((bond)->alb_info)
32#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info) 32#define SLAVE_TLB_INFO(slave) ((slave)->tlb_info)
33 33
34#define ALB_TIMER_TICKS_PER_SEC 10 /* should be a divisor of HZ */
35#define BOND_TLB_REBALANCE_INTERVAL 10 /* In seconds, periodic re-balancing.
36 * Used for division - never set
37 * to zero !!!
38 */
39#define BOND_ALB_LP_INTERVAL 1 /* In seconds, periodic send of
40 * learning packets to the switch
41 */
42
43#define BOND_TLB_REBALANCE_TICKS (BOND_TLB_REBALANCE_INTERVAL \
44 * ALB_TIMER_TICKS_PER_SEC)
45
46#define BOND_ALB_LP_TICKS (BOND_ALB_LP_INTERVAL \
47 * ALB_TIMER_TICKS_PER_SEC)
48
49#define TLB_HASH_TABLE_SIZE 256 /* The size of the clients hash table.
50 * Note that this value MUST NOT be smaller
51 * because the key hash table is BYTE wide !
52 */
53
54
55#define TLB_NULL_INDEX 0xffffffff
56#define MAX_LP_BURST 3
57
58/* rlb defs */
59#define RLB_HASH_TABLE_SIZE 256
60#define RLB_NULL_INDEX 0xffffffff
61#define RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC) /* 2 seconds */
62#define RLB_ARP_BURST_SIZE 2
63#define RLB_UPDATE_RETRY 3 /* 3-ticks - must be smaller than the rlb
64 * rebalance interval (5 min).
65 */
66/* RLB_PROMISC_TIMEOUT = 10 sec equals the time that the current slave is
67 * promiscuous after failover
68 */
69#define RLB_PROMISC_TIMEOUT (10*ALB_TIMER_TICKS_PER_SEC)
70
71
34struct tlb_client_info { 72struct tlb_client_info {
35 struct slave *tx_slave; /* A pointer to slave used for transmitting 73
36 * packets to a Client that the Hash function 74 * packets to a Client that the Hash function
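Note that the timer macros gained parentheses on the way into the header (RLB_UPDATE_DELAY and RLB_PROMISC_TIMEOUT above). A tiny illustrative snippet, not part of the patch and relying only on ALB_TIMER_TICKS_PER_SEC (10) defined above, of why that matters once the macro lands inside a larger expression:

/*
 * Illustrative only: "%" and "*" share precedence and associate left to
 * right, so the unparenthesized form silently regroups.
 */
#define OLD_RLB_UPDATE_DELAY 2*ALB_TIMER_TICKS_PER_SEC		/* old bond_alb.c form */
#define NEW_RLB_UPDATE_DELAY (2*ALB_TIMER_TICKS_PER_SEC)	/* form added here */

static inline int example_expansion(unsigned long ticks)
{
	int wrong = ticks % OLD_RLB_UPDATE_DELAY;	/* (ticks % 2) * 10 */
	int right = ticks % NEW_RLB_UPDATE_DELAY;	/* ticks % 20 */

	return wrong == right;
}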
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
new file mode 100644
index 000000000000..3680aa251dea
--- /dev/null
+++ b/drivers/net/bonding/bond_debugfs.c
@@ -0,0 +1,146 @@
1#include <linux/kernel.h>
2#include <linux/module.h>
3#include <linux/device.h>
4#include <linux/netdevice.h>
5
6#include "bonding.h"
7#include "bond_alb.h"
8
9#ifdef CONFIG_DEBUG_FS
10
11#include <linux/debugfs.h>
12#include <linux/seq_file.h>
13
14static struct dentry *bonding_debug_root;
15
16/*
17 * Show RLB hash table
18 */
19static int bond_debug_rlb_hash_show(struct seq_file *m, void *v)
20{
21 struct bonding *bond = m->private;
22 struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
23 struct rlb_client_info *client_info;
24 u32 hash_index;
25
26 if (bond->params.mode != BOND_MODE_ALB)
27 return 0;
28
29 seq_printf(m, "SourceIP DestinationIP "
30 "Destination MAC DEV\n");
31
32 spin_lock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
33
34 hash_index = bond_info->rx_hashtbl_head;
35 for (; hash_index != RLB_NULL_INDEX; hash_index = client_info->next) {
36 client_info = &(bond_info->rx_hashtbl[hash_index]);
37 seq_printf(m, "%-15pI4 %-15pI4 %-17pM %s\n",
38 &client_info->ip_src,
39 &client_info->ip_dst,
40 &client_info->mac_dst,
41 client_info->slave->dev->name);
42 }
43
44 spin_unlock_bh(&(BOND_ALB_INFO(bond).rx_hashtbl_lock));
45
46 return 0;
47}
48
49static int bond_debug_rlb_hash_open(struct inode *inode, struct file *file)
50{
51 return single_open(file, bond_debug_rlb_hash_show, inode->i_private);
52}
53
54static const struct file_operations bond_debug_rlb_hash_fops = {
55 .owner = THIS_MODULE,
56 .open = bond_debug_rlb_hash_open,
57 .read = seq_read,
58 .llseek = seq_lseek,
59 .release = single_release,
60};
61
62void bond_debug_register(struct bonding *bond)
63{
64 if (!bonding_debug_root)
65 return;
66
67 bond->debug_dir =
68 debugfs_create_dir(bond->dev->name, bonding_debug_root);
69
70 if (!bond->debug_dir) {
71 pr_warning("%s: Warning: failed to register to debugfs\n",
72 bond->dev->name);
73 return;
74 }
75
76 debugfs_create_file("rlb_hash_table", 0400, bond->debug_dir,
77 bond, &bond_debug_rlb_hash_fops);
78}
79
80void bond_debug_unregister(struct bonding *bond)
81{
82 if (!bonding_debug_root)
83 return;
84
85 debugfs_remove_recursive(bond->debug_dir);
86}
87
88void bond_debug_reregister(struct bonding *bond)
89{
90 struct dentry *d;
91
92 if (!bonding_debug_root)
93 return;
94
95 d = debugfs_rename(bonding_debug_root, bond->debug_dir,
96 bonding_debug_root, bond->dev->name);
97 if (d) {
98 bond->debug_dir = d;
99 } else {
100 pr_warning("%s: Warning: failed to reregister, "
101 "so just unregister old one\n",
102 bond->dev->name);
103 bond_debug_unregister(bond);
104 }
105}
106
107void bond_create_debugfs(void)
108{
109 bonding_debug_root = debugfs_create_dir("bonding", NULL);
110
111 if (!bonding_debug_root) {
112 pr_warning("Warning: Cannot create bonding directory"
113 " in debugfs\n");
114 }
115}
116
117void bond_destroy_debugfs(void)
118{
119 debugfs_remove_recursive(bonding_debug_root);
120 bonding_debug_root = NULL;
121}
122
123
124#else /* !CONFIG_DEBUG_FS */
125
126void bond_debug_register(struct bonding *bond)
127{
128}
129
130void bond_debug_unregister(struct bonding *bond)
131{
132}
133
134void bond_debug_reregister(struct bonding *bond)
135{
136}
137
138void bond_create_debugfs(void)
139{
140}
141
142void bond_destroy_debugfs(void)
143{
144}
145
146#endif /* CONFIG_DEBUG_FS */
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 121b073a6c3f..84fbd4ebd778 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -88,7 +88,12 @@ static void bond_na_send(struct net_device *slave_dev,
88 } 88 }
89 89
90 if (vlan_id) { 90 if (vlan_id) {
91 skb = vlan_put_tag(skb, vlan_id); 91 /* The Ethernet header is not present yet, so it is
92 * too early to insert a VLAN tag. Force use of an
93 * out-of-line tag here and let dev_hard_start_xmit()
94 * insert it if the slave hardware can't.
95 */
96 skb = __vlan_hwaccel_put_tag(skb, vlan_id);
92 if (!skb) { 97 if (!skb) {
93 pr_err("failed to insert VLAN tag\n"); 98 pr_err("failed to insert VLAN tag\n");
94 return; 99 return;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index d0ea760ce419..b1025b85acf1 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -418,36 +418,11 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
418 * @bond: bond device that got this skb for tx. 418 * @bond: bond device that got this skb for tx.
419 * @skb: hw accel VLAN tagged skb to transmit 419 * @skb: hw accel VLAN tagged skb to transmit
420 * @slave_dev: slave that is supposed to xmit this skbuff 420 * @slave_dev: slave that is supposed to xmit this skbuff
421 *
422 * When the bond gets an skb to transmit that is
423 * already hardware accelerated VLAN tagged, and it
424 * needs to relay this skb to a slave that is not
425 * hw accel capable, the skb needs to be "unaccelerated",
426 * i.e. strip the hwaccel tag and re-insert it as part
427 * of the payload.
428 */ 421 */
429int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, 422int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
430 struct net_device *slave_dev) 423 struct net_device *slave_dev)
431{ 424{
432 unsigned short uninitialized_var(vlan_id); 425 skb->dev = slave_dev;
433
434 /* Test vlan_list not vlgrp to catch and handle 802.1p tags */
435 if (!list_empty(&bond->vlan_list) &&
436 !(slave_dev->features & NETIF_F_HW_VLAN_TX) &&
437 vlan_get_tag(skb, &vlan_id) == 0) {
438 skb->dev = slave_dev;
439 skb = vlan_put_tag(skb, vlan_id);
440 if (!skb) {
441 /* vlan_put_tag() frees the skb in case of error,
442 * so return success here so the calling functions
443 * won't attempt to free is again.
444 */
445 return 0;
446 }
447 } else {
448 skb->dev = slave_dev;
449 }
450
451 skb->priority = 1; 426 skb->priority = 1;
452#ifdef CONFIG_NET_POLL_CONTROLLER 427#ifdef CONFIG_NET_POLL_CONTROLLER
453 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) { 428 if (unlikely(bond->dev->priv_flags & IFF_IN_NETPOLL)) {
@@ -873,17 +848,11 @@ static void bond_mc_del(struct bonding *bond, void *addr)
873static void __bond_resend_igmp_join_requests(struct net_device *dev) 848static void __bond_resend_igmp_join_requests(struct net_device *dev)
874{ 849{
875 struct in_device *in_dev; 850 struct in_device *in_dev;
876 struct ip_mc_list *im;
877 851
878 rcu_read_lock(); 852 rcu_read_lock();
879 in_dev = __in_dev_get_rcu(dev); 853 in_dev = __in_dev_get_rcu(dev);
880 if (in_dev) { 854 if (in_dev)
881 read_lock(&in_dev->mc_list_lock); 855 ip_mc_rejoin_groups(in_dev);
882 for (im = in_dev->mc_list; im; im = im->next)
883 ip_mc_rejoin_group(im);
884 read_unlock(&in_dev->mc_list_lock);
885 }
886
887 rcu_read_unlock(); 856 rcu_read_unlock();
888} 857}
889 858
@@ -1203,11 +1172,13 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1203 bond_do_fail_over_mac(bond, new_active, 1172 bond_do_fail_over_mac(bond, new_active,
1204 old_active); 1173 old_active);
1205 1174
1206 bond->send_grat_arp = bond->params.num_grat_arp; 1175 if (netif_running(bond->dev)) {
1207 bond_send_gratuitous_arp(bond); 1176 bond->send_grat_arp = bond->params.num_grat_arp;
1177 bond_send_gratuitous_arp(bond);
1208 1178
1209 bond->send_unsol_na = bond->params.num_unsol_na; 1179 bond->send_unsol_na = bond->params.num_unsol_na;
1210 bond_send_unsolicited_na(bond); 1180 bond_send_unsolicited_na(bond);
1181 }
1211 1182
1212 write_unlock_bh(&bond->curr_slave_lock); 1183 write_unlock_bh(&bond->curr_slave_lock);
1213 read_unlock(&bond->lock); 1184 read_unlock(&bond->lock);
@@ -1221,8 +1192,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
1221 1192
1222 /* resend IGMP joins since active slave has changed or 1193 /* resend IGMP joins since active slave has changed or
1223 * all were sent on curr_active_slave */ 1194 * all were sent on curr_active_slave */
1224 if ((USES_PRIMARY(bond->params.mode) && new_active) || 1195 if (((USES_PRIMARY(bond->params.mode) && new_active) ||
1225 bond->params.mode == BOND_MODE_ROUNDROBIN) { 1196 bond->params.mode == BOND_MODE_ROUNDROBIN) &&
1197 netif_running(bond->dev)) {
1226 bond->igmp_retrans = bond->params.resend_igmp; 1198 bond->igmp_retrans = bond->params.resend_igmp;
1227 queue_delayed_work(bond->wq, &bond->mcast_work, 0); 1199 queue_delayed_work(bond->wq, &bond->mcast_work, 0);
1228 } 1200 }
@@ -3211,7 +3183,7 @@ out:
3211#ifdef CONFIG_PROC_FS 3183#ifdef CONFIG_PROC_FS
3212 3184
3213static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos) 3185static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3214 __acquires(&dev_base_lock) 3186 __acquires(RCU)
3215 __acquires(&bond->lock) 3187 __acquires(&bond->lock)
3216{ 3188{
3217 struct bonding *bond = seq->private; 3189 struct bonding *bond = seq->private;
@@ -3220,7 +3192,7 @@ static void *bond_info_seq_start(struct seq_file *seq, loff_t *pos)
3220 int i; 3192 int i;
3221 3193
3222 /* make sure the bond won't be taken away */ 3194 /* make sure the bond won't be taken away */
3223 read_lock(&dev_base_lock); 3195 rcu_read_lock();
3224 read_lock(&bond->lock); 3196 read_lock(&bond->lock);
3225 3197
3226 if (*pos == 0) 3198 if (*pos == 0)
@@ -3250,12 +3222,12 @@ static void *bond_info_seq_next(struct seq_file *seq, void *v, loff_t *pos)
3250 3222
3251static void bond_info_seq_stop(struct seq_file *seq, void *v) 3223static void bond_info_seq_stop(struct seq_file *seq, void *v)
3252 __releases(&bond->lock) 3224 __releases(&bond->lock)
3253 __releases(&dev_base_lock) 3225 __releases(RCU)
3254{ 3226{
3255 struct bonding *bond = seq->private; 3227 struct bonding *bond = seq->private;
3256 3228
3257 read_unlock(&bond->lock); 3229 read_unlock(&bond->lock);
3258 read_unlock(&dev_base_lock); 3230 rcu_read_unlock();
3259} 3231}
3260 3232
3261static void bond_info_show_master(struct seq_file *seq) 3233static void bond_info_show_master(struct seq_file *seq)
@@ -3509,6 +3481,8 @@ static int bond_event_changename(struct bonding *bond)
3509 bond_remove_proc_entry(bond); 3481 bond_remove_proc_entry(bond);
3510 bond_create_proc_entry(bond); 3482 bond_create_proc_entry(bond);
3511 3483
3484 bond_debug_reregister(bond);
3485
3512 return NOTIFY_DONE; 3486 return NOTIFY_DONE;
3513} 3487}
3514 3488
@@ -4791,6 +4765,8 @@ static void bond_uninit(struct net_device *bond_dev)
4791 4765
4792 bond_remove_proc_entry(bond); 4766 bond_remove_proc_entry(bond);
4793 4767
4768 bond_debug_unregister(bond);
4769
4794 __hw_addr_flush(&bond->mc_list); 4770 __hw_addr_flush(&bond->mc_list);
4795 4771
4796 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) { 4772 list_for_each_entry_safe(vlan, tmp, &bond->vlan_list, vlan_list) {
@@ -5193,6 +5169,8 @@ static int bond_init(struct net_device *bond_dev)
5193 5169
5194 bond_prepare_sysfs_group(bond); 5170 bond_prepare_sysfs_group(bond);
5195 5171
5172 bond_debug_register(bond);
5173
5196 __hw_addr_init(&bond->mc_list); 5174 __hw_addr_init(&bond->mc_list);
5197 return 0; 5175 return 0;
5198} 5176}
@@ -5307,6 +5285,8 @@ static int __init bonding_init(void)
5307 if (res) 5285 if (res)
5308 goto err_link; 5286 goto err_link;
5309 5287
5288 bond_create_debugfs();
5289
5310 for (i = 0; i < max_bonds; i++) { 5290 for (i = 0; i < max_bonds; i++) {
5311 res = bond_create(&init_net, NULL); 5291 res = bond_create(&init_net, NULL);
5312 if (res) 5292 if (res)
@@ -5317,7 +5297,6 @@ static int __init bonding_init(void)
5317 if (res) 5297 if (res)
5318 goto err; 5298 goto err;
5319 5299
5320
5321 register_netdevice_notifier(&bond_netdev_notifier); 5300 register_netdevice_notifier(&bond_netdev_notifier);
5322 register_inetaddr_notifier(&bond_inetaddr_notifier); 5301 register_inetaddr_notifier(&bond_inetaddr_notifier);
5323 bond_register_ipv6_notifier(); 5302 bond_register_ipv6_notifier();
@@ -5338,6 +5317,7 @@ static void __exit bonding_exit(void)
5338 bond_unregister_ipv6_notifier(); 5317 bond_unregister_ipv6_notifier();
5339 5318
5340 bond_destroy_sysfs(); 5319 bond_destroy_sysfs();
5320 bond_destroy_debugfs();
5341 5321
5342 rtnl_link_unregister(&bond_link_ops); 5322 rtnl_link_unregister(&bond_link_ops);
5343 unregister_pernet_subsys(&bond_net_ops); 5323 unregister_pernet_subsys(&bond_net_ops);
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index c2f081352a03..31fe980e4e28 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -18,7 +18,6 @@
18#include <linux/timer.h> 18#include <linux/timer.h>
19#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
20#include <linux/if_bonding.h> 20#include <linux/if_bonding.h>
21#include <linux/kobject.h>
22#include <linux/cpumask.h> 21#include <linux/cpumask.h>
23#include <linux/in6.h> 22#include <linux/in6.h>
24#include "bond_3ad.h" 23#include "bond_3ad.h"
@@ -255,6 +254,10 @@ struct bonding {
255#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 254#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
256 struct in6_addr master_ipv6; 255 struct in6_addr master_ipv6;
257#endif 256#endif
257#ifdef CONFIG_DEBUG_FS
258 /* debugging support via debugfs */
259 struct dentry *debug_dir;
260#endif /* CONFIG_DEBUG_FS */
258}; 261};
259 262
260/** 263/**
@@ -269,11 +272,11 @@ static inline struct slave *bond_get_slave_by_dev(struct bonding *bond, struct n
269 272
270 bond_for_each_slave(bond, slave, i) { 273 bond_for_each_slave(bond, slave, i) {
271 if (slave->dev == slave_dev) { 274 if (slave->dev == slave_dev) {
272 break; 275 return slave;
273 } 276 }
274 } 277 }
275 278
276 return slave; 279 return 0;
277} 280}
278 281
279static inline struct bonding *bond_get_bond_by_slave(struct slave *slave) 282static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
@@ -282,7 +285,7 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
282 return NULL; 285 return NULL;
283 } 286 }
284 287
285 return (struct bonding *)netdev_priv(slave->dev->master); 288 return netdev_priv(slave->dev->master);
286} 289}
287 290
288static inline bool bond_is_lb(const struct bonding *bond) 291static inline bool bond_is_lb(const struct bonding *bond)
@@ -376,6 +379,11 @@ void bond_select_active_slave(struct bonding *bond);
376void bond_change_active_slave(struct bonding *bond, struct slave *new_active); 379void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
377void bond_register_arp(struct bonding *); 380void bond_register_arp(struct bonding *);
378void bond_unregister_arp(struct bonding *); 381void bond_unregister_arp(struct bonding *);
382void bond_create_debugfs(void);
383void bond_destroy_debugfs(void);
384void bond_debug_register(struct bonding *bond);
385void bond_debug_unregister(struct bonding *bond);
386void bond_debug_reregister(struct bonding *bond);
379 387
380struct bond_net { 388struct bond_net {
381 struct net * net; /* Associated network namespace */ 389 struct net * net; /* Associated network namespace */
diff --git a/drivers/net/caif/caif_shm_u5500.c b/drivers/net/caif/caif_shm_u5500.c
index 32b1c6fb2de1..5f771ab712c4 100644
--- a/drivers/net/caif/caif_shm_u5500.c
+++ b/drivers/net/caif/caif_shm_u5500.c
@@ -11,7 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/netdevice.h> 13#include <linux/netdevice.h>
14#include <mach/mbox.h> 14#include <mach/mbox-db5500.h>
15#include <net/caif/caif_shm.h> 15#include <net/caif/caif_shm.h>
16 16
17MODULE_LICENSE("GPL"); 17MODULE_LICENSE("GPL");
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 080574b0fff0..d5a9db60ade9 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -12,6 +12,27 @@ config CAN_VCAN
12 This driver can also be built as a module. If so, the module 12 This driver can also be built as a module. If so, the module
13 will be called vcan. 13 will be called vcan.
14 14
15config CAN_SLCAN
16 tristate "Serial / USB serial CAN Adaptors (slcan)"
17 depends on CAN
18 default N
19 ---help---
20 CAN driver for several 'low cost' CAN interfaces that are attached
21 via serial lines or via USB-to-serial adapters using the LAWICEL
22 ASCII protocol. The driver implements the tty linediscipline N_SLCAN.
23
24 As only the sending and receiving of CAN frames is implemented, this
25 driver should work with the (serial/USB) CAN hardware from:
26 www.canusb.com / www.can232.com / www.mictronic.com / www.canhack.de
27
28 Userspace tools to attach the SLCAN line discipline (slcan_attach,
29 slcand) can be found in the can-utils at the SocketCAN SVN, see
30 http://developer.berlios.de/projects/socketcan for details.
31
32 The slcan driver supports up to 10 CAN netdevices by default which
33 can be changed by the 'maxdev=xx' module option. This driver can
34 also be built as a module. If so, the module will be called slcan.
35
15config CAN_DEV 36config CAN_DEV
16 tristate "Platform CAN drivers with Netlink support" 37 tristate "Platform CAN drivers with Netlink support"
17 depends on CAN 38 depends on CAN
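The help text above mentions a 'maxdev=xx' module option for the number of slcan netdevices. The slcan.c source is not part of this excerpt, so the following is only a sketch of how such an option is conventionally declared; the identifier names and default are assumptions:

#include <linux/module.h>

/* Sketch only: a module parameter like the "maxdev" option described above. */
static int maxdev = 10;			/* default number of slcan channels */
module_param(maxdev, int, 0);
MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");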
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 90af15a4f106..07ca159ba3f9 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5obj-$(CONFIG_CAN_VCAN) += vcan.o 5obj-$(CONFIG_CAN_VCAN) += vcan.o
6obj-$(CONFIG_CAN_SLCAN) += slcan.o
6 7
7obj-$(CONFIG_CAN_DEV) += can-dev.o 8obj-$(CONFIG_CAN_DEV) += can-dev.o
8can-dev-y := dev.o 9can-dev-y := dev.o
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c
index 6e533dcc36c0..b9a6d7a5a739 100644
--- a/drivers/net/can/janz-ican3.c
+++ b/drivers/net/can/janz-ican3.c
@@ -1114,11 +1114,6 @@ static bool ican3_txok(struct ican3_dev *mod)
1114/* 1114/*
1115 * Receive one CAN frame from the hardware 1115
1116 * 1116 *
1117 * This works like the core of a NAPI function, but is intended to be called
1118 * from workqueue context instead. This driver already needs a workqueue to
1119 * process control messages, so we use the workqueue instead of using NAPI.
1120 * This was done to simplify locking.
1121 *
1122 * CONTEXT: must be called from user context 1117 * CONTEXT: must be called from user context
1123 */ 1118 */
1124static int ican3_recv_skb(struct ican3_dev *mod) 1119static int ican3_recv_skb(struct ican3_dev *mod)
@@ -1251,7 +1246,6 @@ static irqreturn_t ican3_irq(int irq, void *dev_id)
1251 * Reset an ICAN module to its power-on state 1246 * Reset an ICAN module to its power-on state
1252 * 1247 *
1253 * CONTEXT: no network device registered 1248 * CONTEXT: no network device registered
1254 * LOCKING: work function disabled
1255 */ 1249 */
1256static int ican3_reset_module(struct ican3_dev *mod) 1250static int ican3_reset_module(struct ican3_dev *mod)
1257{ 1251{
@@ -1262,9 +1256,6 @@ static int ican3_reset_module(struct ican3_dev *mod)
1262 /* disable interrupts so no more work is scheduled */ 1256 /* disable interrupts so no more work is scheduled */
1263 iowrite8(1 << mod->num, &mod->ctrl->int_disable); 1257 iowrite8(1 << mod->num, &mod->ctrl->int_disable);
1264 1258
1265 /* flush any pending work */
1266 flush_scheduled_work();
1267
1268 /* the first unallocated page in the DPM is #9 */ 1259 /* the first unallocated page in the DPM is #9 */
1269 mod->free_page = DPM_FREE_START; 1260 mod->free_page = DPM_FREE_START;
1270 1261
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
index 64c378cd0c34..74cd880c7e06 100644
--- a/drivers/net/can/mscan/mscan.c
+++ b/drivers/net/can/mscan/mscan.c
@@ -182,7 +182,7 @@ static int mscan_restart(struct net_device *dev)
182 182
183 priv->can.state = CAN_STATE_ERROR_ACTIVE; 183 priv->can.state = CAN_STATE_ERROR_ACTIVE;
184 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD), 184 WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
185 "bus-off state expected"); 185 "bus-off state expected\n");
186 out_8(&regs->canmisc, MSCAN_BOHOLD); 186 out_8(&regs->canmisc, MSCAN_BOHOLD);
187 /* Re-enable receive interrupts. */ 187 /* Re-enable receive interrupts. */
188 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE); 188 out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
diff --git a/drivers/net/can/pch_can.c b/drivers/net/can/pch_can.c
index 672718261c68..c42e97268248 100644
--- a/drivers/net/can/pch_can.c
+++ b/drivers/net/can/pch_can.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 1999 - 2010 Intel Corporation. 2 * Copyright (C) 1999 - 2010 Intel Corporation.
3 * Copyright (C) 2010 OKI SEMICONDUCTOR Co., LTD. 3 * Copyright (C) 2010 OKI SEMICONDUCTOR CO., LTD.
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -32,106 +32,115 @@
32#include <linux/can/dev.h> 32#include <linux/can/dev.h>
33#include <linux/can/error.h> 33#include <linux/can/error.h>
34 34
35#define MAX_MSG_OBJ 32 35#define PCH_CTRL_INIT BIT(0) /* The INIT bit of CANCONT register. */
36#define MSG_OBJ_RX 0 /* The receive message object flag. */ 36#define PCH_CTRL_IE BIT(1) /* The IE bit of CAN control register */
37#define MSG_OBJ_TX 1 /* The transmit message object flag. */ 37#define PCH_CTRL_IE_SIE_EIE (BIT(3) | BIT(2) | BIT(1))
38 38#define PCH_CTRL_CCE BIT(6)
39#define ENABLE 1 /* The enable flag */ 39#define PCH_CTRL_OPT BIT(7) /* The OPT bit of CANCONT register. */
40#define DISABLE 0 /* The disable flag */ 40#define PCH_OPT_SILENT BIT(3) /* The Silent bit of CANOPT reg. */
41#define CAN_CTRL_INIT 0x0001 /* The INIT bit of CANCONT register. */ 41#define PCH_OPT_LBACK BIT(4) /* The LoopBack bit of CANOPT reg. */
42#define CAN_CTRL_IE 0x0002 /* The IE bit of CAN control register */ 42
43#define CAN_CTRL_IE_SIE_EIE 0x000e 43#define PCH_CMASK_RX_TX_SET 0x00f3
44#define CAN_CTRL_CCE 0x0040 44#define PCH_CMASK_RX_TX_GET 0x0073
45#define CAN_CTRL_OPT 0x0080 /* The OPT bit of CANCONT register. */ 45#define PCH_CMASK_ALL 0xff
46#define CAN_OPT_SILENT 0x0008 /* The Silent bit of CANOPT reg. */ 46#define PCH_CMASK_NEWDAT BIT(2)
47#define CAN_OPT_LBACK 0x0010 /* The LoopBack bit of CANOPT reg. */ 47#define PCH_CMASK_CLRINTPND BIT(3)
48#define CAN_CMASK_RX_TX_SET 0x00f3 48#define PCH_CMASK_CTRL BIT(4)
49#define CAN_CMASK_RX_TX_GET 0x0073 49#define PCH_CMASK_ARB BIT(5)
50#define CAN_CMASK_ALL 0xff 50#define PCH_CMASK_MASK BIT(6)
51#define CAN_CMASK_RDWR 0x80 51#define PCH_CMASK_RDWR BIT(7)
52#define CAN_CMASK_ARB 0x20 52#define PCH_IF_MCONT_NEWDAT BIT(15)
53#define CAN_CMASK_CTRL 0x10 53#define PCH_IF_MCONT_MSGLOST BIT(14)
54#define CAN_CMASK_MASK 0x40 54#define PCH_IF_MCONT_INTPND BIT(13)
55#define CAN_CMASK_NEWDAT 0x04 55#define PCH_IF_MCONT_UMASK BIT(12)
56#define CAN_CMASK_CLRINTPND 0x08 56#define PCH_IF_MCONT_TXIE BIT(11)
57 57#define PCH_IF_MCONT_RXIE BIT(10)
58#define CAN_IF_MCONT_NEWDAT 0x8000 58#define PCH_IF_MCONT_RMTEN BIT(9)
59#define CAN_IF_MCONT_INTPND 0x2000 59#define PCH_IF_MCONT_TXRQXT BIT(8)
60#define CAN_IF_MCONT_UMASK 0x1000 60#define PCH_IF_MCONT_EOB BIT(7)
61#define CAN_IF_MCONT_TXIE 0x0800 61#define PCH_IF_MCONT_DLC (BIT(0) | BIT(1) | BIT(2) | BIT(3))
62#define CAN_IF_MCONT_RXIE 0x0400 62#define PCH_MASK2_MDIR_MXTD (BIT(14) | BIT(15))
63#define CAN_IF_MCONT_RMTEN 0x0200 63#define PCH_ID2_DIR BIT(13)
64#define CAN_IF_MCONT_TXRQXT 0x0100 64#define PCH_ID2_XTD BIT(14)
65#define CAN_IF_MCONT_EOB 0x0080 65#define PCH_ID_MSGVAL BIT(15)
66#define CAN_IF_MCONT_DLC 0x000f 66#define PCH_IF_CREQ_BUSY BIT(15)
67#define CAN_IF_MCONT_MSGLOST 0x4000 67
68#define CAN_MASK2_MDIR_MXTD 0xc000 68#define PCH_STATUS_INT 0x8000
69#define CAN_ID2_DIR 0x2000 69#define PCH_REC 0x00007f00
70#define CAN_ID_MSGVAL 0x8000 70#define PCH_TEC 0x000000ff
71 71
72#define CAN_STATUS_INT 0x8000 72#define PCH_TX_OK BIT(3)
73#define CAN_IF_CREQ_BUSY 0x8000 73#define PCH_RX_OK BIT(4)
74#define CAN_ID2_XTD 0x4000 74#define PCH_EPASSIV BIT(5)
75 75#define PCH_EWARN BIT(6)
76#define CAN_REC 0x00007f00 76#define PCH_BUS_OFF BIT(7)
77#define CAN_TEC 0x000000ff
78
79#define PCH_RX_OK 0x00000010
80#define PCH_TX_OK 0x00000008
81#define PCH_BUS_OFF 0x00000080
82#define PCH_EWARN 0x00000040
83#define PCH_EPASSIV 0x00000020
84#define PCH_LEC0 0x00000001
85#define PCH_LEC1 0x00000002
86#define PCH_LEC2 0x00000004
87#define PCH_LEC_ALL (PCH_LEC0 | PCH_LEC1 | PCH_LEC2)
88#define PCH_STUF_ERR PCH_LEC0
89#define PCH_FORM_ERR PCH_LEC1
90#define PCH_ACK_ERR (PCH_LEC0 | PCH_LEC1)
91#define PCH_BIT1_ERR PCH_LEC2
92#define PCH_BIT0_ERR (PCH_LEC0 | PCH_LEC2)
93#define PCH_CRC_ERR (PCH_LEC1 | PCH_LEC2)
94 77
95/* bit position of certain controller bits. */ 78/* bit position of certain controller bits. */
96#define BIT_BITT_BRP 0 79#define PCH_BIT_BRP_SHIFT 0
97#define BIT_BITT_SJW 6 80#define PCH_BIT_SJW_SHIFT 6
98#define BIT_BITT_TSEG1 8 81#define PCH_BIT_TSEG1_SHIFT 8
99#define BIT_BITT_TSEG2 12 82#define PCH_BIT_TSEG2_SHIFT 12
100#define BIT_IF1_MCONT_RXIE 10 83#define PCH_BIT_BRPE_BRPE_SHIFT 6
101#define BIT_IF2_MCONT_TXIE 11 84
102#define BIT_BRPE_BRPE 6 85#define PCH_MSK_BITT_BRP 0x3f
103#define BIT_ES_TXERRCNT 0 86#define PCH_MSK_BRPE_BRPE 0x3c0
104#define BIT_ES_RXERRCNT 8 87#define PCH_MSK_CTRL_IE_SIE_EIE 0x07
105#define MSK_BITT_BRP 0x3f 88#define PCH_COUNTER_LIMIT 10
106#define MSK_BITT_SJW 0xc0
107#define MSK_BITT_TSEG1 0xf00
108#define MSK_BITT_TSEG2 0x7000
109#define MSK_BRPE_BRPE 0x3c0
110#define MSK_BRPE_GET 0x0f
111#define MSK_CTRL_IE_SIE_EIE 0x07
112#define MSK_MCONT_TXIE 0x08
113#define MSK_MCONT_RXIE 0x10
114#define PCH_CAN_NO_TX_BUFF 1
115#define COUNTER_LIMIT 10
116 89
117#define PCH_CAN_CLK 50000000 /* 50MHz */ 90#define PCH_CAN_CLK 50000000 /* 50MHz */
118 91
119/* Define the number of message object. 92/*
93 * Define the number of message object.
120 * PCH CAN communications are done via Message RAM. 94 * PCH CAN communications are done via Message RAM.
121 * The Message RAM consists of 32 message objects. */ 95 * The Message RAM consists of 32 message objects.
122#define PCH_RX_OBJ_NUM 26 /* 1~ PCH_RX_OBJ_NUM is Rx*/ 96 */
123#define PCH_TX_OBJ_NUM 6 /* PCH_RX_OBJ_NUM is RX ~ Tx*/ 97#define PCH_RX_OBJ_NUM 26
124#define PCH_OBJ_NUM (PCH_TX_OBJ_NUM + PCH_RX_OBJ_NUM) 98#define PCH_TX_OBJ_NUM 6
99#define PCH_RX_OBJ_START 1
100#define PCH_RX_OBJ_END PCH_RX_OBJ_NUM
101#define PCH_TX_OBJ_START (PCH_RX_OBJ_END + 1)
102#define PCH_TX_OBJ_END (PCH_RX_OBJ_NUM + PCH_TX_OBJ_NUM)
125 103
126#define PCH_FIFO_THRESH 16 104#define PCH_FIFO_THRESH 16
127 105
106/* TxRqst2 shows the status of MsgObjNo.17~32 */
107#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) <<\
108 (PCH_RX_OBJ_END - 16))
109
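For reference, the defines above split the 32 message objects into objects 1..26 (Rx) and 27..32 (Tx), and PCH_TREQ2_TX_MASK picks out the Tx objects inside the TxRqst2 register, whose bit 0 corresponds to object 17. A standalone sketch (not part of the patch; the constants are simply restated) that works the mask out numerically:

#include <stdio.h>

/* Values restated from the defines above, for illustration only. */
#define PCH_RX_OBJ_NUM    26
#define PCH_TX_OBJ_NUM    6
#define PCH_RX_OBJ_END    PCH_RX_OBJ_NUM
#define PCH_TREQ2_TX_MASK (((1 << PCH_TX_OBJ_NUM) - 1) << (PCH_RX_OBJ_END - 16))

int main(void)
{
	/* Objects 1..26 are Rx, 27..32 are Tx; TxRqst2 bit 0 is object 17,
	 * so the six Tx objects land in bits 10..15 of TREQ2. */
	printf("PCH_TREQ2_TX_MASK = 0x%x\n", PCH_TREQ2_TX_MASK); /* 0xfc00 */
	return 0;
}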
110enum pch_ifreg {
111 PCH_RX_IFREG,
112 PCH_TX_IFREG,
113};
114
115enum pch_can_err {
116 PCH_STUF_ERR = 1,
117 PCH_FORM_ERR,
118 PCH_ACK_ERR,
119 PCH_BIT1_ERR,
120 PCH_BIT0_ERR,
121 PCH_CRC_ERR,
122 PCH_LEC_ALL,
123};
124
128enum pch_can_mode { 125enum pch_can_mode {
129 PCH_CAN_ENABLE, 126 PCH_CAN_ENABLE,
130 PCH_CAN_DISABLE, 127 PCH_CAN_DISABLE,
131 PCH_CAN_ALL, 128 PCH_CAN_ALL,
132 PCH_CAN_NONE, 129 PCH_CAN_NONE,
133 PCH_CAN_STOP, 130 PCH_CAN_STOP,
134 PCH_CAN_RUN 131 PCH_CAN_RUN,
132};
133
134struct pch_can_if_regs {
135 u32 creq;
136 u32 cmask;
137 u32 mask1;
138 u32 mask2;
139 u32 id1;
140 u32 id2;
141 u32 mcont;
142 u32 data[4];
143 u32 rsv[13];
135}; 144};
136 145
137struct pch_can_regs { 146struct pch_can_regs {
@@ -142,57 +151,36 @@ struct pch_can_regs {
142 u32 intr; 151 u32 intr;
143 u32 opt; 152 u32 opt;
144 u32 brpe; 153 u32 brpe;
145 u32 reserve1; 154 u32 reserve;
146 u32 if1_creq; 155 struct pch_can_if_regs ifregs[2]; /* [0]=if1 [1]=if2 */
147 u32 if1_cmask; 156 u32 reserve1[8];
148 u32 if1_mask1;
149 u32 if1_mask2;
150 u32 if1_id1;
151 u32 if1_id2;
152 u32 if1_mcont;
153 u32 if1_dataa1;
154 u32 if1_dataa2;
155 u32 if1_datab1;
156 u32 if1_datab2;
157 u32 reserve2;
158 u32 reserve3[12];
159 u32 if2_creq;
160 u32 if2_cmask;
161 u32 if2_mask1;
162 u32 if2_mask2;
163 u32 if2_id1;
164 u32 if2_id2;
165 u32 if2_mcont;
166 u32 if2_dataa1;
167 u32 if2_dataa2;
168 u32 if2_datab1;
169 u32 if2_datab2;
170 u32 reserve4;
171 u32 reserve5[20];
172 u32 treq1; 157 u32 treq1;
173 u32 treq2; 158 u32 treq2;
174 u32 reserve6[2]; 159 u32 reserve2[6];
175 u32 reserve7[56]; 160 u32 data1;
176 u32 reserve8[3]; 161 u32 data2;
162 u32 reserve3[6];
163 u32 canipend1;
164 u32 canipend2;
165 u32 reserve4[6];
166 u32 canmval1;
167 u32 canmval2;
168 u32 reserve5[37];
177 u32 srst; 169 u32 srst;
178}; 170};
179 171
180struct pch_can_priv { 172struct pch_can_priv {
181 struct can_priv can; 173 struct can_priv can;
182 unsigned int can_num;
183 struct pci_dev *dev; 174 struct pci_dev *dev;
184 unsigned int tx_enable[MAX_MSG_OBJ]; 175 u32 tx_enable[PCH_TX_OBJ_END];
185 unsigned int rx_enable[MAX_MSG_OBJ]; 176 u32 rx_enable[PCH_TX_OBJ_END];
186 unsigned int rx_link[MAX_MSG_OBJ]; 177 u32 rx_link[PCH_TX_OBJ_END];
187 unsigned int int_enables; 178 u32 int_enables;
188 unsigned int int_stat;
189 struct net_device *ndev; 179 struct net_device *ndev;
190 spinlock_t msgif_reg_lock; /* Message Interface Registers Access Lock*/
191 unsigned int msg_obj[MAX_MSG_OBJ];
192 struct pch_can_regs __iomem *regs; 180 struct pch_can_regs __iomem *regs;
193 struct napi_struct napi; 181 struct napi_struct napi;
194 unsigned int tx_obj; /* Point next Tx Obj index */ 182 int tx_obj; /* Point next Tx Obj index */
195 unsigned int use_msi; 183 int use_msi;
196}; 184};
197 185
198static struct can_bittiming_const pch_can_bittiming_const = { 186static struct can_bittiming_const pch_can_bittiming_const = {
@@ -228,15 +216,15 @@ static void pch_can_set_run_mode(struct pch_can_priv *priv,
228{ 216{
229 switch (mode) { 217 switch (mode) {
230 case PCH_CAN_RUN: 218 case PCH_CAN_RUN:
231 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_INIT); 219 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_INIT);
232 break; 220 break;
233 221
234 case PCH_CAN_STOP: 222 case PCH_CAN_STOP:
235 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_INIT); 223 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_INIT);
236 break; 224 break;
237 225
238 default: 226 default:
239 dev_err(&priv->ndev->dev, "%s -> Invalid Mode.\n", __func__); 227 netdev_err(priv->ndev, "%s -> Invalid Mode.\n", __func__);
240 break; 228 break;
241 } 229 }
242} 230}
@@ -246,357 +234,184 @@ static void pch_can_set_optmode(struct pch_can_priv *priv)
246 u32 reg_val = ioread32(&priv->regs->opt); 234 u32 reg_val = ioread32(&priv->regs->opt);
247 235
248 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) 236 if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
249 reg_val |= CAN_OPT_SILENT; 237 reg_val |= PCH_OPT_SILENT;
250 238
251 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) 239 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
252 reg_val |= CAN_OPT_LBACK; 240 reg_val |= PCH_OPT_LBACK;
253 241
254 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_OPT); 242 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_OPT);
255 iowrite32(reg_val, &priv->regs->opt); 243 iowrite32(reg_val, &priv->regs->opt);
256} 244}
257 245
258static void pch_can_set_int_custom(struct pch_can_priv *priv) 246static void pch_can_rw_msg_obj(void __iomem *creq_addr, u32 num)
259{ 247{
260 /* Clearing the IE, SIE and EIE bits of Can control register. */ 248 int counter = PCH_COUNTER_LIMIT;
261 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 249 u32 ifx_creq;
262
263 /* Appropriately setting them. */
264 pch_can_bit_set(&priv->regs->cont,
265 ((priv->int_enables & MSK_CTRL_IE_SIE_EIE) << 1));
266}
267 250
268/* This function retrieves interrupt enabled for the CAN device. */ 251 iowrite32(num, creq_addr);
269static void pch_can_get_int_enables(struct pch_can_priv *priv, u32 *enables) 252 while (counter) {
270{ 253 ifx_creq = ioread32(creq_addr) & PCH_IF_CREQ_BUSY;
271 /* Obtaining the status of IE, SIE and EIE interrupt bits. */ 254 if (!ifx_creq)
272 *enables = ((ioread32(&priv->regs->cont) & CAN_CTRL_IE_SIE_EIE) >> 1); 255 break;
256 counter--;
257 udelay(1);
258 }
259 if (!counter)
260 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
273} 261}
274 262
275static void pch_can_set_int_enables(struct pch_can_priv *priv, 263static void pch_can_set_int_enables(struct pch_can_priv *priv,
276 enum pch_can_mode interrupt_no) 264 enum pch_can_mode interrupt_no)
277{ 265{
278 switch (interrupt_no) { 266 switch (interrupt_no) {
279 case PCH_CAN_ENABLE:
280 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE);
281 break;
282
283 case PCH_CAN_DISABLE: 267 case PCH_CAN_DISABLE:
284 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE); 268 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE);
285 break; 269 break;
286 270
287 case PCH_CAN_ALL: 271 case PCH_CAN_ALL:
288 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 272 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
289 break; 273 break;
290 274
291 case PCH_CAN_NONE: 275 case PCH_CAN_NONE:
292 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_IE_SIE_EIE); 276 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
293 break; 277 break;
294 278
295 default: 279 default:
296 dev_err(&priv->ndev->dev, "Invalid interrupt number.\n"); 280 netdev_err(priv->ndev, "Invalid interrupt number.\n");
297 break; 281 break;
298 } 282 }
299} 283}
300 284
301static void pch_can_check_if_busy(u32 __iomem *creq_addr, u32 num) 285static void pch_can_set_rxtx(struct pch_can_priv *priv, u32 buff_num,
302{ 286 int set, enum pch_ifreg dir)
303 u32 counter = COUNTER_LIMIT;
304 u32 ifx_creq;
305
306 iowrite32(num, creq_addr);
307 while (counter) {
308 ifx_creq = ioread32(creq_addr) & CAN_IF_CREQ_BUSY;
309 if (!ifx_creq)
310 break;
311 counter--;
312 udelay(1);
313 }
314 if (!counter)
315 pr_err("%s:IF1 BUSY Flag is set forever.\n", __func__);
316}
317
318static void pch_can_set_rx_enable(struct pch_can_priv *priv, u32 buff_num,
319 u32 set)
320{
321 unsigned long flags;
322
323 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
324 /* Reading the receive buffer data from RAM to Interface1 registers */
325 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
326 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
327
328 /* Setting the IF1MASK1 register to access MsgVal and RxIE bits */
329 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
330 &priv->regs->if1_cmask);
331
332 if (set == ENABLE) {
333 /* Setting the MsgVal and RxIE bits */
334 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
335 pch_can_bit_set(&priv->regs->if1_id2, CAN_ID_MSGVAL);
336
337 } else if (set == DISABLE) {
338 /* Resetting the MsgVal and RxIE bits */
339 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_RXIE);
340 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID_MSGVAL);
341 }
342
343 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
344 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
345}
346
347static void pch_can_rx_enable_all(struct pch_can_priv *priv)
348{ 287{
349 int i; 288 u32 ie;
350 289
351 /* Traversing to obtain the object configured as receivers. */ 290 if (dir)
352 for (i = 0; i < PCH_OBJ_NUM; i++) { 291 ie = PCH_IF_MCONT_TXIE;
353 if (priv->msg_obj[i] == MSG_OBJ_RX) 292 else
354 pch_can_set_rx_enable(priv, i + 1, ENABLE); 293 ie = PCH_IF_MCONT_RXIE;
355 }
356}
357 294
358static void pch_can_rx_disable_all(struct pch_can_priv *priv) 295 /* Reading the Msg buffer from Message RAM to IF1/2 registers. */
359{ 296 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
360 int i; 297 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
361 298
362 /* Traversing to obtain the object configured as receivers. */ 299 /* Setting the IF1/2MASK1 register to access MsgVal and RxIE bits */
363 for (i = 0; i < PCH_OBJ_NUM; i++) { 300 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_ARB | PCH_CMASK_CTRL,
364 if (priv->msg_obj[i] == MSG_OBJ_RX) 301 &priv->regs->ifregs[dir].cmask);
365 pch_can_set_rx_enable(priv, i + 1, DISABLE);
366 }
367}
368 302
369static void pch_can_set_tx_enable(struct pch_can_priv *priv, u32 buff_num, 303 if (set) {
370 u32 set) 304 /* Setting the MsgVal and RxIE/TxIE bits */
371{ 305 pch_can_bit_set(&priv->regs->ifregs[dir].mcont, ie);
372 unsigned long flags; 306 pch_can_bit_set(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
373 307 } else {
374 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 308 /* Clearing the MsgVal and RxIE/TxIE bits */
375 /* Reading the Msg buffer from Message RAM to Interface2 registers. */ 309 pch_can_bit_clear(&priv->regs->ifregs[dir].mcont, ie);
376 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask); 310 pch_can_bit_clear(&priv->regs->ifregs[dir].id2, PCH_ID_MSGVAL);
377 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
378
379 /* Setting the IF2CMASK register for accessing the
380 MsgVal and TxIE bits */
381 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_ARB | CAN_CMASK_CTRL,
382 &priv->regs->if2_cmask);
383
384 if (set == ENABLE) {
385 /* Setting the MsgVal and TxIE bits */
386 pch_can_bit_set(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
387 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID_MSGVAL);
388 } else if (set == DISABLE) {
389 /* Resetting the MsgVal and TxIE bits. */
390 pch_can_bit_clear(&priv->regs->if2_mcont, CAN_IF_MCONT_TXIE);
391 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID_MSGVAL);
392 } 311 }
393 312
394 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num); 313 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
395 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
396} 314}
397 315
398static void pch_can_tx_enable_all(struct pch_can_priv *priv) 316static void pch_can_set_rx_all(struct pch_can_priv *priv, int set)
399{ 317{
400 int i; 318 int i;
401 319
402 /* Traversing to obtain the object configured as transmit object. */ 320 /* Traversing to obtain the object configured as receivers. */
403 for (i = 0; i < PCH_OBJ_NUM; i++) { 321 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++)
404 if (priv->msg_obj[i] == MSG_OBJ_TX) 322 pch_can_set_rxtx(priv, i, set, PCH_RX_IFREG);
405 pch_can_set_tx_enable(priv, i + 1, ENABLE);
406 }
407} 323}
408 324
409static void pch_can_tx_disable_all(struct pch_can_priv *priv) 325static void pch_can_set_tx_all(struct pch_can_priv *priv, int set)
410{ 326{
411 int i; 327 int i;
412 328
413 /* Traversing to obtain the object configured as transmit object. */ 329 /* Traversing to obtain the object configured as transmit object. */
414 for (i = 0; i < PCH_OBJ_NUM; i++) { 330 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
415 if (priv->msg_obj[i] == MSG_OBJ_TX) 331 pch_can_set_rxtx(priv, i, set, PCH_TX_IFREG);
416 pch_can_set_tx_enable(priv, i + 1, DISABLE);
417 }
418}
419
420static void pch_can_get_rx_enable(struct pch_can_priv *priv, u32 buff_num,
421 u32 *enable)
422{
423 unsigned long flags;
424
425 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
426 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
427 pch_can_check_if_busy(&priv->regs->if1_creq, buff_num);
428
429 if (((ioread32(&priv->regs->if1_id2)) & CAN_ID_MSGVAL) &&
430 ((ioread32(&priv->regs->if1_mcont)) &
431 CAN_IF_MCONT_RXIE))
432 *enable = ENABLE;
433 else
434 *enable = DISABLE;
435 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
436}
437
438static void pch_can_get_tx_enable(struct pch_can_priv *priv, u32 buff_num,
439 u32 *enable)
440{
441 unsigned long flags;
442
443 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
444 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
445 pch_can_check_if_busy(&priv->regs->if2_creq, buff_num);
446
447 if (((ioread32(&priv->regs->if2_id2)) & CAN_ID_MSGVAL) &&
448 ((ioread32(&priv->regs->if2_mcont)) &
449 CAN_IF_MCONT_TXIE)) {
450 *enable = ENABLE;
451 } else {
452 *enable = DISABLE;
453 }
454 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
455} 332}
456 333
457static int pch_can_int_pending(struct pch_can_priv *priv) 334static u32 pch_can_int_pending(struct pch_can_priv *priv)
458{ 335{
459 return ioread32(&priv->regs->intr) & 0xffff; 336 return ioread32(&priv->regs->intr) & 0xffff;
460} 337}
461 338
462static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv, 339static void pch_can_clear_if_buffers(struct pch_can_priv *priv)
463 u32 buffer_num, u32 set)
464{ 340{
465 unsigned long flags; 341 int i; /* Msg Obj ID (1~32) */
466 342
467 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 343 for (i = PCH_RX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
468 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 344 iowrite32(PCH_CMASK_RX_TX_SET, &priv->regs->ifregs[0].cmask);
469 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num); 345 iowrite32(0xffff, &priv->regs->ifregs[0].mask1);
470 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL, &priv->regs->if1_cmask); 346 iowrite32(0xffff, &priv->regs->ifregs[0].mask2);
471 if (set == ENABLE) 347 iowrite32(0x0, &priv->regs->ifregs[0].id1);
472 pch_can_bit_clear(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB); 348 iowrite32(0x0, &priv->regs->ifregs[0].id2);
473 else 349 iowrite32(0x0, &priv->regs->ifregs[0].mcont);
474 pch_can_bit_set(&priv->regs->if1_mcont, CAN_IF_MCONT_EOB); 350 iowrite32(0x0, &priv->regs->ifregs[0].data[0]);
475 351 iowrite32(0x0, &priv->regs->ifregs[0].data[1]);
476 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num); 352 iowrite32(0x0, &priv->regs->ifregs[0].data[2]);
477 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags); 353 iowrite32(0x0, &priv->regs->ifregs[0].data[3]);
478} 354 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK |
479 355 PCH_CMASK_ARB | PCH_CMASK_CTRL,
480static void pch_can_get_rx_buffer_link(struct pch_can_priv *priv, 356 &priv->regs->ifregs[0].cmask);
481 u32 buffer_num, u32 *link) 357 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
482{
483 unsigned long flags;
484
485 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
486 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
487 pch_can_check_if_busy(&priv->regs->if1_creq, buffer_num);
488
489 if (ioread32(&priv->regs->if1_mcont) & CAN_IF_MCONT_EOB)
490 *link = DISABLE;
491 else
492 *link = ENABLE;
493 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
494}
495
496static void pch_can_clear_buffers(struct pch_can_priv *priv)
497{
498 int i;
499
500 for (i = 0; i < PCH_RX_OBJ_NUM; i++) {
501 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if1_cmask);
502 iowrite32(0xffff, &priv->regs->if1_mask1);
503 iowrite32(0xffff, &priv->regs->if1_mask2);
504 iowrite32(0x0, &priv->regs->if1_id1);
505 iowrite32(0x0, &priv->regs->if1_id2);
506 iowrite32(0x0, &priv->regs->if1_mcont);
507 iowrite32(0x0, &priv->regs->if1_dataa1);
508 iowrite32(0x0, &priv->regs->if1_dataa2);
509 iowrite32(0x0, &priv->regs->if1_datab1);
510 iowrite32(0x0, &priv->regs->if1_datab2);
511 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
512 CAN_CMASK_ARB | CAN_CMASK_CTRL,
513 &priv->regs->if1_cmask);
514 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
515 }
516
517 for (i = i; i < PCH_OBJ_NUM; i++) {
518 iowrite32(CAN_CMASK_RX_TX_SET, &priv->regs->if2_cmask);
519 iowrite32(0xffff, &priv->regs->if2_mask1);
520 iowrite32(0xffff, &priv->regs->if2_mask2);
521 iowrite32(0x0, &priv->regs->if2_id1);
522 iowrite32(0x0, &priv->regs->if2_id2);
523 iowrite32(0x0, &priv->regs->if2_mcont);
524 iowrite32(0x0, &priv->regs->if2_dataa1);
525 iowrite32(0x0, &priv->regs->if2_dataa2);
526 iowrite32(0x0, &priv->regs->if2_datab1);
527 iowrite32(0x0, &priv->regs->if2_datab2);
528 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK |
529 CAN_CMASK_ARB | CAN_CMASK_CTRL,
530 &priv->regs->if2_cmask);
531 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
532 } 358 }
533} 359}
534 360
535static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv) 361static void pch_can_config_rx_tx_buffers(struct pch_can_priv *priv)
536{ 362{
537 int i; 363 int i;
538 unsigned long flags;
539 364
540 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 365 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
366 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
367 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
541 368
542 for (i = 0; i < PCH_OBJ_NUM; i++) { 369 iowrite32(0x0, &priv->regs->ifregs[0].id1);
543 if (priv->msg_obj[i] == MSG_OBJ_RX) { 370 iowrite32(0x0, &priv->regs->ifregs[0].id2);
544 iowrite32(CAN_CMASK_RX_TX_GET,
545 &priv->regs->if1_cmask);
546 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
547 371
548 iowrite32(0x0, &priv->regs->if1_id1); 372 pch_can_bit_set(&priv->regs->ifregs[0].mcont,
549 iowrite32(0x0, &priv->regs->if1_id2); 373 PCH_IF_MCONT_UMASK);
550 374
551 pch_can_bit_set(&priv->regs->if1_mcont, 375 /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
552 CAN_IF_MCONT_UMASK); 376 if (i == PCH_RX_OBJ_END)
377 pch_can_bit_set(&priv->regs->ifregs[0].mcont,
378 PCH_IF_MCONT_EOB);
379 else
380 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
381 PCH_IF_MCONT_EOB);
553 382
554 /* Set FIFO mode set to 0 except last Rx Obj*/ 383 iowrite32(0, &priv->regs->ifregs[0].mask1);
555 pch_can_bit_clear(&priv->regs->if1_mcont, 384 pch_can_bit_clear(&priv->regs->ifregs[0].mask2,
556 CAN_IF_MCONT_EOB); 385 0x1fff | PCH_MASK2_MDIR_MXTD);
557 /* In case FIFO mode, Last EoB of Rx Obj must be 1 */
558 if (i == (PCH_RX_OBJ_NUM - 1))
559 pch_can_bit_set(&priv->regs->if1_mcont,
560 CAN_IF_MCONT_EOB);
561 386
562 iowrite32(0, &priv->regs->if1_mask1); 387 /* Setting CMASK for writing */
563 pch_can_bit_clear(&priv->regs->if1_mask2, 388 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
564 0x1fff | CAN_MASK2_MDIR_MXTD); 389 PCH_CMASK_CTRL, &priv->regs->ifregs[0].cmask);
565 390
566 /* Setting CMASK for writing */ 391 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, i);
567 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 392 }
568 CAN_CMASK_ARB | CAN_CMASK_CTRL,
569 &priv->regs->if1_cmask);
570
571 pch_can_check_if_busy(&priv->regs->if1_creq, i+1);
572 } else if (priv->msg_obj[i] == MSG_OBJ_TX) {
573 iowrite32(CAN_CMASK_RX_TX_GET,
574 &priv->regs->if2_cmask);
575 pch_can_check_if_busy(&priv->regs->if2_creq, i+1);
576 393
577 /* Resetting DIR bit for reception */ 394 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++) {
578 iowrite32(0x0, &priv->regs->if2_id1); 395 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[1].cmask);
579 iowrite32(0x0, &priv->regs->if2_id2); 396 pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
580 pch_can_bit_set(&priv->regs->if2_id2, CAN_ID2_DIR);
581 397
582 /* Setting EOB bit for transmitter */ 398 /* Resetting DIR bit for reception */
583 iowrite32(CAN_IF_MCONT_EOB, &priv->regs->if2_mcont); 399 iowrite32(0x0, &priv->regs->ifregs[1].id1);
400 iowrite32(PCH_ID2_DIR, &priv->regs->ifregs[1].id2);
584 401
585 pch_can_bit_set(&priv->regs->if2_mcont, 402 /* Setting EOB bit for transmitter */
586 CAN_IF_MCONT_UMASK); 403 iowrite32(PCH_IF_MCONT_EOB | PCH_IF_MCONT_UMASK,
404 &priv->regs->ifregs[1].mcont);
587 405
588 iowrite32(0, &priv->regs->if2_mask1); 406 iowrite32(0, &priv->regs->ifregs[1].mask1);
589 pch_can_bit_clear(&priv->regs->if2_mask2, 0x1fff); 407 pch_can_bit_clear(&priv->regs->ifregs[1].mask2, 0x1fff);
590 408
591 /* Setting CMASK for writing */ 409 /* Setting CMASK for writing */
592 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_MASK | 410 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_MASK | PCH_CMASK_ARB |
593 CAN_CMASK_ARB | CAN_CMASK_CTRL, 411 PCH_CMASK_CTRL, &priv->regs->ifregs[1].cmask);
594 &priv->regs->if2_cmask);
595 412
596 pch_can_check_if_busy(&priv->regs->if2_creq, i+1); 413 pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, i);
597 }
598 } 414 }
599 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
600} 415}
601 416
602static void pch_can_init(struct pch_can_priv *priv) 417static void pch_can_init(struct pch_can_priv *priv)
@@ -605,7 +420,7 @@ static void pch_can_init(struct pch_can_priv *priv)
605 pch_can_set_run_mode(priv, PCH_CAN_STOP); 420 pch_can_set_run_mode(priv, PCH_CAN_STOP);
606 421
607 /* Clearing all the message object buffers. */ 422 /* Clearing all the message object buffers. */
608 pch_can_clear_buffers(priv); 423 pch_can_clear_if_buffers(priv);
609 424
610 /* Configuring the respective message object as either rx/tx object. */ 425 /* Configuring the respective message object as either rx/tx object. */
611 pch_can_config_rx_tx_buffers(priv); 426 pch_can_config_rx_tx_buffers(priv);
@@ -623,57 +438,47 @@ static void pch_can_release(struct pch_can_priv *priv)
623 pch_can_set_int_enables(priv, PCH_CAN_NONE); 438 pch_can_set_int_enables(priv, PCH_CAN_NONE);
624 439
625 /* Disabling all the receive object. */ 440 /* Disabling all the receive object. */
626 pch_can_rx_disable_all(priv); 441 pch_can_set_rx_all(priv, 0);
627 442
628 /* Disabling all the transmit object. */ 443 /* Disabling all the transmit object. */
629 pch_can_tx_disable_all(priv); 444 pch_can_set_tx_all(priv, 0);
630} 445}
631 446
632/* This function clears interrupt(s) from the CAN device. */ 447/* This function clears interrupt(s) from the CAN device. */
633static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask) 448static void pch_can_int_clr(struct pch_can_priv *priv, u32 mask)
634{ 449{
635 if (mask == CAN_STATUS_INT) {
636 ioread32(&priv->regs->stat);
637 return;
638 }
639
640 /* Clear interrupt for transmit object */ 450 /* Clear interrupt for transmit object */
641 if (priv->msg_obj[mask - 1] == MSG_OBJ_TX) { 451 if ((mask >= PCH_RX_OBJ_START) && (mask <= PCH_RX_OBJ_END)) {
642 /* Setting CMASK for clearing interrupts for
643 frame transmission. */
644 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB,
645 &priv->regs->if2_cmask);
646
647 /* Resetting the ID registers. */
648 pch_can_bit_set(&priv->regs->if2_id2,
649 CAN_ID2_DIR | (0x7ff << 2));
650 iowrite32(0x0, &priv->regs->if2_id1);
651
652 /* Claring NewDat, TxRqst & IntPnd */
653 pch_can_bit_clear(&priv->regs->if2_mcont,
654 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND |
655 CAN_IF_MCONT_TXRQXT);
656 pch_can_check_if_busy(&priv->regs->if2_creq, mask);
657 } else if (priv->msg_obj[mask - 1] == MSG_OBJ_RX) {
658 /* Setting CMASK for clearing the reception interrupts. */ 452 /* Setting CMASK for clearing the reception interrupts. */
659 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | CAN_CMASK_ARB, 453 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
660 &priv->regs->if1_cmask); 454 &priv->regs->ifregs[0].cmask);
661 455
662 /* Clearing the Dir bit. */ 456 /* Clearing the Dir bit. */
663 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR); 457 pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
664 458
665 /* Clearing NewDat & IntPnd */ 459 /* Clearing NewDat & IntPnd */
666 pch_can_bit_clear(&priv->regs->if1_mcont, 460 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
667 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND); 461 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND);
668 462
669 pch_can_check_if_busy(&priv->regs->if1_creq, mask); 463 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, mask);
670 } 464 } else if ((mask >= PCH_TX_OBJ_START) && (mask <= PCH_TX_OBJ_END)) {
671} 465 /*
466 * Setting CMASK for clearing interrupts for frame transmission.
467 */
468 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL | PCH_CMASK_ARB,
469 &priv->regs->ifregs[1].cmask);
672 470
673static int pch_can_get_buffer_status(struct pch_can_priv *priv) 471 /* Resetting the ID registers. */
674{ 472 pch_can_bit_set(&priv->regs->ifregs[1].id2,
675 return (ioread32(&priv->regs->treq1) & 0xffff) | 473 PCH_ID2_DIR | (0x7ff << 2));
676 ((ioread32(&priv->regs->treq2) & 0xffff) << 16); 474 iowrite32(0x0, &priv->regs->ifregs[1].id1);
475
476		/* Clearing NewDat, TxRqst & IntPnd */
477 pch_can_bit_clear(&priv->regs->ifregs[1].mcont,
478 PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_INTPND |
479 PCH_IF_MCONT_TXRQXT);
480 pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, mask);
481 }
677} 482}
678 483
679static void pch_can_reset(struct pch_can_priv *priv) 484static void pch_can_reset(struct pch_can_priv *priv)
@@ -688,7 +493,7 @@ static void pch_can_error(struct net_device *ndev, u32 status)
688 struct sk_buff *skb; 493 struct sk_buff *skb;
689 struct pch_can_priv *priv = netdev_priv(ndev); 494 struct pch_can_priv *priv = netdev_priv(ndev);
690 struct can_frame *cf; 495 struct can_frame *cf;
691 u32 errc; 496 u32 errc, lec;
692 struct net_device_stats *stats = &(priv->ndev->stats); 497 struct net_device_stats *stats = &(priv->ndev->stats);
693 enum can_state state = priv->can.state; 498 enum can_state state = priv->can.state;
694 499
@@ -697,26 +502,24 @@ static void pch_can_error(struct net_device *ndev, u32 status)
697 return; 502 return;
698 503
699 if (status & PCH_BUS_OFF) { 504 if (status & PCH_BUS_OFF) {
700 pch_can_tx_disable_all(priv); 505 pch_can_set_tx_all(priv, 0);
701 pch_can_rx_disable_all(priv); 506 pch_can_set_rx_all(priv, 0);
702 state = CAN_STATE_BUS_OFF; 507 state = CAN_STATE_BUS_OFF;
703 cf->can_id |= CAN_ERR_BUSOFF; 508 cf->can_id |= CAN_ERR_BUSOFF;
704 can_bus_off(ndev); 509 can_bus_off(ndev);
705 pch_can_set_run_mode(priv, PCH_CAN_RUN);
706 dev_err(&ndev->dev, "%s -> Bus Off occurres.\n", __func__);
707 } 510 }
708 511
512 errc = ioread32(&priv->regs->errc);
709 /* Warning interrupt. */ 513 /* Warning interrupt. */
710 if (status & PCH_EWARN) { 514 if (status & PCH_EWARN) {
711 state = CAN_STATE_ERROR_WARNING; 515 state = CAN_STATE_ERROR_WARNING;
712 priv->can.can_stats.error_warning++; 516 priv->can.can_stats.error_warning++;
713 cf->can_id |= CAN_ERR_CRTL; 517 cf->can_id |= CAN_ERR_CRTL;
714 errc = ioread32(&priv->regs->errc); 518 if (((errc & PCH_REC) >> 8) > 96)
715 if (((errc & CAN_REC) >> 8) > 96)
716 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; 519 cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
717 if ((errc & CAN_TEC) > 96) 520 if ((errc & PCH_TEC) > 96)
718 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; 521 cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
719 dev_warn(&ndev->dev, 522 netdev_dbg(ndev,
720 "%s -> Error Counter is more than 96.\n", __func__); 523 "%s -> Error Counter is more than 96.\n", __func__);
721 } 524 }
722 /* Error passive interrupt. */ 525 /* Error passive interrupt. */
@@ -724,46 +527,52 @@ static void pch_can_error(struct net_device *ndev, u32 status)
724 priv->can.can_stats.error_passive++; 527 priv->can.can_stats.error_passive++;
725 state = CAN_STATE_ERROR_PASSIVE; 528 state = CAN_STATE_ERROR_PASSIVE;
726 cf->can_id |= CAN_ERR_CRTL; 529 cf->can_id |= CAN_ERR_CRTL;
727 errc = ioread32(&priv->regs->errc); 530 if (((errc & PCH_REC) >> 8) > 127)
728 if (((errc & CAN_REC) >> 8) > 127)
729 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; 531 cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
730 if ((errc & CAN_TEC) > 127) 532 if ((errc & PCH_TEC) > 127)
731 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; 533 cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
732 dev_err(&ndev->dev, 534 netdev_dbg(ndev,
733 "%s -> CAN controller is ERROR PASSIVE .\n", __func__); 535 "%s -> CAN controller is ERROR PASSIVE .\n", __func__);
734 } 536 }
735 537
736 if (status & PCH_LEC_ALL) { 538 lec = status & PCH_LEC_ALL;
539 switch (lec) {
540 case PCH_STUF_ERR:
541 cf->data[2] |= CAN_ERR_PROT_STUFF;
737 priv->can.can_stats.bus_error++; 542 priv->can.can_stats.bus_error++;
738 stats->rx_errors++; 543 stats->rx_errors++;
739 switch (status & PCH_LEC_ALL) { 544 break;
740 case PCH_STUF_ERR: 545 case PCH_FORM_ERR:
741 cf->data[2] |= CAN_ERR_PROT_STUFF; 546 cf->data[2] |= CAN_ERR_PROT_FORM;
742 break; 547 priv->can.can_stats.bus_error++;
743 case PCH_FORM_ERR: 548 stats->rx_errors++;
744 cf->data[2] |= CAN_ERR_PROT_FORM; 549 break;
745 break; 550 case PCH_ACK_ERR:
746 case PCH_ACK_ERR: 551 cf->can_id |= CAN_ERR_ACK;
747 cf->data[2] |= CAN_ERR_PROT_LOC_ACK | 552 priv->can.can_stats.bus_error++;
748 CAN_ERR_PROT_LOC_ACK_DEL; 553 stats->rx_errors++;
749 break; 554 break;
750 case PCH_BIT1_ERR: 555 case PCH_BIT1_ERR:
751 case PCH_BIT0_ERR: 556 case PCH_BIT0_ERR:
752 cf->data[2] |= CAN_ERR_PROT_BIT; 557 cf->data[2] |= CAN_ERR_PROT_BIT;
753 break; 558 priv->can.can_stats.bus_error++;
754 case PCH_CRC_ERR: 559 stats->rx_errors++;
755 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ | 560 break;
756 CAN_ERR_PROT_LOC_CRC_DEL; 561 case PCH_CRC_ERR:
757 break; 562 cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
758 default: 563 CAN_ERR_PROT_LOC_CRC_DEL;
759 iowrite32(status | PCH_LEC_ALL, &priv->regs->stat); 564 priv->can.can_stats.bus_error++;
760 break; 565 stats->rx_errors++;
761 } 566 break;
762 567 case PCH_LEC_ALL: /* Written by CPU. No error status */
568 break;
763 } 569 }
764 570
571 cf->data[6] = errc & PCH_TEC;
572 cf->data[7] = (errc & PCH_REC) >> 8;
573
765 priv->can.state = state; 574 priv->can.state = state;
766 netif_rx(skb); 575 netif_receive_skb(skb);
767 576
768 stats->rx_packets++; 577 stats->rx_packets++;
769 stats->rx_bytes += cf->can_dlc; 578 stats->rx_bytes += cf->can_dlc;
@@ -774,204 +583,202 @@ static irqreturn_t pch_can_interrupt(int irq, void *dev_id)
774 struct net_device *ndev = (struct net_device *)dev_id; 583 struct net_device *ndev = (struct net_device *)dev_id;
775 struct pch_can_priv *priv = netdev_priv(ndev); 584 struct pch_can_priv *priv = netdev_priv(ndev);
776 585
777 pch_can_set_int_enables(priv, PCH_CAN_NONE); 586 if (!pch_can_int_pending(priv))
587 return IRQ_NONE;
778 588
589 pch_can_set_int_enables(priv, PCH_CAN_NONE);
779 napi_schedule(&priv->napi); 590 napi_schedule(&priv->napi);
780
781 return IRQ_HANDLED; 591 return IRQ_HANDLED;
782} 592}
783 593
784static int pch_can_rx_normal(struct net_device *ndev, u32 int_stat) 594static void pch_fifo_thresh(struct pch_can_priv *priv, int obj_id)
595{
596 if (obj_id < PCH_FIFO_THRESH) {
597 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL |
598 PCH_CMASK_ARB, &priv->regs->ifregs[0].cmask);
599
600 /* Clearing the Dir bit. */
601 pch_can_bit_clear(&priv->regs->ifregs[0].id2, PCH_ID2_DIR);
602
603 /* Clearing NewDat & IntPnd */
604 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
605 PCH_IF_MCONT_INTPND);
606 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
607 } else if (obj_id > PCH_FIFO_THRESH) {
608 pch_can_int_clr(priv, obj_id);
609 } else if (obj_id == PCH_FIFO_THRESH) {
610 int cnt;
611 for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
612 pch_can_int_clr(priv, cnt + 1);
613 }
614}
615
616static void pch_can_rx_msg_lost(struct net_device *ndev, int obj_id)
617{
618 struct pch_can_priv *priv = netdev_priv(ndev);
619 struct net_device_stats *stats = &(priv->ndev->stats);
620 struct sk_buff *skb;
621 struct can_frame *cf;
622
623 netdev_dbg(priv->ndev, "Msg Obj is overwritten.\n");
624 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
625 PCH_IF_MCONT_MSGLOST);
626 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
627 &priv->regs->ifregs[0].cmask);
628 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_id);
629
630 skb = alloc_can_err_skb(ndev, &cf);
631 if (!skb)
632 return;
633
634 cf->can_id |= CAN_ERR_CRTL;
635 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
636 stats->rx_over_errors++;
637 stats->rx_errors++;
638
639 netif_receive_skb(skb);
640}
641
642static int pch_can_rx_normal(struct net_device *ndev, u32 obj_num, int quota)
785{ 643{
786 u32 reg; 644 u32 reg;
787 canid_t id; 645 canid_t id;
788 u32 ide;
789 u32 rtr;
790 int i, j, k;
791 int rcv_pkts = 0; 646 int rcv_pkts = 0;
792 struct sk_buff *skb; 647 struct sk_buff *skb;
793 struct can_frame *cf; 648 struct can_frame *cf;
794 struct pch_can_priv *priv = netdev_priv(ndev); 649 struct pch_can_priv *priv = netdev_priv(ndev);
795 struct net_device_stats *stats = &(priv->ndev->stats); 650 struct net_device_stats *stats = &(priv->ndev->stats);
651 int i;
652 u32 id2;
653 u16 data_reg;
654
655 do {
656		/* Reading the message object from the Message RAM */
657 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
658 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, obj_num);
796 659
797 /* Reading the messsage object from the Message RAM */ 660 /* Reading the MCONT register. */
798 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask); 661 reg = ioread32(&priv->regs->ifregs[0].mcont);
799 pch_can_check_if_busy(&priv->regs->if1_creq, int_stat);
800 662
801 /* Reading the MCONT register. */ 663 if (reg & PCH_IF_MCONT_EOB)
802 reg = ioread32(&priv->regs->if1_mcont); 664 break;
803 reg &= 0xffff;
804 665
805 for (k = int_stat; !(reg & CAN_IF_MCONT_EOB); k++) {
806 /* If MsgLost bit set. */ 666 /* If MsgLost bit set. */
807 if (reg & CAN_IF_MCONT_MSGLOST) { 667 if (reg & PCH_IF_MCONT_MSGLOST) {
808 dev_err(&priv->ndev->dev, "Msg Obj is overwritten.\n"); 668 pch_can_rx_msg_lost(ndev, obj_num);
809 pch_can_bit_clear(&priv->regs->if1_mcont,
810 CAN_IF_MCONT_MSGLOST);
811 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL,
812 &priv->regs->if1_cmask);
813 pch_can_check_if_busy(&priv->regs->if1_creq, k);
814
815 skb = alloc_can_err_skb(ndev, &cf);
816 if (!skb)
817 return -ENOMEM;
818
819 priv->can.can_stats.error_passive++;
820 priv->can.state = CAN_STATE_ERROR_PASSIVE;
821 cf->can_id |= CAN_ERR_CRTL;
822 cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
823 cf->data[2] |= CAN_ERR_PROT_OVERLOAD;
824 stats->rx_packets++;
825 stats->rx_bytes += cf->can_dlc;
826
827 netif_receive_skb(skb);
828 rcv_pkts++; 669 rcv_pkts++;
829 goto RX_NEXT; 670 quota--;
671 obj_num++;
672 continue;
673 } else if (!(reg & PCH_IF_MCONT_NEWDAT)) {
674 obj_num++;
675 continue;
830 } 676 }
831 if (!(reg & CAN_IF_MCONT_NEWDAT))
832 goto RX_NEXT;
833 677
834 skb = alloc_can_skb(priv->ndev, &cf); 678 skb = alloc_can_skb(priv->ndev, &cf);
835 if (!skb) 679 if (!skb) {
836 return -ENOMEM; 680 netdev_err(ndev, "alloc_can_skb Failed\n");
681 return rcv_pkts;
682 }
837 683
838 /* Get Received data */ 684 /* Get Received data */
839 ide = ((ioread32(&priv->regs->if1_id2)) & CAN_ID2_XTD) >> 14; 685 id2 = ioread32(&priv->regs->ifregs[0].id2);
840 if (ide) { 686 if (id2 & PCH_ID2_XTD) {
841 id = (ioread32(&priv->regs->if1_id1) & 0xffff); 687 id = (ioread32(&priv->regs->ifregs[0].id1) & 0xffff);
842 id |= (((ioread32(&priv->regs->if1_id2)) & 688 id |= (((id2) & 0x1fff) << 16);
843 0x1fff) << 16); 689 cf->can_id = id | CAN_EFF_FLAG;
844 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
845 } else { 690 } else {
846 id = (((ioread32(&priv->regs->if1_id2)) & 691 id = (id2 >> 2) & CAN_SFF_MASK;
847 (CAN_SFF_MASK << 2)) >> 2); 692 cf->can_id = id;
848 cf->can_id = (id & CAN_SFF_MASK);
849 } 693 }
850 694
851 rtr = (ioread32(&priv->regs->if1_id2) & CAN_ID2_DIR); 695 if (id2 & PCH_ID2_DIR)
852 if (rtr) {
853 cf->can_dlc = 0;
854 cf->can_id |= CAN_RTR_FLAG; 696 cf->can_id |= CAN_RTR_FLAG;
855 } else {
856 cf->can_dlc = ((ioread32(&priv->regs->if1_mcont)) &
857 0x0f);
858 }
859 697
860 for (i = 0, j = 0; i < cf->can_dlc; j++) { 698 cf->can_dlc = get_can_dlc((ioread32(&priv->regs->
861 reg = ioread32(&priv->regs->if1_dataa1 + j*4); 699 ifregs[0].mcont)) & 0xF);
862 cf->data[i++] = cpu_to_le32(reg & 0xff); 700
863 if (i == cf->can_dlc) 701 for (i = 0; i < cf->can_dlc; i += 2) {
864 break; 702 data_reg = ioread16(&priv->regs->ifregs[0].data[i / 2]);
865 cf->data[i++] = cpu_to_le32((reg >> 8) & 0xff); 703 cf->data[i] = data_reg;
704 cf->data[i + 1] = data_reg >> 8;
866 } 705 }
867 706
868 netif_receive_skb(skb); 707 netif_receive_skb(skb);
869 rcv_pkts++; 708 rcv_pkts++;
870 stats->rx_packets++; 709 stats->rx_packets++;
710 quota--;
871 stats->rx_bytes += cf->can_dlc; 711 stats->rx_bytes += cf->can_dlc;
872 712
873 if (k < PCH_FIFO_THRESH) { 713 pch_fifo_thresh(priv, obj_num);
874 iowrite32(CAN_CMASK_RDWR | CAN_CMASK_CTRL | 714 obj_num++;
875 CAN_CMASK_ARB, &priv->regs->if1_cmask); 715 } while (quota > 0);
876
877 /* Clearing the Dir bit. */
878 pch_can_bit_clear(&priv->regs->if1_id2, CAN_ID2_DIR);
879
880 /* Clearing NewDat & IntPnd */
881 pch_can_bit_clear(&priv->regs->if1_mcont,
882 CAN_IF_MCONT_INTPND);
883 pch_can_check_if_busy(&priv->regs->if1_creq, k);
884 } else if (k > PCH_FIFO_THRESH) {
885 pch_can_int_clr(priv, k);
886 } else if (k == PCH_FIFO_THRESH) {
887 int cnt;
888 for (cnt = 0; cnt < PCH_FIFO_THRESH; cnt++)
889 pch_can_int_clr(priv, cnt+1);
890 }
891RX_NEXT:
892 /* Reading the messsage object from the Message RAM */
893 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if1_cmask);
894 pch_can_check_if_busy(&priv->regs->if1_creq, k + 1);
895 reg = ioread32(&priv->regs->if1_mcont);
896 }
897 716
898 return rcv_pkts; 717 return rcv_pkts;
899} 718}
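A side note on the receive path rewritten above: the IF data registers are 16 bits wide with the low payload byte in the low half, and the 11-bit standard identifier sits in bits 2..12 of id2. A standalone userspace-style sketch (bit values restated from the patch, frame contents hypothetical) of the same decode:

#include <stdint.h>
#include <stdio.h>

#define PCH_ID2_XTD (1 << 14)

/* Sketch of the id1/id2 decode used in pch_can_rx_normal() above. */
static uint32_t decode_can_id(uint16_t id1, uint16_t id2)
{
	if (id2 & PCH_ID2_XTD)                      /* 29-bit identifier */
		return ((uint32_t)(id2 & 0x1fff) << 16) | id1;
	return (id2 >> 2) & 0x7ff;                  /* 11-bit identifier */
}

/* Each 16-bit data register holds two payload bytes, low byte first. */
static void unpack_data(const uint16_t *data_reg, uint8_t *out, int dlc)
{
	int i;

	for (i = 0; i < dlc; i += 2) {
		out[i] = data_reg[i / 2] & 0xff;
		if (i + 1 < dlc)
			out[i + 1] = data_reg[i / 2] >> 8;
	}
}

int main(void)
{
	uint16_t regs[4] = { 0x2211, 0x4433, 0, 0 };
	uint8_t payload[8];

	unpack_data(regs, payload, 4);
	printf("id=0x%x data=%02x %02x %02x %02x\n",
	       decode_can_id(0x0000, 0x123 << 2),
	       payload[0], payload[1], payload[2], payload[3]);
	return 0;
}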
900static int pch_can_rx_poll(struct napi_struct *napi, int quota) 719
720static void pch_can_tx_complete(struct net_device *ndev, u32 int_stat)
901{ 721{
902 struct net_device *ndev = napi->dev;
903 struct pch_can_priv *priv = netdev_priv(ndev); 722 struct pch_can_priv *priv = netdev_priv(ndev);
904 struct net_device_stats *stats = &(priv->ndev->stats); 723 struct net_device_stats *stats = &(priv->ndev->stats);
905 u32 dlc; 724 u32 dlc;
725
726 can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_END - 1);
727 iowrite32(PCH_CMASK_RX_TX_GET | PCH_CMASK_CLRINTPND,
728 &priv->regs->ifregs[1].cmask);
729 pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, int_stat);
730 dlc = get_can_dlc(ioread32(&priv->regs->ifregs[1].mcont) &
731 PCH_IF_MCONT_DLC);
732 stats->tx_bytes += dlc;
733 stats->tx_packets++;
734 if (int_stat == PCH_TX_OBJ_END)
735 netif_wake_queue(ndev);
736}
737
738static int pch_can_poll(struct napi_struct *napi, int quota)
739{
740 struct net_device *ndev = napi->dev;
741 struct pch_can_priv *priv = netdev_priv(ndev);
906 u32 int_stat; 742 u32 int_stat;
907 int rcv_pkts = 0;
908 u32 reg_stat; 743 u32 reg_stat;
909 unsigned long flags; 744 int quota_save = quota;
910 745
911 int_stat = pch_can_int_pending(priv); 746 int_stat = pch_can_int_pending(priv);
912 if (!int_stat) 747 if (!int_stat)
913 return 0; 748 goto end;
914 749
915INT_STAT: 750 if (int_stat == PCH_STATUS_INT) {
916 if (int_stat == CAN_STATUS_INT) {
917 reg_stat = ioread32(&priv->regs->stat); 751 reg_stat = ioread32(&priv->regs->stat);
918 if (reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) {
919 if ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)
920 pch_can_error(ndev, reg_stat);
921 }
922 752
923 if (reg_stat & PCH_TX_OK) { 753 if ((reg_stat & (PCH_BUS_OFF | PCH_LEC_ALL)) &&
924 spin_lock_irqsave(&priv->msgif_reg_lock, flags); 754 ((reg_stat & PCH_LEC_ALL) != PCH_LEC_ALL)) {
925 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask); 755 pch_can_error(ndev, reg_stat);
926 pch_can_check_if_busy(&priv->regs->if2_creq, 756 quota--;
927 ioread32(&priv->regs->intr));
928 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
929 pch_can_bit_clear(&priv->regs->stat, PCH_TX_OK);
930 } 757 }
931 758
932 if (reg_stat & PCH_RX_OK) 759 if (reg_stat & (PCH_TX_OK | PCH_RX_OK))
933 pch_can_bit_clear(&priv->regs->stat, PCH_RX_OK); 760 pch_can_bit_clear(&priv->regs->stat,
761 reg_stat & (PCH_TX_OK | PCH_RX_OK));
934 762
935 int_stat = pch_can_int_pending(priv); 763 int_stat = pch_can_int_pending(priv);
936 if (int_stat == CAN_STATUS_INT)
937 goto INT_STAT;
938 } 764 }
939 765
940MSG_OBJ: 766 if (quota == 0)
941 if ((int_stat >= 1) && (int_stat <= PCH_RX_OBJ_NUM)) { 767 goto end;
942 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
943 rcv_pkts = pch_can_rx_normal(ndev, int_stat);
944 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
945 if (rcv_pkts < 0)
946 return 0;
947 } else if ((int_stat > PCH_RX_OBJ_NUM) && (int_stat <= PCH_OBJ_NUM)) {
948 if (priv->msg_obj[int_stat - 1] == MSG_OBJ_TX) {
949 /* Handle transmission interrupt */
950 can_get_echo_skb(ndev, int_stat - PCH_RX_OBJ_NUM - 1);
951 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
952 iowrite32(CAN_CMASK_RX_TX_GET | CAN_CMASK_CLRINTPND,
953 &priv->regs->if2_cmask);
954 dlc = ioread32(&priv->regs->if2_mcont) &
955 CAN_IF_MCONT_DLC;
956 pch_can_check_if_busy(&priv->regs->if2_creq, int_stat);
957 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags);
958 if (dlc > 8)
959 dlc = 8;
960 stats->tx_bytes += dlc;
961 stats->tx_packets++;
962 }
963 }
964 768
965 int_stat = pch_can_int_pending(priv); 769 if ((int_stat >= PCH_RX_OBJ_START) && (int_stat <= PCH_RX_OBJ_END)) {
966 if (int_stat == CAN_STATUS_INT) 770 quota -= pch_can_rx_normal(ndev, int_stat, quota);
967 goto INT_STAT; 771 } else if ((int_stat >= PCH_TX_OBJ_START) &&
968 else if (int_stat >= 1 && int_stat <= 32) 772 (int_stat <= PCH_TX_OBJ_END)) {
969 goto MSG_OBJ; 773 /* Handle transmission interrupt */
774 pch_can_tx_complete(ndev, int_stat);
775 }
970 776
777end:
971 napi_complete(napi); 778 napi_complete(napi);
972 pch_can_set_int_enables(priv, PCH_CAN_ALL); 779 pch_can_set_int_enables(priv, PCH_CAN_ALL);
973 780
974 return rcv_pkts; 781 return quota_save - quota;
975} 782}
976 783
977static int pch_set_bittiming(struct net_device *ndev) 784static int pch_set_bittiming(struct net_device *ndev)
@@ -980,20 +787,18 @@ static int pch_set_bittiming(struct net_device *ndev)
980 const struct can_bittiming *bt = &priv->can.bittiming; 787 const struct can_bittiming *bt = &priv->can.bittiming;
981 u32 canbit; 788 u32 canbit;
982 u32 bepe; 789 u32 bepe;
983 u32 brp;
984 790
985 /* Setting the CCE bit for accessing the Can Timing register. */ 791 /* Setting the CCE bit for accessing the Can Timing register. */
986 pch_can_bit_set(&priv->regs->cont, CAN_CTRL_CCE); 792 pch_can_bit_set(&priv->regs->cont, PCH_CTRL_CCE);
987 793
988 brp = (bt->tq) / (1000000000/PCH_CAN_CLK) - 1; 794 canbit = (bt->brp - 1) & PCH_MSK_BITT_BRP;
989 canbit = brp & MSK_BITT_BRP; 795 canbit |= (bt->sjw - 1) << PCH_BIT_SJW_SHIFT;
990 canbit |= (bt->sjw - 1) << BIT_BITT_SJW; 796 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << PCH_BIT_TSEG1_SHIFT;
991 canbit |= (bt->phase_seg1 + bt->prop_seg - 1) << BIT_BITT_TSEG1; 797 canbit |= (bt->phase_seg2 - 1) << PCH_BIT_TSEG2_SHIFT;
992 canbit |= (bt->phase_seg2 - 1) << BIT_BITT_TSEG2; 798 bepe = ((bt->brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;
993 bepe = (brp & MSK_BRPE_BRPE) >> BIT_BRPE_BRPE;
994 iowrite32(canbit, &priv->regs->bitt); 799 iowrite32(canbit, &priv->regs->bitt);
995 iowrite32(bepe, &priv->regs->brpe); 800 iowrite32(bepe, &priv->regs->brpe);
996 pch_can_bit_clear(&priv->regs->cont, CAN_CTRL_CCE); 801 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_CCE);
997 802
998 return 0; 803 return 0;
999} 804}
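A worked example of the packing above may help. The values below are a hypothetical bit-timing configuration (brp = 80, sjw = 1, prop_seg + phase_seg1 = 13, phase_seg2 = 2); the code simply applies the masks and shifts defined in this patch, showing how a BRP larger than 63 spills into the BRPE register:

#include <stdio.h>

/* Masks and shifts restated from the patch above. */
#define PCH_MSK_BITT_BRP	0x3f
#define PCH_MSK_BRPE_BRPE	0x3c0
#define PCH_BIT_SJW_SHIFT	6
#define PCH_BIT_TSEG1_SHIFT	8
#define PCH_BIT_TSEG2_SHIFT	12
#define PCH_BIT_BRPE_BRPE_SHIFT	6

int main(void)
{
	/* Hypothetical timing: brp=80, sjw=1, prop+phase_seg1=13, phase_seg2=2 */
	unsigned int brp = 80, sjw = 1, tseg1 = 13, tseg2 = 2;
	unsigned int canbit, bepe;

	canbit  = (brp - 1) & PCH_MSK_BITT_BRP;       /* low 6 bits of BRP */
	canbit |= (sjw - 1) << PCH_BIT_SJW_SHIFT;
	canbit |= (tseg1 - 1) << PCH_BIT_TSEG1_SHIFT;
	canbit |= (tseg2 - 1) << PCH_BIT_TSEG2_SHIFT;
	bepe = ((brp - 1) & PCH_MSK_BRPE_BRPE) >> PCH_BIT_BRPE_BRPE_SHIFT;

	printf("BITT=0x%04x BRPE=0x%x\n", canbit, bepe); /* BITT=0x1c0f BRPE=0x1 */
	return 0;
}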
@@ -1008,8 +813,8 @@ static void pch_can_start(struct net_device *ndev)
1008 pch_set_bittiming(ndev); 813 pch_set_bittiming(ndev);
1009 pch_can_set_optmode(priv); 814 pch_can_set_optmode(priv);
1010 815
1011 pch_can_tx_enable_all(priv); 816 pch_can_set_tx_all(priv, 1);
1012 pch_can_rx_enable_all(priv); 817 pch_can_set_rx_all(priv, 1);
1013 818
1014 /* Setting the CAN to run mode. */ 819 /* Setting the CAN to run mode. */
1015 pch_can_set_run_mode(priv, PCH_CAN_RUN); 820 pch_can_set_run_mode(priv, PCH_CAN_RUN);
@@ -1041,27 +846,18 @@ static int pch_can_open(struct net_device *ndev)
1041 struct pch_can_priv *priv = netdev_priv(ndev); 846 struct pch_can_priv *priv = netdev_priv(ndev);
1042 int retval; 847 int retval;
1043 848
1044	retval = pci_enable_msi(priv->dev); 849	/* Registering the interrupt. */
1045 if (retval) {
1046 dev_info(&ndev->dev, "PCH CAN opened without MSI\n");
1047 priv->use_msi = 0;
1048 } else {
1049 dev_info(&ndev->dev, "PCH CAN opened with MSI\n");
1050 priv->use_msi = 1;
1051 }
1052
1053 /* Regsitering the interrupt. */
1054 retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED, 850 retval = request_irq(priv->dev->irq, pch_can_interrupt, IRQF_SHARED,
1055 ndev->name, ndev); 851 ndev->name, ndev);
1056 if (retval) { 852 if (retval) {
1057 dev_err(&ndev->dev, "request_irq failed.\n"); 853 netdev_err(ndev, "request_irq failed.\n");
1058 goto req_irq_err; 854 goto req_irq_err;
1059 } 855 }
1060 856
1061 /* Open common can device */ 857 /* Open common can device */
1062 retval = open_candev(ndev); 858 retval = open_candev(ndev);
1063 if (retval) { 859 if (retval) {
1064 dev_err(ndev->dev.parent, "open_candev() failed %d\n", retval); 860 netdev_err(ndev, "open_candev() failed %d\n", retval);
1065 goto err_open_candev; 861 goto err_open_candev;
1066 } 862 }
1067 863
@@ -1075,9 +871,6 @@ static int pch_can_open(struct net_device *ndev)
1075err_open_candev: 871err_open_candev:
1076 free_irq(priv->dev->irq, ndev); 872 free_irq(priv->dev->irq, ndev);
1077req_irq_err: 873req_irq_err:
1078 if (priv->use_msi)
1079 pci_disable_msi(priv->dev);
1080
1081 pch_can_release(priv); 874 pch_can_release(priv);
1082 875
1083 return retval; 876 return retval;
@@ -1091,102 +884,65 @@ static int pch_close(struct net_device *ndev)
1091 napi_disable(&priv->napi); 884 napi_disable(&priv->napi);
1092 pch_can_release(priv); 885 pch_can_release(priv);
1093 free_irq(priv->dev->irq, ndev); 886 free_irq(priv->dev->irq, ndev);
1094 if (priv->use_msi)
1095 pci_disable_msi(priv->dev);
1096 close_candev(ndev); 887 close_candev(ndev);
1097 priv->can.state = CAN_STATE_STOPPED; 888 priv->can.state = CAN_STATE_STOPPED;
1098 return 0; 889 return 0;
1099} 890}
1100 891
1101static int pch_get_msg_obj_sts(struct net_device *ndev, u32 obj_id)
1102{
1103 u32 buffer_status = 0;
1104 struct pch_can_priv *priv = netdev_priv(ndev);
1105
1106 /* Getting the message object status. */
1107 buffer_status = (u32) pch_can_get_buffer_status(priv);
1108
1109 return buffer_status & obj_id;
1110}
1111
1112
1113static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev) 892static netdev_tx_t pch_xmit(struct sk_buff *skb, struct net_device *ndev)
1114{ 893{
1115 int i, j;
1116 unsigned long flags;
1117 struct pch_can_priv *priv = netdev_priv(ndev); 894 struct pch_can_priv *priv = netdev_priv(ndev);
1118 struct can_frame *cf = (struct can_frame *)skb->data; 895 struct can_frame *cf = (struct can_frame *)skb->data;
1119 int tx_buffer_avail = 0; 896 int tx_obj_no;
897 int i;
898 u32 id2;
1120 899
1121 if (can_dropped_invalid_skb(ndev, skb)) 900 if (can_dropped_invalid_skb(ndev, skb))
1122 return NETDEV_TX_OK; 901 return NETDEV_TX_OK;
1123 902
1124 if (priv->tx_obj == (PCH_OBJ_NUM + 1)) { /* Point tail Obj */ 903 tx_obj_no = priv->tx_obj;
1125 while (pch_get_msg_obj_sts(ndev, (((1 << PCH_TX_OBJ_NUM)-1) << 904 if (priv->tx_obj == PCH_TX_OBJ_END) {
1126 PCH_RX_OBJ_NUM))) 905 if (ioread32(&priv->regs->treq2) & PCH_TREQ2_TX_MASK)
1127 udelay(500); 906 netif_stop_queue(ndev);
1128 907
1129 priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj ID */ 908 priv->tx_obj = PCH_TX_OBJ_START;
1130 tx_buffer_avail = priv->tx_obj; /* Point Tail of Tx Obj */
1131 } else { 909 } else {
1132 tx_buffer_avail = priv->tx_obj; 910 priv->tx_obj++;
1133 } 911 }
1134 priv->tx_obj++;
1135
1136 /* Attaining the lock. */
1137 spin_lock_irqsave(&priv->msgif_reg_lock, flags);
1138
1139 /* Reading the Msg Obj from the Msg RAM to the Interface register. */
1140 iowrite32(CAN_CMASK_RX_TX_GET, &priv->regs->if2_cmask);
1141 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail);
1142 912
1143 /* Setting the CMASK register. */ 913 /* Setting the CMASK register. */
1144 pch_can_bit_set(&priv->regs->if2_cmask, CAN_CMASK_ALL); 914 pch_can_bit_set(&priv->regs->ifregs[1].cmask, PCH_CMASK_ALL);
1145 915
1146 /* If ID extended is set. */ 916 /* If ID extended is set. */
1147 pch_can_bit_clear(&priv->regs->if2_id1, 0xffff);
1148 pch_can_bit_clear(&priv->regs->if2_id2, 0x1fff | CAN_ID2_XTD);
1149 if (cf->can_id & CAN_EFF_FLAG) { 917 if (cf->can_id & CAN_EFF_FLAG) {
1150 pch_can_bit_set(&priv->regs->if2_id1, cf->can_id & 0xffff); 918 iowrite32(cf->can_id & 0xffff, &priv->regs->ifregs[1].id1);
1151 pch_can_bit_set(&priv->regs->if2_id2, 919 id2 = ((cf->can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
1152 ((cf->can_id >> 16) & 0x1fff) | CAN_ID2_XTD);
1153 } else { 920 } else {
1154 pch_can_bit_set(&priv->regs->if2_id1, 0); 921 iowrite32(0, &priv->regs->ifregs[1].id1);
1155 pch_can_bit_set(&priv->regs->if2_id2, 922 id2 = (cf->can_id & CAN_SFF_MASK) << 2;
1156 (cf->can_id & CAN_SFF_MASK) << 2);
1157 } 923 }
1158 924
1159 /* If remote frame has to be transmitted.. */ 925 id2 |= PCH_ID_MSGVAL;
1160 if (cf->can_id & CAN_RTR_FLAG)
1161 pch_can_bit_clear(&priv->regs->if2_id2, CAN_ID2_DIR);
1162 926
1163 for (i = 0, j = 0; i < cf->can_dlc; j++) { 927 /* If remote frame has to be transmitted.. */
1164 iowrite32(le32_to_cpu(cf->data[i++]), 928 if (!(cf->can_id & CAN_RTR_FLAG))
1165 (&priv->regs->if2_dataa1) + j*4); 929 id2 |= PCH_ID2_DIR;
1166 if (i == cf->can_dlc)
1167 break;
1168 iowrite32(le32_to_cpu(cf->data[i++] << 8),
1169 (&priv->regs->if2_dataa1) + j*4);
1170 }
1171
1172 can_put_echo_skb(skb, ndev, tx_buffer_avail - PCH_RX_OBJ_NUM - 1);
1173 930
1174 /* Updating the size of the data. */ 931 iowrite32(id2, &priv->regs->ifregs[1].id2);
1175 pch_can_bit_clear(&priv->regs->if2_mcont, 0x0f);
1176 pch_can_bit_set(&priv->regs->if2_mcont, cf->can_dlc);
1177 932
1178 /* Clearing IntPend, NewDat & TxRqst */ 933 /* Copy data to register */
1179 pch_can_bit_clear(&priv->regs->if2_mcont, 934 for (i = 0; i < cf->can_dlc; i += 2) {
1180 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_INTPND | 935 iowrite16(cf->data[i] | (cf->data[i + 1] << 8),
1181 CAN_IF_MCONT_TXRQXT); 936 &priv->regs->ifregs[1].data[i / 2]);
937 }
1182 938
1183 /* Setting NewDat, TxRqst bits */ 939 can_put_echo_skb(skb, ndev, tx_obj_no - PCH_RX_OBJ_END - 1);
1184 pch_can_bit_set(&priv->regs->if2_mcont,
1185 CAN_IF_MCONT_NEWDAT | CAN_IF_MCONT_TXRQXT);
1186 940
1187 pch_can_check_if_busy(&priv->regs->if2_creq, tx_buffer_avail); 941 /* Set the size of the data. Update if2_mcont */
942 iowrite32(cf->can_dlc | PCH_IF_MCONT_NEWDAT | PCH_IF_MCONT_TXRQXT |
943 PCH_IF_MCONT_TXIE, &priv->regs->ifregs[1].mcont);
1188 944
1189 spin_unlock_irqrestore(&priv->msgif_reg_lock, flags); 945 pch_can_rw_msg_obj(&priv->regs->ifregs[1].creq, tx_obj_no);
1190 946
1191 return NETDEV_TX_OK; 947 return NETDEV_TX_OK;
1192} 948}
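For the transmit path above, id1/id2 end up carrying the identifier plus the MsgVal, Xtd and Dir flags in a single pair of writes. A standalone sketch (bit values restated from the patch; the example frame is hypothetical and only the ID packing is mirrored) of that composition:

#include <stdint.h>
#include <stdio.h>

#define PCH_ID2_DIR   (1 << 13)   /* transmit direction      */
#define PCH_ID2_XTD   (1 << 14)   /* extended (29-bit) frame */
#define PCH_ID_MSGVAL (1 << 15)   /* message object valid    */
#define CAN_EFF_FLAG  0x80000000U
#define CAN_SFF_MASK  0x000007ffU

/* Mirror of the id1/id2 packing in pch_xmit() above. */
static void encode_can_id(uint32_t can_id, int rtr, uint16_t *id1, uint16_t *id2)
{
	if (can_id & CAN_EFF_FLAG) {
		*id1 = can_id & 0xffff;
		*id2 = ((can_id >> 16) & 0x1fff) | PCH_ID2_XTD;
	} else {
		*id1 = 0;
		*id2 = (can_id & CAN_SFF_MASK) << 2;
	}
	*id2 |= PCH_ID_MSGVAL;
	if (!rtr)                      /* remote frames leave Dir clear */
		*id2 |= PCH_ID2_DIR;
}

int main(void)
{
	uint16_t id1, id2;

	encode_can_id(0x123, 0, &id1, &id2);
	printf("std: id1=0x%04x id2=0x%04x\n", id1, id2); /* id2=0xa48c */
	return 0;
}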
@@ -1203,21 +959,98 @@ static void __devexit pch_can_remove(struct pci_dev *pdev)
1203 struct pch_can_priv *priv = netdev_priv(ndev); 959 struct pch_can_priv *priv = netdev_priv(ndev);
1204 960
1205 unregister_candev(priv->ndev); 961 unregister_candev(priv->ndev);
1206 free_candev(priv->ndev);
1207 pci_iounmap(pdev, priv->regs); 962 pci_iounmap(pdev, priv->regs);
963 if (priv->use_msi)
964 pci_disable_msi(priv->dev);
1208 pci_release_regions(pdev); 965 pci_release_regions(pdev);
1209 pci_disable_device(pdev); 966 pci_disable_device(pdev);
1210 pci_set_drvdata(pdev, NULL); 967 pci_set_drvdata(pdev, NULL);
1211 pch_can_reset(priv); 968 pch_can_reset(priv);
969 free_candev(priv->ndev);
1212} 970}
1213 971
1214#ifdef CONFIG_PM 972#ifdef CONFIG_PM
973static void pch_can_set_int_custom(struct pch_can_priv *priv)
974{
975 /* Clearing the IE, SIE and EIE bits of Can control register. */
976 pch_can_bit_clear(&priv->regs->cont, PCH_CTRL_IE_SIE_EIE);
977
978 /* Appropriately setting them. */
979 pch_can_bit_set(&priv->regs->cont,
980 ((priv->int_enables & PCH_MSK_CTRL_IE_SIE_EIE) << 1));
981}
982
983/* This function retrieves interrupt enabled for the CAN device. */
984static u32 pch_can_get_int_enables(struct pch_can_priv *priv)
985{
986 /* Obtaining the status of IE, SIE and EIE interrupt bits. */
987 return (ioread32(&priv->regs->cont) & PCH_CTRL_IE_SIE_EIE) >> 1;
988}
989
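The int_enables value saved and restored above is just the IE/SIE/EIE field shifted down by one, since CANCONT keeps those bits in positions 1..3. A tiny sketch of the round trip with a faked register value:

#include <stdio.h>

#define PCH_CTRL_IE_SIE_EIE	((1 << 3) | (1 << 2) | (1 << 1))
#define PCH_MSK_CTRL_IE_SIE_EIE	0x07

int main(void)
{
	unsigned int cont = 0x0e;	/* pretend CANCONT has IE|SIE|EIE set */
	unsigned int saved, restored;

	saved = (cont & PCH_CTRL_IE_SIE_EIE) >> 1;          /* 0x07 */
	restored = (saved & PCH_MSK_CTRL_IE_SIE_EIE) << 1;  /* 0x0e */
	printf("saved=0x%x restored=0x%x\n", saved, restored);
	return 0;
}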
990static u32 pch_can_get_rxtx_ir(struct pch_can_priv *priv, u32 buff_num,
991 enum pch_ifreg dir)
992{
993 u32 ie, enable;
994
995 if (dir)
996 ie = PCH_IF_MCONT_RXIE;
997 else
998 ie = PCH_IF_MCONT_TXIE;
999
1000 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[dir].cmask);
1001 pch_can_rw_msg_obj(&priv->regs->ifregs[dir].creq, buff_num);
1002
1003 if (((ioread32(&priv->regs->ifregs[dir].id2)) & PCH_ID_MSGVAL) &&
1004 ((ioread32(&priv->regs->ifregs[dir].mcont)) & ie))
1005 enable = 1;
1006 else
1007 enable = 0;
1008
1009 return enable;
1010}
1011
1012static void pch_can_set_rx_buffer_link(struct pch_can_priv *priv,
1013 u32 buffer_num, int set)
1014{
1015 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
1016 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
1017 iowrite32(PCH_CMASK_RDWR | PCH_CMASK_CTRL,
1018 &priv->regs->ifregs[0].cmask);
1019 if (set)
1020 pch_can_bit_clear(&priv->regs->ifregs[0].mcont,
1021 PCH_IF_MCONT_EOB);
1022 else
1023 pch_can_bit_set(&priv->regs->ifregs[0].mcont, PCH_IF_MCONT_EOB);
1024
1025 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
1026}
1027
1028static u32 pch_can_get_rx_buffer_link(struct pch_can_priv *priv, u32 buffer_num)
1029{
1030 u32 link;
1031
1032 iowrite32(PCH_CMASK_RX_TX_GET, &priv->regs->ifregs[0].cmask);
1033 pch_can_rw_msg_obj(&priv->regs->ifregs[0].creq, buffer_num);
1034
1035 if (ioread32(&priv->regs->ifregs[0].mcont) & PCH_IF_MCONT_EOB)
1036 link = 0;
1037 else
1038 link = 1;
1039 return link;
1040}
1041
1042static int pch_can_get_buffer_status(struct pch_can_priv *priv)
1043{
1044 return (ioread32(&priv->regs->treq1) & 0xffff) |
1045 (ioread32(&priv->regs->treq2) << 16);
1046}
1047
1215static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state) 1048static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
1216{ 1049{
1217 int i; /* Counter variable. */ 1050 int i;
1218 int retval; /* Return value. */ 1051 int retval;
1219 u32 buf_stat; /* Variable for reading the transmit buffer status. */ 1052 u32 buf_stat; /* Variable for reading the transmit buffer status. */
1220 u32 counter = 0xFFFFFF; 1053 int counter = PCH_COUNTER_LIMIT;
1221 1054
1222 struct net_device *dev = pci_get_drvdata(pdev); 1055 struct net_device *dev = pci_get_drvdata(pdev);
1223 struct pch_can_priv *priv = netdev_priv(dev); 1056 struct pch_can_priv *priv = netdev_priv(dev);
@@ -1226,7 +1059,7 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
1226 pch_can_set_run_mode(priv, PCH_CAN_STOP); 1059 pch_can_set_run_mode(priv, PCH_CAN_STOP);
1227 1060
1228 /* Indicate that we are aboutto/in suspend */ 1061 /* Indicate that we are aboutto/in suspend */
1229 priv->can.state = CAN_STATE_SLEEPING; 1062 priv->can.state = CAN_STATE_STOPPED;
1230 1063
1231 /* Waiting for all transmission to complete. */ 1064 /* Waiting for all transmission to complete. */
1232 while (counter) { 1065 while (counter) {
@@ -1240,31 +1073,26 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
1240 dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__); 1073 dev_err(&pdev->dev, "%s -> Transmission time out.\n", __func__);
1241 1074
1242 /* Save interrupt configuration and then disable them */ 1075 /* Save interrupt configuration and then disable them */
1243 pch_can_get_int_enables(priv, &(priv->int_enables)); 1076 priv->int_enables = pch_can_get_int_enables(priv);
1244 pch_can_set_int_enables(priv, PCH_CAN_DISABLE); 1077 pch_can_set_int_enables(priv, PCH_CAN_DISABLE);
1245 1078
1246 /* Save Tx buffer enable state */ 1079 /* Save Tx buffer enable state */
1247 for (i = 0; i < PCH_OBJ_NUM; i++) { 1080 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
1248 if (priv->msg_obj[i] == MSG_OBJ_TX) 1081 priv->tx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
1249 pch_can_get_tx_enable(priv, i + 1, 1082 PCH_TX_IFREG);
1250 &(priv->tx_enable[i]));
1251 }
1252 1083
1253 /* Disable all Transmit buffers */ 1084 /* Disable all Transmit buffers */
1254 pch_can_tx_disable_all(priv); 1085 pch_can_set_tx_all(priv, 0);
1255 1086
1256 /* Save Rx buffer enable state */ 1087 /* Save Rx buffer enable state */
1257 for (i = 0; i < PCH_OBJ_NUM; i++) { 1088 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
1258 if (priv->msg_obj[i] == MSG_OBJ_RX) { 1089 priv->rx_enable[i - 1] = pch_can_get_rxtx_ir(priv, i,
1259 pch_can_get_rx_enable(priv, i + 1, 1090 PCH_RX_IFREG);
1260 &(priv->rx_enable[i])); 1091 priv->rx_link[i - 1] = pch_can_get_rx_buffer_link(priv, i);
1261 pch_can_get_rx_buffer_link(priv, i + 1,
1262 &(priv->rx_link[i]));
1263 }
1264 } 1092 }
1265 1093
1266 /* Disable all Receive buffers */ 1094 /* Disable all Receive buffers */
1267 pch_can_rx_disable_all(priv); 1095 pch_can_set_rx_all(priv, 0);
1268 retval = pci_save_state(pdev); 1096 retval = pci_save_state(pdev);
1269 if (retval) { 1097 if (retval) {
1270 dev_err(&pdev->dev, "pci_save_state failed.\n"); 1098 dev_err(&pdev->dev, "pci_save_state failed.\n");
@@ -1279,8 +1107,8 @@ static int pch_can_suspend(struct pci_dev *pdev, pm_message_t state)
1279 1107
1280static int pch_can_resume(struct pci_dev *pdev) 1108static int pch_can_resume(struct pci_dev *pdev)
1281{ 1109{
1282 int i; /* Counter variable. */ 1110 int i;
1283 int retval; /* Return variable. */ 1111 int retval;
1284 struct net_device *dev = pci_get_drvdata(pdev); 1112 struct net_device *dev = pci_get_drvdata(pdev);
1285 struct pch_can_priv *priv = netdev_priv(dev); 1113 struct pch_can_priv *priv = netdev_priv(dev);
1286 1114
@@ -1312,23 +1140,16 @@ static int pch_can_resume(struct pci_dev *pdev)
1312 pch_can_set_optmode(priv); 1140 pch_can_set_optmode(priv);
1313 1141
1314 /* Enabling the transmit buffer. */ 1142 /* Enabling the transmit buffer. */
1315 for (i = 0; i < PCH_OBJ_NUM; i++) { 1143 for (i = PCH_TX_OBJ_START; i <= PCH_TX_OBJ_END; i++)
1316 if (priv->msg_obj[i] == MSG_OBJ_TX) { 1144 pch_can_set_rxtx(priv, i, priv->tx_enable[i - 1], PCH_TX_IFREG);
1317 pch_can_set_tx_enable(priv, i + 1,
1318 priv->tx_enable[i]);
1319 }
1320 }
1321 1145
1322 /* Configuring the receive buffer and enabling them. */ 1146 /* Configuring the receive buffer and enabling them. */
1323 for (i = 0; i < PCH_OBJ_NUM; i++) { 1147 for (i = PCH_RX_OBJ_START; i <= PCH_RX_OBJ_END; i++) {
1324 if (priv->msg_obj[i] == MSG_OBJ_RX) { 1148 /* Restore buffer link */
1325 /* Restore buffer link */ 1149 pch_can_set_rx_buffer_link(priv, i, priv->rx_link[i - 1]);
1326 pch_can_set_rx_buffer_link(priv, i + 1, 1150
1327 priv->rx_link[i]); 1151 /* Restore buffer enables */
1328 1152 pch_can_set_rxtx(priv, i, priv->rx_enable[i - 1], PCH_RX_IFREG);
1329 /* Restore buffer enables */
1330 pch_can_set_rx_enable(priv, i + 1, priv->rx_enable[i]);
1331 }
1332 } 1153 }
1333 1154
1334 /* Enable CAN Interrupts */ 1155 /* Enable CAN Interrupts */
@@ -1348,9 +1169,10 @@ static int pch_can_get_berr_counter(const struct net_device *dev,
1348 struct can_berr_counter *bec) 1169 struct can_berr_counter *bec)
1349{ 1170{
1350 struct pch_can_priv *priv = netdev_priv(dev); 1171 struct pch_can_priv *priv = netdev_priv(dev);
1172 u32 errc = ioread32(&priv->regs->errc);
1351 1173
1352 bec->txerr = ioread32(&priv->regs->errc) & CAN_TEC; 1174 bec->txerr = errc & PCH_TEC;
1353 bec->rxerr = (ioread32(&priv->regs->errc) & CAN_REC) >> 8; 1175 bec->rxerr = (errc & PCH_REC) >> 8;
1354 1176
1355 return 0; 1177 return 0;
1356} 1178}
@@ -1361,7 +1183,6 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1361 struct net_device *ndev; 1183 struct net_device *ndev;
1362 struct pch_can_priv *priv; 1184 struct pch_can_priv *priv;
1363 int rc; 1185 int rc;
1364 int index;
1365 void __iomem *addr; 1186 void __iomem *addr;
1366 1187
1367 rc = pci_enable_device(pdev); 1188 rc = pci_enable_device(pdev);
@@ -1383,7 +1204,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1383 goto probe_exit_ipmap; 1204 goto probe_exit_ipmap;
1384 } 1205 }
1385 1206
1386 ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_NUM); 1207 ndev = alloc_candev(sizeof(struct pch_can_priv), PCH_TX_OBJ_END);
1387 if (!ndev) { 1208 if (!ndev) {
1388 rc = -ENOMEM; 1209 rc = -ENOMEM;
1389 dev_err(&pdev->dev, "Failed alloc_candev\n"); 1210 dev_err(&pdev->dev, "Failed alloc_candev\n");
@@ -1399,7 +1220,7 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1399 priv->can.do_get_berr_counter = pch_can_get_berr_counter; 1220 priv->can.do_get_berr_counter = pch_can_get_berr_counter;
1400 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | 1221 priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY |
1401 CAN_CTRLMODE_LOOPBACK; 1222 CAN_CTRLMODE_LOOPBACK;
1402 priv->tx_obj = PCH_RX_OBJ_NUM + 1; /* Point head of Tx Obj */ 1223 priv->tx_obj = PCH_TX_OBJ_START; /* Point head of Tx Obj */
1403 1224
1404 ndev->irq = pdev->irq; 1225 ndev->irq = pdev->irq;
1405 ndev->flags |= IFF_ECHO; 1226 ndev->flags |= IFF_ECHO;
@@ -1407,15 +1228,18 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1407 pci_set_drvdata(pdev, ndev); 1228 pci_set_drvdata(pdev, ndev);
1408 SET_NETDEV_DEV(ndev, &pdev->dev); 1229 SET_NETDEV_DEV(ndev, &pdev->dev);
1409 ndev->netdev_ops = &pch_can_netdev_ops; 1230 ndev->netdev_ops = &pch_can_netdev_ops;
1410
1411 priv->can.clock.freq = PCH_CAN_CLK; /* Hz */ 1231 priv->can.clock.freq = PCH_CAN_CLK; /* Hz */
1412 for (index = 0; index < PCH_RX_OBJ_NUM;)
1413 priv->msg_obj[index++] = MSG_OBJ_RX;
1414 1232
1415 for (index = index; index < PCH_OBJ_NUM;) 1233 netif_napi_add(ndev, &priv->napi, pch_can_poll, PCH_RX_OBJ_END);
1416 priv->msg_obj[index++] = MSG_OBJ_TX;
1417 1234
1418 netif_napi_add(ndev, &priv->napi, pch_can_rx_poll, PCH_RX_OBJ_NUM); 1235 rc = pci_enable_msi(priv->dev);
1236 if (rc) {
1237 netdev_err(ndev, "PCH CAN opened without MSI\n");
1238 priv->use_msi = 0;
1239 } else {
1240 netdev_err(ndev, "PCH CAN opened with MSI\n");
1241 priv->use_msi = 1;
1242 }
1419 1243
1420 rc = register_candev(ndev); 1244 rc = register_candev(ndev);
1421 if (rc) { 1245 if (rc) {
@@ -1426,6 +1250,8 @@ static int __devinit pch_can_probe(struct pci_dev *pdev,
1426 return 0; 1250 return 0;
1427 1251
1428probe_exit_reg_candev: 1252probe_exit_reg_candev:
1253 if (priv->use_msi)
1254 pci_disable_msi(priv->dev);
1429 free_candev(ndev); 1255 free_candev(ndev);
1430probe_exit_alloc_candev: 1256probe_exit_alloc_candev:
1431 pci_iounmap(pdev, addr); 1257 pci_iounmap(pdev, addr);
@@ -1458,6 +1284,6 @@ static void __exit pch_can_pci_exit(void)
1458} 1284}
1459module_exit(pch_can_pci_exit); 1285module_exit(pch_can_pci_exit);
1460 1286
1461MODULE_DESCRIPTION("Controller Area Network Driver"); 1287MODULE_DESCRIPTION("Intel EG20T PCH CAN(Controller Area Network) Driver");
1462MODULE_LICENSE("GPL v2"); 1288MODULE_LICENSE("GPL v2");
1463MODULE_VERSION("0.94"); 1289MODULE_VERSION("0.94");
diff --git a/drivers/net/can/sja1000/plx_pci.c b/drivers/net/can/sja1000/plx_pci.c
index 437b5c716a24..231385b8e08f 100644
--- a/drivers/net/can/sja1000/plx_pci.c
+++ b/drivers/net/can/sja1000/plx_pci.c
@@ -383,7 +383,7 @@ static void plx_pci_reset_marathon(struct pci_dev *pdev)
383{ 383{
384 void __iomem *reset_addr; 384 void __iomem *reset_addr;
385 int i; 385 int i;
386 int reset_bar[2] = {3, 5}; 386 static const int reset_bar[2] = {3, 5};
387 387
388 plx_pci_reset_common(pdev); 388 plx_pci_reset_common(pdev);
389 389
diff --git a/drivers/net/can/sja1000/sja1000_of_platform.c b/drivers/net/can/sja1000/sja1000_of_platform.c
index 5bfccfdf3bbb..09c3e9db9316 100644
--- a/drivers/net/can/sja1000/sja1000_of_platform.c
+++ b/drivers/net/can/sja1000/sja1000_of_platform.c
@@ -107,17 +107,13 @@ static int __devinit sja1000_ofp_probe(struct platform_device *ofdev,
107 res_size = resource_size(&res); 107 res_size = resource_size(&res);
108 108
109 if (!request_mem_region(res.start, res_size, DRV_NAME)) { 109 if (!request_mem_region(res.start, res_size, DRV_NAME)) {
110 dev_err(&ofdev->dev, "couldn't request %#llx..%#llx\n", 110 dev_err(&ofdev->dev, "couldn't request %pR\n", &res);
111 (unsigned long long)res.start,
112 (unsigned long long)res.end);
113 return -EBUSY; 111 return -EBUSY;
114 } 112 }
115 113
116 base = ioremap_nocache(res.start, res_size); 114 base = ioremap_nocache(res.start, res_size);
117 if (!base) { 115 if (!base) {
118 dev_err(&ofdev->dev, "couldn't ioremap %#llx..%#llx\n", 116 dev_err(&ofdev->dev, "couldn't ioremap %pR\n", &res);
119 (unsigned long long)res.start,
120 (unsigned long long)res.end);
121 err = -ENOMEM; 117 err = -ENOMEM;
122 goto exit_release_mem; 118 goto exit_release_mem;
123 } 119 }
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
new file mode 100644
index 000000000000..b423965a78d1
--- /dev/null
+++ b/drivers/net/can/slcan.c
@@ -0,0 +1,756 @@
1/*
2 * slcan.c - serial line CAN interface driver (using tty line discipline)
3 *
4 * This file is derived from linux/drivers/net/slip.c
5 *
6 * slip.c Authors : Laurence Culhane <loz@holmes.demon.co.uk>
7 * Fred N. van Kempen <waltje@uwalt.nl.mugnet.org>
8 * slcan.c Author : Oliver Hartkopp <socketcan@hartkopp.net>
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful, but
16 * WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
18 * General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 59 Temple Place, Suite 330, Boston, MA 02111-1307. You can also get it
23 * at http://www.gnu.org/licenses/gpl.html
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
26 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
27 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
28 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
29 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
30 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
31 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
32 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
33 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
34 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
35 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
36 * DAMAGE.
37 *
38 * Send feedback to <socketcan-users@lists.berlios.de>
39 *
40 */
41
42#include <linux/module.h>
43#include <linux/moduleparam.h>
44
45#include <asm/system.h>
46#include <linux/uaccess.h>
47#include <linux/bitops.h>
48#include <linux/string.h>
49#include <linux/tty.h>
50#include <linux/errno.h>
51#include <linux/netdevice.h>
52#include <linux/skbuff.h>
53#include <linux/rtnetlink.h>
54#include <linux/if_arp.h>
55#include <linux/if_ether.h>
56#include <linux/sched.h>
57#include <linux/delay.h>
58#include <linux/init.h>
59#include <linux/can.h>
60
61static __initdata const char banner[] =
62 KERN_INFO "slcan: serial line CAN interface driver\n";
63
64MODULE_ALIAS_LDISC(N_SLCAN);
65MODULE_DESCRIPTION("serial line CAN interface");
66MODULE_LICENSE("GPL");
67MODULE_AUTHOR("Oliver Hartkopp <socketcan@hartkopp.net>");
68
69#define SLCAN_MAGIC 0x53CA
70
71static int maxdev = 10; /* MAX number of SLCAN channels;
72 This can be overridden with
73 insmod slcan.ko maxdev=nnn */
74module_param(maxdev, int, 0);
75MODULE_PARM_DESC(maxdev, "Maximum number of slcan interfaces");
76
77/* maximum rx buffer len: extended CAN frame with timestamp */
78#define SLC_MTU (sizeof("T1111222281122334455667788EA5F\r")+1)
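(For reference, the template string above works out to 1 frame-type character + 8 extended-id digits + 1 dlc digit + 16 data digits + 4 timestamp digits + 1 CR = 31 characters; sizeof() adds the terminating NUL for 32, and the extra +1 gives an SLC_MTU of 33 bytes of receive buffer.)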
79
80struct slcan {
81 int magic;
82
83 /* Various fields. */
84 struct tty_struct *tty; /* ptr to TTY structure */
85 struct net_device *dev; /* easy for intr handling */
86 spinlock_t lock;
87
88 /* These are pointers to the malloc()ed frame buffers. */
89 unsigned char rbuff[SLC_MTU]; /* receiver buffer */
90 int rcount; /* received chars counter */
91 unsigned char xbuff[SLC_MTU]; /* transmitter buffer */
92 unsigned char *xhead; /* pointer to next XMIT byte */
93 int xleft; /* bytes left in XMIT queue */
94
95 unsigned long flags; /* Flag values/ mode etc */
96#define SLF_INUSE 0 /* Channel in use */
97#define SLF_ERROR 1 /* Parity, etc. error */
98
99 unsigned char leased;
100 dev_t line;
101 pid_t pid;
102};
103
104static struct net_device **slcan_devs;
105
106 /************************************************************************
107 * SLCAN ENCAPSULATION FORMAT *
108 ************************************************************************/
109
110/*
111 * A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended
 112 * frame format), a data length code (can_dlc) which can be from 0 to 8
113 * and up to <can_dlc> data bytes as payload.
114 * Additionally a CAN frame may become a remote transmission frame if the
115 * RTR-bit is set. This causes another ECU to send a CAN frame with the
116 * given can_id.
117 *
118 * The SLCAN ASCII representation of these different frame types is:
119 * <type> <id> <dlc> <data>*
120 *
121 * Extended frames (29 bit) are defined by capital characters in the type.
122 * RTR frames are defined as 'r' types - normal frames have 't' type:
123 * t => 11 bit data frame
124 * r => 11 bit RTR frame
125 * T => 29 bit data frame
126 * R => 29 bit RTR frame
127 *
 128 * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex.
129 * The <dlc> is a one byte ASCII number ('0' - '8')
 130 * The <data> section has as many ASCII Hex bytes as defined by the <dlc>
131 *
132 * Examples:
133 *
134 * t1230 : can_id 0x123, can_dlc 0, no data
135 * t4563112233 : can_id 0x456, can_dlc 3, data 0x11 0x22 0x33
136 * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, can_dlc 2, data 0xAA 0x55
137 * r1230 : can_id 0x123, can_dlc 0, no data, remote transmission request
138 *
139 */
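To make the mapping concrete, here is a minimal, hypothetical userspace sketch (not part of this patch; slc_encode and main are illustrative names) that encodes a classic struct can_frame into the ASCII form described above, assuming only <linux/can.h> and mirroring the 't'/'T'/'r'/'R' convention:

	#include <stdio.h>
	#include <linux/can.h>

	/* Encode one classic CAN frame into its SLCAN ASCII representation. */
	static void slc_encode(const struct can_frame *cf, char *buf, size_t len)
	{
		char cmd = (cf->can_id & CAN_RTR_FLAG) ? 'R' : 'T';
		int n, i;

		if (cf->can_id & CAN_EFF_FLAG)	/* 29 bit id: 8 hex digits */
			n = snprintf(buf, len, "%c%08X%d", cmd,
				     cf->can_id & CAN_EFF_MASK, cf->can_dlc);
		else				/* 11 bit id: lower case cmd, 3 hex digits */
			n = snprintf(buf, len, "%c%03X%d", cmd | 0x20,
				     cf->can_id & CAN_SFF_MASK, cf->can_dlc);

		for (i = 0; i < cf->can_dlc; i++)	/* two hex digits per data byte */
			n += snprintf(buf + n, len - n, "%02X", cf->data[i]);

		snprintf(buf + n, len - n, "\r");	/* CR terminates the PDU */
	}

	int main(void)
	{
		struct can_frame cf = {
			.can_id  = 0x456,
			.can_dlc = 3,
			.data    = { 0x11, 0x22, 0x33 },
		};
		char buf[32];

		slc_encode(&cf, buf, sizeof(buf));
		printf("%s\n", buf);	/* prints "t4563112233", matching the example above */
		return 0;
	}

slc_bump() below performs the inverse operation on strings received from the TTY.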
140
141 /************************************************************************
142 * STANDARD SLCAN DECAPSULATION *
143 ************************************************************************/
144
145static int asc2nibble(char c)
146{
147
148 if ((c >= '0') && (c <= '9'))
149 return c - '0';
150
151 if ((c >= 'A') && (c <= 'F'))
152 return c - 'A' + 10;
153
154 if ((c >= 'a') && (c <= 'f'))
155 return c - 'a' + 10;
156
157 return 16; /* error */
158}
159
160/* Send one completely decapsulated can_frame to the network layer */
161static void slc_bump(struct slcan *sl)
162{
163 struct sk_buff *skb;
164 struct can_frame cf;
165 int i, dlc_pos, tmp;
166 unsigned long ultmp;
167 char cmd = sl->rbuff[0];
168
169 if ((cmd != 't') && (cmd != 'T') && (cmd != 'r') && (cmd != 'R'))
170 return;
171
172 if (cmd & 0x20) /* tiny chars 'r' 't' => standard frame format */
173 dlc_pos = 4; /* dlc position tiiid */
174 else
175 dlc_pos = 9; /* dlc position Tiiiiiiiid */
176
177 if (!((sl->rbuff[dlc_pos] >= '0') && (sl->rbuff[dlc_pos] < '9')))
178 return;
179
180 cf.can_dlc = sl->rbuff[dlc_pos] - '0'; /* get can_dlc from ASCII val */
181
182 sl->rbuff[dlc_pos] = 0; /* terminate can_id string */
183
184 if (strict_strtoul(sl->rbuff+1, 16, &ultmp))
185 return;
186
187 cf.can_id = ultmp;
188
189 if (!(cmd & 0x20)) /* NO tiny chars => extended frame format */
190 cf.can_id |= CAN_EFF_FLAG;
191
192 if ((cmd | 0x20) == 'r') /* RTR frame */
193 cf.can_id |= CAN_RTR_FLAG;
194
195 *(u64 *) (&cf.data) = 0; /* clear payload */
196
197 for (i = 0, dlc_pos++; i < cf.can_dlc; i++) {
198
199 tmp = asc2nibble(sl->rbuff[dlc_pos++]);
200 if (tmp > 0x0F)
201 return;
202 cf.data[i] = (tmp << 4);
203 tmp = asc2nibble(sl->rbuff[dlc_pos++]);
204 if (tmp > 0x0F)
205 return;
206 cf.data[i] |= tmp;
207 }
208
209
210 skb = dev_alloc_skb(sizeof(struct can_frame));
211 if (!skb)
212 return;
213
214 skb->dev = sl->dev;
215 skb->protocol = htons(ETH_P_CAN);
216 skb->pkt_type = PACKET_BROADCAST;
217 skb->ip_summed = CHECKSUM_UNNECESSARY;
218 memcpy(skb_put(skb, sizeof(struct can_frame)),
219 &cf, sizeof(struct can_frame));
220 netif_rx(skb);
221
222 sl->dev->stats.rx_packets++;
223 sl->dev->stats.rx_bytes += cf.can_dlc;
224}
225
226/* parse tty input stream */
227static void slcan_unesc(struct slcan *sl, unsigned char s)
228{
229
230 if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */
231 if (!test_and_clear_bit(SLF_ERROR, &sl->flags) &&
232 (sl->rcount > 4)) {
233 slc_bump(sl);
234 }
235 sl->rcount = 0;
236 } else {
237 if (!test_bit(SLF_ERROR, &sl->flags)) {
238 if (sl->rcount < SLC_MTU) {
239 sl->rbuff[sl->rcount++] = s;
240 return;
241 } else {
242 sl->dev->stats.rx_over_errors++;
243 set_bit(SLF_ERROR, &sl->flags);
244 }
245 }
246 }
247}
248
249 /************************************************************************
250 * STANDARD SLCAN ENCAPSULATION *
251 ************************************************************************/
252
253/* Encapsulate one can_frame and stuff into a TTY queue. */
254static void slc_encaps(struct slcan *sl, struct can_frame *cf)
255{
256 int actual, idx, i;
257 char cmd;
258
259 if (cf->can_id & CAN_RTR_FLAG)
260 cmd = 'R'; /* becomes 'r' in standard frame format */
261 else
262 cmd = 'T'; /* becomes 't' in standard frame format */
263
264 if (cf->can_id & CAN_EFF_FLAG)
265 sprintf(sl->xbuff, "%c%08X%d", cmd,
266 cf->can_id & CAN_EFF_MASK, cf->can_dlc);
267 else
268 sprintf(sl->xbuff, "%c%03X%d", cmd | 0x20,
269 cf->can_id & CAN_SFF_MASK, cf->can_dlc);
270
271 idx = strlen(sl->xbuff);
272
273 for (i = 0; i < cf->can_dlc; i++)
274 sprintf(&sl->xbuff[idx + 2*i], "%02X", cf->data[i]);
275
276 strcat(sl->xbuff, "\r"); /* add terminating character */
277
278 /* Order of next two lines is *very* important.
 279 * When we are sending a small amount of data,
 280 * the transfer may be completed inside the ops->write()
 281 * routine, because it's running with interrupts enabled.
 282 * In this case we would *never* get a WRITE_WAKEUP event
 283 * if we did not request it before the write operation.
284 * 14 Oct 1994 Dmitry Gorodchanin.
285 */
286 set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
287 actual = sl->tty->ops->write(sl->tty, sl->xbuff, strlen(sl->xbuff));
288 sl->xleft = strlen(sl->xbuff) - actual;
289 sl->xhead = sl->xbuff + actual;
290 sl->dev->stats.tx_bytes += cf->can_dlc;
291}
292
293/*
294 * Called by the driver when there's room for more data. If we have
295 * more packets to send, we send them here.
296 */
297static void slcan_write_wakeup(struct tty_struct *tty)
298{
299 int actual;
300 struct slcan *sl = (struct slcan *) tty->disc_data;
301
302 /* First make sure we're connected. */
303 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
304 return;
305
306 if (sl->xleft <= 0) {
307 /* Now serial buffer is almost free & we can start
308 * transmission of another packet */
309 sl->dev->stats.tx_packets++;
310 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
311 netif_wake_queue(sl->dev);
312 return;
313 }
314
315 actual = tty->ops->write(tty, sl->xhead, sl->xleft);
316 sl->xleft -= actual;
317 sl->xhead += actual;
318}
319
320/* Send a can_frame to a TTY queue. */
321static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
322{
323 struct slcan *sl = netdev_priv(dev);
324
325 if (skb->len != sizeof(struct can_frame))
326 goto out;
327
328 spin_lock(&sl->lock);
329 if (!netif_running(dev)) {
330 spin_unlock(&sl->lock);
331 printk(KERN_WARNING "%s: xmit: iface is down\n", dev->name);
332 goto out;
333 }
334 if (sl->tty == NULL) {
335 spin_unlock(&sl->lock);
336 goto out;
337 }
338
339 netif_stop_queue(sl->dev);
340 slc_encaps(sl, (struct can_frame *) skb->data); /* encaps & send */
341 spin_unlock(&sl->lock);
342
343out:
344 kfree_skb(skb);
345 return NETDEV_TX_OK;
346}
347
348
349/******************************************
350 * Routines looking at netdevice side.
351 ******************************************/
352
353/* Netdevice UP -> DOWN routine */
354static int slc_close(struct net_device *dev)
355{
356 struct slcan *sl = netdev_priv(dev);
357
358 spin_lock_bh(&sl->lock);
359 if (sl->tty) {
360 /* TTY discipline is running. */
361 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
362 }
363 netif_stop_queue(dev);
364 sl->rcount = 0;
365 sl->xleft = 0;
366 spin_unlock_bh(&sl->lock);
367
368 return 0;
369}
370
371/* Netdevice DOWN -> UP routine */
372static int slc_open(struct net_device *dev)
373{
374 struct slcan *sl = netdev_priv(dev);
375
376 if (sl->tty == NULL)
377 return -ENODEV;
378
379 sl->flags &= (1 << SLF_INUSE);
380 netif_start_queue(dev);
381 return 0;
382}
383
384/* Hook the destructor so we can free slcan devs at the right point in time */
385static void slc_free_netdev(struct net_device *dev)
386{
387 int i = dev->base_addr;
388 free_netdev(dev);
389 slcan_devs[i] = NULL;
390}
391
392static const struct net_device_ops slc_netdev_ops = {
393 .ndo_open = slc_open,
394 .ndo_stop = slc_close,
395 .ndo_start_xmit = slc_xmit,
396};
397
398static void slc_setup(struct net_device *dev)
399{
400 dev->netdev_ops = &slc_netdev_ops;
401 dev->destructor = slc_free_netdev;
402
403 dev->hard_header_len = 0;
404 dev->addr_len = 0;
405 dev->tx_queue_len = 10;
406
407 dev->mtu = sizeof(struct can_frame);
408 dev->type = ARPHRD_CAN;
409
410 /* New-style flags. */
411 dev->flags = IFF_NOARP;
412 dev->features = NETIF_F_NO_CSUM;
413}
414
415/******************************************
416 Routines looking at TTY side.
417 ******************************************/
418
419/*
420 * Handle the 'receiver data ready' interrupt.
421 * This function is called by the 'tty_io' module in the kernel when
422 * a block of SLCAN data has been received, which can now be decapsulated
 423 * and passed on to the network layer for further processing. This will not
424 * be re-entered while running but other ldisc functions may be called
425 * in parallel
426 */
427
428static void slcan_receive_buf(struct tty_struct *tty,
429 const unsigned char *cp, char *fp, int count)
430{
431 struct slcan *sl = (struct slcan *) tty->disc_data;
432
433 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
434 return;
435
436 /* Read the characters out of the buffer */
437 while (count--) {
438 if (fp && *fp++) {
439 if (!test_and_set_bit(SLF_ERROR, &sl->flags))
440 sl->dev->stats.rx_errors++;
441 cp++;
442 continue;
443 }
444 slcan_unesc(sl, *cp++);
445 }
446}
447
448/************************************
449 * slcan_open helper routines.
450 ************************************/
451
452/* Collect hanged up channels */
453static void slc_sync(void)
454{
455 int i;
456 struct net_device *dev;
457 struct slcan *sl;
458
459 for (i = 0; i < maxdev; i++) {
460 dev = slcan_devs[i];
461 if (dev == NULL)
462 break;
463
464 sl = netdev_priv(dev);
465 if (sl->tty || sl->leased)
466 continue;
467 if (dev->flags & IFF_UP)
468 dev_close(dev);
469 }
470}
471
472/* Find a free SLCAN channel, and link in this `tty' line. */
473static struct slcan *slc_alloc(dev_t line)
474{
475 int i;
476 struct net_device *dev = NULL;
477 struct slcan *sl;
478
479 if (slcan_devs == NULL)
480 return NULL; /* Master array missing ! */
481
482 for (i = 0; i < maxdev; i++) {
483 dev = slcan_devs[i];
484 if (dev == NULL)
485 break;
486
487 }
488
489 /* Sorry, too many, all slots in use */
490 if (i >= maxdev)
491 return NULL;
492
493 if (dev) {
494 sl = netdev_priv(dev);
495 if (test_bit(SLF_INUSE, &sl->flags)) {
496 unregister_netdevice(dev);
497 dev = NULL;
498 slcan_devs[i] = NULL;
499 }
500 }
501
502 if (!dev) {
503 char name[IFNAMSIZ];
504 sprintf(name, "slcan%d", i);
505
506 dev = alloc_netdev(sizeof(*sl), name, slc_setup);
507 if (!dev)
508 return NULL;
509 dev->base_addr = i;
510 }
511
512 sl = netdev_priv(dev);
513
514 /* Initialize channel control data */
515 sl->magic = SLCAN_MAGIC;
516 sl->dev = dev;
517 spin_lock_init(&sl->lock);
518 slcan_devs[i] = dev;
519
520 return sl;
521}
522
523/*
524 * Open the high-level part of the SLCAN channel.
525 * This function is called by the TTY module when the
526 * SLCAN line discipline is called for. Because we are
527 * sure the tty line exists, we only have to link it to
528 * a free SLCAN channel...
529 *
530 * Called in process context serialized from other ldisc calls.
531 */
532
533static int slcan_open(struct tty_struct *tty)
534{
535 struct slcan *sl;
536 int err;
537
538 if (!capable(CAP_NET_ADMIN))
539 return -EPERM;
540
541 if (tty->ops->write == NULL)
542 return -EOPNOTSUPP;
543
544 /* RTnetlink lock is misused here to serialize concurrent
545 opens of slcan channels. There are better ways, but it is
546 the simplest one.
547 */
548 rtnl_lock();
549
 550	/* Collect hung up channels. */
551 slc_sync();
552
553 sl = tty->disc_data;
554
555 err = -EEXIST;
556 /* First make sure we're not already connected. */
557 if (sl && sl->magic == SLCAN_MAGIC)
558 goto err_exit;
559
560 /* OK. Find a free SLCAN channel to use. */
561 err = -ENFILE;
562 sl = slc_alloc(tty_devnum(tty));
563 if (sl == NULL)
564 goto err_exit;
565
566 sl->tty = tty;
567 tty->disc_data = sl;
568 sl->line = tty_devnum(tty);
569 sl->pid = current->pid;
570
571 if (!test_bit(SLF_INUSE, &sl->flags)) {
572 /* Perform the low-level SLCAN initialization. */
573 sl->rcount = 0;
574 sl->xleft = 0;
575
576 set_bit(SLF_INUSE, &sl->flags);
577
578 err = register_netdevice(sl->dev);
579 if (err)
580 goto err_free_chan;
581 }
582
583 /* Done. We have linked the TTY line to a channel. */
584 rtnl_unlock();
585 tty->receive_room = 65536; /* We don't flow control */
586 return sl->dev->base_addr;
587
588err_free_chan:
589 sl->tty = NULL;
590 tty->disc_data = NULL;
591 clear_bit(SLF_INUSE, &sl->flags);
592
593err_exit:
594 rtnl_unlock();
595
596 /* Count references from TTY module */
597 return err;
598}
599
600/*
601 * Close down a SLCAN channel.
602 * This means flushing out any pending queues, and then returning. This
603 * call is serialized against other ldisc functions.
604 *
605 * We also use this method for a hangup event.
606 */
607
608static void slcan_close(struct tty_struct *tty)
609{
610 struct slcan *sl = (struct slcan *) tty->disc_data;
611
612 /* First make sure we're connected. */
613 if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
614 return;
615
616 tty->disc_data = NULL;
617 sl->tty = NULL;
618 if (!sl->leased)
619 sl->line = 0;
620
621 /* Flush network side */
622 unregister_netdev(sl->dev);
623 /* This will complete via sl_free_netdev */
624}
625
626static int slcan_hangup(struct tty_struct *tty)
627{
628 slcan_close(tty);
629 return 0;
630}
631
632/* Perform I/O control on an active SLCAN channel. */
633static int slcan_ioctl(struct tty_struct *tty, struct file *file,
634 unsigned int cmd, unsigned long arg)
635{
636 struct slcan *sl = (struct slcan *) tty->disc_data;
637 unsigned int tmp;
638
639 /* First make sure we're connected. */
640 if (!sl || sl->magic != SLCAN_MAGIC)
641 return -EINVAL;
642
643 switch (cmd) {
644 case SIOCGIFNAME:
645 tmp = strlen(sl->dev->name) + 1;
646 if (copy_to_user((void __user *)arg, sl->dev->name, tmp))
647 return -EFAULT;
648 return 0;
649
650 case SIOCSIFHWADDR:
651 return -EINVAL;
652
653 default:
654 return tty_mode_ioctl(tty, file, cmd, arg);
655 }
656}
657
658static struct tty_ldisc_ops slc_ldisc = {
659 .owner = THIS_MODULE,
660 .magic = TTY_LDISC_MAGIC,
661 .name = "slcan",
662 .open = slcan_open,
663 .close = slcan_close,
664 .hangup = slcan_hangup,
665 .ioctl = slcan_ioctl,
666 .receive_buf = slcan_receive_buf,
667 .write_wakeup = slcan_write_wakeup,
668};
669
670static int __init slcan_init(void)
671{
672 int status;
673
674 if (maxdev < 4)
675 maxdev = 4; /* Sanity */
676
677 printk(banner);
678 printk(KERN_INFO "slcan: %d dynamic interface channels.\n", maxdev);
679
680 slcan_devs = kzalloc(sizeof(struct net_device *)*maxdev, GFP_KERNEL);
681 if (!slcan_devs) {
682 printk(KERN_ERR "slcan: can't allocate slcan device array!\n");
683 return -ENOMEM;
684 }
685
686 /* Fill in our line protocol discipline, and register it */
687 status = tty_register_ldisc(N_SLCAN, &slc_ldisc);
688 if (status) {
689 printk(KERN_ERR "slcan: can't register line discipline\n");
690 kfree(slcan_devs);
691 }
692 return status;
693}
694
695static void __exit slcan_exit(void)
696{
697 int i;
698 struct net_device *dev;
699 struct slcan *sl;
700 unsigned long timeout = jiffies + HZ;
701 int busy = 0;
702
703 if (slcan_devs == NULL)
704 return;
705
 706	/* First of all: check for active disciplines and hang them up.
707 */
708 do {
709 if (busy)
710 msleep_interruptible(100);
711
712 busy = 0;
713 for (i = 0; i < maxdev; i++) {
714 dev = slcan_devs[i];
715 if (!dev)
716 continue;
717 sl = netdev_priv(dev);
718 spin_lock_bh(&sl->lock);
719 if (sl->tty) {
720 busy++;
721 tty_hangup(sl->tty);
722 }
723 spin_unlock_bh(&sl->lock);
724 }
725 } while (busy && time_before(jiffies, timeout));
726
727 /* FIXME: hangup is async so we should wait when doing this second
728 phase */
729
730 for (i = 0; i < maxdev; i++) {
731 dev = slcan_devs[i];
732 if (!dev)
733 continue;
734 slcan_devs[i] = NULL;
735
736 sl = netdev_priv(dev);
737 if (sl->tty) {
738 printk(KERN_ERR "%s: tty discipline still running\n",
739 dev->name);
740 /* Intentionally leak the control block. */
741 dev->destructor = NULL;
742 }
743
744 unregister_netdev(dev);
745 }
746
747 kfree(slcan_devs);
748 slcan_devs = NULL;
749
750 i = tty_unregister_ldisc(N_SLCAN);
751 if (i)
752 printk(KERN_ERR "slcan: can't unregister ldisc (err %d)\n", i);
753}
754
755module_init(slcan_init);
756module_exit(slcan_exit);
diff --git a/drivers/net/cassini.c b/drivers/net/cassini.c
index d6b6d6aa565a..7206ab2cbbf8 100644
--- a/drivers/net/cassini.c
+++ b/drivers/net/cassini.c
@@ -2788,7 +2788,7 @@ static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
2788 2788
2789 ctrl = 0; 2789 ctrl = 0;
2790 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2790 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2791 const u64 csum_start_off = skb_transport_offset(skb); 2791 const u64 csum_start_off = skb_checksum_start_offset(skb);
2792 const u64 csum_stuff_off = csum_start_off + skb->csum_offset; 2792 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
2793 2793
2794 ctrl = TX_DESC_CSUM_EN | 2794 ctrl = TX_DESC_CSUM_EN |
@@ -3203,6 +3203,10 @@ static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
3203 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */ 3203 int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
3204 int mac_off = 0; 3204 int mac_off = 0;
3205 3205
3206#if defined(CONFIG_OF)
3207 const unsigned char *addr;
3208#endif
3209
3206 /* give us access to the PROM */ 3210 /* give us access to the PROM */
3207 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD, 3211 writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
3208 cp->regs + REG_BIM_LOCAL_DEV_EN); 3212 cp->regs + REG_BIM_LOCAL_DEV_EN);
@@ -3350,6 +3354,14 @@ use_random_mac_addr:
3350 if (found & VPD_FOUND_MAC) 3354 if (found & VPD_FOUND_MAC)
3351 goto done; 3355 goto done;
3352 3356
3357#if defined(CONFIG_OF)
3358 addr = of_get_property(cp->of_node, "local-mac-address", NULL);
3359 if (addr != NULL) {
3360 memcpy(dev_addr, addr, 6);
3361 goto done;
3362 }
3363#endif
3364
3353 /* Sun MAC prefix then 3 random bytes. */ 3365 /* Sun MAC prefix then 3 random bytes. */
3354 pr_info("MAC address not found in ROM VPD\n"); 3366 pr_info("MAC address not found in ROM VPD\n");
3355 dev_addr[0] = 0x08; 3367 dev_addr[0] = 0x08;
@@ -3880,7 +3892,7 @@ static int cas_change_mtu(struct net_device *dev, int new_mtu)
3880 schedule_work(&cp->reset_task); 3892 schedule_work(&cp->reset_task);
3881#endif 3893#endif
3882 3894
3883 flush_scheduled_work(); 3895 flush_work_sync(&cp->reset_task);
3884 return 0; 3896 return 0;
3885} 3897}
3886 3898
@@ -5019,6 +5031,10 @@ static int __devinit cas_init_one(struct pci_dev *pdev,
5019 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : 5031 cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE :
5020 cassini_debug; 5032 cassini_debug;
5021 5033
5034#if defined(CONFIG_OF)
5035 cp->of_node = pci_device_to_OF_node(pdev);
5036#endif
5037
5022 cp->link_transition = LINK_TRANSITION_UNKNOWN; 5038 cp->link_transition = LINK_TRANSITION_UNKNOWN;
5023 cp->link_transition_jiffies_valid = 0; 5039 cp->link_transition_jiffies_valid = 0;
5024 5040
@@ -5177,7 +5193,7 @@ static void __devexit cas_remove_one(struct pci_dev *pdev)
5177 vfree(cp->fw_data); 5193 vfree(cp->fw_data);
5178 5194
5179 mutex_lock(&cp->pm_mutex); 5195 mutex_lock(&cp->pm_mutex);
5180 flush_scheduled_work(); 5196 cancel_work_sync(&cp->reset_task);
5181 if (cp->hw_running) 5197 if (cp->hw_running)
5182 cas_shutdown(cp); 5198 cas_shutdown(cp);
5183 mutex_unlock(&cp->pm_mutex); 5199 mutex_unlock(&cp->pm_mutex);
diff --git a/drivers/net/cassini.h b/drivers/net/cassini.h
index dbc47878d83b..faf4746a0f3e 100644
--- a/drivers/net/cassini.h
+++ b/drivers/net/cassini.h
@@ -2868,6 +2868,9 @@ struct cas {
2868 dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS]; 2868 dma_addr_t block_dvma, tx_tiny_dvma[N_TX_RINGS];
2869 struct pci_dev *pdev; 2869 struct pci_dev *pdev;
2870 struct net_device *dev; 2870 struct net_device *dev;
2871#if defined(CONFIG_OF)
2872 struct device_node *of_node;
2873#endif
2871 2874
2872 /* Firmware Info */ 2875 /* Firmware Info */
2873 u16 fw_load_addr; 2876 u16 fw_load_addr;
diff --git a/drivers/net/chelsio/my3126.c b/drivers/net/chelsio/my3126.c
index 4c6028512d10..a683fd3bb624 100644
--- a/drivers/net/chelsio/my3126.c
+++ b/drivers/net/chelsio/my3126.c
@@ -22,7 +22,7 @@ static int my3126_interrupt_enable(struct cphy *cphy)
22 22
23static int my3126_interrupt_disable(struct cphy *cphy) 23static int my3126_interrupt_disable(struct cphy *cphy)
24{ 24{
25 cancel_rearming_delayed_work(&cphy->phy_update); 25 cancel_delayed_work_sync(&cphy->phy_update);
26 return 0; 26 return 0;
27} 27}
28 28
diff --git a/drivers/net/chelsio/sge.c b/drivers/net/chelsio/sge.c
index 70221ca32683..f778b15ad3fd 100644
--- a/drivers/net/chelsio/sge.c
+++ b/drivers/net/chelsio/sge.c
@@ -273,6 +273,10 @@ struct sge {
273 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp; 273 struct cmdQ cmdQ[SGE_CMDQ_N] ____cacheline_aligned_in_smp;
274}; 274};
275 275
276static const u8 ch_mac_addr[ETH_ALEN] = {
277 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
278};
279
276/* 280/*
277 * stop tasklet and free all pending skb's 281 * stop tasklet and free all pending skb's
278 */ 282 */
@@ -2012,10 +2016,6 @@ static void espibug_workaround_t204(unsigned long data)
2012 continue; 2016 continue;
2013 2017
2014 if (!skb->cb[0]) { 2018 if (!skb->cb[0]) {
2015 u8 ch_mac_addr[ETH_ALEN] = {
2016 0x0, 0x7, 0x43, 0x0, 0x0, 0x0
2017 };
2018
2019 skb_copy_to_linear_data_offset(skb, 2019 skb_copy_to_linear_data_offset(skb,
2020 sizeof(struct cpl_tx_pkt), 2020 sizeof(struct cpl_tx_pkt),
2021 ch_mac_addr, 2021 ch_mac_addr,
@@ -2048,8 +2048,6 @@ static void espibug_workaround(unsigned long data)
2048 2048
2049 if ((seop & 0xfff0fff) == 0xfff && skb) { 2049 if ((seop & 0xfff0fff) == 0xfff && skb) {
2050 if (!skb->cb[0]) { 2050 if (!skb->cb[0]) {
2051 u8 ch_mac_addr[ETH_ALEN] =
2052 {0x0, 0x7, 0x43, 0x0, 0x0, 0x0};
2053 skb_copy_to_linear_data_offset(skb, 2051 skb_copy_to_linear_data_offset(skb,
2054 sizeof(struct cpl_tx_pkt), 2052 sizeof(struct cpl_tx_pkt),
2055 ch_mac_addr, 2053 ch_mac_addr,
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 92bac19ad60a..263a2944566f 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -59,6 +59,7 @@ MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
59MODULE_LICENSE("GPL"); 59MODULE_LICENSE("GPL");
60MODULE_VERSION(CNIC_MODULE_VERSION); 60MODULE_VERSION(CNIC_MODULE_VERSION);
61 61
62/* cnic_dev_list modifications are protected by both rtnl and cnic_dev_lock */
62static LIST_HEAD(cnic_dev_list); 63static LIST_HEAD(cnic_dev_list);
63static LIST_HEAD(cnic_udev_list); 64static LIST_HEAD(cnic_udev_list);
64static DEFINE_RWLOCK(cnic_dev_lock); 65static DEFINE_RWLOCK(cnic_dev_lock);
@@ -278,6 +279,7 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
278 u32 msg_type = ISCSI_KEVENT_IF_DOWN; 279 u32 msg_type = ISCSI_KEVENT_IF_DOWN;
279 struct cnic_ulp_ops *ulp_ops; 280 struct cnic_ulp_ops *ulp_ops;
280 struct cnic_uio_dev *udev = cp->udev; 281 struct cnic_uio_dev *udev = cp->udev;
282 int rc = 0, retry = 0;
281 283
282 if (!udev || udev->uio_dev == -1) 284 if (!udev || udev->uio_dev == -1)
283 return -ENODEV; 285 return -ENODEV;
@@ -302,14 +304,26 @@ static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
302 path_req.pmtu = csk->mtu; 304 path_req.pmtu = csk->mtu;
303 } 305 }
304 306
305 rcu_read_lock(); 307 while (retry < 3) {
306 ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]); 308 rc = 0;
307 if (ulp_ops) 309 rcu_read_lock();
308 ulp_ops->iscsi_nl_send_msg(cp->dev, msg_type, buf, len); 310 ulp_ops = rcu_dereference(cnic_ulp_tbl[CNIC_ULP_ISCSI]);
309 rcu_read_unlock(); 311 if (ulp_ops)
312 rc = ulp_ops->iscsi_nl_send_msg(
313 cp->ulp_handle[CNIC_ULP_ISCSI],
314 msg_type, buf, len);
315 rcu_read_unlock();
316 if (rc == 0 || msg_type != ISCSI_KEVENT_PATH_REQ)
317 break;
318
319 msleep(100);
320 retry++;
321 }
310 return 0; 322 return 0;
311} 323}
312 324
325static void cnic_cm_upcall(struct cnic_local *, struct cnic_sock *, u8);
326
313static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type, 327static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
314 char *buf, u16 len) 328 char *buf, u16 len)
315{ 329{
@@ -339,7 +353,9 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
339 } 353 }
340 csk = &cp->csk_tbl[l5_cid]; 354 csk = &cp->csk_tbl[l5_cid];
341 csk_hold(csk); 355 csk_hold(csk);
342 if (cnic_in_use(csk)) { 356 if (cnic_in_use(csk) &&
357 test_bit(SK_F_CONNECT_START, &csk->flags)) {
358
343 memcpy(csk->ha, path_resp->mac_addr, 6); 359 memcpy(csk->ha, path_resp->mac_addr, 6);
344 if (test_bit(SK_F_IPV6, &csk->flags)) 360 if (test_bit(SK_F_IPV6, &csk->flags))
345 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr, 361 memcpy(&csk->src_ip[0], &path_resp->src.v6_addr,
@@ -347,8 +363,16 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
347 else 363 else
348 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr, 364 memcpy(&csk->src_ip[0], &path_resp->src.v4_addr,
349 sizeof(struct in_addr)); 365 sizeof(struct in_addr));
350 if (is_valid_ether_addr(csk->ha)) 366
367 if (is_valid_ether_addr(csk->ha)) {
351 cnic_cm_set_pg(csk); 368 cnic_cm_set_pg(csk);
369 } else if (!test_bit(SK_F_OFFLD_SCHED, &csk->flags) &&
370 !test_bit(SK_F_OFFLD_COMPLETE, &csk->flags)) {
371
372 cnic_cm_upcall(cp, csk,
373 L4_KCQE_OPCODE_VALUE_CONNECT_COMPLETE);
374 clear_bit(SK_F_CONNECT_START, &csk->flags);
375 }
352 } 376 }
353 csk_put(csk); 377 csk_put(csk);
354 rcu_read_unlock(); 378 rcu_read_unlock();
@@ -402,19 +426,6 @@ static int cnic_abort_prep(struct cnic_sock *csk)
402 return 0; 426 return 0;
403} 427}
404 428
405static void cnic_uio_stop(void)
406{
407 struct cnic_dev *dev;
408
409 read_lock(&cnic_dev_lock);
410 list_for_each_entry(dev, &cnic_dev_list, list) {
411 struct cnic_local *cp = dev->cnic_priv;
412
413 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
414 }
415 read_unlock(&cnic_dev_lock);
416}
417
418int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops) 429int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
419{ 430{
420 struct cnic_dev *dev; 431 struct cnic_dev *dev;
@@ -445,14 +456,12 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
445 456
446 /* Prevent race conditions with netdev_event */ 457 /* Prevent race conditions with netdev_event */
447 rtnl_lock(); 458 rtnl_lock();
448 read_lock(&cnic_dev_lock);
449 list_for_each_entry(dev, &cnic_dev_list, list) { 459 list_for_each_entry(dev, &cnic_dev_list, list) {
450 struct cnic_local *cp = dev->cnic_priv; 460 struct cnic_local *cp = dev->cnic_priv;
451 461
452 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type])) 462 if (!test_and_set_bit(ULP_F_INIT, &cp->ulp_flags[ulp_type]))
453 ulp_ops->cnic_init(dev); 463 ulp_ops->cnic_init(dev);
454 } 464 }
455 read_unlock(&cnic_dev_lock);
456 rtnl_unlock(); 465 rtnl_unlock();
457 466
458 return 0; 467 return 0;
@@ -488,9 +497,6 @@ int cnic_unregister_driver(int ulp_type)
488 } 497 }
489 read_unlock(&cnic_dev_lock); 498 read_unlock(&cnic_dev_lock);
490 499
491 if (ulp_type == CNIC_ULP_ISCSI)
492 cnic_uio_stop();
493
494 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL); 500 rcu_assign_pointer(cnic_ulp_tbl[ulp_type], NULL);
495 501
496 mutex_unlock(&cnic_lock); 502 mutex_unlock(&cnic_lock);
@@ -574,6 +580,9 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
574 } 580 }
575 mutex_unlock(&cnic_lock); 581 mutex_unlock(&cnic_lock);
576 582
583 if (ulp_type == CNIC_ULP_ISCSI)
584 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
585
577 synchronize_rcu(); 586 synchronize_rcu();
578 587
579 while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) && 588 while (test_bit(ULP_F_CALL_PENDING, &cp->ulp_flags[ulp_type]) &&
@@ -821,12 +830,14 @@ static void cnic_free_resc(struct cnic_dev *dev)
821 cnic_free_dma(dev, &cp->conn_buf_info); 830 cnic_free_dma(dev, &cp->conn_buf_info);
822 cnic_free_dma(dev, &cp->kwq_info); 831 cnic_free_dma(dev, &cp->kwq_info);
823 cnic_free_dma(dev, &cp->kwq_16_data_info); 832 cnic_free_dma(dev, &cp->kwq_16_data_info);
833 cnic_free_dma(dev, &cp->kcq2.dma);
824 cnic_free_dma(dev, &cp->kcq1.dma); 834 cnic_free_dma(dev, &cp->kcq1.dma);
825 kfree(cp->iscsi_tbl); 835 kfree(cp->iscsi_tbl);
826 cp->iscsi_tbl = NULL; 836 cp->iscsi_tbl = NULL;
827 kfree(cp->ctx_tbl); 837 kfree(cp->ctx_tbl);
828 cp->ctx_tbl = NULL; 838 cp->ctx_tbl = NULL;
829 839
840 cnic_free_id_tbl(&cp->fcoe_cid_tbl);
830 cnic_free_id_tbl(&cp->cid_tbl); 841 cnic_free_id_tbl(&cp->cid_tbl);
831} 842}
832 843
@@ -940,7 +951,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
940 &udev->l2_ring_map, 951 &udev->l2_ring_map,
941 GFP_KERNEL | __GFP_COMP); 952 GFP_KERNEL | __GFP_COMP);
942 if (!udev->l2_ring) 953 if (!udev->l2_ring)
943 return -ENOMEM; 954 goto err_udev;
944 955
945 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size; 956 udev->l2_buf_size = (cp->l2_rx_ring_size + 1) * cp->l2_single_buf_size;
946 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size); 957 udev->l2_buf_size = PAGE_ALIGN(udev->l2_buf_size);
@@ -948,7 +959,7 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
948 &udev->l2_buf_map, 959 &udev->l2_buf_map,
949 GFP_KERNEL | __GFP_COMP); 960 GFP_KERNEL | __GFP_COMP);
950 if (!udev->l2_buf) 961 if (!udev->l2_buf)
951 return -ENOMEM; 962 goto err_dma;
952 963
953 write_lock(&cnic_dev_lock); 964 write_lock(&cnic_dev_lock);
954 list_add(&udev->list, &cnic_udev_list); 965 list_add(&udev->list, &cnic_udev_list);
@@ -959,6 +970,12 @@ static int cnic_alloc_uio_rings(struct cnic_dev *dev, int pages)
959 cp->udev = udev; 970 cp->udev = udev;
960 971
961 return 0; 972 return 0;
973 err_dma:
974 dma_free_coherent(&udev->pdev->dev, udev->l2_ring_size,
975 udev->l2_ring, udev->l2_ring_map);
976 err_udev:
977 kfree(udev);
978 return -ENOMEM;
962} 979}
963 980
964static int cnic_init_uio(struct cnic_dev *dev) 981static int cnic_init_uio(struct cnic_dev *dev)
@@ -1114,12 +1131,22 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1114 1131
1115 cp->iro_arr = ethdev->iro_arr; 1132 cp->iro_arr = ethdev->iro_arr;
1116 1133
1117 cp->max_cid_space = MAX_ISCSI_TBL_SZ; 1134 cp->max_cid_space = MAX_ISCSI_TBL_SZ + BNX2X_FCOE_NUM_CONNECTIONS;
1118 cp->iscsi_start_cid = start_cid; 1135 cp->iscsi_start_cid = start_cid;
1136 cp->fcoe_start_cid = start_cid + MAX_ISCSI_TBL_SZ;
1137
1138 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
1139 cp->max_cid_space += BNX2X_FCOE_NUM_CONNECTIONS;
1140 cp->fcoe_init_cid = ethdev->fcoe_init_cid;
1141 if (!cp->fcoe_init_cid)
1142 cp->fcoe_init_cid = 0x10;
1143 }
1144
1119 if (start_cid < BNX2X_ISCSI_START_CID) { 1145 if (start_cid < BNX2X_ISCSI_START_CID) {
1120 u32 delta = BNX2X_ISCSI_START_CID - start_cid; 1146 u32 delta = BNX2X_ISCSI_START_CID - start_cid;
1121 1147
1122 cp->iscsi_start_cid = BNX2X_ISCSI_START_CID; 1148 cp->iscsi_start_cid = BNX2X_ISCSI_START_CID;
1149 cp->fcoe_start_cid += delta;
1123 cp->max_cid_space += delta; 1150 cp->max_cid_space += delta;
1124 } 1151 }
1125 1152
@@ -1138,6 +1165,9 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1138 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI; 1165 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1139 } 1166 }
1140 1167
1168 for (i = MAX_ISCSI_TBL_SZ; i < cp->max_cid_space; i++)
1169 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_FCOE;
1170
1141 pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) / 1171 pages = PAGE_ALIGN(cp->max_cid_space * CNIC_KWQ16_DATA_SIZE) /
1142 PAGE_SIZE; 1172 PAGE_SIZE;
1143 1173
@@ -1161,6 +1191,12 @@ static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1161 if (ret) 1191 if (ret)
1162 goto error; 1192 goto error;
1163 1193
1194 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
1195 ret = cnic_alloc_kcq(dev, &cp->kcq2);
1196 if (ret)
1197 goto error;
1198 }
1199
1164 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS * 1200 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
1165 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE; 1201 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
1166 ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1); 1202 ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
@@ -1254,12 +1290,18 @@ static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1254 struct cnic_local *cp = dev->cnic_priv; 1290 struct cnic_local *cp = dev->cnic_priv;
1255 struct l5cm_spe kwqe; 1291 struct l5cm_spe kwqe;
1256 struct kwqe_16 *kwq[1]; 1292 struct kwqe_16 *kwq[1];
1293 u16 type_16;
1257 int ret; 1294 int ret;
1258 1295
1259 kwqe.hdr.conn_and_cmd_data = 1296 kwqe.hdr.conn_and_cmd_data =
1260 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) | 1297 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1261 BNX2X_HW_CID(cp, cid))); 1298 BNX2X_HW_CID(cp, cid)));
1262 kwqe.hdr.type = cpu_to_le16(type); 1299
1300 type_16 = (type << SPE_HDR_CONN_TYPE_SHIFT) & SPE_HDR_CONN_TYPE;
1301 type_16 |= (cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1302 SPE_HDR_FUNCTION_ID;
1303
1304 kwqe.hdr.type = cpu_to_le16(type_16);
1263 kwqe.hdr.reserved1 = 0; 1305 kwqe.hdr.reserved1 = 0;
1264 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo); 1306 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1265 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi); 1307 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
@@ -1425,8 +1467,11 @@ static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1425 cnic_free_dma(dev, &iscsi->hq_info); 1467 cnic_free_dma(dev, &iscsi->hq_info);
1426 cnic_free_dma(dev, &iscsi->r2tq_info); 1468 cnic_free_dma(dev, &iscsi->r2tq_info);
1427 cnic_free_dma(dev, &iscsi->task_array_info); 1469 cnic_free_dma(dev, &iscsi->task_array_info);
1470 cnic_free_id(&cp->cid_tbl, ctx->cid);
1471 } else {
1472 cnic_free_id(&cp->fcoe_cid_tbl, ctx->cid);
1428 } 1473 }
1429 cnic_free_id(&cp->cid_tbl, ctx->cid); 1474
1430 ctx->cid = 0; 1475 ctx->cid = 0;
1431} 1476}
1432 1477
@@ -1438,6 +1483,16 @@ static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1438 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1483 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1439 struct cnic_iscsi *iscsi = ctx->proto.iscsi; 1484 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1440 1485
1486 if (ctx->ulp_proto_id == CNIC_ULP_FCOE) {
1487 cid = cnic_alloc_new_id(&cp->fcoe_cid_tbl);
1488 if (cid == -1) {
1489 ret = -ENOMEM;
1490 goto error;
1491 }
1492 ctx->cid = cid;
1493 return 0;
1494 }
1495
1441 cid = cnic_alloc_new_id(&cp->cid_tbl); 1496 cid = cnic_alloc_new_id(&cp->cid_tbl);
1442 if (cid == -1) { 1497 if (cid == -1) {
1443 ret = -ENOMEM; 1498 ret = -ENOMEM;
@@ -1695,7 +1750,7 @@ static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1695 *work = num; 1750 *work = num;
1696 return -EINVAL; 1751 return -EINVAL;
1697 } 1752 }
1698 *work = 2 + req2->num_additional_wqes;; 1753 *work = 2 + req2->num_additional_wqes;
1699 1754
1700 l5_cid = req1->iscsi_conn_id; 1755 l5_cid = req1->iscsi_conn_id;
1701 if (l5_cid >= MAX_ISCSI_TBL_SZ) 1756 if (l5_cid >= MAX_ISCSI_TBL_SZ)
@@ -1770,19 +1825,15 @@ static int cnic_bnx2x_destroy_ramrod(struct cnic_dev *dev, u32 l5_cid)
1770 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid]; 1825 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1771 union l5cm_specific_data l5_data; 1826 union l5cm_specific_data l5_data;
1772 int ret; 1827 int ret;
1773 u32 hw_cid, type; 1828 u32 hw_cid;
1774 1829
1775 init_waitqueue_head(&ctx->waitq); 1830 init_waitqueue_head(&ctx->waitq);
1776 ctx->wait_cond = 0; 1831 ctx->wait_cond = 0;
1777 memset(&l5_data, 0, sizeof(l5_data)); 1832 memset(&l5_data, 0, sizeof(l5_data));
1778 hw_cid = BNX2X_HW_CID(cp, ctx->cid); 1833 hw_cid = BNX2X_HW_CID(cp, ctx->cid);
1779 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
1780 & SPE_HDR_CONN_TYPE;
1781 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
1782 SPE_HDR_FUNCTION_ID);
1783 1834
1784 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 1835 ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
1785 hw_cid, type, &l5_data); 1836 hw_cid, NONE_CONNECTION_TYPE, &l5_data);
1786 1837
1787 if (ret == 0) 1838 if (ret == 0)
1788 wait_event(ctx->waitq, ctx->wait_cond); 1839 wait_event(ctx->waitq, ctx->wait_cond);
@@ -2078,8 +2129,306 @@ static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
2078 return 0; 2129 return 0;
2079} 2130}
2080 2131
2081static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[], 2132static int cnic_bnx2x_fcoe_stat(struct cnic_dev *dev, struct kwqe *kwqe)
2082 u32 num_wqes) 2133{
2134 struct fcoe_kwqe_stat *req;
2135 struct fcoe_stat_ramrod_params *fcoe_stat;
2136 union l5cm_specific_data l5_data;
2137 struct cnic_local *cp = dev->cnic_priv;
2138 int ret;
2139 u32 cid;
2140
2141 req = (struct fcoe_kwqe_stat *) kwqe;
2142 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2143
2144 fcoe_stat = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2145 if (!fcoe_stat)
2146 return -ENOMEM;
2147
2148 memset(fcoe_stat, 0, sizeof(*fcoe_stat));
2149 memcpy(&fcoe_stat->stat_kwqe, req, sizeof(*req));
2150
2151 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_STAT, cid,
2152 FCOE_CONNECTION_TYPE, &l5_data);
2153 return ret;
2154}
2155
2156static int cnic_bnx2x_fcoe_init1(struct cnic_dev *dev, struct kwqe *wqes[],
2157 u32 num, int *work)
2158{
2159 int ret;
2160 struct cnic_local *cp = dev->cnic_priv;
2161 u32 cid;
2162 struct fcoe_init_ramrod_params *fcoe_init;
2163 struct fcoe_kwqe_init1 *req1;
2164 struct fcoe_kwqe_init2 *req2;
2165 struct fcoe_kwqe_init3 *req3;
2166 union l5cm_specific_data l5_data;
2167
2168 if (num < 3) {
2169 *work = num;
2170 return -EINVAL;
2171 }
2172 req1 = (struct fcoe_kwqe_init1 *) wqes[0];
2173 req2 = (struct fcoe_kwqe_init2 *) wqes[1];
2174 req3 = (struct fcoe_kwqe_init3 *) wqes[2];
2175 if (req2->hdr.op_code != FCOE_KWQE_OPCODE_INIT2) {
2176 *work = 1;
2177 return -EINVAL;
2178 }
2179 if (req3->hdr.op_code != FCOE_KWQE_OPCODE_INIT3) {
2180 *work = 2;
2181 return -EINVAL;
2182 }
2183
2184 if (sizeof(*fcoe_init) > CNIC_KWQ16_DATA_SIZE) {
2185 netdev_err(dev->netdev, "fcoe_init size too big\n");
2186 return -ENOMEM;
2187 }
2188 fcoe_init = cnic_get_kwqe_16_data(cp, BNX2X_FCOE_L5_CID_BASE, &l5_data);
2189 if (!fcoe_init)
2190 return -ENOMEM;
2191
2192 memset(fcoe_init, 0, sizeof(*fcoe_init));
2193 memcpy(&fcoe_init->init_kwqe1, req1, sizeof(*req1));
2194 memcpy(&fcoe_init->init_kwqe2, req2, sizeof(*req2));
2195 memcpy(&fcoe_init->init_kwqe3, req3, sizeof(*req3));
2196 fcoe_init->eq_addr.lo = cp->kcq2.dma.pg_map_arr[0] & 0xffffffff;
2197 fcoe_init->eq_addr.hi = (u64) cp->kcq2.dma.pg_map_arr[0] >> 32;
2198 fcoe_init->eq_next_page_addr.lo =
2199 cp->kcq2.dma.pg_map_arr[1] & 0xffffffff;
2200 fcoe_init->eq_next_page_addr.hi =
2201 (u64) cp->kcq2.dma.pg_map_arr[1] >> 32;
2202
2203 fcoe_init->sb_num = cp->status_blk_num;
2204 fcoe_init->eq_prod = MAX_KCQ_IDX;
2205 fcoe_init->sb_id = HC_INDEX_FCOE_EQ_CONS;
2206 cp->kcq2.sw_prod_idx = 0;
2207
2208 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2209 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_INIT, cid,
2210 FCOE_CONNECTION_TYPE, &l5_data);
2211 *work = 3;
2212 return ret;
2213}
2214
2215static int cnic_bnx2x_fcoe_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
2216 u32 num, int *work)
2217{
2218 int ret = 0;
2219 u32 cid = -1, l5_cid;
2220 struct cnic_local *cp = dev->cnic_priv;
2221 struct fcoe_kwqe_conn_offload1 *req1;
2222 struct fcoe_kwqe_conn_offload2 *req2;
2223 struct fcoe_kwqe_conn_offload3 *req3;
2224 struct fcoe_kwqe_conn_offload4 *req4;
2225 struct fcoe_conn_offload_ramrod_params *fcoe_offload;
2226 struct cnic_context *ctx;
2227 struct fcoe_context *fctx;
2228 struct regpair ctx_addr;
2229 union l5cm_specific_data l5_data;
2230 struct fcoe_kcqe kcqe;
2231 struct kcqe *cqes[1];
2232
2233 if (num < 4) {
2234 *work = num;
2235 return -EINVAL;
2236 }
2237 req1 = (struct fcoe_kwqe_conn_offload1 *) wqes[0];
2238 req2 = (struct fcoe_kwqe_conn_offload2 *) wqes[1];
2239 req3 = (struct fcoe_kwqe_conn_offload3 *) wqes[2];
2240 req4 = (struct fcoe_kwqe_conn_offload4 *) wqes[3];
2241
2242 *work = 4;
2243
2244 l5_cid = req1->fcoe_conn_id;
2245 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2246 goto err_reply;
2247
2248 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2249
2250 ctx = &cp->ctx_tbl[l5_cid];
2251 if (test_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags))
2252 goto err_reply;
2253
2254 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
2255 if (ret) {
2256 ret = 0;
2257 goto err_reply;
2258 }
2259 cid = ctx->cid;
2260
2261 fctx = cnic_get_bnx2x_ctx(dev, cid, 1, &ctx_addr);
2262 if (fctx) {
2263 u32 hw_cid = BNX2X_HW_CID(cp, cid);
2264 u32 val;
2265
2266 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
2267 FCOE_CONNECTION_TYPE);
2268 fctx->xstorm_ag_context.cdu_reserved = val;
2269 val = CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
2270 FCOE_CONNECTION_TYPE);
2271 fctx->ustorm_ag_context.cdu_usage = val;
2272 }
2273 if (sizeof(*fcoe_offload) > CNIC_KWQ16_DATA_SIZE) {
2274 netdev_err(dev->netdev, "fcoe_offload size too big\n");
2275 goto err_reply;
2276 }
2277 fcoe_offload = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2278 if (!fcoe_offload)
2279 goto err_reply;
2280
2281 memset(fcoe_offload, 0, sizeof(*fcoe_offload));
2282 memcpy(&fcoe_offload->offload_kwqe1, req1, sizeof(*req1));
2283 memcpy(&fcoe_offload->offload_kwqe2, req2, sizeof(*req2));
2284 memcpy(&fcoe_offload->offload_kwqe3, req3, sizeof(*req3));
2285 memcpy(&fcoe_offload->offload_kwqe4, req4, sizeof(*req4));
2286
2287 cid = BNX2X_HW_CID(cp, cid);
2288 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_OFFLOAD_CONN, cid,
2289 FCOE_CONNECTION_TYPE, &l5_data);
2290 if (!ret)
2291 set_bit(CTX_FL_OFFLD_START, &ctx->ctx_flags);
2292
2293 return ret;
2294
2295err_reply:
2296 if (cid != -1)
2297 cnic_free_bnx2x_conn_resc(dev, l5_cid);
2298
2299 memset(&kcqe, 0, sizeof(kcqe));
2300 kcqe.op_code = FCOE_KCQE_OPCODE_OFFLOAD_CONN;
2301 kcqe.fcoe_conn_id = req1->fcoe_conn_id;
2302 kcqe.completion_status = FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
2303
2304 cqes[0] = (struct kcqe *) &kcqe;
2305 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2306 return ret;
2307}
2308
2309static int cnic_bnx2x_fcoe_enable(struct cnic_dev *dev, struct kwqe *kwqe)
2310{
2311 struct fcoe_kwqe_conn_enable_disable *req;
2312 struct fcoe_conn_enable_disable_ramrod_params *fcoe_enable;
2313 union l5cm_specific_data l5_data;
2314 int ret;
2315 u32 cid, l5_cid;
2316 struct cnic_local *cp = dev->cnic_priv;
2317
2318 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2319 cid = req->context_id;
2320 l5_cid = req->conn_id + BNX2X_FCOE_L5_CID_BASE;
2321
2322 if (sizeof(*fcoe_enable) > CNIC_KWQ16_DATA_SIZE) {
2323 netdev_err(dev->netdev, "fcoe_enable size too big\n");
2324 return -ENOMEM;
2325 }
2326 fcoe_enable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2327 if (!fcoe_enable)
2328 return -ENOMEM;
2329
2330 memset(fcoe_enable, 0, sizeof(*fcoe_enable));
2331 memcpy(&fcoe_enable->enable_disable_kwqe, req, sizeof(*req));
2332 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_ENABLE_CONN, cid,
2333 FCOE_CONNECTION_TYPE, &l5_data);
2334 return ret;
2335}
2336
2337static int cnic_bnx2x_fcoe_disable(struct cnic_dev *dev, struct kwqe *kwqe)
2338{
2339 struct fcoe_kwqe_conn_enable_disable *req;
2340 struct fcoe_conn_enable_disable_ramrod_params *fcoe_disable;
2341 union l5cm_specific_data l5_data;
2342 int ret;
2343 u32 cid, l5_cid;
2344 struct cnic_local *cp = dev->cnic_priv;
2345
2346 req = (struct fcoe_kwqe_conn_enable_disable *) kwqe;
2347 cid = req->context_id;
2348 l5_cid = req->conn_id;
2349 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2350 return -EINVAL;
2351
2352 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2353
2354 if (sizeof(*fcoe_disable) > CNIC_KWQ16_DATA_SIZE) {
2355 netdev_err(dev->netdev, "fcoe_disable size too big\n");
2356 return -ENOMEM;
2357 }
2358 fcoe_disable = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
2359 if (!fcoe_disable)
2360 return -ENOMEM;
2361
2362 memset(fcoe_disable, 0, sizeof(*fcoe_disable));
2363 memcpy(&fcoe_disable->enable_disable_kwqe, req, sizeof(*req));
2364 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DISABLE_CONN, cid,
2365 FCOE_CONNECTION_TYPE, &l5_data);
2366 return ret;
2367}
2368
2369static int cnic_bnx2x_fcoe_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2370{
2371 struct fcoe_kwqe_conn_destroy *req;
2372 union l5cm_specific_data l5_data;
2373 int ret;
2374 u32 cid, l5_cid;
2375 struct cnic_local *cp = dev->cnic_priv;
2376 struct cnic_context *ctx;
2377 struct fcoe_kcqe kcqe;
2378 struct kcqe *cqes[1];
2379
2380 req = (struct fcoe_kwqe_conn_destroy *) kwqe;
2381 cid = req->context_id;
2382 l5_cid = req->conn_id;
2383 if (l5_cid >= BNX2X_FCOE_NUM_CONNECTIONS)
2384 return -EINVAL;
2385
2386 l5_cid += BNX2X_FCOE_L5_CID_BASE;
2387
2388 ctx = &cp->ctx_tbl[l5_cid];
2389
2390 init_waitqueue_head(&ctx->waitq);
2391 ctx->wait_cond = 0;
2392
2393 memset(&l5_data, 0, sizeof(l5_data));
2394 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_TERMINATE_CONN, cid,
2395 FCOE_CONNECTION_TYPE, &l5_data);
2396 if (ret == 0) {
2397 wait_event(ctx->waitq, ctx->wait_cond);
2398 set_bit(CTX_FL_DELETE_WAIT, &ctx->ctx_flags);
2399 queue_delayed_work(cnic_wq, &cp->delete_task,
2400 msecs_to_jiffies(2000));
2401 }
2402
2403 memset(&kcqe, 0, sizeof(kcqe));
2404 kcqe.op_code = FCOE_KCQE_OPCODE_DESTROY_CONN;
2405 kcqe.fcoe_conn_id = req->conn_id;
2406 kcqe.fcoe_conn_context_id = cid;
2407
2408 cqes[0] = (struct kcqe *) &kcqe;
2409 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_FCOE, cqes, 1);
2410 return ret;
2411}
2412
2413static int cnic_bnx2x_fcoe_fw_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
2414{
2415 struct fcoe_kwqe_destroy *req;
2416 union l5cm_specific_data l5_data;
2417 struct cnic_local *cp = dev->cnic_priv;
2418 int ret;
2419 u32 cid;
2420
2421 req = (struct fcoe_kwqe_destroy *) kwqe;
2422 cid = BNX2X_HW_CID(cp, cp->fcoe_init_cid);
2423
2424 memset(&l5_data, 0, sizeof(l5_data));
2425 ret = cnic_submit_kwqe_16(dev, FCOE_RAMROD_CMD_ID_DESTROY, cid,
2426 FCOE_CONNECTION_TYPE, &l5_data);
2427 return ret;
2428}
2429
2430static int cnic_submit_bnx2x_iscsi_kwqes(struct cnic_dev *dev,
2431 struct kwqe *wqes[], u32 num_wqes)
2083{ 2432{
2084 int i, work, ret; 2433 int i, work, ret;
2085 u32 opcode; 2434 u32 opcode;
@@ -2143,6 +2492,98 @@ static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2143 return 0; 2492 return 0;
2144} 2493}
2145 2494
2495static int cnic_submit_bnx2x_fcoe_kwqes(struct cnic_dev *dev,
2496 struct kwqe *wqes[], u32 num_wqes)
2497{
2498 struct cnic_local *cp = dev->cnic_priv;
2499 int i, work, ret;
2500 u32 opcode;
2501 struct kwqe *kwqe;
2502
2503 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2504 return -EAGAIN; /* bnx2x is down */
2505
2506 if (BNX2X_CHIP_NUM(cp->chip_id) == BNX2X_CHIP_NUM_57710)
2507 return -EINVAL;
2508
2509 for (i = 0; i < num_wqes; ) {
2510 kwqe = wqes[i];
2511 opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
2512 work = 1;
2513
2514 switch (opcode) {
2515 case FCOE_KWQE_OPCODE_INIT1:
2516 ret = cnic_bnx2x_fcoe_init1(dev, &wqes[i],
2517 num_wqes - i, &work);
2518 break;
2519 case FCOE_KWQE_OPCODE_OFFLOAD_CONN1:
2520 ret = cnic_bnx2x_fcoe_ofld1(dev, &wqes[i],
2521 num_wqes - i, &work);
2522 break;
2523 case FCOE_KWQE_OPCODE_ENABLE_CONN:
2524 ret = cnic_bnx2x_fcoe_enable(dev, kwqe);
2525 break;
2526 case FCOE_KWQE_OPCODE_DISABLE_CONN:
2527 ret = cnic_bnx2x_fcoe_disable(dev, kwqe);
2528 break;
2529 case FCOE_KWQE_OPCODE_DESTROY_CONN:
2530 ret = cnic_bnx2x_fcoe_destroy(dev, kwqe);
2531 break;
2532 case FCOE_KWQE_OPCODE_DESTROY:
2533 ret = cnic_bnx2x_fcoe_fw_destroy(dev, kwqe);
2534 break;
2535 case FCOE_KWQE_OPCODE_STAT:
2536 ret = cnic_bnx2x_fcoe_stat(dev, kwqe);
2537 break;
2538 default:
2539 ret = 0;
2540 netdev_err(dev->netdev, "Unknown type of KWQE(0x%x)\n",
2541 opcode);
2542 break;
2543 }
2544 if (ret < 0)
2545 netdev_err(dev->netdev, "KWQE(0x%x) failed\n",
2546 opcode);
2547 i += work;
2548 }
2549 return 0;
2550}
2551
2552static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
2553 u32 num_wqes)
2554{
2555 int ret = -EINVAL;
2556 u32 layer_code;
2557
2558 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
2559 return -EAGAIN; /* bnx2x is down */
2560
2561 if (!num_wqes)
2562 return 0;
2563
2564 layer_code = wqes[0]->kwqe_op_flag & KWQE_LAYER_MASK;
2565 switch (layer_code) {
2566 case KWQE_FLAGS_LAYER_MASK_L5_ISCSI:
2567 case KWQE_FLAGS_LAYER_MASK_L4:
2568 case KWQE_FLAGS_LAYER_MASK_L2:
2569 ret = cnic_submit_bnx2x_iscsi_kwqes(dev, wqes, num_wqes);
2570 break;
2571
2572 case KWQE_FLAGS_LAYER_MASK_L5_FCOE:
2573 ret = cnic_submit_bnx2x_fcoe_kwqes(dev, wqes, num_wqes);
2574 break;
2575 }
2576 return ret;
2577}
2578
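/*
 * Illustrative sketch (assumptions: a registered struct cnic_dev and the
 * KWQE/opcode macros visible in this file): how a caller could tag a
 * work request so that cnic_submit_bnx2x_kwqes() above routes it by
 * layer code and cnic_submit_bnx2x_fcoe_kwqes() dispatches it by
 * opcode.  Only the tagging/submission mechanics are shown; a real user
 * would fill in the typed fcoe_kwqe_* structures from the HSI headers.
 */
static int example_submit_fcoe_stat(struct cnic_dev *dev)
{
	struct kwqe stat_kwqe;
	struct kwqe *wqes[1];

	memset(&stat_kwqe, 0, sizeof(stat_kwqe));
	/* layer bits pick the FCoE path, opcode picks the handler */
	stat_kwqe.kwqe_op_flag = KWQE_FLAGS_LAYER_MASK_L5_FCOE |
				 (FCOE_KWQE_OPCODE_STAT << KWQE_OPCODE_SHIFT);

	wqes[0] = &stat_kwqe;
	return dev->submit_kwqes(dev, wqes, 1);
}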
2579static inline u32 cnic_get_kcqe_layer_mask(u32 opflag)
2580{
2581 if (unlikely(KCQE_OPCODE(opflag) == FCOE_RAMROD_CMD_ID_TERMINATE_CONN))
2582 return KCQE_FLAGS_LAYER_MASK_L4;
2583
2584 return opflag & KCQE_FLAGS_LAYER_MASK;
2585}
2586
2146static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2587static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2147{ 2588{
2148 struct cnic_local *cp = dev->cnic_priv; 2589 struct cnic_local *cp = dev->cnic_priv;
@@ -2154,7 +2595,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2154 struct cnic_ulp_ops *ulp_ops; 2595 struct cnic_ulp_ops *ulp_ops;
2155 int ulp_type; 2596 int ulp_type;
2156 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag; 2597 u32 kcqe_op_flag = cp->completed_kcq[i]->kcqe_op_flag;
2157 u32 kcqe_layer = kcqe_op_flag & KCQE_FLAGS_LAYER_MASK; 2598 u32 kcqe_layer = cnic_get_kcqe_layer_mask(kcqe_op_flag);
2158 2599
2159 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION)) 2600 if (unlikely(kcqe_op_flag & KCQE_RAMROD_COMPLETION))
2160 comp++; 2601 comp++;
@@ -2162,7 +2603,7 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2162 while (j < num_cqes) { 2603 while (j < num_cqes) {
2163 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag; 2604 u32 next_op = cp->completed_kcq[i + j]->kcqe_op_flag;
2164 2605
2165 if ((next_op & KCQE_FLAGS_LAYER_MASK) != kcqe_layer) 2606 if (cnic_get_kcqe_layer_mask(next_op) != kcqe_layer)
2166 break; 2607 break;
2167 2608
2168 if (unlikely(next_op & KCQE_RAMROD_COMPLETION)) 2609 if (unlikely(next_op & KCQE_RAMROD_COMPLETION))
@@ -2174,6 +2615,8 @@ static void service_kcqes(struct cnic_dev *dev, int num_cqes)
2174 ulp_type = CNIC_ULP_RDMA; 2615 ulp_type = CNIC_ULP_RDMA;
2175 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI) 2616 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_ISCSI)
2176 ulp_type = CNIC_ULP_ISCSI; 2617 ulp_type = CNIC_ULP_ISCSI;
2618 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L5_FCOE)
2619 ulp_type = CNIC_ULP_FCOE;
2177 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4) 2620 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L4)
2178 ulp_type = CNIC_ULP_L4; 2621 ulp_type = CNIC_ULP_L4;
2179 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2) 2622 else if (kcqe_layer == KCQE_FLAGS_LAYER_MASK_L2)
@@ -2342,11 +2785,12 @@ static u32 cnic_service_bnx2_queues(struct cnic_dev *dev)
2342static int cnic_service_bnx2(void *data, void *status_blk) 2785static int cnic_service_bnx2(void *data, void *status_blk)
2343{ 2786{
2344 struct cnic_dev *dev = data; 2787 struct cnic_dev *dev = data;
2345 struct cnic_local *cp = dev->cnic_priv;
2346 u32 status_idx = *cp->kcq1.status_idx_ptr;
2347 2788
2348 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) 2789 if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2349 return status_idx; 2790 struct status_block *sblk = status_blk;
2791
2792 return sblk->status_idx;
2793 }
2350 2794
2351 return cnic_service_bnx2_queues(dev); 2795 return cnic_service_bnx2_queues(dev);
2352} 2796}
@@ -2365,9 +2809,10 @@ static void cnic_service_bnx2_msix(unsigned long data)
2365static void cnic_doirq(struct cnic_dev *dev) 2809static void cnic_doirq(struct cnic_dev *dev)
2366{ 2810{
2367 struct cnic_local *cp = dev->cnic_priv; 2811 struct cnic_local *cp = dev->cnic_priv;
2368 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2369 2812
2370 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) { 2813 if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags))) {
2814 u16 prod = cp->kcq1.sw_prod_idx & MAX_KCQ_IDX;
2815
2371 prefetch(cp->status_blk.gen); 2816 prefetch(cp->status_blk.gen);
2372 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]); 2817 prefetch(&cp->kcq1.kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);
2373 2818
@@ -2469,12 +2914,19 @@ static void cnic_service_bnx2x_bh(unsigned long data)
2469 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1); 2914 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq1);
2470 2915
2471 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX); 2916 CNIC_WR16(dev, cp->kcq1.io_addr, cp->kcq1.sw_prod_idx + MAX_KCQ_IDX);
2472 if (BNX2X_CHIP_IS_E2(cp->chip_id)) 2917
2918 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
2919 status_idx = cnic_service_bnx2x_kcq(dev, &cp->kcq2);
2920
2921 CNIC_WR16(dev, cp->kcq2.io_addr, cp->kcq2.sw_prod_idx +
2922 MAX_KCQ_IDX);
2923
2473 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF, 2924 cnic_ack_igu_sb(dev, cp->bnx2x_igu_sb_id, IGU_SEG_ACCESS_DEF,
2474 status_idx, IGU_INT_ENABLE, 1); 2925 status_idx, IGU_INT_ENABLE, 1);
2475 else 2926 } else {
2476 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID, 2927 cnic_ack_bnx2x_int(dev, cp->bnx2x_igu_sb_id, USTORM_ID,
2477 status_idx, IGU_INT_ENABLE, 1); 2928 status_idx, IGU_INT_ENABLE, 1);
2929 }
2478} 2930}
2479 2931
2480static int cnic_service_bnx2x(void *data, void *status_blk) 2932static int cnic_service_bnx2x(void *data, void *status_blk)
@@ -2883,7 +3335,7 @@ static void cnic_cm_cleanup(struct cnic_sock *csk)
2883 struct cnic_dev *dev = csk->dev; 3335 struct cnic_dev *dev = csk->dev;
2884 struct cnic_local *cp = dev->cnic_priv; 3336 struct cnic_local *cp = dev->cnic_priv;
2885 3337
2886 cnic_free_id(&cp->csk_port_tbl, csk->src_port); 3338 cnic_free_id(&cp->csk_port_tbl, be16_to_cpu(csk->src_port));
2887 csk->src_port = 0; 3339 csk->src_port = 0;
2888 } 3340 }
2889} 3341}
@@ -3014,7 +3466,8 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3014 int is_v6, rc = 0; 3466 int is_v6, rc = 0;
3015 struct dst_entry *dst = NULL; 3467 struct dst_entry *dst = NULL;
3016 struct net_device *realdev; 3468 struct net_device *realdev;
3017 u32 local_port; 3469 __be16 local_port;
3470 u32 port_id;
3018 3471
3019 if (saddr->local.v6.sin6_family == AF_INET6 && 3472 if (saddr->local.v6.sin6_family == AF_INET6 &&
3020 saddr->remote.v6.sin6_family == AF_INET6) 3473 saddr->remote.v6.sin6_family == AF_INET6)
@@ -3054,19 +3507,21 @@ static int cnic_get_route(struct cnic_sock *csk, struct cnic_sockaddr *saddr)
3054 } 3507 }
3055 } 3508 }
3056 3509
3057 if (local_port >= CNIC_LOCAL_PORT_MIN && 3510 port_id = be16_to_cpu(local_port);
3058 local_port < CNIC_LOCAL_PORT_MAX) { 3511 if (port_id >= CNIC_LOCAL_PORT_MIN &&
3059 if (cnic_alloc_id(&cp->csk_port_tbl, local_port)) 3512 port_id < CNIC_LOCAL_PORT_MAX) {
3060 local_port = 0; 3513 if (cnic_alloc_id(&cp->csk_port_tbl, port_id))
3514 port_id = 0;
3061 } else 3515 } else
3062 local_port = 0; 3516 port_id = 0;
3063 3517
3064 if (!local_port) { 3518 if (!port_id) {
3065 local_port = cnic_alloc_new_id(&cp->csk_port_tbl); 3519 port_id = cnic_alloc_new_id(&cp->csk_port_tbl);
3066 if (local_port == -1) { 3520 if (port_id == -1) {
3067 rc = -ENOMEM; 3521 rc = -ENOMEM;
3068 goto err_out; 3522 goto err_out;
3069 } 3523 }
3524 local_port = cpu_to_be16(port_id);
3070 } 3525 }
3071 csk->src_port = local_port; 3526 csk->src_port = local_port;
3072 3527
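/*
 * Byte-order note for the hunk above (illustrative): csk->src_port is
 * now kept big-endian (__be16) because it is used directly on the
 * wire, while cnic_alloc_id()/cnic_free_id() index csk_port_tbl with
 * host-order values, so the two views are converted explicitly:
 *
 *	u32 port_id   = be16_to_cpu(csk->src_port);   table/allocator side
 *	csk->src_port = cpu_to_be16(port_id);         wire/context side
 */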
@@ -3208,6 +3663,18 @@ done:
3208 csk_put(csk); 3663 csk_put(csk);
3209} 3664}
3210 3665
3666static void cnic_process_fcoe_term_conn(struct cnic_dev *dev, struct kcqe *kcqe)
3667{
3668 struct cnic_local *cp = dev->cnic_priv;
3669 struct fcoe_kcqe *fc_kcqe = (struct fcoe_kcqe *) kcqe;
3670 u32 l5_cid = fc_kcqe->fcoe_conn_id + BNX2X_FCOE_L5_CID_BASE;
3671 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
3672
3673 ctx->timestamp = jiffies;
3674 ctx->wait_cond = 1;
3675 wake_up(&ctx->waitq);
3676}
3677
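/*
 * Sketch of the destroy handshake wired up above (illustrative; the
 * demo_* names are not part of the driver): cnic_bnx2x_fcoe_destroy()
 * posts FCOE_RAMROD_CMD_ID_TERMINATE_CONN and sleeps, the completion is
 * steered into the L4 path by cnic_get_kcqe_layer_mask(), and
 * cnic_cm_process_kcqe() then calls cnic_process_fcoe_term_conn() to
 * wake the sleeper.
 */
static void demo_wait_for_terminate(struct cnic_context *ctx)
{
	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	/* ... submit the TERMINATE_CONN ramrod here ... */
	wait_event(ctx->waitq, ctx->wait_cond);
}

static void demo_terminate_done(struct cnic_context *ctx)
{
	/* runs from the KCQ service path once the ramrod completes */
	ctx->wait_cond = 1;
	wake_up(&ctx->waitq);
}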
3211static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe) 3678static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3212{ 3679{
3213 struct cnic_local *cp = dev->cnic_priv; 3680 struct cnic_local *cp = dev->cnic_priv;
@@ -3216,6 +3683,10 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
3216 u32 l5_cid; 3683 u32 l5_cid;
3217 struct cnic_sock *csk; 3684 struct cnic_sock *csk;
3218 3685
3686 if (opcode == FCOE_RAMROD_CMD_ID_TERMINATE_CONN) {
3687 cnic_process_fcoe_term_conn(dev, kcqe);
3688 return;
3689 }
3219 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG || 3690 if (opcode == L4_KCQE_OPCODE_VALUE_OFFLOAD_PG ||
3220 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) { 3691 opcode == L4_KCQE_OPCODE_VALUE_UPDATE_PG) {
3221 cnic_cm_process_offld_pg(dev, l4kcqe); 3692 cnic_cm_process_offld_pg(dev, l4kcqe);
@@ -3852,7 +4323,7 @@ static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *dev)
3852 4323
3853 memset(&l2kwqe, 0, sizeof(l2kwqe)); 4324 memset(&l2kwqe, 0, sizeof(l2kwqe));
3854 wqes[0] = &l2kwqe; 4325 wqes[0] = &l2kwqe;
3855 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_FLAGS_LAYER_SHIFT) | 4326 l2kwqe.kwqe_op_flag = (L2_LAYER_CODE << KWQE_LAYER_SHIFT) |
3856 (L2_KWQE_OPCODE_VALUE_FLUSH << 4327 (L2_KWQE_OPCODE_VALUE_FLUSH <<
3857 KWQE_OPCODE_SHIFT) | 2; 4328 KWQE_OPCODE_SHIFT) | 2;
3858 dev->submit_kwqes(dev, wqes, 1); 4329 dev->submit_kwqes(dev, wqes, 1);
@@ -4106,7 +4577,7 @@ static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev,
4106 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4577 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4107 int port = CNIC_PORT(cp); 4578 int port = CNIC_PORT(cp);
4108 int i; 4579 int i;
4109 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4580 u32 cli = cp->ethdev->iscsi_l2_client_id;
4110 u32 val; 4581 u32 val;
4111 4582
4112 memset(txbd, 0, BCM_PAGE_SIZE); 4583 memset(txbd, 0, BCM_PAGE_SIZE);
@@ -4167,7 +4638,7 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4167 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk; 4638 struct host_sp_status_block *sb = cp->bnx2x_def_status_blk;
4168 int i; 4639 int i;
4169 int port = CNIC_PORT(cp); 4640 int port = CNIC_PORT(cp);
4170 int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4641 u32 cli = cp->ethdev->iscsi_l2_client_id;
4171 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli); 4642 int cl_qzone_id = BNX2X_CL_QZONE_ID(cp, cli);
4172 u32 val; 4643 u32 val;
4173 dma_addr_t ring_map = udev->l2_ring_map; 4644 dma_addr_t ring_map = udev->l2_ring_map;
@@ -4231,12 +4702,39 @@ static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev,
4231 4702
4232 cp->rx_cons_ptr = 4703 cp->rx_cons_ptr =
4233 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS]; 4704 &sb->sp_sb.index_values[HC_SP_INDEX_ETH_ISCSI_RX_CQ_CONS];
4705 cp->rx_cons = *cp->rx_cons_ptr;
4706}
4707
4708static int cnic_read_bnx2x_iscsi_mac(struct cnic_dev *dev, u32 upper_addr,
4709 u32 lower_addr)
4710{
4711 u32 val;
4712 u8 mac[6];
4713
4714 val = CNIC_RD(dev, upper_addr);
4715
4716 mac[0] = (u8) (val >> 8);
4717 mac[1] = (u8) val;
4718
4719 val = CNIC_RD(dev, lower_addr);
4720
4721 mac[2] = (u8) (val >> 24);
4722 mac[3] = (u8) (val >> 16);
4723 mac[4] = (u8) (val >> 8);
4724 mac[5] = (u8) val;
4725
4726 if (is_valid_ether_addr(mac)) {
4727 memcpy(dev->mac_addr, mac, 6);
4728 return 0;
4729 } else {
4730 return -EINVAL;
4731 }
4234} 4732}
4235 4733
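/*
 * Worked example for the helper above (illustrative values): if the
 * upper shmem word reads 0x0000a1b2 and the lower word reads
 * 0xc3d4e5f6, the bytes are unpacked high-to-low into the MAC
 * a1:b2:c3:d4:e5:f6, which is kept only if is_valid_ether_addr()
 * accepts it.
 */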
4236static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev) 4734static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4237{ 4735{
4238 struct cnic_local *cp = dev->cnic_priv; 4736 struct cnic_local *cp = dev->cnic_priv;
4239 u32 base, base2, addr, val; 4737 u32 base, base2, addr, addr1, val;
4240 int port = CNIC_PORT(cp); 4738 int port = CNIC_PORT(cp);
4241 4739
4242 dev->max_iscsi_conn = 0; 4740 dev->max_iscsi_conn = 0;
@@ -4249,20 +4747,10 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4249 addr = BNX2X_SHMEM_ADDR(base, 4747 addr = BNX2X_SHMEM_ADDR(base,
4250 dev_info.port_hw_config[port].iscsi_mac_upper); 4748 dev_info.port_hw_config[port].iscsi_mac_upper);
4251 4749
4252 val = CNIC_RD(dev, addr); 4750 addr1 = BNX2X_SHMEM_ADDR(base,
4253
4254 dev->mac_addr[0] = (u8) (val >> 8);
4255 dev->mac_addr[1] = (u8) val;
4256
4257 addr = BNX2X_SHMEM_ADDR(base,
4258 dev_info.port_hw_config[port].iscsi_mac_lower); 4751 dev_info.port_hw_config[port].iscsi_mac_lower);
4259 4752
4260 val = CNIC_RD(dev, addr); 4753 cnic_read_bnx2x_iscsi_mac(dev, addr, addr1);
4261
4262 dev->mac_addr[2] = (u8) (val >> 24);
4263 dev->mac_addr[3] = (u8) (val >> 16);
4264 dev->mac_addr[4] = (u8) (val >> 8);
4265 dev->mac_addr[5] = (u8) val;
4266 4754
4267 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]); 4755 addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
4268 val = CNIC_RD(dev, addr); 4756 val = CNIC_RD(dev, addr);
@@ -4278,6 +4766,10 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4278 val16 ^= 0x1e1e; 4766 val16 ^= 0x1e1e;
4279 dev->max_iscsi_conn = val16; 4767 dev->max_iscsi_conn = val16;
4280 } 4768 }
4769
4770 if (BNX2X_CHIP_IS_E2(cp->chip_id))
4771 dev->max_fcoe_conn = BNX2X_FCOE_NUM_CONNECTIONS;
4772
4281 if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) { 4773 if (BNX2X_CHIP_IS_E1H(cp->chip_id) || BNX2X_CHIP_IS_E2(cp->chip_id)) {
4282 int func = CNIC_FUNC(cp); 4774 int func = CNIC_FUNC(cp);
4283 u32 mf_cfg_addr; 4775 u32 mf_cfg_addr;
@@ -4288,21 +4780,90 @@ static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
4288 else 4780 else
4289 mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET; 4781 mf_cfg_addr = base + BNX2X_SHMEM_MF_BLK_OFFSET;
4290 4782
4291 addr = mf_cfg_addr + 4783 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4292 offsetof(struct mf_cfg, func_mf_config[func].e1hov_tag); 4784 /* Must determine if the MF is SD vs SI mode */
4785 addr = BNX2X_SHMEM_ADDR(base,
4786 dev_info.shared_feature_config.config);
4787 val = CNIC_RD(dev, addr);
4788 if ((val & SHARED_FEAT_CFG_FORCE_SF_MODE_MASK) ==
4789 SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT) {
4790 int rc;
4791
4792 /* MULTI_FUNCTION_SI mode */
4793 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4794 func_ext_config[func].func_cfg);
4795 val = CNIC_RD(dev, addr);
4796 if (!(val & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD))
4797 dev->max_iscsi_conn = 0;
4798
4799 if (!(val & MACP_FUNC_CFG_FLAGS_FCOE_OFFLOAD))
4800 dev->max_fcoe_conn = 0;
4801
4802 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4803 func_ext_config[func].
4804 iscsi_mac_addr_upper);
4805 addr1 = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4806 func_ext_config[func].
4807 iscsi_mac_addr_lower);
4808 rc = cnic_read_bnx2x_iscsi_mac(dev, addr,
4809 addr1);
4810 if (rc && func > 1)
4811 dev->max_iscsi_conn = 0;
4812
4813 return;
4814 }
4815 }
4816
4817 addr = BNX2X_MF_CFG_ADDR(mf_cfg_addr,
4818 func_mf_config[func].e1hov_tag);
4293 4819
4294 val = CNIC_RD(dev, addr); 4820 val = CNIC_RD(dev, addr);
4295 val &= FUNC_MF_CFG_E1HOV_TAG_MASK; 4821 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
4296 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) { 4822 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
4297 addr = mf_cfg_addr + 4823 dev->max_fcoe_conn = 0;
4298 offsetof(struct mf_cfg, 4824 dev->max_iscsi_conn = 0;
4299 func_mf_config[func].config);
4300 val = CNIC_RD(dev, addr);
4301 val &= FUNC_MF_CFG_PROTOCOL_MASK;
4302 if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
4303 dev->max_iscsi_conn = 0;
4304 } 4825 }
4305 } 4826 }
4827 if (!is_valid_ether_addr(dev->mac_addr))
4828 dev->max_iscsi_conn = 0;
4829}
4830
4831static void cnic_init_bnx2x_kcq(struct cnic_dev *dev)
4832{
4833 struct cnic_local *cp = dev->cnic_priv;
4834 u32 pfid = cp->pfid;
4835
4836 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4837 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4838 cp->kcq1.sw_prod_idx = 0;
4839
4840 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4841 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4842
4843 cp->kcq1.hw_prod_idx_ptr =
4844 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4845 cp->kcq1.status_idx_ptr =
4846 &sb->sb.running_index[SM_RX_ID];
4847 } else {
4848 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4849
4850 cp->kcq1.hw_prod_idx_ptr =
4851 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4852 cp->kcq1.status_idx_ptr =
4853 &sb->sb.running_index[SM_RX_ID];
4854 }
4855
4856 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4857 struct host_hc_status_block_e2 *sb = cp->status_blk.gen;
4858
4859 cp->kcq2.io_addr = BAR_USTRORM_INTMEM +
4860 USTORM_FCOE_EQ_PROD_OFFSET(pfid);
4861 cp->kcq2.sw_prod_idx = 0;
4862 cp->kcq2.hw_prod_idx_ptr =
4863 &sb->sb.index_values[HC_INDEX_FCOE_EQ_CONS];
4864 cp->kcq2.status_idx_ptr =
4865 &sb->sb.running_index[SM_RX_ID];
4866 }
4306} 4867}
4307 4868
4308static int cnic_start_bnx2x_hw(struct cnic_dev *dev) 4869static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
@@ -4335,28 +4896,19 @@ static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
4335 if (ret) 4896 if (ret)
4336 return -ENOMEM; 4897 return -ENOMEM;
4337 4898
4338 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4339
4340 cp->kcq1.io_addr = BAR_CSTRORM_INTMEM +
4341 CSTORM_ISCSI_EQ_PROD_OFFSET(pfid, 0);
4342 cp->kcq1.sw_prod_idx = 0;
4343
4344 if (BNX2X_CHIP_IS_E2(cp->chip_id)) { 4899 if (BNX2X_CHIP_IS_E2(cp->chip_id)) {
4345 struct host_hc_status_block_e2 *sb = cp->status_blk.gen; 4900 ret = cnic_init_id_tbl(&cp->fcoe_cid_tbl,
4346 4901 BNX2X_FCOE_NUM_CONNECTIONS,
4347 cp->kcq1.hw_prod_idx_ptr = 4902 cp->fcoe_start_cid);
4348 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS];
4349 cp->kcq1.status_idx_ptr =
4350 &sb->sb.running_index[SM_RX_ID];
4351 } else {
4352 struct host_hc_status_block_e1x *sb = cp->status_blk.gen;
4353 4903
4354 cp->kcq1.hw_prod_idx_ptr = 4904 if (ret)
4355 &sb->sb.index_values[HC_INDEX_ISCSI_EQ_CONS]; 4905 return -ENOMEM;
4356 cp->kcq1.status_idx_ptr =
4357 &sb->sb.running_index[SM_RX_ID];
4358 } 4906 }
4359 4907
4908 cp->bnx2x_igu_sb_id = ethdev->irq_arr[0].status_blk_num2;
4909
4910 cnic_init_bnx2x_kcq(dev);
4911
4360 cnic_get_bnx2x_iscsi_info(dev); 4912 cnic_get_bnx2x_iscsi_info(dev);
4361 4913
4362 /* Only 1 EQ */ 4914 /* Only 1 EQ */
@@ -4424,8 +4976,9 @@ static void cnic_init_rings(struct cnic_dev *dev)
4424 cnic_init_bnx2_rx_ring(dev); 4976 cnic_init_bnx2_rx_ring(dev);
4425 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 4977 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4426 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 4978 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4427 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 4979 u32 cli = cp->ethdev->iscsi_l2_client_id;
4428 u32 cl_qzone_id, type; 4980 u32 cid = cp->ethdev->iscsi_l2_cid;
4981 u32 cl_qzone_id;
4429 struct client_init_ramrod_data *data; 4982 struct client_init_ramrod_data *data;
4430 union l5cm_specific_data l5_data; 4983 union l5cm_specific_data l5_data;
4431 struct ustorm_eth_rx_producers rx_prods = {0}; 4984 struct ustorm_eth_rx_producers rx_prods = {0};
@@ -4457,15 +5010,10 @@ static void cnic_init_rings(struct cnic_dev *dev)
4457 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff; 5010 l5_data.phy_address.lo = udev->l2_buf_map & 0xffffffff;
4458 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32; 5011 l5_data.phy_address.hi = (u64) udev->l2_buf_map >> 32;
4459 5012
4460 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
4461 & SPE_HDR_CONN_TYPE;
4462 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
4463 SPE_HDR_FUNCTION_ID);
4464
4465 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5013 set_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
4466 5014
4467 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP, 5015 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
4468 BNX2X_ISCSI_L2_CID, type, &l5_data); 5016 cid, ETH_CONNECTION_TYPE, &l5_data);
4469 5017
4470 i = 0; 5018 i = 0;
4471 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5019 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
@@ -4476,7 +5024,7 @@ static void cnic_init_rings(struct cnic_dev *dev)
4476 netdev_err(dev->netdev, 5024 netdev_err(dev->netdev,
4477 "iSCSI CLIENT_SETUP did not complete\n"); 5025 "iSCSI CLIENT_SETUP did not complete\n");
4478 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5026 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4479 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1); 5027 cnic_ring_ctl(dev, cid, cli, 1);
4480 } 5028 }
4481} 5029}
4482 5030
@@ -4491,19 +5039,19 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
4491 cnic_shutdown_bnx2_rx_ring(dev); 5039 cnic_shutdown_bnx2_rx_ring(dev);
4492 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) { 5040 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4493 struct cnic_local *cp = dev->cnic_priv; 5041 struct cnic_local *cp = dev->cnic_priv;
4494 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp)); 5042 u32 cli = cp->ethdev->iscsi_l2_client_id;
5043 u32 cid = cp->ethdev->iscsi_l2_cid;
4495 union l5cm_specific_data l5_data; 5044 union l5cm_specific_data l5_data;
4496 int i; 5045 int i;
4497 u32 type;
4498 5046
4499 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0); 5047 cnic_ring_ctl(dev, cid, cli, 0);
4500 5048
4501 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags); 5049 set_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags);
4502 5050
4503 l5_data.phy_address.lo = cli; 5051 l5_data.phy_address.lo = cli;
4504 l5_data.phy_address.hi = 0; 5052 l5_data.phy_address.hi = 0;
4505 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT, 5053 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_HALT,
4506 BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data); 5054 cid, ETH_CONNECTION_TYPE, &l5_data);
4507 i = 0; 5055 i = 0;
4508 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) && 5056 while (test_bit(CNIC_LCL_FL_L2_WAIT, &cp->cnic_local_flags) &&
4509 ++i < 10) 5057 ++i < 10)
@@ -4515,12 +5063,8 @@ static void cnic_shutdown_rings(struct cnic_dev *dev)
4515 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1); 5063 cnic_spq_completion(dev, DRV_CTL_RET_L2_SPQ_CREDIT_CMD, 1);
4516 5064
4517 memset(&l5_data, 0, sizeof(l5_data)); 5065 memset(&l5_data, 0, sizeof(l5_data));
4518 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
4519 & SPE_HDR_CONN_TYPE;
4520 type |= ((cp->pfid << SPE_HDR_FUNCTION_ID_SHIFT) &
4521 SPE_HDR_FUNCTION_ID);
4522 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL, 5066 cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_COMMON_CFC_DEL,
4523 BNX2X_ISCSI_L2_CID, type, &l5_data); 5067 cid, NONE_CONNECTION_TYPE, &l5_data);
4524 msleep(10); 5068 msleep(10);
4525 } 5069 }
4526 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags); 5070 clear_bit(CNIC_LCL_FL_RINGS_INITED, &cp->cnic_local_flags);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index 6a4a0ae5cfe3..b328f6c924c3 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -82,7 +82,7 @@ struct cnic_redirect_entry {
82#define MAX_ISCSI_TBL_SZ 256 82#define MAX_ISCSI_TBL_SZ 256
83 83
84#define CNIC_LOCAL_PORT_MIN 60000 84#define CNIC_LOCAL_PORT_MIN 60000
85#define CNIC_LOCAL_PORT_MAX 61000 85#define CNIC_LOCAL_PORT_MAX 61024
86#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN) 86#define CNIC_LOCAL_PORT_RANGE (CNIC_LOCAL_PORT_MAX - CNIC_LOCAL_PORT_MIN)
87 87
88#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe)) 88#define KWQE_CNT (BCM_PAGE_SIZE / sizeof(struct kwqe))
@@ -258,6 +258,7 @@ struct cnic_local {
258 u16 kwq_con_idx; 258 u16 kwq_con_idx;
259 259
260 struct kcq_info kcq1; 260 struct kcq_info kcq1;
261 struct kcq_info kcq2;
261 262
262 union { 263 union {
263 void *gen; 264 void *gen;
@@ -290,6 +291,10 @@ struct cnic_local {
290 atomic_t iscsi_conn; 291 atomic_t iscsi_conn;
291 u32 iscsi_start_cid; 292 u32 iscsi_start_cid;
292 293
294 u32 fcoe_init_cid;
295 u32 fcoe_start_cid;
296 struct cnic_id_tbl fcoe_cid_tbl;
297
293 u32 max_cid_space; 298 u32 max_cid_space;
294 299
295 /* per connection parameters */ 300 /* per connection parameters */
@@ -356,11 +361,6 @@ struct bnx2x_bd_chain_next {
356#define BNX2X_CONTEXT_MEM_SIZE 1024 361#define BNX2X_CONTEXT_MEM_SIZE 1024
357#define BNX2X_FCOE_CID 16 362#define BNX2X_FCOE_CID 16
358 363
359/* iSCSI client IDs are 17, 19, 21, 23 */
360#define BNX2X_ISCSI_BASE_CL_ID 17
361#define BNX2X_ISCSI_CL_ID(vn) (BNX2X_ISCSI_BASE_CL_ID + ((vn) << 1))
362
363#define BNX2X_ISCSI_L2_CID 17
364#define BNX2X_ISCSI_START_CID 18 364#define BNX2X_ISCSI_START_CID 18
365#define BNX2X_ISCSI_NUM_CONNECTIONS 128 365#define BNX2X_ISCSI_NUM_CONNECTIONS 128
366#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128 366#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128
@@ -372,6 +372,10 @@ struct bnx2x_bd_chain_next {
372#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff 372#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
373#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff 373#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
374 374
375#define BNX2X_FCOE_NUM_CONNECTIONS 128
376
377#define BNX2X_FCOE_L5_CID_BASE MAX_ISCSI_TBL_SZ
378
375#define BNX2X_CHIP_NUM_57710 0x164e 379#define BNX2X_CHIP_NUM_57710 0x164e
376#define BNX2X_CHIP_NUM_57711 0x164f 380#define BNX2X_CHIP_NUM_57711 0x164f
377#define BNX2X_CHIP_NUM_57711E 0x1650 381#define BNX2X_CHIP_NUM_57711E 0x1650
@@ -427,6 +431,13 @@ struct bnx2x_bd_chain_next {
427 (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \ 431 (CNIC_RD(dev, BNX2X_SHMEM2_ADDR(base, size)) > \
428 offsetof(struct shmem2_region, field))) 432 offsetof(struct shmem2_region, field)))
429 433
434#define BNX2X_MF_CFG_ADDR(base, field) \
435 ((base) + offsetof(struct mf_cfg, field))
436
437#ifndef ETH_MAX_RX_CLIENTS_E2
438#define ETH_MAX_RX_CLIENTS_E2 ETH_MAX_RX_CLIENTS_E1H
439#endif
440
430#define CNIC_PORT(cp) ((cp)->pfid & 1) 441#define CNIC_PORT(cp) ((cp)->pfid & 1)
431#define CNIC_FUNC(cp) ((cp)->func) 442#define CNIC_FUNC(cp) ((cp)->func)
432#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\ 443#define CNIC_PATH(cp) (!BNX2X_CHIP_IS_E2(cp->chip_id) ? 0 :\
@@ -439,7 +450,9 @@ struct bnx2x_bd_chain_next {
439#define BNX2X_SW_CID(x) (x & 0x1ffff) 450#define BNX2X_SW_CID(x) (x & 0x1ffff)
440 451
441#define BNX2X_CL_QZONE_ID(cp, cli) \ 452#define BNX2X_CL_QZONE_ID(cp, cli) \
442 (cli + (CNIC_PORT(cp) * ETH_MAX_RX_CLIENTS_E1H)) 453 (cli + (CNIC_PORT(cp) * (BNX2X_CHIP_IS_E2(cp->chip_id) ?\
454 ETH_MAX_RX_CLIENTS_E2 : \
455 ETH_MAX_RX_CLIENTS_E1H)))
443 456
444#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4) 457#define TCP_TSTORM_OOO_DROP_AND_PROC_ACK (0<<4)
445#endif 458#endif
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index 328e8b2765a3..fdbc00415603 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -35,6 +35,40 @@
35#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14) 35#define L5CM_RAMROD_CMD_ID_SEARCHER_DELETE (L5CM_RAMROD_CMD_ID_BASE + 14)
36#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15) 36#define L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD (L5CM_RAMROD_CMD_ID_BASE + 15)
37 37
38#define FCOE_KCQE_OPCODE_INIT_FUNC (0x10)
39#define FCOE_KCQE_OPCODE_DESTROY_FUNC (0x11)
40#define FCOE_KCQE_OPCODE_STAT_FUNC (0x12)
41#define FCOE_KCQE_OPCODE_OFFLOAD_CONN (0x15)
42#define FCOE_KCQE_OPCODE_ENABLE_CONN (0x16)
43#define FCOE_KCQE_OPCODE_DISABLE_CONN (0x17)
44#define FCOE_KCQE_OPCODE_DESTROY_CONN (0x18)
45#define FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION (0x20)
46#define FCOE_KCQE_OPCODE_FCOE_ERROR (0x21)
47
48#define FCOE_RAMROD_CMD_ID_INIT (FCOE_KCQE_OPCODE_INIT_FUNC)
49#define FCOE_RAMROD_CMD_ID_DESTROY (FCOE_KCQE_OPCODE_DESTROY_FUNC)
50#define FCOE_RAMROD_CMD_ID_OFFLOAD_CONN (FCOE_KCQE_OPCODE_OFFLOAD_CONN)
51#define FCOE_RAMROD_CMD_ID_ENABLE_CONN (FCOE_KCQE_OPCODE_ENABLE_CONN)
52#define FCOE_RAMROD_CMD_ID_DISABLE_CONN (FCOE_KCQE_OPCODE_DISABLE_CONN)
53#define FCOE_RAMROD_CMD_ID_DESTROY_CONN (FCOE_KCQE_OPCODE_DESTROY_CONN)
54#define FCOE_RAMROD_CMD_ID_STAT (FCOE_KCQE_OPCODE_STAT_FUNC)
55#define FCOE_RAMROD_CMD_ID_TERMINATE_CONN (0x81)
56
57#define FCOE_KWQE_OPCODE_INIT1 (0)
58#define FCOE_KWQE_OPCODE_INIT2 (1)
59#define FCOE_KWQE_OPCODE_INIT3 (2)
60#define FCOE_KWQE_OPCODE_OFFLOAD_CONN1 (3)
61#define FCOE_KWQE_OPCODE_OFFLOAD_CONN2 (4)
62#define FCOE_KWQE_OPCODE_OFFLOAD_CONN3 (5)
63#define FCOE_KWQE_OPCODE_OFFLOAD_CONN4 (6)
64#define FCOE_KWQE_OPCODE_ENABLE_CONN (7)
65#define FCOE_KWQE_OPCODE_DISABLE_CONN (8)
66#define FCOE_KWQE_OPCODE_DESTROY_CONN (9)
67#define FCOE_KWQE_OPCODE_DESTROY (10)
68#define FCOE_KWQE_OPCODE_STAT (11)
69
70#define FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE (0x3)
71
38/* KCQ (kernel completion queue) response op codes */ 72/* KCQ (kernel completion queue) response op codes */
39#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53) 73#define L4_KCQE_OPCODE_VALUE_CLOSE_COMP (53)
40#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54) 74#define L4_KCQE_OPCODE_VALUE_RESET_COMP (54)
@@ -683,6 +717,1496 @@ struct cstorm_iscsi_ag_context {
683}; 717};
684 718
685/* 719/*
720 * Parameters initialized during offload according to FLOGI/PLOGI/PRLI and used in the FCoE context section
721 */
722struct ustorm_fcoe_params {
723#if defined(__BIG_ENDIAN)
724 u16 fcoe_conn_id;
725 u16 flags;
726#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
727#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
728#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
729#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
730#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
731#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
732#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
733#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
734#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
735#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
736#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
737#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
738#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
739#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
740#define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7)
741#define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7
742#define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8)
743#define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8
744#define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9)
745#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9
746#elif defined(__LITTLE_ENDIAN)
747 u16 flags;
748#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS (0x1<<0)
749#define USTORM_FCOE_PARAMS_B_MUL_N_PORT_IDS_SHIFT 0
750#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES (0x1<<1)
751#define USTORM_FCOE_PARAMS_B_E_D_TOV_RES_SHIFT 1
752#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT (0x1<<2)
753#define USTORM_FCOE_PARAMS_B_CONT_INCR_SEQ_CNT_SHIFT 2
754#define USTORM_FCOE_PARAMS_B_CONF_REQ (0x1<<3)
755#define USTORM_FCOE_PARAMS_B_CONF_REQ_SHIFT 3
756#define USTORM_FCOE_PARAMS_B_REC_VALID (0x1<<4)
757#define USTORM_FCOE_PARAMS_B_REC_VALID_SHIFT 4
758#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT (0x1<<5)
759#define USTORM_FCOE_PARAMS_B_CQ_TOGGLE_BIT_SHIFT 5
760#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT (0x1<<6)
761#define USTORM_FCOE_PARAMS_B_XFRQ_TOGGLE_BIT_SHIFT 6
762#define USTORM_FCOE_PARAMS_B_C2_VALID (0x1<<7)
763#define USTORM_FCOE_PARAMS_B_C2_VALID_SHIFT 7
764#define USTORM_FCOE_PARAMS_B_ACK_0 (0x1<<8)
765#define USTORM_FCOE_PARAMS_B_ACK_0_SHIFT 8
766#define USTORM_FCOE_PARAMS_RSRV0 (0x7F<<9)
767#define USTORM_FCOE_PARAMS_RSRV0_SHIFT 9
768 u16 fcoe_conn_id;
769#endif
770#if defined(__BIG_ENDIAN)
771 u8 hc_csdm_byte_en;
772 u8 func_id;
773 u8 port_id;
774 u8 vnic_id;
775#elif defined(__LITTLE_ENDIAN)
776 u8 vnic_id;
777 u8 port_id;
778 u8 func_id;
779 u8 hc_csdm_byte_en;
780#endif
781#if defined(__BIG_ENDIAN)
782 u16 rx_total_conc_seqs;
783 u16 rx_max_fc_pay_len;
784#elif defined(__LITTLE_ENDIAN)
785 u16 rx_max_fc_pay_len;
786 u16 rx_total_conc_seqs;
787#endif
788#if defined(__BIG_ENDIAN)
789 u16 ox_id;
790 u16 rx_max_conc_seqs;
791#elif defined(__LITTLE_ENDIAN)
792 u16 rx_max_conc_seqs;
793 u16 ox_id;
794#endif
795};
796
797/*
798 * FCoE 16-bit index structure
799 */
800struct fcoe_idx16_fields {
801 u16 fields;
802#define FCOE_IDX16_FIELDS_IDX (0x7FFF<<0)
803#define FCOE_IDX16_FIELDS_IDX_SHIFT 0
804#define FCOE_IDX16_FIELDS_MSB (0x1<<15)
805#define FCOE_IDX16_FIELDS_MSB_SHIFT 15
806};
807
808/*
809 * FCoE 16-bit index union
810 */
811union fcoe_idx16_field_union {
812 struct fcoe_idx16_fields fields;
813 u16 val;
814};
815
816/*
817 * 4 regs size
818 */
819struct fcoe_bd_ctx {
820 u32 buf_addr_hi;
821 u32 buf_addr_lo;
822#if defined(__BIG_ENDIAN)
823 u16 rsrv0;
824 u16 buf_len;
825#elif defined(__LITTLE_ENDIAN)
826 u16 buf_len;
827 u16 rsrv0;
828#endif
829#if defined(__BIG_ENDIAN)
830 u16 rsrv1;
831 u16 flags;
832#elif defined(__LITTLE_ENDIAN)
833 u16 flags;
834 u16 rsrv1;
835#endif
836};
837
838/*
839 * Parameters required for placement according to SGL
840 */
841struct ustorm_fcoe_data_place {
842#if defined(__BIG_ENDIAN)
843 u16 cached_sge_off;
844 u8 cached_num_sges;
845 u8 cached_sge_idx;
846#elif defined(__LITTLE_ENDIAN)
847 u8 cached_sge_idx;
848 u8 cached_num_sges;
849 u16 cached_sge_off;
850#endif
851 struct fcoe_bd_ctx cached_sge[3];
852};
853
854struct fcoe_task_ctx_entry_txwr_rxrd {
855#if defined(__BIG_ENDIAN)
856 u16 verify_tx_seq;
857 u8 init_flags;
858#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
859#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
860#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
861#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
862#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
863#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
864#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
865#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
866#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
867#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
868 u8 tx_flags;
869#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
870#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
871#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
872#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
873#elif defined(__LITTLE_ENDIAN)
874 u8 tx_flags;
875#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE (0xF<<0)
876#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT 0
877#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4 (0xF<<4)
878#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV4_SHIFT 4
879 u8 init_flags;
880#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE (0x7<<0)
881#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT 0
882#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE (0x1<<3)
883#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT 3
884#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE (0x1<<4)
885#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT 4
886#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE (0x1<<5)
887#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT 5
888#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5 (0x3<<6)
889#define FCOE_TASK_CTX_ENTRY_TXWR_RXRD_RSRV5_SHIFT 6
890 u16 verify_tx_seq;
891#endif
892};
893
894struct fcoe_fcp_cmd_payload {
895 u32 opaque[8];
896};
897
898struct fcoe_fc_hdr {
899#if defined(__BIG_ENDIAN)
900 u8 cs_ctl;
901 u8 s_id[3];
902#elif defined(__LITTLE_ENDIAN)
903 u8 s_id[3];
904 u8 cs_ctl;
905#endif
906#if defined(__BIG_ENDIAN)
907 u8 r_ctl;
908 u8 d_id[3];
909#elif defined(__LITTLE_ENDIAN)
910 u8 d_id[3];
911 u8 r_ctl;
912#endif
913#if defined(__BIG_ENDIAN)
914 u8 seq_id;
915 u8 df_ctl;
916 u16 seq_cnt;
917#elif defined(__LITTLE_ENDIAN)
918 u16 seq_cnt;
919 u8 df_ctl;
920 u8 seq_id;
921#endif
922#if defined(__BIG_ENDIAN)
923 u8 type;
924 u8 f_ctl[3];
925#elif defined(__LITTLE_ENDIAN)
926 u8 f_ctl[3];
927 u8 type;
928#endif
929 u32 parameters;
930#if defined(__BIG_ENDIAN)
931 u16 ox_id;
932 u16 rx_id;
933#elif defined(__LITTLE_ENDIAN)
934 u16 rx_id;
935 u16 ox_id;
936#endif
937};
938
939struct fcoe_fc_frame {
940 struct fcoe_fc_hdr fc_hdr;
941 u32 reserved0[2];
942};
943
944union fcoe_cmd_flow_info {
945 struct fcoe_fcp_cmd_payload fcp_cmd_payload;
946 struct fcoe_fc_frame mp_fc_frame;
947};
948
949struct fcoe_read_flow_info {
950 struct fcoe_fc_hdr fc_data_in_hdr;
951 u32 reserved[2];
952};
953
954struct fcoe_fcp_xfr_rdy_payload {
955 u32 burst_len;
956 u32 data_ro;
957};
958
959struct fcoe_write_flow_info {
960 struct fcoe_fc_hdr fc_data_out_hdr;
961 struct fcoe_fcp_xfr_rdy_payload fcp_xfr_payload;
962};
963
964struct fcoe_fcp_rsp_flags {
965 u8 flags;
966#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID (0x1<<0)
967#define FCOE_FCP_RSP_FLAGS_FCP_RSP_LEN_VALID_SHIFT 0
968#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID (0x1<<1)
969#define FCOE_FCP_RSP_FLAGS_FCP_SNS_LEN_VALID_SHIFT 1
970#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER (0x1<<2)
971#define FCOE_FCP_RSP_FLAGS_FCP_RESID_OVER_SHIFT 2
972#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER (0x1<<3)
973#define FCOE_FCP_RSP_FLAGS_FCP_RESID_UNDER_SHIFT 3
974#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ (0x1<<4)
975#define FCOE_FCP_RSP_FLAGS_FCP_CONF_REQ_SHIFT 4
976#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS (0x7<<5)
977#define FCOE_FCP_RSP_FLAGS_FCP_BIDI_FLAGS_SHIFT 5
978};
979
980struct fcoe_fcp_rsp_payload {
981 struct regpair reserved0;
982 u32 fcp_resid;
983#if defined(__BIG_ENDIAN)
984 u16 retry_delay_timer;
985 struct fcoe_fcp_rsp_flags fcp_flags;
986 u8 scsi_status_code;
987#elif defined(__LITTLE_ENDIAN)
988 u8 scsi_status_code;
989 struct fcoe_fcp_rsp_flags fcp_flags;
990 u16 retry_delay_timer;
991#endif
992 u32 fcp_rsp_len;
993 u32 fcp_sns_len;
994};
995
996/*
997 * Fixed-size structure so it can be placed in a union
998 */
999struct fcoe_fcp_rsp_union {
1000 struct fcoe_fcp_rsp_payload payload;
1001 struct regpair reserved0;
1002};
1003
1004/*
1005 * Fixed-size structure so it can be placed in a union
1006 */
1007struct fcoe_abts_rsp_union {
1008 u32 r_ctl;
1009 u32 abts_rsp_payload[7];
1010};
1011
1012union fcoe_rsp_flow_info {
1013 struct fcoe_fcp_rsp_union fcp_rsp;
1014 struct fcoe_abts_rsp_union abts_rsp;
1015};
1016
1017struct fcoe_cleanup_flow_info {
1018#if defined(__BIG_ENDIAN)
1019 u16 reserved1;
1020 u16 task_id;
1021#elif defined(__LITTLE_ENDIAN)
1022 u16 task_id;
1023 u16 reserved1;
1024#endif
1025 u32 reserved2[7];
1026};
1027
1028/*
1029 * 32 bytes used for general purposes
1030 */
1031union fcoe_general_task_ctx {
1032 union fcoe_cmd_flow_info cmd_info;
1033 struct fcoe_read_flow_info read_info;
1034 struct fcoe_write_flow_info write_info;
1035 union fcoe_rsp_flow_info rsp_info;
1036 struct fcoe_cleanup_flow_info cleanup_info;
1037 u32 comp_info[8];
1038};
1039
1040struct fcoe_s_stat_ctx {
1041 u8 flags;
1042#define FCOE_S_STAT_CTX_ACTIVE (0x1<<0)
1043#define FCOE_S_STAT_CTX_ACTIVE_SHIFT 0
1044#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND (0x1<<1)
1045#define FCOE_S_STAT_CTX_ACK_ABORT_SEQ_COND_SHIFT 1
1046#define FCOE_S_STAT_CTX_ABTS_PERFORMED (0x1<<2)
1047#define FCOE_S_STAT_CTX_ABTS_PERFORMED_SHIFT 2
1048#define FCOE_S_STAT_CTX_SEQ_TIMEOUT (0x1<<3)
1049#define FCOE_S_STAT_CTX_SEQ_TIMEOUT_SHIFT 3
1050#define FCOE_S_STAT_CTX_P_RJT (0x1<<4)
1051#define FCOE_S_STAT_CTX_P_RJT_SHIFT 4
1052#define FCOE_S_STAT_CTX_ACK_EOFT (0x1<<5)
1053#define FCOE_S_STAT_CTX_ACK_EOFT_SHIFT 5
1054#define FCOE_S_STAT_CTX_RSRV1 (0x3<<6)
1055#define FCOE_S_STAT_CTX_RSRV1_SHIFT 6
1056};
1057
1058/*
1059 * Common section. Both TX and RX processing might write and read from it in different flows
1060 */
1061struct fcoe_task_ctx_entry_tx_rx_cmn {
1062 u32 data_2_trns;
1063 union fcoe_general_task_ctx general;
1064#if defined(__BIG_ENDIAN)
1065 u16 tx_low_seq_cnt;
1066 struct fcoe_s_stat_ctx tx_s_stat;
1067 u8 tx_seq_id;
1068#elif defined(__LITTLE_ENDIAN)
1069 u8 tx_seq_id;
1070 struct fcoe_s_stat_ctx tx_s_stat;
1071 u16 tx_low_seq_cnt;
1072#endif
1073 u32 common_flags;
1074#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID (0xFFFFFF<<0)
1075#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT 0
1076#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID (0x1<<24)
1077#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT 24
1078#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT (0x1<<25)
1079#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT_SHIFT 25
1080#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER (0x1<<26)
1081#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_XFER_SHIFT 26
1082#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF (0x1<<27)
1083#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_PEND_CONF_SHIFT 27
1084#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME (0x1<<28)
1085#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT 28
1086#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV (0x7<<29)
1087#define FCOE_TASK_CTX_ENTRY_TX_RX_CMN_RSRV_SHIFT 29
1088};
1089
1090struct fcoe_task_ctx_entry_rxwr_txrd {
1091#if defined(__BIG_ENDIAN)
1092 u16 rx_id;
1093 u16 rx_flags;
1094#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
1095#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
1096#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
1097#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
1098#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
1099#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
1100#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
1101#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
1102#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
1103#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
1104#elif defined(__LITTLE_ENDIAN)
1105 u16 rx_flags;
1106#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE (0xF<<0)
1107#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT 0
1108#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE (0x7<<4)
1109#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT 4
1110#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ (0x1<<7)
1111#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_CONF_REQ_SHIFT 7
1112#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME (0x1<<8)
1113#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_MISS_FRAME_SHIFT 8
1114#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0 (0x7F<<9)
1115#define FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RESERVED0_SHIFT 9
1116 u16 rx_id;
1117#endif
1118};
1119
1120struct fcoe_seq_ctx {
1121#if defined(__BIG_ENDIAN)
1122 u16 low_seq_cnt;
1123 struct fcoe_s_stat_ctx s_stat;
1124 u8 seq_id;
1125#elif defined(__LITTLE_ENDIAN)
1126 u8 seq_id;
1127 struct fcoe_s_stat_ctx s_stat;
1128 u16 low_seq_cnt;
1129#endif
1130#if defined(__BIG_ENDIAN)
1131 u16 err_seq_cnt;
1132 u16 high_seq_cnt;
1133#elif defined(__LITTLE_ENDIAN)
1134 u16 high_seq_cnt;
1135 u16 err_seq_cnt;
1136#endif
1137 u32 low_exp_ro;
1138 u32 high_exp_ro;
1139};
1140
1141struct fcoe_single_sge_ctx {
1142 struct regpair cur_buf_addr;
1143#if defined(__BIG_ENDIAN)
1144 u16 reserved0;
1145 u16 cur_buf_rem;
1146#elif defined(__LITTLE_ENDIAN)
1147 u16 cur_buf_rem;
1148 u16 reserved0;
1149#endif
1150};
1151
1152struct fcoe_mul_sges_ctx {
1153 struct regpair cur_sge_addr;
1154#if defined(__BIG_ENDIAN)
1155 u8 sgl_size;
1156 u8 cur_sge_idx;
1157 u16 cur_sge_off;
1158#elif defined(__LITTLE_ENDIAN)
1159 u16 cur_sge_off;
1160 u8 cur_sge_idx;
1161 u8 sgl_size;
1162#endif
1163};
1164
1165union fcoe_sgl_ctx {
1166 struct fcoe_single_sge_ctx single_sge;
1167 struct fcoe_mul_sges_ctx mul_sges;
1168};
1169
1170struct fcoe_task_ctx_entry_rx_only {
1171 struct fcoe_seq_ctx seq_ctx;
1172 struct fcoe_seq_ctx ooo_seq_ctx;
1173 u32 rsrv3;
1174 union fcoe_sgl_ctx sgl_ctx;
1175};
1176
1177struct ustorm_fcoe_task_ctx_entry_rd {
1178 struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
1179 struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
1180 struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
1181 struct fcoe_task_ctx_entry_rx_only rx_wr;
1182 u32 reserved;
1183};
1184
1185/*
1186 * Ustorm FCoE Storm Context
1187 */
1188struct ustorm_fcoe_st_context {
1189 struct ustorm_fcoe_params fcoe_params;
1190 struct regpair task_addr;
1191 struct regpair cq_base_addr;
1192 struct regpair rq_pbl_base;
1193 struct regpair rq_cur_page_addr;
1194 struct regpair confq_pbl_base_addr;
1195 struct regpair conn_db_base;
1196 struct regpair xfrq_base_addr;
1197 struct regpair lcq_base_addr;
1198#if defined(__BIG_ENDIAN)
1199 union fcoe_idx16_field_union rq_cons;
1200 union fcoe_idx16_field_union rq_prod;
1201#elif defined(__LITTLE_ENDIAN)
1202 union fcoe_idx16_field_union rq_prod;
1203 union fcoe_idx16_field_union rq_cons;
1204#endif
1205#if defined(__BIG_ENDIAN)
1206 u16 xfrq_prod;
1207 u16 cq_cons;
1208#elif defined(__LITTLE_ENDIAN)
1209 u16 cq_cons;
1210 u16 xfrq_prod;
1211#endif
1212#if defined(__BIG_ENDIAN)
1213 u16 lcq_cons;
1214 u16 hc_cram_address;
1215#elif defined(__LITTLE_ENDIAN)
1216 u16 hc_cram_address;
1217 u16 lcq_cons;
1218#endif
1219#if defined(__BIG_ENDIAN)
1220 u16 sq_xfrq_lcq_confq_size;
1221 u16 confq_prod;
1222#elif defined(__LITTLE_ENDIAN)
1223 u16 confq_prod;
1224 u16 sq_xfrq_lcq_confq_size;
1225#endif
1226#if defined(__BIG_ENDIAN)
1227 u8 hc_csdm_agg_int;
1228 u8 flags;
1229#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0)
1230#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0
1231#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1)
1232#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1
1233#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2)
1234#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2
1235#define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3)
1236#define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3
1237 u8 available_rqes;
1238 u8 sp_q_flush_cnt;
1239#elif defined(__LITTLE_ENDIAN)
1240 u8 sp_q_flush_cnt;
1241 u8 available_rqes;
1242 u8 flags;
1243#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG (0x1<<0)
1244#define USTORM_FCOE_ST_CONTEXT_MID_SEQ_PROC_FLAG_SHIFT 0
1245#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG (0x1<<1)
1246#define USTORM_FCOE_ST_CONTEXT_CACHED_CONN_FLAG_SHIFT 1
1247#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG (0x1<<2)
1248#define USTORM_FCOE_ST_CONTEXT_CACHED_TCE_FLAG_SHIFT 2
1249#define USTORM_FCOE_ST_CONTEXT_RSRV1 (0x1F<<3)
1250#define USTORM_FCOE_ST_CONTEXT_RSRV1_SHIFT 3
1251 u8 hc_csdm_agg_int;
1252#endif
1253 struct ustorm_fcoe_data_place data_place;
1254 struct ustorm_fcoe_task_ctx_entry_rd tce;
1255};
1256
1257/*
1258 * The FCoE non-aggregative context of Tstorm
1259 */
1260struct tstorm_fcoe_st_context {
1261 struct regpair reserved0;
1262 struct regpair reserved1;
1263};
1264
1265/*
1266 * The fcoe aggregative context section of Xstorm
1267 */
1268struct xstorm_fcoe_extra_ag_context_section {
1269#if defined(__BIG_ENDIAN)
1270 u8 tcp_agg_vars1;
1271#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
1272#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
1273#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
1274#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
1275#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4)
1276#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4
1277#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
1278#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
1279#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
1280#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
1281 u8 __reserved_da_cnt;
1282 u16 __mtu;
1283#elif defined(__LITTLE_ENDIAN)
1284 u16 __mtu;
1285 u8 __reserved_da_cnt;
1286 u8 tcp_agg_vars1;
1287#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51 (0x3<<0)
1288#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED51_SHIFT 0
1289#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED (0x3<<2)
1290#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_SHIFT 2
1291#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF (0x3<<4)
1292#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_SHIFT 4
1293#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN (0x1<<6)
1294#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_CLEAR_DA_TIMER_EN_SHIFT 6
1295#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG (0x1<<7)
1296#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_DA_EXPIRATION_FLAG_SHIFT 7
1297#endif
1298 u32 __task_addr_lo;
1299 u32 __task_addr_hi;
1300 u32 __reserved55;
1301 u32 __tx_prods;
1302#if defined(__BIG_ENDIAN)
1303 u8 __agg_val8_th;
1304 u8 __agg_val8;
1305 u16 tcp_agg_vars2;
1306#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
1307#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
1308#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
1309#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
1310#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
1311#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
1312#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
1313#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
1314#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
1315#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
1316#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
1317#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
1318#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
1319#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
1320#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7)
1321#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7
1322#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
1323#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
1324#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
1325#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
1326#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
1327#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
1328#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
1329#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
1330#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
1331#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
1332#elif defined(__LITTLE_ENDIAN)
1333 u16 tcp_agg_vars2;
1334#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57 (0x1<<0)
1335#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED57_SHIFT 0
1336#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58 (0x1<<1)
1337#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED58_SHIFT 1
1338#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59 (0x1<<2)
1339#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED59_SHIFT 2
1340#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
1341#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
1342#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
1343#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
1344#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60 (0x1<<5)
1345#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED60_SHIFT 5
1346#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN (0x1<<6)
1347#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_ACK_TO_FE_UPDATED_EN_SHIFT 6
1348#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN (0x1<<7)
1349#define XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_PBF_TX_SEQ_ACK_CF_EN_SHIFT 7
1350#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN (0x1<<8)
1351#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED_TX_FIN_FLAG_EN_SHIFT 8
1352#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
1353#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
1354#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
1355#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
1356#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
1357#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
1358#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
1359#define __XSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
1360 u8 __agg_val8;
1361 u8 __agg_val8_th;
1362#endif
1363 u32 __sq_base_addr_lo;
1364 u32 __sq_base_addr_hi;
1365 u32 __xfrq_base_addr_lo;
1366 u32 __xfrq_base_addr_hi;
1367#if defined(__BIG_ENDIAN)
1368 u16 __xfrq_cons;
1369 u16 __xfrq_prod;
1370#elif defined(__LITTLE_ENDIAN)
1371 u16 __xfrq_prod;
1372 u16 __xfrq_cons;
1373#endif
1374#if defined(__BIG_ENDIAN)
1375 u8 __tcp_agg_vars5;
1376 u8 __tcp_agg_vars4;
1377 u8 __tcp_agg_vars3;
1378 u8 __reserved_force_pure_ack_cnt;
1379#elif defined(__LITTLE_ENDIAN)
1380 u8 __reserved_force_pure_ack_cnt;
1381 u8 __tcp_agg_vars3;
1382 u8 __tcp_agg_vars4;
1383 u8 __tcp_agg_vars5;
1384#endif
1385 u32 __tcp_agg_vars6;
1386#if defined(__BIG_ENDIAN)
1387 u16 __agg_misc6;
1388 u16 __tcp_agg_vars7;
1389#elif defined(__LITTLE_ENDIAN)
1390 u16 __tcp_agg_vars7;
1391 u16 __agg_misc6;
1392#endif
1393 u32 __agg_val10;
1394 u32 __agg_val10_th;
1395#if defined(__BIG_ENDIAN)
1396 u16 __reserved3;
1397 u8 __reserved2;
1398 u8 __da_only_cnt;
1399#elif defined(__LITTLE_ENDIAN)
1400 u8 __da_only_cnt;
1401 u8 __reserved2;
1402 u16 __reserved3;
1403#endif
1404};
1405
1406/*
1407 * The FCoE aggregative context of Xstorm
1408 */
1409struct xstorm_fcoe_ag_context {
1410#if defined(__BIG_ENDIAN)
1411 u16 agg_val1;
1412 u8 agg_vars1;
1413#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1414#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1415#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1416#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1417#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
1418#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
1419#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
1420#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
1421#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
1422#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
1423#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
1424#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
1425#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
1426#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
1427#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
1428#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
1429 u8 __state;
1430#elif defined(__LITTLE_ENDIAN)
1431 u8 __state;
1432 u8 agg_vars1;
1433#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1434#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1435#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1436#define __XSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1437#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51 (0x1<<2)
1438#define __XSTORM_FCOE_AG_CONTEXT_RESERVED51_SHIFT 2
1439#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52 (0x1<<3)
1440#define __XSTORM_FCOE_AG_CONTEXT_RESERVED52_SHIFT 3
1441#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
1442#define __XSTORM_FCOE_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
1443#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN (0x1<<5)
1444#define XSTORM_FCOE_AG_CONTEXT_NAGLE_EN_SHIFT 5
1445#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
1446#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
1447#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN (0x1<<7)
1448#define __XSTORM_FCOE_AG_CONTEXT_RESERVED_UNA_GT_NXT_EN_SHIFT 7
1449 u16 agg_val1;
1450#endif
1451#if defined(__BIG_ENDIAN)
1452 u8 cdu_reserved;
1453 u8 __agg_vars4;
1454 u8 agg_vars3;
1455#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1456#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1457#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
1458#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
1459 u8 agg_vars2;
1460#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
1461#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
1462#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
1463#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
1464#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
1465#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
1466#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
1467#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
1468#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
1469#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
1470#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1471#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1472#elif defined(__LITTLE_ENDIAN)
1473 u8 agg_vars2;
1474#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF (0x3<<0)
1475#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_SHIFT 0
1476#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
1477#define __XSTORM_FCOE_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
1478#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG (0x1<<3)
1479#define __XSTORM_FCOE_AG_CONTEXT_AUX8_FLAG_SHIFT 3
1480#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG (0x1<<4)
1481#define __XSTORM_FCOE_AG_CONTEXT_AUX9_FLAG_SHIFT 4
1482#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
1483#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE1_SHIFT 5
1484#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1485#define __XSTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1486 u8 agg_vars3;
1487#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1488#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1489#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF (0x3<<6)
1490#define __XSTORM_FCOE_AG_CONTEXT_AUX19_CF_SHIFT 6
1491 u8 __agg_vars4;
1492 u8 cdu_reserved;
1493#endif
1494 u32 more_to_send;
1495#if defined(__BIG_ENDIAN)
1496 u16 agg_vars5;
1497#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
1498#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
1499#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
1500#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
1501#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
1502#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
1503#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
1504#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
1505 u16 sq_cons;
1506#elif defined(__LITTLE_ENDIAN)
1507 u16 sq_cons;
1508 u16 agg_vars5;
1509#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
1510#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE5_SHIFT 0
1511#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
1512#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
1513#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
1514#define XSTORM_FCOE_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
1515#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE (0x3<<14)
1516#define __XSTORM_FCOE_AG_CONTEXT_CONFQ_DEC_RULE_SHIFT 14
1517#endif
1518 struct xstorm_fcoe_extra_ag_context_section __extra_section;
1519#if defined(__BIG_ENDIAN)
1520 u16 agg_vars7;
1521#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
1522#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1523#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1524#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1525#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
1526#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
1527#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1528#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1529#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
1530#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
1531#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
1532#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
1533#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
1534#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
1535#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
1536#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
1537#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
1538#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1539#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1540#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1541#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
1542#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
1543 u8 agg_val3_th;
1544 u8 agg_vars6;
1545#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
1546#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
1547#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
1548#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
1549#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
1550#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
1551#elif defined(__LITTLE_ENDIAN)
1552 u8 agg_vars6;
1553#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
1554#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE6_SHIFT 0
1555#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE (0x7<<3)
1556#define __XSTORM_FCOE_AG_CONTEXT_XFRQ_DEC_RULE_SHIFT 3
1557#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE (0x3<<6)
1558#define __XSTORM_FCOE_AG_CONTEXT_SQ_DEC_RULE_SHIFT 6
1559 u8 agg_val3_th;
1560 u16 agg_vars7;
1561#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
1562#define __XSTORM_FCOE_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1563#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1564#define __XSTORM_FCOE_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1565#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF (0x3<<4)
1566#define __XSTORM_FCOE_AG_CONTEXT_QUEUE0_CF_SHIFT 4
1567#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1568#define XSTORM_FCOE_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1569#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF (0x3<<8)
1570#define XSTORM_FCOE_AG_CONTEXT_AUX1_CF_SHIFT 8
1571#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62 (0x1<<10)
1572#define __XSTORM_FCOE_AG_CONTEXT_RESERVED62_SHIFT 10
1573#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
1574#define __XSTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
1575#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG (0x1<<12)
1576#define __XSTORM_FCOE_AG_CONTEXT_AUX10_FLAG_SHIFT 12
1577#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG (0x1<<13)
1578#define __XSTORM_FCOE_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1579#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1580#define __XSTORM_FCOE_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1581#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG (0x1<<15)
1582#define __XSTORM_FCOE_AG_CONTEXT_AUX2_FLAG_SHIFT 15
1583#endif
1584#if defined(__BIG_ENDIAN)
1585 u16 __agg_val11_th;
1586 u16 __agg_val11;
1587#elif defined(__LITTLE_ENDIAN)
1588 u16 __agg_val11;
1589 u16 __agg_val11_th;
1590#endif
1591#if defined(__BIG_ENDIAN)
1592 u8 __reserved1;
1593 u8 __agg_val6_th;
1594 u16 __confq_tx_prod;
1595#elif defined(__LITTLE_ENDIAN)
1596 u16 __confq_tx_prod;
1597 u8 __agg_val6_th;
1598 u8 __reserved1;
1599#endif
1600#if defined(__BIG_ENDIAN)
1601 u16 confq_cons;
1602 u16 confq_prod;
1603#elif defined(__LITTLE_ENDIAN)
1604 u16 confq_prod;
1605 u16 confq_cons;
1606#endif
1607 u32 agg_vars8;
1608#define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX (0xFFFFFF<<0)
1609#define __XSTORM_FCOE_AG_CONTEXT_CACHE_WQE_IDX_SHIFT 0
1610#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
1611#define XSTORM_FCOE_AG_CONTEXT_AGG_MISC3_SHIFT 24
1612#if defined(__BIG_ENDIAN)
1613 u16 ox_id;
1614 u16 sq_prod;
1615#elif defined(__LITTLE_ENDIAN)
1616 u16 sq_prod;
1617 u16 ox_id;
1618#endif
1619#if defined(__BIG_ENDIAN)
1620 u8 agg_val3;
1621 u8 agg_val6;
1622 u8 agg_val5_th;
1623 u8 agg_val5;
1624#elif defined(__LITTLE_ENDIAN)
1625 u8 agg_val5;
1626 u8 agg_val5_th;
1627 u8 agg_val6;
1628 u8 agg_val3;
1629#endif
1630#if defined(__BIG_ENDIAN)
1631 u16 __pbf_tx_seq_ack;
1632 u16 agg_limit1;
1633#elif defined(__LITTLE_ENDIAN)
1634 u16 agg_limit1;
1635 u16 __pbf_tx_seq_ack;
1636#endif
1637 u32 completion_seq;
1638 u32 confq_pbl_base_lo;
1639 u32 confq_pbl_base_hi;
1640};
1641
1642/*
1643 * The FCoE extra aggregative context section of Tstorm
1644 */
1645struct tstorm_fcoe_extra_ag_context_section {
1646 u32 __agg_val1;
1647#if defined(__BIG_ENDIAN)
1648 u8 __tcp_agg_vars2;
1649 u8 __agg_val3;
1650 u16 __agg_val2;
1651#elif defined(__LITTLE_ENDIAN)
1652 u16 __agg_val2;
1653 u8 __agg_val3;
1654 u8 __tcp_agg_vars2;
1655#endif
1656#if defined(__BIG_ENDIAN)
1657 u16 __agg_val5;
1658 u8 __agg_val6;
1659 u8 __tcp_agg_vars3;
1660#elif defined(__LITTLE_ENDIAN)
1661 u8 __tcp_agg_vars3;
1662 u8 __agg_val6;
1663 u16 __agg_val5;
1664#endif
1665 u32 __lcq_prod;
1666 u32 rtt_seq;
1667 u32 rtt_time;
1668 u32 __reserved66;
1669 u32 wnd_right_edge;
1670 u32 tcp_agg_vars1;
1671#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
1672#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
1673#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
1674#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
1675#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
1676#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
1677#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
1678#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
1679#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
1680#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
1681#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
1682#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
1683#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
1684#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
1685#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN (0x1<<9)
1686#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_LCQ_SND_EN_SHIFT 9
1687#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
1688#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
1689#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
1690#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
1691#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
1692#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
1693#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
1694#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
1695#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
1696#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
1697#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
1698#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
1699#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
1700#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
1701#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
1702#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
1703#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
1704#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
1705#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
1706#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
1707#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
1708#define __TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
1709#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
1710#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
1711#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
1712#define TSTORM_FCOE_EXTRA_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
1713 u32 snd_max;
1714 u32 __lcq_cons;
1715 u32 __reserved2;
1716};
1717
1718/*
1719 * The FCoE aggregative context of Tstorm
1720 */
1721struct tstorm_fcoe_ag_context {
1722#if defined(__BIG_ENDIAN)
1723 u16 ulp_credit;
1724 u8 agg_vars1;
1725#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1726#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1727#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1728#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1729#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1730#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1731#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1732#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1733#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
1734#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
1735#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
1736#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
1737#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
1738#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
1739 u8 state;
1740#elif defined(__LITTLE_ENDIAN)
1741 u8 state;
1742 u8 agg_vars1;
1743#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1744#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1745#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1746#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1747#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1748#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1749#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1750#define TSTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1751#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF (0x3<<4)
1752#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_SHIFT 4
1753#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG (0x1<<6)
1754#define __TSTORM_FCOE_AG_CONTEXT_AUX3_FLAG_SHIFT 6
1755#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG (0x1<<7)
1756#define __TSTORM_FCOE_AG_CONTEXT_AUX4_FLAG_SHIFT 7
1757 u16 ulp_credit;
1758#endif
1759#if defined(__BIG_ENDIAN)
1760 u16 __agg_val4;
1761 u16 agg_vars2;
1762#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
1763#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
1764#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
1765#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
1766#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
1767#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
1768#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
1769#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
1770#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
1771#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
1772#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
1773#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
1774#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
1775#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
1776#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
1777#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
1778#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
1779#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
1780#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
1781#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
1782#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
1783#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
1784#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
1785#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
1786#elif defined(__LITTLE_ENDIAN)
1787 u16 agg_vars2;
1788#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG (0x1<<0)
1789#define __TSTORM_FCOE_AG_CONTEXT_AUX5_FLAG_SHIFT 0
1790#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG (0x1<<1)
1791#define __TSTORM_FCOE_AG_CONTEXT_AUX6_FLAG_SHIFT 1
1792#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF (0x3<<2)
1793#define __TSTORM_FCOE_AG_CONTEXT_AUX4_CF_SHIFT 2
1794#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF (0x3<<4)
1795#define __TSTORM_FCOE_AG_CONTEXT_AUX5_CF_SHIFT 4
1796#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF (0x3<<6)
1797#define __TSTORM_FCOE_AG_CONTEXT_AUX6_CF_SHIFT 6
1798#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF (0x3<<8)
1799#define __TSTORM_FCOE_AG_CONTEXT_AUX7_CF_SHIFT 8
1800#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG (0x1<<10)
1801#define __TSTORM_FCOE_AG_CONTEXT_AUX7_FLAG_SHIFT 10
1802#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN (0x1<<11)
1803#define __TSTORM_FCOE_AG_CONTEXT_QUEUE0_FLUSH_CF_EN_SHIFT 11
1804#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
1805#define TSTORM_FCOE_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
1806#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
1807#define TSTORM_FCOE_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
1808#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
1809#define TSTORM_FCOE_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
1810#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
1811#define TSTORM_FCOE_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
1812 u16 __agg_val4;
1813#endif
1814 struct tstorm_fcoe_extra_ag_context_section __extra_section;
1815};
1816
1817/*
1818 * The FCoE aggregative context of Ustorm
1819 */
1820struct ustorm_fcoe_ag_context {
1821#if defined(__BIG_ENDIAN)
1822 u8 __aux_counter_flags;
1823 u8 agg_vars2;
1824#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
1825#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
1826#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
1827#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
1828#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
1829#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
1830#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
1831#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
1832 u8 agg_vars1;
1833#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1834#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1835#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1836#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1837#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1838#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1839#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1840#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1841#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
1842#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
1843#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
1844#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
1845 u8 state;
1846#elif defined(__LITTLE_ENDIAN)
1847 u8 state;
1848 u8 agg_vars1;
1849#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1850#define __USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1851#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1852#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1853#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1854#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1855#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1856#define USTORM_FCOE_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1857#define USTORM_FCOE_AG_CONTEXT_INV_CF (0x3<<4)
1858#define USTORM_FCOE_AG_CONTEXT_INV_CF_SHIFT 4
1859#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF (0x3<<6)
1860#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_SHIFT 6
1861 u8 agg_vars2;
1862#define USTORM_FCOE_AG_CONTEXT_TX_CF (0x3<<0)
1863#define USTORM_FCOE_AG_CONTEXT_TX_CF_SHIFT 0
1864#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF (0x3<<2)
1865#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_SHIFT 2
1866#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
1867#define USTORM_FCOE_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
1868#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
1869#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
1870 u8 __aux_counter_flags;
1871#endif
1872#if defined(__BIG_ENDIAN)
1873 u8 cdu_usage;
1874 u8 agg_misc2;
1875 u16 pbf_tx_seq_ack;
1876#elif defined(__LITTLE_ENDIAN)
1877 u16 pbf_tx_seq_ack;
1878 u8 agg_misc2;
1879 u8 cdu_usage;
1880#endif
1881 u32 agg_misc4;
1882#if defined(__BIG_ENDIAN)
1883 u8 agg_val3_th;
1884 u8 agg_val3;
1885 u16 agg_misc3;
1886#elif defined(__LITTLE_ENDIAN)
1887 u16 agg_misc3;
1888 u8 agg_val3;
1889 u8 agg_val3_th;
1890#endif
1891 u32 expired_task_id;
1892 u32 agg_misc4_th;
1893#if defined(__BIG_ENDIAN)
1894 u16 cq_prod;
1895 u16 cq_cons;
1896#elif defined(__LITTLE_ENDIAN)
1897 u16 cq_cons;
1898 u16 cq_prod;
1899#endif
1900#if defined(__BIG_ENDIAN)
1901 u16 __reserved2;
1902 u8 decision_rules;
1903#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
1904#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
1905#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
1906#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
1907#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
1908#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
1909#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
1910#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
1911 u8 decision_rule_enable_bits;
1912#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
1913#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
1914#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
1915#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
1916#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
1917#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
1918#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
1919#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
1920#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
1921#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
1922#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
1923#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
1924#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
1925#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
1926#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1927#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1928#elif defined(__LITTLE_ENDIAN)
1929 u8 decision_rule_enable_bits;
1930#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN (0x1<<0)
1931#define __USTORM_FCOE_AG_CONTEXT_RESERVED_INV_CF_EN_SHIFT 0
1932#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
1933#define USTORM_FCOE_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
1934#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN (0x1<<2)
1935#define USTORM_FCOE_AG_CONTEXT_TX_CF_EN_SHIFT 2
1936#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
1937#define __USTORM_FCOE_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
1938#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN (0x1<<4)
1939#define __USTORM_FCOE_AG_CONTEXT_AUX1_CF_EN_SHIFT 4
1940#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN (0x1<<5)
1941#define __USTORM_FCOE_AG_CONTEXT_QUEUE0_CF_EN_SHIFT 5
1942#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
1943#define __USTORM_FCOE_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
1944#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1945#define __USTORM_FCOE_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1946 u8 decision_rules;
1947#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE (0x7<<0)
1948#define USTORM_FCOE_AG_CONTEXT_CQ_DEC_RULE_SHIFT 0
1949#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
1950#define __USTORM_FCOE_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
1951#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG (0x1<<6)
1952#define USTORM_FCOE_AG_CONTEXT_CQ_ARM_N_FLAG_SHIFT 6
1953#define __USTORM_FCOE_AG_CONTEXT_RESERVED1 (0x1<<7)
1954#define __USTORM_FCOE_AG_CONTEXT_RESERVED1_SHIFT 7
1955 u16 __reserved2;
1956#endif
1957};
1958
1959/*
1960 * Ethernet context section
1961 */
1962struct xstorm_fcoe_eth_context_section {
1963#if defined(__BIG_ENDIAN)
1964 u8 remote_addr_4;
1965 u8 remote_addr_5;
1966 u8 local_addr_0;
1967 u8 local_addr_1;
1968#elif defined(__LITTLE_ENDIAN)
1969 u8 local_addr_1;
1970 u8 local_addr_0;
1971 u8 remote_addr_5;
1972 u8 remote_addr_4;
1973#endif
1974#if defined(__BIG_ENDIAN)
1975 u8 remote_addr_0;
1976 u8 remote_addr_1;
1977 u8 remote_addr_2;
1978 u8 remote_addr_3;
1979#elif defined(__LITTLE_ENDIAN)
1980 u8 remote_addr_3;
1981 u8 remote_addr_2;
1982 u8 remote_addr_1;
1983 u8 remote_addr_0;
1984#endif
1985#if defined(__BIG_ENDIAN)
1986 u16 reserved_vlan_type;
1987 u16 params;
1988#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
1989#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
1990#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
1991#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
1992#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
1993#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
1994#elif defined(__LITTLE_ENDIAN)
1995 u16 params;
1996#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
1997#define XSTORM_FCOE_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
1998#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI (0x1<<12)
1999#define XSTORM_FCOE_ETH_CONTEXT_SECTION_CFI_SHIFT 12
2000#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
2001#define XSTORM_FCOE_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
2002 u16 reserved_vlan_type;
2003#endif
2004#if defined(__BIG_ENDIAN)
2005 u8 local_addr_2;
2006 u8 local_addr_3;
2007 u8 local_addr_4;
2008 u8 local_addr_5;
2009#elif defined(__LITTLE_ENDIAN)
2010 u8 local_addr_5;
2011 u8 local_addr_4;
2012 u8 local_addr_3;
2013 u8 local_addr_2;
2014#endif
2015};
2016
2017/*
2018 * Flags used in FCoE context section - 1 byte
2019 */
2020struct xstorm_fcoe_context_flags {
2021 u8 flags;
2022#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q (0x3<<0)
2023#define XSTORM_FCOE_CONTEXT_FLAGS_B_PROC_Q_SHIFT 0
2024#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ (0x1<<2)
2025#define XSTORM_FCOE_CONTEXT_FLAGS_B_MID_SEQ_SHIFT 2
2026#define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED (0x1<<3)
2027#define XSTORM_FCOE_CONTEXT_FLAGS_B_EXCHANGE_CLEANUP_DEFFERED_SHIFT 3
2028#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT (0x1<<4)
2029#define XSTORM_FCOE_CONTEXT_FLAGS_B_REC_SUPPORT_SHIFT 4
2030#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE (0x1<<5)
2031#define XSTORM_FCOE_CONTEXT_FLAGS_B_SQ_TOGGLE_SHIFT 5
2032#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE (0x1<<6)
2033#define XSTORM_FCOE_CONTEXT_FLAGS_B_XFRQ_TOGGLE_SHIFT 6
2034#define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED (0x1<<7)
2035#define XSTORM_FCOE_CONTEXT_FLAGS_B_ABTS_DEFFERED_SHIFT 7
2036};
2037
2038/*
2039 * FCoE SQ element
2040 */
2041struct fcoe_sqe {
2042 u16 wqe;
2043#define FCOE_SQE_TASK_ID (0x7FFF<<0)
2044#define FCOE_SQE_TASK_ID_SHIFT 0
2045#define FCOE_SQE_TOGGLE_BIT (0x1<<15)
2046#define FCOE_SQE_TOGGLE_BIT_SHIFT 15
2047};
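An SQ element packs a 15-bit task identifier and a single toggle bit into the one 16-bit wqe word via the mask/shift pairs above. As an illustrative sketch only (not code from the driver), a helper that composes such an element could look like:

/* Illustrative only: compose an SQ element from a task id and toggle bit
 * using the FCOE_SQE_* masks and shifts defined above.
 */
static inline void fcoe_sqe_fill(struct fcoe_sqe *sqe, u16 task_id, u8 toggle)
{
	sqe->wqe = (task_id << FCOE_SQE_TASK_ID_SHIFT) & FCOE_SQE_TASK_ID;
	sqe->wqe |= (toggle << FCOE_SQE_TOGGLE_BIT_SHIFT) & FCOE_SQE_TOGGLE_BIT;
}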
2048
2049/*
2050 * FCoE XFRQ element
2051 */
2052struct fcoe_xfrqe {
2053 u16 wqe;
2054#define FCOE_XFRQE_TASK_ID (0x7FFF<<0)
2055#define FCOE_XFRQE_TASK_ID_SHIFT 0
2056#define FCOE_XFRQE_TOGGLE_BIT (0x1<<15)
2057#define FCOE_XFRQE_TOGGLE_BIT_SHIFT 15
2058};
2059
2060/*
2061 * FCoE SQ/XFRQ element
2062 */
2063struct fcoe_cached_wqe {
2064#if defined(__BIG_ENDIAN)
2065 struct fcoe_xfrqe xfrqe;
2066 struct fcoe_sqe sqe;
2067#elif defined(__LITTLE_ENDIAN)
2068 struct fcoe_sqe sqe;
2069 struct fcoe_xfrqe xfrqe;
2070#endif
2071};
2072
2073struct fcoe_task_ctx_entry_tx_only {
2074 union fcoe_sgl_ctx sgl_ctx;
2075};
2076
2077struct xstorm_fcoe_task_ctx_entry_rd {
2078 struct fcoe_task_ctx_entry_tx_only tx_wr;
2079 struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
2080 struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
2081 struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
2082};
2083
2084/*
2085 * Cached SGEs
2086 */
2087struct common_fcoe_sgl {
2088 struct fcoe_bd_ctx sge[2];
2089};
2090
2091/*
2092 * FCP_DATA parameters required for transmission
2093 */
2094struct xstorm_fcoe_fcp_data {
2095 u32 io_rem;
2096#if defined(__BIG_ENDIAN)
2097 u16 cached_sge_off;
2098 u8 cached_num_sges;
2099 u8 cached_sge_idx;
2100#elif defined(__LITTLE_ENDIAN)
2101 u8 cached_sge_idx;
2102 u8 cached_num_sges;
2103 u16 cached_sge_off;
2104#endif
2105 struct common_fcoe_sgl cached_sgl;
2106};
2107
2108/*
2109 * FCoE context section
2110 */
2111struct xstorm_fcoe_context_section {
2112#if defined(__BIG_ENDIAN)
2113 u8 vlan_flag;
2114 u8 s_id[3];
2115#elif defined(__LITTLE_ENDIAN)
2116 u8 s_id[3];
2117 u8 vlan_flag;
2118#endif
2119#if defined(__BIG_ENDIAN)
2120 u8 func_id;
2121 u8 d_id[3];
2122#elif defined(__LITTLE_ENDIAN)
2123 u8 d_id[3];
2124 u8 func_id;
2125#endif
2126#if defined(__BIG_ENDIAN)
2127 u16 sq_xfrq_lcq_confq_size;
2128 u16 tx_max_fc_pay_len;
2129#elif defined(__LITTLE_ENDIAN)
2130 u16 tx_max_fc_pay_len;
2131 u16 sq_xfrq_lcq_confq_size;
2132#endif
2133 u32 lcq_prod;
2134#if defined(__BIG_ENDIAN)
2135 u8 port_id;
2136 u8 tx_max_conc_seqs_c3;
2137 u8 seq_id;
2138 struct xstorm_fcoe_context_flags tx_flags;
2139#elif defined(__LITTLE_ENDIAN)
2140 struct xstorm_fcoe_context_flags tx_flags;
2141 u8 seq_id;
2142 u8 tx_max_conc_seqs_c3;
2143 u8 port_id;
2144#endif
2145#if defined(__BIG_ENDIAN)
2146 u16 verify_tx_seq;
2147 u8 func_mode;
2148 u8 vnic_id;
2149#elif defined(__LITTLE_ENDIAN)
2150 u8 vnic_id;
2151 u8 func_mode;
2152 u16 verify_tx_seq;
2153#endif
2154 struct regpair confq_curr_page_addr;
2155 struct fcoe_cached_wqe cached_wqe[8];
2156 struct regpair lcq_base_addr;
2157 struct xstorm_fcoe_task_ctx_entry_rd tce;
2158 struct xstorm_fcoe_fcp_data fcp_data;
2159#if defined(__BIG_ENDIAN)
2160 u16 fcoe_tx_stat_params_ram_addr;
2161 u16 cmng_port_ram_addr;
2162#elif defined(__LITTLE_ENDIAN)
2163 u16 cmng_port_ram_addr;
2164 u16 fcoe_tx_stat_params_ram_addr;
2165#endif
2166#if defined(__BIG_ENDIAN)
2167 u8 fcp_cmd_pb_cmd_size;
2168 u8 eth_hdr_size;
2169 u16 pbf_addr;
2170#elif defined(__LITTLE_ENDIAN)
2171 u16 pbf_addr;
2172 u8 eth_hdr_size;
2173 u8 fcp_cmd_pb_cmd_size;
2174#endif
2175#if defined(__BIG_ENDIAN)
2176 u8 reserved2[2];
2177 u8 cos;
2178 u8 dcb_version;
2179#elif defined(__LITTLE_ENDIAN)
2180 u8 dcb_version;
2181 u8 cos;
2182 u8 reserved2[2];
2183#endif
2184 u32 reserved3;
2185 struct regpair reserved4[2];
2186};
2187
2188/*
2189 * Xstorm FCoE Storm Context
2190 */
2191struct xstorm_fcoe_st_context {
2192 struct xstorm_fcoe_eth_context_section eth;
2193 struct xstorm_fcoe_context_section fcoe;
2194};
2195
2196/*
2197 * FCoE connection context
2198 */
2199struct fcoe_context {
2200 struct ustorm_fcoe_st_context ustorm_st_context;
2201 struct tstorm_fcoe_st_context tstorm_st_context;
2202 struct xstorm_fcoe_ag_context xstorm_ag_context;
2203 struct tstorm_fcoe_ag_context tstorm_ag_context;
2204 struct ustorm_fcoe_ag_context ustorm_ag_context;
2205 struct timers_block_context timers_context;
2206 struct xstorm_fcoe_st_context xstorm_st_context;
2207};
2208
2209/*
686 * iSCSI context region, used only in iSCSI 2210 * iSCSI context region, used only in iSCSI
687 */ 2211 */
688struct ustorm_iscsi_rq_db { 2212struct ustorm_iscsi_rq_db {
@@ -2268,6 +3792,577 @@ struct iscsi_context {
2268}; 3792};
2269 3793
2270/* 3794/*
3795 * FCoE KCQ CQE parameters
3796 */
3797union fcoe_kcqe_params {
3798 u32 reserved0[4];
3799};
3800
3801/*
3802 * FCoE KCQ CQE
3803 */
3804struct fcoe_kcqe {
3805 u32 fcoe_conn_id;
3806 u32 completion_status;
3807 u32 fcoe_conn_context_id;
3808 union fcoe_kcqe_params params;
3809#if defined(__BIG_ENDIAN)
3810 u8 flags;
3811#define FCOE_KCQE_RESERVED0 (0x7<<0)
3812#define FCOE_KCQE_RESERVED0_SHIFT 0
3813#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
3814#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
3815#define FCOE_KCQE_LAYER_CODE (0x7<<4)
3816#define FCOE_KCQE_LAYER_CODE_SHIFT 4
3817#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
3818#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
3819 u8 op_code;
3820 u16 qe_self_seq;
3821#elif defined(__LITTLE_ENDIAN)
3822 u16 qe_self_seq;
3823 u8 op_code;
3824 u8 flags;
3825#define FCOE_KCQE_RESERVED0 (0x7<<0)
3826#define FCOE_KCQE_RESERVED0_SHIFT 0
3827#define FCOE_KCQE_RAMROD_COMPLETION (0x1<<3)
3828#define FCOE_KCQE_RAMROD_COMPLETION_SHIFT 3
3829#define FCOE_KCQE_LAYER_CODE (0x7<<4)
3830#define FCOE_KCQE_LAYER_CODE_SHIFT 4
3831#define FCOE_KCQE_LINKED_WITH_NEXT (0x1<<7)
3832#define FCOE_KCQE_LINKED_WITH_NEXT_SHIFT 7
3833#endif
3834};
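The flags byte of a KCQE carries a 3-bit layer code plus the ramrod-completion and linked-with-next indicators, next to the opcode and self-sequence fields whose byte order depends on endianness. A hedged sketch, not taken from the driver, of how a consumer might decode a completed entry:

/* Illustrative only: pull the layer code and ramrod-completion bit out of
 * a received FCoE KCQE using the FCOE_KCQE_* masks defined above.
 */
static inline u8 fcoe_kcqe_layer_code(const struct fcoe_kcqe *kcqe)
{
	return (kcqe->flags & FCOE_KCQE_LAYER_CODE) >> FCOE_KCQE_LAYER_CODE_SHIFT;
}

static inline int fcoe_kcqe_is_ramrod_completion(const struct fcoe_kcqe *kcqe)
{
	return !!(kcqe->flags & FCOE_KCQE_RAMROD_COMPLETION);
}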
3835
3836/*
3837 * FCoE KWQE header
3838 */
3839struct fcoe_kwqe_header {
3840#if defined(__BIG_ENDIAN)
3841 u8 flags;
3842#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
3843#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
3844#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
3845#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
3846#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
3847#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
3848 u8 op_code;
3849#elif defined(__LITTLE_ENDIAN)
3850 u8 op_code;
3851 u8 flags;
3852#define FCOE_KWQE_HEADER_RESERVED0 (0xF<<0)
3853#define FCOE_KWQE_HEADER_RESERVED0_SHIFT 0
3854#define FCOE_KWQE_HEADER_LAYER_CODE (0x7<<4)
3855#define FCOE_KWQE_HEADER_LAYER_CODE_SHIFT 4
3856#define FCOE_KWQE_HEADER_RESERVED1 (0x1<<7)
3857#define FCOE_KWQE_HEADER_RESERVED1_SHIFT 7
3858#endif
3859};
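Every KWQE begins with this two-byte header, and the upper nibble of flags holds the layer code. The sketch below is illustrative only and assumes the same layer numbering as the KWQE_FLAGS_LAYER_MASK_L5_FCOE value (7) shown later in cnic_if.h; that reuse is an assumption, not something this header states:

/* Illustrative only: stamp a KWQE header with an opcode and the (assumed)
 * FCoE layer number 7.
 */
static inline void fcoe_kwqe_hdr_init(struct fcoe_kwqe_header *hdr, u8 op_code)
{
	hdr->op_code = op_code;
	hdr->flags = (7 << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT) &
		     FCOE_KWQE_HEADER_LAYER_CODE;
}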
3860
3861/*
3862 * FCoE firmware init request 1
3863 */
3864struct fcoe_kwqe_init1 {
3865#if defined(__BIG_ENDIAN)
3866 struct fcoe_kwqe_header hdr;
3867 u16 num_tasks;
3868#elif defined(__LITTLE_ENDIAN)
3869 u16 num_tasks;
3870 struct fcoe_kwqe_header hdr;
3871#endif
3872 u32 task_list_pbl_addr_lo;
3873 u32 task_list_pbl_addr_hi;
3874 u32 dummy_buffer_addr_lo;
3875 u32 dummy_buffer_addr_hi;
3876#if defined(__BIG_ENDIAN)
3877 u16 rq_num_wqes;
3878 u16 sq_num_wqes;
3879#elif defined(__LITTLE_ENDIAN)
3880 u16 sq_num_wqes;
3881 u16 rq_num_wqes;
3882#endif
3883#if defined(__BIG_ENDIAN)
3884 u16 cq_num_wqes;
3885 u16 rq_buffer_log_size;
3886#elif defined(__LITTLE_ENDIAN)
3887 u16 rq_buffer_log_size;
3888 u16 cq_num_wqes;
3889#endif
3890#if defined(__BIG_ENDIAN)
3891 u8 flags;
3892#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
3893#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
3894#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
3895#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
3896#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
3897#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
3898 u8 num_sessions_log;
3899 u16 mtu;
3900#elif defined(__LITTLE_ENDIAN)
3901 u16 mtu;
3902 u8 num_sessions_log;
3903 u8 flags;
3904#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE (0xF<<0)
3905#define FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT 0
3906#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC (0x7<<4)
3907#define FCOE_KWQE_INIT1_LOG_CACHED_PBES_PER_FUNC_SHIFT 4
3908#define FCOE_KWQE_INIT1_RESERVED1 (0x1<<7)
3909#define FCOE_KWQE_INIT1_RESERVED1_SHIFT 7
3910#endif
3911};
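The init1 request carries the task-list and dummy-buffer addresses, the SQ/RQ/CQ sizes, and a low nibble of flags named LOG_PAGE_SIZE. Assuming that nibble expects the plain log2 of the host page size (an assumption, not stated in the header), a minimal sketch using the kernel's ilog2() and PAGE_SIZE helpers would be:

/* Illustrative only: encode log2(PAGE_SIZE) into the init1 flags field,
 * assuming the firmware expects the raw log2 value in the low nibble.
 */
static inline void fcoe_kwqe_init1_set_page_size(struct fcoe_kwqe_init1 *req)
{
	req->flags &= ~FCOE_KWQE_INIT1_LOG_PAGE_SIZE;
	req->flags |= (ilog2(PAGE_SIZE) << FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT) &
		      FCOE_KWQE_INIT1_LOG_PAGE_SIZE;
}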
3912
3913/*
3914 * FCoE firmware init request 2
3915 */
3916struct fcoe_kwqe_init2 {
3917#if defined(__BIG_ENDIAN)
3918 struct fcoe_kwqe_header hdr;
3919 u16 reserved0;
3920#elif defined(__LITTLE_ENDIAN)
3921 u16 reserved0;
3922 struct fcoe_kwqe_header hdr;
3923#endif
3924 u32 hash_tbl_pbl_addr_lo;
3925 u32 hash_tbl_pbl_addr_hi;
3926 u32 t2_hash_tbl_addr_lo;
3927 u32 t2_hash_tbl_addr_hi;
3928 u32 t2_ptr_hash_tbl_addr_lo;
3929 u32 t2_ptr_hash_tbl_addr_hi;
3930 u32 free_list_count;
3931};
3932
3933/*
3934 * FCoE firmware init request 3
3935 */
3936struct fcoe_kwqe_init3 {
3937#if defined(__BIG_ENDIAN)
3938 struct fcoe_kwqe_header hdr;
3939 u16 reserved0;
3940#elif defined(__LITTLE_ENDIAN)
3941 u16 reserved0;
3942 struct fcoe_kwqe_header hdr;
3943#endif
3944 u32 error_bit_map_lo;
3945 u32 error_bit_map_hi;
3946#if defined(__BIG_ENDIAN)
3947 u8 reserved21[3];
3948 u8 cached_session_enable;
3949#elif defined(__LITTLE_ENDIAN)
3950 u8 cached_session_enable;
3951 u8 reserved21[3];
3952#endif
3953 u32 reserved2[4];
3954};
3955
3956/*
3957 * FCoE connection offload request 1
3958 */
3959struct fcoe_kwqe_conn_offload1 {
3960#if defined(__BIG_ENDIAN)
3961 struct fcoe_kwqe_header hdr;
3962 u16 fcoe_conn_id;
3963#elif defined(__LITTLE_ENDIAN)
3964 u16 fcoe_conn_id;
3965 struct fcoe_kwqe_header hdr;
3966#endif
3967 u32 sq_addr_lo;
3968 u32 sq_addr_hi;
3969 u32 rq_pbl_addr_lo;
3970 u32 rq_pbl_addr_hi;
3971 u32 rq_first_pbe_addr_lo;
3972 u32 rq_first_pbe_addr_hi;
3973#if defined(__BIG_ENDIAN)
3974 u16 reserved0;
3975 u16 rq_prod;
3976#elif defined(__LITTLE_ENDIAN)
3977 u16 rq_prod;
3978 u16 reserved0;
3979#endif
3980};
3981
3982/*
3983 * FCoE connection offload request 2
3984 */
3985struct fcoe_kwqe_conn_offload2 {
3986#if defined(__BIG_ENDIAN)
3987 struct fcoe_kwqe_header hdr;
3988 u16 tx_max_fc_pay_len;
3989#elif defined(__LITTLE_ENDIAN)
3990 u16 tx_max_fc_pay_len;
3991 struct fcoe_kwqe_header hdr;
3992#endif
3993 u32 cq_addr_lo;
3994 u32 cq_addr_hi;
3995 u32 xferq_addr_lo;
3996 u32 xferq_addr_hi;
3997 u32 conn_db_addr_lo;
3998 u32 conn_db_addr_hi;
3999 u32 reserved1;
4000};
4001
4002/*
4003 * FCoE connection offload request 3
4004 */
4005struct fcoe_kwqe_conn_offload3 {
4006#if defined(__BIG_ENDIAN)
4007 struct fcoe_kwqe_header hdr;
4008 u16 vlan_tag;
4009#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
4010#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
4011#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
4012#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
4013#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
4014#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
4015#elif defined(__LITTLE_ENDIAN)
4016 u16 vlan_tag;
4017#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID (0xFFF<<0)
4018#define FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT 0
4019#define FCOE_KWQE_CONN_OFFLOAD3_CFI (0x1<<12)
4020#define FCOE_KWQE_CONN_OFFLOAD3_CFI_SHIFT 12
4021#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY (0x7<<13)
4022#define FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT 13
4023 struct fcoe_kwqe_header hdr;
4024#endif
4025#if defined(__BIG_ENDIAN)
4026 u8 tx_max_conc_seqs_c3;
4027 u8 s_id[3];
4028#elif defined(__LITTLE_ENDIAN)
4029 u8 s_id[3];
4030 u8 tx_max_conc_seqs_c3;
4031#endif
4032#if defined(__BIG_ENDIAN)
4033 u8 flags;
4034#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
4035#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
4036#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
4037#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
4038#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
4039#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
4040#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
4041#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
4042#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
4043#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
4044#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
4045#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
4046#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
4047#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
4048#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
4049#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
4050 u8 d_id[3];
4051#elif defined(__LITTLE_ENDIAN)
4052 u8 d_id[3];
4053 u8 flags;
4054#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS (0x1<<0)
4055#define FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT 0
4056#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES (0x1<<1)
4057#define FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT 1
4058#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT (0x1<<2)
4059#define FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT 2
4060#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ (0x1<<3)
4061#define FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT 3
4062#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID (0x1<<4)
4063#define FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT 4
4064#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID (0x1<<5)
4065#define FCOE_KWQE_CONN_OFFLOAD3_B_C2_VALID_SHIFT 5
4066#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0 (0x1<<6)
4067#define FCOE_KWQE_CONN_OFFLOAD3_B_ACK_0_SHIFT 6
4068#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG (0x1<<7)
4069#define FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT 7
4070#endif
4071 u32 reserved;
4072 u32 confq_first_pbe_addr_lo;
4073 u32 confq_first_pbe_addr_hi;
4074#if defined(__BIG_ENDIAN)
4075 u16 rx_max_fc_pay_len;
4076 u16 tx_total_conc_seqs;
4077#elif defined(__LITTLE_ENDIAN)
4078 u16 tx_total_conc_seqs;
4079 u16 rx_max_fc_pay_len;
4080#endif
4081#if defined(__BIG_ENDIAN)
4082 u8 rx_open_seqs_exch_c3;
4083 u8 rx_max_conc_seqs_c3;
4084 u16 rx_total_conc_seqs;
4085#elif defined(__LITTLE_ENDIAN)
4086 u16 rx_total_conc_seqs;
4087 u8 rx_max_conc_seqs_c3;
4088 u8 rx_open_seqs_exch_c3;
4089#endif
4090};
4091
4092/*
4093 * FCoE connection offload request 4
4094 */
4095struct fcoe_kwqe_conn_offload4 {
4096#if defined(__BIG_ENDIAN)
4097 struct fcoe_kwqe_header hdr;
4098 u8 reserved2;
4099 u8 e_d_tov_timer_val;
4100#elif defined(__LITTLE_ENDIAN)
4101 u8 e_d_tov_timer_val;
4102 u8 reserved2;
4103 struct fcoe_kwqe_header hdr;
4104#endif
4105 u8 src_mac_addr_lo32[4];
4106#if defined(__BIG_ENDIAN)
4107 u8 dst_mac_addr_hi16[2];
4108 u8 src_mac_addr_hi16[2];
4109#elif defined(__LITTLE_ENDIAN)
4110 u8 src_mac_addr_hi16[2];
4111 u8 dst_mac_addr_hi16[2];
4112#endif
4113 u8 dst_mac_addr_lo32[4];
4114 u32 lcq_addr_lo;
4115 u32 lcq_addr_hi;
4116 u32 confq_pbl_base_addr_lo;
4117 u32 confq_pbl_base_addr_hi;
4118};
4119
4120/*
4121 * FCoE connection enable/disable request
4122 */
4123struct fcoe_kwqe_conn_enable_disable {
4124#if defined(__BIG_ENDIAN)
4125 struct fcoe_kwqe_header hdr;
4126 u16 reserved0;
4127#elif defined(__LITTLE_ENDIAN)
4128 u16 reserved0;
4129 struct fcoe_kwqe_header hdr;
4130#endif
4131 u8 src_mac_addr_lo32[4];
4132#if defined(__BIG_ENDIAN)
4133 u16 vlan_tag;
4134#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
4135#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
4136#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
4137#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
4138#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
4139#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
4140 u8 src_mac_addr_hi16[2];
4141#elif defined(__LITTLE_ENDIAN)
4142 u8 src_mac_addr_hi16[2];
4143 u16 vlan_tag;
4144#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID (0xFFF<<0)
4145#define FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT 0
4146#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI (0x1<<12)
4147#define FCOE_KWQE_CONN_ENABLE_DISABLE_CFI_SHIFT 12
4148#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY (0x7<<13)
4149#define FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT 13
4150#endif
4151 u8 dst_mac_addr_lo32[4];
4152#if defined(__BIG_ENDIAN)
4153 u16 reserved1;
4154 u8 dst_mac_addr_hi16[2];
4155#elif defined(__LITTLE_ENDIAN)
4156 u8 dst_mac_addr_hi16[2];
4157 u16 reserved1;
4158#endif
4159#if defined(__BIG_ENDIAN)
4160 u8 vlan_flag;
4161 u8 s_id[3];
4162#elif defined(__LITTLE_ENDIAN)
4163 u8 s_id[3];
4164 u8 vlan_flag;
4165#endif
4166#if defined(__BIG_ENDIAN)
4167 u8 reserved3;
4168 u8 d_id[3];
4169#elif defined(__LITTLE_ENDIAN)
4170 u8 d_id[3];
4171 u8 reserved3;
4172#endif
4173 u32 context_id;
4174 u32 conn_id;
4175 u32 reserved4;
4176};
4177
4178/*
4179 * FCoE connection destroy request
4180 */
4181struct fcoe_kwqe_conn_destroy {
4182#if defined(__BIG_ENDIAN)
4183 struct fcoe_kwqe_header hdr;
4184 u16 reserved0;
4185#elif defined(__LITTLE_ENDIAN)
4186 u16 reserved0;
4187 struct fcoe_kwqe_header hdr;
4188#endif
4189 u32 context_id;
4190 u32 conn_id;
4191 u32 reserved1[5];
4192};
4193
4194/*
4195 * FCoE destroy request
4196 */
4197struct fcoe_kwqe_destroy {
4198#if defined(__BIG_ENDIAN)
4199 struct fcoe_kwqe_header hdr;
4200 u16 reserved0;
4201#elif defined(__LITTLE_ENDIAN)
4202 u16 reserved0;
4203 struct fcoe_kwqe_header hdr;
4204#endif
4205 u32 reserved1[7];
4206};
4207
4208/*
4209 * FCoE statistics request
4210 */
4211struct fcoe_kwqe_stat {
4212#if defined(__BIG_ENDIAN)
4213 struct fcoe_kwqe_header hdr;
4214 u16 reserved0;
4215#elif defined(__LITTLE_ENDIAN)
4216 u16 reserved0;
4217 struct fcoe_kwqe_header hdr;
4218#endif
4219 u32 stat_params_addr_lo;
4220 u32 stat_params_addr_hi;
4221 u32 reserved1[5];
4222};
4223
4224/*
4225 * FCoE KWQ WQE
4226 */
4227union fcoe_kwqe {
4228 struct fcoe_kwqe_init1 init1;
4229 struct fcoe_kwqe_init2 init2;
4230 struct fcoe_kwqe_init3 init3;
4231 struct fcoe_kwqe_conn_offload1 conn_offload1;
4232 struct fcoe_kwqe_conn_offload2 conn_offload2;
4233 struct fcoe_kwqe_conn_offload3 conn_offload3;
4234 struct fcoe_kwqe_conn_offload4 conn_offload4;
4235 struct fcoe_kwqe_conn_enable_disable conn_enable_disable;
4236 struct fcoe_kwqe_conn_destroy conn_destroy;
4237 struct fcoe_kwqe_destroy destroy;
4238 struct fcoe_kwqe_stat statistics;
4239};
4240
4241struct fcoe_task_ctx_entry {
4242 struct fcoe_task_ctx_entry_tx_only tx_wr_only;
4243 struct fcoe_task_ctx_entry_txwr_rxrd tx_wr_rx_rd;
4244 struct fcoe_task_ctx_entry_tx_rx_cmn cmn;
4245 struct fcoe_task_ctx_entry_rxwr_txrd rx_wr_tx_rd;
4246 struct fcoe_task_ctx_entry_rx_only rx_wr_only;
4247 u32 reserved[4];
4248};
4249
4250/*
4251 * FCoE connection enable/disable params passed by driver to FW in FCoE enable ramrod
4252 */
4253struct fcoe_conn_enable_disable_ramrod_params {
4254 struct fcoe_kwqe_conn_enable_disable enable_disable_kwqe;
4255};
4256
4257
4258/*
4259 * FCoE connection offload params passed by driver to FW in FCoE offload ramrod
4260 */
4261struct fcoe_conn_offload_ramrod_params {
4262 struct fcoe_kwqe_conn_offload1 offload_kwqe1;
4263 struct fcoe_kwqe_conn_offload2 offload_kwqe2;
4264 struct fcoe_kwqe_conn_offload3 offload_kwqe3;
4265 struct fcoe_kwqe_conn_offload4 offload_kwqe4;
4266};
4267
4268/*
4269 * FCoE init params passed by driver to FW in FCoE init ramrod
4270 */
4271struct fcoe_init_ramrod_params {
4272 struct fcoe_kwqe_init1 init_kwqe1;
4273 struct fcoe_kwqe_init2 init_kwqe2;
4274 struct fcoe_kwqe_init3 init_kwqe3;
4275 struct regpair eq_addr;
4276 struct regpair eq_next_page_addr;
4277#if defined(__BIG_ENDIAN)
4278 u16 sb_num;
4279 u16 eq_prod;
4280#elif defined(__LITTLE_ENDIAN)
4281 u16 eq_prod;
4282 u16 sb_num;
4283#endif
4284#if defined(__BIG_ENDIAN)
4285 u16 reserved1;
4286 u8 reserved0;
4287 u8 sb_id;
4288#elif defined(__LITTLE_ENDIAN)
4289 u8 sb_id;
4290 u8 reserved0;
4291 u16 reserved1;
4292#endif
4293};
4294
4295
4296/*
4297 * FCoE statistics params buffer passed by driver to FW in FCoE statistics ramrod
4298 */
4299struct fcoe_stat_ramrod_params {
4300 struct fcoe_kwqe_stat stat_kwqe;
4301};
4302
4303
4304/*
4305 * FCoE 16-bit VLAN structure
4306 */
4307struct fcoe_vlan_fields {
4308 u16 fields;
4309#define FCOE_VLAN_FIELDS_VID (0xFFF<<0)
4310#define FCOE_VLAN_FIELDS_VID_SHIFT 0
4311#define FCOE_VLAN_FIELDS_CLI (0x1<<12)
4312#define FCOE_VLAN_FIELDS_CLI_SHIFT 12
4313#define FCOE_VLAN_FIELDS_PRI (0x7<<13)
4314#define FCOE_VLAN_FIELDS_PRI_SHIFT 13
4315};
4316
4317
4318/*
4319 * FCoE 16-bit VLAN union
4320 */
4321union fcoe_vlan_field_union {
4322 struct fcoe_vlan_fields fields;
4323 u16 val;
4324};
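The union lets the same 16 bits be read either as a raw value or through the VID/CFI/priority split above (note the CFI bit is spelled CLI in the macro names). A hedged sketch, not driver code, of building a tag through the bitfield view and handing it back as a plain u16:

/* Illustrative only: pack VID, CFI and priority with the FCOE_VLAN_FIELDS_*
 * masks, then read the result back through the union as a raw 16-bit value.
 */
static inline u16 fcoe_vlan_tag_build(u16 vid, u8 cfi, u8 pri)
{
	union fcoe_vlan_field_union v;

	v.fields.fields = (vid << FCOE_VLAN_FIELDS_VID_SHIFT) & FCOE_VLAN_FIELDS_VID;
	v.fields.fields |= (cfi << FCOE_VLAN_FIELDS_CLI_SHIFT) & FCOE_VLAN_FIELDS_CLI;
	v.fields.fields |= (pri << FCOE_VLAN_FIELDS_PRI_SHIFT) & FCOE_VLAN_FIELDS_PRI;
	return v.val;
}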
4325
4326/*
4327 * Parameters used for Class 2 verifications
4328 */
4329struct ustorm_fcoe_c2_params {
4330#if defined(__BIG_ENDIAN)
4331 u16 e2e_credit;
4332 u16 con_seq;
4333#elif defined(__LITTLE_ENDIAN)
4334 u16 con_seq;
4335 u16 e2e_credit;
4336#endif
4337#if defined(__BIG_ENDIAN)
4338 u16 ackq_prod;
4339 u16 open_seq_per_exch;
4340#elif defined(__LITTLE_ENDIAN)
4341 u16 open_seq_per_exch;
4342 u16 ackq_prod;
4343#endif
4344 struct regpair ackq_pbl_base;
4345 struct regpair ackq_cur_seg;
4346};
4347
4348/*
4349 * Parameters used for Class 2 verifications
4350 */
4351struct xstorm_fcoe_c2_params {
4352#if defined(__BIG_ENDIAN)
4353 u16 reserved0;
4354 u8 ackq_x_prod;
4355 u8 max_conc_seqs_c2;
4356#elif defined(__LITTLE_ENDIAN)
4357 u8 max_conc_seqs_c2;
4358 u8 ackq_x_prod;
4359 u16 reserved0;
4360#endif
4361 struct regpair ackq_pbl_base;
4362 struct regpair ackq_cur_seg;
4363};
4364
4365/*
2271 * Buffer per connection, used in Tstorm 4366 * Buffer per connection, used in Tstorm
2272 */ 4367 */
2273struct iscsi_conn_buf { 4368struct iscsi_conn_buf {
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index 0dbeaec4f03a..9f44e0ffe003 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -1,6 +1,6 @@
1/* cnic_if.h: Broadcom CNIC core network driver. 1/* cnic_if.h: Broadcom CNIC core network driver.
2 * 2 *
3 * Copyright (c) 2006-2010 Broadcom Corporation 3 * Copyright (c) 2006-2011 Broadcom Corporation
4 * 4 *
5 * This program is free software; you can redistribute it and/or modify 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by 6 * it under the terms of the GNU General Public License as published by
@@ -12,22 +12,31 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.2.6" 15#define CNIC_MODULE_VERSION "2.2.12"
16#define CNIC_MODULE_RELDATE "Oct 12, 2010" 16#define CNIC_MODULE_RELDATE "Jan 03, 2011"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
20#define CNIC_ULP_L4 2 20#define CNIC_ULP_FCOE 2
21#define MAX_CNIC_ULP_TYPE_EXT 2 21#define CNIC_ULP_L4 3
22#define MAX_CNIC_ULP_TYPE 3 22#define MAX_CNIC_ULP_TYPE_EXT 3
23#define MAX_CNIC_ULP_TYPE 4
23 24
24struct kwqe { 25struct kwqe {
25 u32 kwqe_op_flag; 26 u32 kwqe_op_flag;
26 27
28#define KWQE_QID_SHIFT 8
27#define KWQE_OPCODE_MASK 0x00ff0000 29#define KWQE_OPCODE_MASK 0x00ff0000
28#define KWQE_OPCODE_SHIFT 16 30#define KWQE_OPCODE_SHIFT 16
29#define KWQE_FLAGS_LAYER_SHIFT 28
30#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT) 31#define KWQE_OPCODE(x) ((x & KWQE_OPCODE_MASK) >> KWQE_OPCODE_SHIFT)
32#define KWQE_LAYER_MASK 0x70000000
33#define KWQE_LAYER_SHIFT 28
34#define KWQE_FLAGS_LAYER_MASK_L2 (2<<28)
35#define KWQE_FLAGS_LAYER_MASK_L3 (3<<28)
36#define KWQE_FLAGS_LAYER_MASK_L4 (4<<28)
37#define KWQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
38#define KWQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
39#define KWQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
31 40
32 u32 kwqe_info0; 41 u32 kwqe_info0;
33 u32 kwqe_info1; 42 u32 kwqe_info1;
@@ -62,6 +71,7 @@ struct kcqe {
62 #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28) 71 #define KCQE_FLAGS_LAYER_MASK_L4 (4<<28)
63 #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28) 72 #define KCQE_FLAGS_LAYER_MASK_L5_RDMA (5<<28)
64 #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28) 73 #define KCQE_FLAGS_LAYER_MASK_L5_ISCSI (6<<28)
74 #define KCQE_FLAGS_LAYER_MASK_L5_FCOE (7<<28)
65 #define KCQE_FLAGS_NEXT (1<<31) 75 #define KCQE_FLAGS_NEXT (1<<31)
66 #define KCQE_FLAGS_OPCODE_MASK (0xff<<16) 76 #define KCQE_FLAGS_OPCODE_MASK (0xff<<16)
67 #define KCQE_FLAGS_OPCODE_SHIFT (16) 77 #define KCQE_FLAGS_OPCODE_SHIFT (16)
@@ -301,7 +311,7 @@ struct cnic_ulp_ops {
301 void (*cm_abort_complete)(struct cnic_sock *); 311 void (*cm_abort_complete)(struct cnic_sock *);
302 void (*cm_remote_close)(struct cnic_sock *); 312 void (*cm_remote_close)(struct cnic_sock *);
303 void (*cm_remote_abort)(struct cnic_sock *); 313 void (*cm_remote_abort)(struct cnic_sock *);
304 void (*iscsi_nl_send_msg)(struct cnic_dev *dev, u32 msg_type, 314 int (*iscsi_nl_send_msg)(void *ulp_ctx, u32 msg_type,
305 char *data, u16 data_size); 315 char *data, u16 data_size);
306 struct module *owner; 316 struct module *owner;
307 atomic_t ref_count; 317 atomic_t ref_count;
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 81475cc80e1c..80c2feeefec5 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -59,7 +59,6 @@ static struct sockaddr default_mac = {
59 59
60/* Information that need to be kept for each board. */ 60/* Information that need to be kept for each board. */
61struct net_local { 61struct net_local {
62 struct net_device_stats stats;
63 struct mii_if_info mii_if; 62 struct mii_if_info mii_if;
64 63
65 /* Tx control lock. This protects the transmit buffer ring 64 /* Tx control lock. This protects the transmit buffer ring
@@ -1059,7 +1058,7 @@ e100_tx_timeout(struct net_device *dev)
1059 1058
1060 /* remember we got an error */ 1059 /* remember we got an error */
1061 1060
1062 np->stats.tx_errors++; 1061 dev->stats.tx_errors++;
1063 1062
1064 /* reset the TX DMA in case it has hung on something */ 1063 /* reset the TX DMA in case it has hung on something */
1065 1064
@@ -1157,7 +1156,7 @@ e100rxtx_interrupt(int irq, void *dev_id)
1157 * allocate a new buffer to put a packet in. 1156 * allocate a new buffer to put a packet in.
1158 */ 1157 */
1159 e100_rx(dev); 1158 e100_rx(dev);
1160 np->stats.rx_packets++; 1159 dev->stats.rx_packets++;
1161 /* restart/continue on the channel, for safety */ 1160 /* restart/continue on the channel, for safety */
1162 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart); 1161 *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart);
1163 /* clear dma channel 1 eop/descr irq bits */ 1162 /* clear dma channel 1 eop/descr irq bits */
@@ -1173,8 +1172,8 @@ e100rxtx_interrupt(int irq, void *dev_id)
1173 /* Report any packets that have been sent */ 1172 /* Report any packets that have been sent */
1174 while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST && 1173 while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST &&
1175 (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) { 1174 (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) {
1176 np->stats.tx_bytes += myFirstTxDesc->skb->len; 1175 dev->stats.tx_bytes += myFirstTxDesc->skb->len;
1177 np->stats.tx_packets++; 1176 dev->stats.tx_packets++;
1178 1177
1179 /* dma is ready with the transmission of the data in tx_skb, so now 1178 /* dma is ready with the transmission of the data in tx_skb, so now
1180 we can release the skb memory */ 1179 we can release the skb memory */
@@ -1197,7 +1196,6 @@ static irqreturn_t
1197e100nw_interrupt(int irq, void *dev_id) 1196e100nw_interrupt(int irq, void *dev_id)
1198{ 1197{
1199 struct net_device *dev = (struct net_device *)dev_id; 1198 struct net_device *dev = (struct net_device *)dev_id;
1200 struct net_local *np = netdev_priv(dev);
1201 unsigned long irqbits = *R_IRQ_MASK0_RD; 1199 unsigned long irqbits = *R_IRQ_MASK0_RD;
1202 1200
1203 /* check for underrun irq */ 1201 /* check for underrun irq */
@@ -1205,13 +1203,13 @@ e100nw_interrupt(int irq, void *dev_id)
1205 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); 1203 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1206 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 1204 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1207 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); 1205 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1208 np->stats.tx_errors++; 1206 dev->stats.tx_errors++;
1209 D(printk("ethernet receiver underrun!\n")); 1207 D(printk("ethernet receiver underrun!\n"));
1210 } 1208 }
1211 1209
1212 /* check for overrun irq */ 1210 /* check for overrun irq */
1213 if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) { 1211 if (irqbits & IO_STATE(R_IRQ_MASK0_RD, overrun, active)) {
1214 update_rx_stats(&np->stats); /* this will ack the irq */ 1212 update_rx_stats(&dev->stats); /* this will ack the irq */
1215 D(printk("ethernet receiver overrun!\n")); 1213 D(printk("ethernet receiver overrun!\n"));
1216 } 1214 }
1217 /* check for excessive collision irq */ 1215 /* check for excessive collision irq */
@@ -1219,7 +1217,7 @@ e100nw_interrupt(int irq, void *dev_id)
1219 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); 1217 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr);
1220 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; 1218 *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow;
1221 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); 1219 SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop);
1222 np->stats.tx_errors++; 1220 dev->stats.tx_errors++;
1223 D(printk("ethernet excessive collisions!\n")); 1221 D(printk("ethernet excessive collisions!\n"));
1224 } 1222 }
1225 return IRQ_HANDLED; 1223 return IRQ_HANDLED;
@@ -1250,7 +1248,7 @@ e100_rx(struct net_device *dev)
1250 spin_unlock(&np->led_lock); 1248 spin_unlock(&np->led_lock);
1251 1249
1252 length = myNextRxDesc->descr.hw_len - 4; 1250 length = myNextRxDesc->descr.hw_len - 4;
1253 np->stats.rx_bytes += length; 1251 dev->stats.rx_bytes += length;
1254 1252
1255#ifdef ETHDEBUG 1253#ifdef ETHDEBUG
1256 printk("Got a packet of length %d:\n", length); 1254 printk("Got a packet of length %d:\n", length);
@@ -1268,7 +1266,7 @@ e100_rx(struct net_device *dev)
1268 /* Small packet, copy data */ 1266 /* Small packet, copy data */
1269 skb = dev_alloc_skb(length - ETHER_HEAD_LEN); 1267 skb = dev_alloc_skb(length - ETHER_HEAD_LEN);
1270 if (!skb) { 1268 if (!skb) {
1271 np->stats.rx_errors++; 1269 dev->stats.rx_errors++;
1272 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1270 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1273 goto update_nextrxdesc; 1271 goto update_nextrxdesc;
1274 } 1272 }
@@ -1294,7 +1292,7 @@ e100_rx(struct net_device *dev)
1294 int align; 1292 int align;
1295 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); 1293 struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES);
1296 if (!new_skb) { 1294 if (!new_skb) {
1297 np->stats.rx_errors++; 1295 dev->stats.rx_errors++;
1298 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); 1296 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name);
1299 goto update_nextrxdesc; 1297 goto update_nextrxdesc;
1300 } 1298 }
@@ -1333,8 +1331,6 @@ e100_rx(struct net_device *dev)
1333static int 1331static int
1334e100_close(struct net_device *dev) 1332e100_close(struct net_device *dev)
1335{ 1333{
1336 struct net_local *np = netdev_priv(dev);
1337
1338 printk(KERN_INFO "Closing %s.\n", dev->name); 1334 printk(KERN_INFO "Closing %s.\n", dev->name);
1339 1335
1340 netif_stop_queue(dev); 1336 netif_stop_queue(dev);
@@ -1366,8 +1362,8 @@ e100_close(struct net_device *dev)
1366 1362
1367 /* Update the statistics here. */ 1363 /* Update the statistics here. */
1368 1364
1369 update_rx_stats(&np->stats); 1365 update_rx_stats(&dev->stats);
1370 update_tx_stats(&np->stats); 1366 update_tx_stats(&dev->stats);
1371 1367
1372 /* Stop speed/duplex timers */ 1368 /* Stop speed/duplex timers */
1373 del_timer(&speed_timer); 1369 del_timer(&speed_timer);
@@ -1545,11 +1541,11 @@ e100_get_stats(struct net_device *dev)
1545 1541
1546 spin_lock_irqsave(&lp->lock, flags); 1542 spin_lock_irqsave(&lp->lock, flags);
1547 1543
1548 update_rx_stats(&lp->stats); 1544 update_rx_stats(&dev->stats);
1549 update_tx_stats(&lp->stats); 1545 update_tx_stats(&dev->stats);
1550 1546
1551 spin_unlock_irqrestore(&lp->lock, flags); 1547 spin_unlock_irqrestore(&lp->lock, flags);
1552 return &lp->stats; 1548 return &dev->stats;
1553} 1549}
1554 1550
1555/* 1551/*
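The conversion above drops the duplicate counters kept in struct net_local and uses the net_device's built-in dev->stats instead. A condensed sketch of what a stats handler looks like after that change; update_rx_stats()/update_tx_stats() are the driver helpers already visible in the hunks:

static struct net_device_stats *example_get_stats(struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);	/* still needed for the lock */
	unsigned long flags;

	spin_lock_irqsave(&lp->lock, flags);
	update_rx_stats(&dev->stats);
	update_tx_stats(&dev->stats);
	spin_unlock_irqrestore(&lp->lock, flags);
	return &dev->stats;
}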
diff --git a/drivers/net/cxgb3/ael1002.c b/drivers/net/cxgb3/ael1002.c
index 35cd36729155..2028da95afa1 100644
--- a/drivers/net/cxgb3/ael1002.c
+++ b/drivers/net/cxgb3/ael1002.c
@@ -292,7 +292,7 @@ unknown:
292 */ 292 */
293static int ael2005_setup_sr_edc(struct cphy *phy) 293static int ael2005_setup_sr_edc(struct cphy *phy)
294{ 294{
295 static struct reg_val regs[] = { 295 static const struct reg_val regs[] = {
296 { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 }, 296 { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x181 },
297 { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a }, 297 { MDIO_MMD_PMAPMD, 0xc010, 0xffff, 0x448a },
298 { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 }, 298 { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5200 },
@@ -324,11 +324,11 @@ static int ael2005_setup_sr_edc(struct cphy *phy)
324 324
325static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype) 325static int ael2005_setup_twinax_edc(struct cphy *phy, int modtype)
326{ 326{
327 static struct reg_val regs[] = { 327 static const struct reg_val regs[] = {
328 { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 }, 328 { MDIO_MMD_PMAPMD, 0xc04a, 0xffff, 0x5a00 },
329 { 0, 0, 0, 0 } 329 { 0, 0, 0, 0 }
330 }; 330 };
331 static struct reg_val preemphasis[] = { 331 static const struct reg_val preemphasis[] = {
332 { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 }, 332 { MDIO_MMD_PMAPMD, 0xc014, 0xffff, 0xfe16 },
333 { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 }, 333 { MDIO_MMD_PMAPMD, 0xc015, 0xffff, 0xa000 },
334 { 0, 0, 0, 0 } 334 { 0, 0, 0, 0 }
@@ -393,7 +393,7 @@ static int ael2005_intr_clear(struct cphy *phy)
393 393
394static int ael2005_reset(struct cphy *phy, int wait) 394static int ael2005_reset(struct cphy *phy, int wait)
395{ 395{
396 static struct reg_val regs0[] = { 396 static const struct reg_val regs0[] = {
397 { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 }, 397 { MDIO_MMD_PMAPMD, 0xc001, 0, 1 << 5 },
398 { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 }, 398 { MDIO_MMD_PMAPMD, 0xc017, 0, 1 << 5 },
399 { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 }, 399 { MDIO_MMD_PMAPMD, 0xc013, 0xffff, 0xf341 },
@@ -403,7 +403,7 @@ static int ael2005_reset(struct cphy *phy, int wait)
403 { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 }, 403 { MDIO_MMD_PMAPMD, 0xc210, 0xffff, 0 },
404 { 0, 0, 0, 0 } 404 { 0, 0, 0, 0 }
405 }; 405 };
406 static struct reg_val regs1[] = { 406 static const struct reg_val regs1[] = {
407 { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 }, 407 { MDIO_MMD_PMAPMD, 0xca00, 0xffff, 0x0080 },
408 { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 }, 408 { MDIO_MMD_PMAPMD, 0xca12, 0xffff, 0 },
409 { 0, 0, 0, 0 } 409 { 0, 0, 0, 0 }
@@ -522,7 +522,7 @@ int t3_ael2005_phy_prep(struct cphy *phy, struct adapter *adapter,
522 */ 522 */
523static int ael2020_setup_sr_edc(struct cphy *phy) 523static int ael2020_setup_sr_edc(struct cphy *phy)
524{ 524{
525 static struct reg_val regs[] = { 525 static const struct reg_val regs[] = {
526 /* set CDR offset to 10 */ 526 /* set CDR offset to 10 */
527 { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a }, 527 { MDIO_MMD_PMAPMD, 0xcc01, 0xffff, 0x488a },
528 528
@@ -551,20 +551,20 @@ static int ael2020_setup_sr_edc(struct cphy *phy)
551static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype) 551static int ael2020_setup_twinax_edc(struct cphy *phy, int modtype)
552{ 552{
553 /* set uC to 40MHz */ 553 /* set uC to 40MHz */
554 static struct reg_val uCclock40MHz[] = { 554 static const struct reg_val uCclock40MHz[] = {
555 { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 }, 555 { MDIO_MMD_PMAPMD, 0xff28, 0xffff, 0x4001 },
556 { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 }, 556 { MDIO_MMD_PMAPMD, 0xff2a, 0xffff, 0x0002 },
557 { 0, 0, 0, 0 } 557 { 0, 0, 0, 0 }
558 }; 558 };
559 559
560 /* activate uC clock */ 560 /* activate uC clock */
561 static struct reg_val uCclockActivate[] = { 561 static const struct reg_val uCclockActivate[] = {
562 { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 }, 562 { MDIO_MMD_PMAPMD, 0xd000, 0xffff, 0x5200 },
563 { 0, 0, 0, 0 } 563 { 0, 0, 0, 0 }
564 }; 564 };
565 565
566 /* set PC to start of SRAM and activate uC */ 566 /* set PC to start of SRAM and activate uC */
567 static struct reg_val uCactivate[] = { 567 static const struct reg_val uCactivate[] = {
568 { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 }, 568 { MDIO_MMD_PMAPMD, 0xd080, 0xffff, 0x0100 },
569 { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 }, 569 { MDIO_MMD_PMAPMD, 0xd092, 0xffff, 0x0000 },
570 { 0, 0, 0, 0 } 570 { 0, 0, 0, 0 }
@@ -624,7 +624,7 @@ static int ael2020_get_module_type(struct cphy *phy, int delay_ms)
624 */ 624 */
625static int ael2020_intr_enable(struct cphy *phy) 625static int ael2020_intr_enable(struct cphy *phy)
626{ 626{
627 struct reg_val regs[] = { 627 static const struct reg_val regs[] = {
628 /* output Module's Loss Of Signal (LOS) to LED */ 628 /* output Module's Loss Of Signal (LOS) to LED */
629 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT, 629 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CFG+AEL2020_GPIO_LSTAT,
630 0xffff, 0x4 }, 630 0xffff, 0x4 },
@@ -664,7 +664,7 @@ static int ael2020_intr_enable(struct cphy *phy)
664 */ 664 */
665static int ael2020_intr_disable(struct cphy *phy) 665static int ael2020_intr_disable(struct cphy *phy)
666{ 666{
667 struct reg_val regs[] = { 667 static const struct reg_val regs[] = {
668 /* reset "link status" LED to "off" */ 668 /* reset "link status" LED to "off" */
669 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL, 669 { MDIO_MMD_PMAPMD, AEL2020_GPIO_CTRL,
670 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) }, 670 0xffff, 0xb << (AEL2020_GPIO_LSTAT*4) },
@@ -701,7 +701,7 @@ static int ael2020_intr_clear(struct cphy *phy)
701 return err ? err : t3_phy_lasi_intr_clear(phy); 701 return err ? err : t3_phy_lasi_intr_clear(phy);
702} 702}
703 703
704static struct reg_val ael2020_reset_regs[] = { 704static const struct reg_val ael2020_reset_regs[] = {
705 /* Erratum #2: CDRLOL asserted, causing PMA link down status */ 705 /* Erratum #2: CDRLOL asserted, causing PMA link down status */
706 { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 }, 706 { MDIO_MMD_PMAPMD, 0xc003, 0xffff, 0x3101 },
707 707
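A generic sketch of why these tables are made static const: a non-static local array with initializers is rebuilt on the stack at every call, while a static const table is a single read-only object in .rodata. The struct and write16() callback below are placeholders, not the driver's own types:

struct ex_reg_write {
	unsigned short reg;
	unsigned short val;
};

static void example_program_phy(void (*write16)(unsigned short reg,
						unsigned short val))
{
	static const struct ex_reg_write seq[] = {
		{ 0xc003, 0x0181 },	/* placeholder writes */
		{ 0xc010, 0x448a },
		{ 0, 0 }		/* zero terminator, as in the tables above */
	};
	const struct ex_reg_write *p;

	for (p = seq; p->reg; p++)
		write16(p->reg, p->val);
}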
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 046d846c652d..4d538a4e9d55 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -1359,6 +1359,7 @@ out:
1359static int offload_close(struct t3cdev *tdev) 1359static int offload_close(struct t3cdev *tdev)
1360{ 1360{
1361 struct adapter *adapter = tdev2adap(tdev); 1361 struct adapter *adapter = tdev2adap(tdev);
1362 struct t3c_data *td = T3C_DATA(tdev);
1362 1363
1363 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map)) 1364 if (!test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map))
1364 return 0; 1365 return 0;
@@ -1369,7 +1370,7 @@ static int offload_close(struct t3cdev *tdev)
1369 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group); 1370 sysfs_remove_group(&tdev->lldev->dev.kobj, &offload_attr_group);
1370 1371
1371 /* Flush work scheduled while releasing TIDs */ 1372 /* Flush work scheduled while releasing TIDs */
1372 flush_scheduled_work(); 1373 flush_work_sync(&td->tid_release_task);
1373 1374
1374 tdev->lldev = NULL; 1375 tdev->lldev = NULL;
1375 cxgb3_set_dummy_ops(tdev); 1376 cxgb3_set_dummy_ops(tdev);
@@ -3006,12 +3007,11 @@ static pci_ers_result_t t3_io_error_detected(struct pci_dev *pdev,
3006 pci_channel_state_t state) 3007 pci_channel_state_t state)
3007{ 3008{
3008 struct adapter *adapter = pci_get_drvdata(pdev); 3009 struct adapter *adapter = pci_get_drvdata(pdev);
3009 int ret;
3010 3010
3011 if (state == pci_channel_io_perm_failure) 3011 if (state == pci_channel_io_perm_failure)
3012 return PCI_ERS_RESULT_DISCONNECT; 3012 return PCI_ERS_RESULT_DISCONNECT;
3013 3013
3014 ret = t3_adapter_error(adapter, 0, 0); 3014 t3_adapter_error(adapter, 0, 0);
3015 3015
3016 /* Request a slot reset. */ 3016 /* Request a slot reset. */
3017 return PCI_ERS_RESULT_NEED_RESET; 3017 return PCI_ERS_RESULT_NEED_RESET;
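The offload_close() hunk stops draining the whole shared workqueue and instead waits only for the one work item this driver queues. A minimal sketch of that pattern, assuming linux/workqueue.h and the same flush_work_sync() interface the hunk uses (later kernels fold it into flush_work()):

struct ex_offload {
	struct work_struct tid_release_task;
};

static void ex_tid_release(struct work_struct *work)
{
	/* release any TIDs queued for freeing */
}

static void ex_offload_init(struct ex_offload *eo)
{
	INIT_WORK(&eo->tid_release_task, ex_tid_release);
}

static void ex_offload_close(struct ex_offload *eo)
{
	/* wait only for a possibly-running ex_tid_release() to finish */
	flush_work_sync(&eo->tid_release_task);
}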
diff --git a/drivers/net/cxgb3/cxgb3_offload.c b/drivers/net/cxgb3/cxgb3_offload.c
index bcf07532953d..ef02aa68c926 100644
--- a/drivers/net/cxgb3/cxgb3_offload.c
+++ b/drivers/net/cxgb3/cxgb3_offload.c
@@ -1164,12 +1164,10 @@ static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new)
1164 */ 1164 */
1165void *cxgb_alloc_mem(unsigned long size) 1165void *cxgb_alloc_mem(unsigned long size)
1166{ 1166{
1167 void *p = kmalloc(size, GFP_KERNEL); 1167 void *p = kzalloc(size, GFP_KERNEL);
1168 1168
1169 if (!p) 1169 if (!p)
1170 p = vmalloc(size); 1170 p = vzalloc(size);
1171 if (p)
1172 memset(p, 0, size);
1173 return p; 1171 return p;
1174} 1172}
1175 1173
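The helper above now gets zeroed memory directly from kzalloc()/vzalloc() instead of memset()ing after the fact. A sketch of the pair; the free side is an assumption about the matching counterpart, with is_vmalloc_addr() deciding between vfree() and kfree():

void *ex_alloc_mem(unsigned long size)
{
	void *p = kzalloc(size, GFP_KERNEL);

	if (!p)
		p = vzalloc(size);	/* fall back for large allocations */
	return p;
}

void ex_free_mem(void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}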
diff --git a/drivers/net/cxgb3/t3_hw.c b/drivers/net/cxgb3/t3_hw.c
index 0b197043bc34..d55db6b38e7b 100644
--- a/drivers/net/cxgb3/t3_hw.c
+++ b/drivers/net/cxgb3/t3_hw.c
@@ -1562,7 +1562,7 @@ static void tp_intr_handler(struct adapter *adapter)
1562 {0} 1562 {0}
1563 }; 1563 };
1564 1564
1565 static struct intr_info tp_intr_info_t3c[] = { 1565 static const struct intr_info tp_intr_info_t3c[] = {
1566 {0x1fffffff, "TP parity error", -1, 1}, 1566 {0x1fffffff, "TP parity error", -1, 1},
1567 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1}, 1567 {F_FLMRXFLSTEMPTY, "TP out of Rx pages", -1, 1},
1568 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1}, 1568 {F_FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1},
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
index 3d4253d311eb..01d49eaa44d2 100644
--- a/drivers/net/cxgb4/cxgb4.h
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -482,11 +482,9 @@ struct adapter {
482 void __iomem *regs; 482 void __iomem *regs;
483 struct pci_dev *pdev; 483 struct pci_dev *pdev;
484 struct device *pdev_dev; 484 struct device *pdev_dev;
485 unsigned long registered_device_map;
486 unsigned int fn; 485 unsigned int fn;
487 unsigned int flags; 486 unsigned int flags;
488 487
489 const char *name;
490 int msg_enable; 488 int msg_enable;
491 489
492 struct adapter_params params; 490 struct adapter_params params;
@@ -497,7 +495,7 @@ struct adapter {
497 495
498 struct { 496 struct {
499 unsigned short vec; 497 unsigned short vec;
500 char desc[14]; 498 char desc[IFNAMSIZ + 10];
501 } msix_info[MAX_INGQ + 1]; 499 } msix_info[MAX_INGQ + 1];
502 500
503 struct sge sge; 501 struct sge sge;
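Growing desc from 14 bytes to IFNAMSIZ + 10 leaves room for names built from a full interface name plus suffixes such as "-FWeventq" or "-Rx%d"; and because snprintf() NUL-terminates within the size it is given, the explicit desc[n] = 0 stores are dropped in cxgb4_main.c below. A tiny sketch of the naming call:

static void ex_name_msix(char desc[IFNAMSIZ + 10], const char *ifname, int q)
{
	/* snprintf() truncates if needed and always NUL-terminates */
	snprintf(desc, IFNAMSIZ + 10, "%s-Rx%d", ifname, q);
}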
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f50bc98310f8..059c1eec8c3f 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -522,39 +522,33 @@ static irqreturn_t t4_nondata_intr(int irq, void *cookie)
522 */ 522 */
523static void name_msix_vecs(struct adapter *adap) 523static void name_msix_vecs(struct adapter *adap)
524{ 524{
525 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1; 525 int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
526 526
527 /* non-data interrupts */ 527 /* non-data interrupts */
528 snprintf(adap->msix_info[0].desc, n, "%s", adap->name); 528 snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
529 adap->msix_info[0].desc[n] = 0;
530 529
531 /* FW events */ 530 /* FW events */
532 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name); 531 snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
533 adap->msix_info[1].desc[n] = 0; 532 adap->port[0]->name);
534 533
535 /* Ethernet queues */ 534 /* Ethernet queues */
536 for_each_port(adap, j) { 535 for_each_port(adap, j) {
537 struct net_device *d = adap->port[j]; 536 struct net_device *d = adap->port[j];
538 const struct port_info *pi = netdev_priv(d); 537 const struct port_info *pi = netdev_priv(d);
539 538
540 for (i = 0; i < pi->nqsets; i++, msi_idx++) { 539 for (i = 0; i < pi->nqsets; i++, msi_idx++)
541 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d", 540 snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
542 d->name, i); 541 d->name, i);
543 adap->msix_info[msi_idx].desc[n] = 0;
544 }
545 } 542 }
546 543
547 /* offload queues */ 544 /* offload queues */
548 for_each_ofldrxq(&adap->sge, i) { 545 for_each_ofldrxq(&adap->sge, i)
549 snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d", 546 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
550 adap->name, i); 547 adap->port[0]->name, i);
551 adap->msix_info[msi_idx++].desc[n] = 0; 548
552 } 549 for_each_rdmarxq(&adap->sge, i)
553 for_each_rdmarxq(&adap->sge, i) { 550 snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
554 snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d", 551 adap->port[0]->name, i);
555 adap->name, i);
556 adap->msix_info[msi_idx++].desc[n] = 0;
557 }
558} 552}
559 553
560static int request_msix_queue_irqs(struct adapter *adap) 554static int request_msix_queue_irqs(struct adapter *adap)
@@ -868,12 +862,10 @@ out: release_firmware(fw);
868 */ 862 */
869void *t4_alloc_mem(size_t size) 863void *t4_alloc_mem(size_t size)
870{ 864{
871 void *p = kmalloc(size, GFP_KERNEL); 865 void *p = kzalloc(size, GFP_KERNEL);
872 866
873 if (!p) 867 if (!p)
874 p = vmalloc(size); 868 p = vzalloc(size);
875 if (p)
876 memset(p, 0, size);
877 return p; 869 return p;
878} 870}
879 871
@@ -1377,7 +1369,12 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1377 } else if (type == FW_PORT_TYPE_KR) 1369 } else if (type == FW_PORT_TYPE_KR)
1378 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; 1370 v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1379 else if (type == FW_PORT_TYPE_BP_AP) 1371 else if (type == FW_PORT_TYPE_BP_AP)
1380 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC; 1372 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1373 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1374 else if (type == FW_PORT_TYPE_BP4_AP)
1375 v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1376 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1377 SUPPORTED_10000baseKX4_Full;
1381 else if (type == FW_PORT_TYPE_FIBER_XFI || 1378 else if (type == FW_PORT_TYPE_FIBER_XFI ||
1382 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) 1379 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1383 v |= SUPPORTED_FIBRE; 1380 v |= SUPPORTED_FIBRE;
@@ -2668,7 +2665,7 @@ static int cxgb_up(struct adapter *adap)
2668 } else { 2665 } else {
2669 err = request_irq(adap->pdev->irq, t4_intr_handler(adap), 2666 err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2670 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED, 2667 (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2671 adap->name, adap); 2668 adap->port[0]->name, adap);
2672 if (err) 2669 if (err)
2673 goto irq_err; 2670 goto irq_err;
2674 } 2671 }
@@ -2719,10 +2716,6 @@ static int cxgb_open(struct net_device *dev)
2719 return err; 2716 return err;
2720 } 2717 }
2721 2718
2722 netif_set_real_num_tx_queues(dev, pi->nqsets);
2723 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
2724 if (err)
2725 return err;
2726 err = link_start(dev); 2719 err = link_start(dev);
2727 if (!err) 2720 if (!err)
2728 netif_tx_start_all_queues(dev); 2721 netif_tx_start_all_queues(dev);
@@ -3491,49 +3484,53 @@ static int __devinit init_rss(struct adapter *adap)
3491 return 0; 3484 return 0;
3492} 3485}
3493 3486
3494static void __devinit print_port_info(struct adapter *adap) 3487static void __devinit print_port_info(const struct net_device *dev)
3495{ 3488{
3496 static const char *base[] = { 3489 static const char *base[] = {
3497 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4", 3490 "R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3498 "KX", "KR", "KR SFP+", "KR FEC" 3491 "KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
3499 }; 3492 };
3500 3493
3501 int i;
3502 char buf[80]; 3494 char buf[80];
3495 char *bufp = buf;
3503 const char *spd = ""; 3496 const char *spd = "";
3497 const struct port_info *pi = netdev_priv(dev);
3498 const struct adapter *adap = pi->adapter;
3504 3499
3505 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB) 3500 if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3506 spd = " 2.5 GT/s"; 3501 spd = " 2.5 GT/s";
3507 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB) 3502 else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3508 spd = " 5 GT/s"; 3503 spd = " 5 GT/s";
3509 3504
3510 for_each_port(adap, i) { 3505 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3511 struct net_device *dev = adap->port[i]; 3506 bufp += sprintf(bufp, "100/");
3512 const struct port_info *pi = netdev_priv(dev); 3507 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3513 char *bufp = buf; 3508 bufp += sprintf(bufp, "1000/");
3509 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3510 bufp += sprintf(bufp, "10G/");
3511 if (bufp != buf)
3512 --bufp;
3513 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3514 3514
3515 if (!test_bit(i, &adap->registered_device_map)) 3515 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3516 continue; 3516 adap->params.vpd.id, adap->params.rev, buf,
3517 is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
3518 (adap->flags & USING_MSIX) ? " MSI-X" :
3519 (adap->flags & USING_MSI) ? " MSI" : "");
3520 netdev_info(dev, "S/N: %s, E/C: %s\n",
3521 adap->params.vpd.sn, adap->params.vpd.ec);
3522}
3517 3523
3518 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M) 3524static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
3519 bufp += sprintf(bufp, "100/"); 3525{
3520 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G) 3526 u16 v;
3521 bufp += sprintf(bufp, "1000/"); 3527 int pos;
3522 if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3523 bufp += sprintf(bufp, "10G/");
3524 if (bufp != buf)
3525 --bufp;
3526 sprintf(bufp, "BASE-%s", base[pi->port_type]);
3527 3528
3528 netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n", 3529 pos = pci_pcie_cap(dev);
3529 adap->params.vpd.id, adap->params.rev, 3530 if (pos > 0) {
3530 buf, is_offload(adap) ? "R" : "", 3531 pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
3531 adap->params.pci.width, spd, 3532 v |= PCI_EXP_DEVCTL_RELAX_EN;
3532 (adap->flags & USING_MSIX) ? " MSI-X" : 3533 pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
3533 (adap->flags & USING_MSI) ? " MSI" : "");
3534 if (adap->name == dev->name)
3535 netdev_info(dev, "S/N: %s, E/C: %s\n",
3536 adap->params.vpd.sn, adap->params.vpd.ec);
3537 } 3534 }
3538} 3535}
3539 3536
@@ -3611,6 +3608,7 @@ static int __devinit init_one(struct pci_dev *pdev,
3611 } 3608 }
3612 3609
3613 pci_enable_pcie_error_reporting(pdev); 3610 pci_enable_pcie_error_reporting(pdev);
3611 enable_pcie_relaxed_ordering(pdev);
3614 pci_set_master(pdev); 3612 pci_set_master(pdev);
3615 pci_save_state(pdev); 3613 pci_save_state(pdev);
3616 3614
@@ -3630,7 +3628,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3630 adapter->pdev = pdev; 3628 adapter->pdev = pdev;
3631 adapter->pdev_dev = &pdev->dev; 3629 adapter->pdev_dev = &pdev->dev;
3632 adapter->fn = func; 3630 adapter->fn = func;
3633 adapter->name = pci_name(pdev);
3634 adapter->msg_enable = dflt_msg_enable; 3631 adapter->msg_enable = dflt_msg_enable;
3635 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map)); 3632 memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
3636 3633
@@ -3721,27 +3718,24 @@ static int __devinit init_one(struct pci_dev *pdev,
3721 * register at least one net device. 3718 * register at least one net device.
3722 */ 3719 */
3723 for_each_port(adapter, i) { 3720 for_each_port(adapter, i) {
3721 pi = adap2pinfo(adapter, i);
3722 netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
3723 netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);
3724
3724 err = register_netdev(adapter->port[i]); 3725 err = register_netdev(adapter->port[i]);
3725 if (err) 3726 if (err)
3726 dev_warn(&pdev->dev, 3727 break;
3727 "cannot register net device %s, skipping\n", 3728 adapter->chan_map[pi->tx_chan] = i;
3728 adapter->port[i]->name); 3729 print_port_info(adapter->port[i]);
3729 else {
3730 /*
3731 * Change the name we use for messages to the name of
3732 * the first successfully registered interface.
3733 */
3734 if (!adapter->registered_device_map)
3735 adapter->name = adapter->port[i]->name;
3736
3737 __set_bit(i, &adapter->registered_device_map);
3738 adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
3739 }
3740 } 3730 }
3741 if (!adapter->registered_device_map) { 3731 if (i == 0) {
3742 dev_err(&pdev->dev, "could not register any net devices\n"); 3732 dev_err(&pdev->dev, "could not register any net devices\n");
3743 goto out_free_dev; 3733 goto out_free_dev;
3744 } 3734 }
3735 if (err) {
3736 dev_warn(&pdev->dev, "only %d net devices registered\n", i);
3737 err = 0;
3738 };
3745 3739
3746 if (cxgb4_debugfs_root) { 3740 if (cxgb4_debugfs_root) {
3747 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev), 3741 adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
@@ -3752,8 +3746,6 @@ static int __devinit init_one(struct pci_dev *pdev,
3752 if (is_offload(adapter)) 3746 if (is_offload(adapter))
3753 attach_ulds(adapter); 3747 attach_ulds(adapter);
3754 3748
3755 print_port_info(adapter);
3756
3757sriov: 3749sriov:
3758#ifdef CONFIG_PCI_IOV 3750#ifdef CONFIG_PCI_IOV
3759 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0) 3751 if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
@@ -3792,7 +3784,7 @@ static void __devexit remove_one(struct pci_dev *pdev)
3792 detach_ulds(adapter); 3784 detach_ulds(adapter);
3793 3785
3794 for_each_port(adapter, i) 3786 for_each_port(adapter, i)
3795 if (test_bit(i, &adapter->registered_device_map)) 3787 if (adapter->port[i]->reg_state == NETREG_REGISTERED)
3796 unregister_netdev(adapter->port[i]); 3788 unregister_netdev(adapter->port[i]);
3797 3789
3798 if (adapter->debugfs_root) 3790 if (adapter->debugfs_root)
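A condensed sketch (not the patch itself) of the registration flow the init_one() hunk adopts: register ports in order, stop at the first failure, abort the probe only when nothing registered, and let remove_one() rely on reg_state == NETREG_REGISTERED rather than a private bitmap:

static int ex_register_ports(struct adapter *adapter)
{
	int i, err = 0;

	for_each_port(adapter, i) {
		err = register_netdev(adapter->port[i]);
		if (err)
			break;
	}
	if (i == 0)
		return err;		/* no port registered: fail the probe */
	if (err)
		dev_warn(&adapter->pdev->dev,
			 "only %d net devices registered\n", i);
	return 0;
}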
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
index 17022258ed68..311471b439a8 100644
--- a/drivers/net/cxgb4/sge.c
+++ b/drivers/net/cxgb4/sge.c
@@ -579,6 +579,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
579 * @phys: the physical address of the allocated ring 579 * @phys: the physical address of the allocated ring
580 * @metadata: address of the array holding the SW state for the ring 580 * @metadata: address of the array holding the SW state for the ring
581 * @stat_size: extra space in HW ring for status information 581 * @stat_size: extra space in HW ring for status information
582 * @node: preferred node for memory allocations
582 * 583 *
583 * Allocates resources for an SGE descriptor ring, such as Tx queues, 584 * Allocates resources for an SGE descriptor ring, such as Tx queues,
584 * free buffer lists, or response queues. Each SGE ring requires 585 * free buffer lists, or response queues. Each SGE ring requires
@@ -590,7 +591,7 @@ static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
590 */ 591 */
591static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size, 592static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
592 size_t sw_size, dma_addr_t *phys, void *metadata, 593 size_t sw_size, dma_addr_t *phys, void *metadata,
593 size_t stat_size) 594 size_t stat_size, int node)
594{ 595{
595 size_t len = nelem * elem_size + stat_size; 596 size_t len = nelem * elem_size + stat_size;
596 void *s = NULL; 597 void *s = NULL;
@@ -599,7 +600,7 @@ static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
599 if (!p) 600 if (!p)
600 return NULL; 601 return NULL;
601 if (sw_size) { 602 if (sw_size) {
602 s = kcalloc(nelem, sw_size, GFP_KERNEL); 603 s = kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
603 604
604 if (!s) { 605 if (!s) {
605 dma_free_coherent(dev, len, p, *phys); 606 dma_free_coherent(dev, len, p, *phys);
@@ -1982,7 +1983,7 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
1982 iq->size = roundup(iq->size, 16); 1983 iq->size = roundup(iq->size, 16);
1983 1984
1984 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, 1985 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
1985 &iq->phys_addr, NULL, 0); 1986 &iq->phys_addr, NULL, 0, NUMA_NO_NODE);
1986 if (!iq->desc) 1987 if (!iq->desc)
1987 return -ENOMEM; 1988 return -ENOMEM;
1988 1989
@@ -2008,12 +2009,14 @@ int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
2008 fl->size = roundup(fl->size, 8); 2009 fl->size = roundup(fl->size, 8);
2009 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64), 2010 fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
2010 sizeof(struct rx_sw_desc), &fl->addr, 2011 sizeof(struct rx_sw_desc), &fl->addr,
2011 &fl->sdesc, STAT_LEN); 2012 &fl->sdesc, STAT_LEN, NUMA_NO_NODE);
2012 if (!fl->desc) 2013 if (!fl->desc)
2013 goto fl_nomem; 2014 goto fl_nomem;
2014 2015
2015 flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc); 2016 flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
2016 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN | 2017 c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
2018 FW_IQ_CMD_FL0FETCHRO(1) |
2019 FW_IQ_CMD_FL0DATARO(1) |
2017 FW_IQ_CMD_FL0PADEN); 2020 FW_IQ_CMD_FL0PADEN);
2018 c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) | 2021 c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
2019 FW_IQ_CMD_FL0FBMAX(3)); 2022 FW_IQ_CMD_FL0FBMAX(3));
@@ -2093,7 +2096,8 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2093 2096
2094 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 2097 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2095 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 2098 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2096 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); 2099 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
2100 netdev_queue_numa_node_read(netdevq));
2097 if (!txq->q.desc) 2101 if (!txq->q.desc)
2098 return -ENOMEM; 2102 return -ENOMEM;
2099 2103
@@ -2106,6 +2110,7 @@ int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
2106 c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid)); 2110 c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
2107 c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) | 2111 c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
2108 FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) | 2112 FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
2113 FW_EQ_ETH_CMD_FETCHRO(1) |
2109 FW_EQ_ETH_CMD_IQID(iqid)); 2114 FW_EQ_ETH_CMD_IQID(iqid));
2110 c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) | 2115 c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
2111 FW_EQ_ETH_CMD_FBMAX(3) | 2116 FW_EQ_ETH_CMD_FBMAX(3) |
@@ -2144,7 +2149,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2144 2149
2145 txq->q.desc = alloc_ring(adap->pdev_dev, nentries, 2150 txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
2146 sizeof(struct tx_desc), 0, &txq->q.phys_addr, 2151 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
2147 NULL, 0); 2152 NULL, 0, NUMA_NO_NODE);
2148 if (!txq->q.desc) 2153 if (!txq->q.desc)
2149 return -ENOMEM; 2154 return -ENOMEM;
2150 2155
@@ -2158,6 +2163,7 @@ int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
2158 c.physeqid_pkd = htonl(0); 2163 c.physeqid_pkd = htonl(0);
2159 c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) | 2164 c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
2160 FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) | 2165 FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
2166 FW_EQ_CTRL_CMD_FETCHRO |
2161 FW_EQ_CTRL_CMD_IQID(iqid)); 2167 FW_EQ_CTRL_CMD_IQID(iqid));
2162 c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) | 2168 c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
2163 FW_EQ_CTRL_CMD_FBMAX(3) | 2169 FW_EQ_CTRL_CMD_FBMAX(3) |
@@ -2194,7 +2200,8 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2194 2200
2195 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size, 2201 txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
2196 sizeof(struct tx_desc), sizeof(struct tx_sw_desc), 2202 sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
2197 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); 2203 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN,
2204 NUMA_NO_NODE);
2198 if (!txq->q.desc) 2205 if (!txq->q.desc)
2199 return -ENOMEM; 2206 return -ENOMEM;
2200 2207
@@ -2207,6 +2214,7 @@ int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
2207 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c)); 2214 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
2208 c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) | 2215 c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
2209 FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) | 2216 FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
2217 FW_EQ_OFLD_CMD_FETCHRO(1) |
2210 FW_EQ_OFLD_CMD_IQID(iqid)); 2218 FW_EQ_OFLD_CMD_IQID(iqid));
2211 c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) | 2219 c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
2212 FW_EQ_OFLD_CMD_FBMAX(3) | 2220 FW_EQ_OFLD_CMD_FBMAX(3) |
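alloc_ring() now takes a NUMA node so the software-state array can be placed near the CPU that services the queue; Tx queues pass netdev_queue_numa_node_read() and the remaining callers pass NUMA_NO_NODE. A small sketch of that allocation choice:

static void *ex_alloc_sw_ring(size_t nelem, size_t sw_size,
			      struct netdev_queue *txq)
{
	/* NUMA_NO_NODE lets the allocator pick any node */
	int node = txq ? netdev_queue_numa_node_read(txq) : NUMA_NO_NODE;

	return kzalloc_node(nelem * sw_size, GFP_KERNEL, node);
}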
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
index e97521c801ea..b9fd8a6f2cc4 100644
--- a/drivers/net/cxgb4/t4_hw.c
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -183,7 +183,7 @@ static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
183int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size, 183int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
184 void *rpl, bool sleep_ok) 184 void *rpl, bool sleep_ok)
185{ 185{
186 static int delay[] = { 186 static const int delay[] = {
187 1, 1, 3, 5, 10, 10, 20, 50, 100, 200 187 1, 1, 3, 5, 10, 10, 20, 50, 100, 200
188 }; 188 };
189 189
@@ -330,18 +330,6 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
330 return 0; 330 return 0;
331} 331}
332 332
333/*
334 * Partial EEPROM Vital Product Data structure. Includes only the ID and
335 * VPD-R header.
336 */
337struct t4_vpd_hdr {
338 u8 id_tag;
339 u8 id_len[2];
340 u8 id_data[ID_LEN];
341 u8 vpdr_tag;
342 u8 vpdr_len[2];
343};
344
345#define EEPROM_STAT_ADDR 0x7bfc 333#define EEPROM_STAT_ADDR 0x7bfc
346#define VPD_BASE 0 334#define VPD_BASE 0
347#define VPD_LEN 512 335#define VPD_LEN 512
@@ -370,25 +358,38 @@ int t4_seeprom_wp(struct adapter *adapter, bool enable)
370static int get_vpd_params(struct adapter *adapter, struct vpd_params *p) 358static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
371{ 359{
372 int i, ret; 360 int i, ret;
373 int ec, sn, v2; 361 int ec, sn;
374 u8 vpd[VPD_LEN], csum; 362 u8 vpd[VPD_LEN], csum;
375 unsigned int vpdr_len; 363 unsigned int vpdr_len, kw_offset, id_len;
376 const struct t4_vpd_hdr *v;
377 364
378 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd); 365 ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), vpd);
379 if (ret < 0) 366 if (ret < 0)
380 return ret; 367 return ret;
381 368
382 v = (const struct t4_vpd_hdr *)vpd; 369 if (vpd[0] != PCI_VPD_LRDT_ID_STRING) {
383 vpdr_len = pci_vpd_lrdt_size(&v->vpdr_tag); 370 dev_err(adapter->pdev_dev, "missing VPD ID string\n");
384 if (vpdr_len + sizeof(struct t4_vpd_hdr) > VPD_LEN) { 371 return -EINVAL;
372 }
373
374 id_len = pci_vpd_lrdt_size(vpd);
375 if (id_len > ID_LEN)
376 id_len = ID_LEN;
377
378 i = pci_vpd_find_tag(vpd, 0, VPD_LEN, PCI_VPD_LRDT_RO_DATA);
379 if (i < 0) {
380 dev_err(adapter->pdev_dev, "missing VPD-R section\n");
381 return -EINVAL;
382 }
383
384 vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
385 kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;
386 if (vpdr_len + kw_offset > VPD_LEN) {
385 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len); 387 dev_err(adapter->pdev_dev, "bad VPD-R length %u\n", vpdr_len);
386 return -EINVAL; 388 return -EINVAL;
387 } 389 }
388 390
389#define FIND_VPD_KW(var, name) do { \ 391#define FIND_VPD_KW(var, name) do { \
390 var = pci_vpd_find_info_keyword(&v->id_tag, sizeof(struct t4_vpd_hdr), \ 392 var = pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, name); \
391 vpdr_len, name); \
392 if (var < 0) { \ 393 if (var < 0) { \
393 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \ 394 dev_err(adapter->pdev_dev, "missing VPD keyword " name "\n"); \
394 return -EINVAL; \ 395 return -EINVAL; \
@@ -408,11 +409,9 @@ static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
408 409
409 FIND_VPD_KW(ec, "EC"); 410 FIND_VPD_KW(ec, "EC");
410 FIND_VPD_KW(sn, "SN"); 411 FIND_VPD_KW(sn, "SN");
411 FIND_VPD_KW(v2, "V2");
412#undef FIND_VPD_KW 412#undef FIND_VPD_KW
413 413
414 p->cclk = simple_strtoul(vpd + v2, NULL, 10); 414 memcpy(p->id, vpd + PCI_VPD_LRDT_TAG_SIZE, id_len);
415 memcpy(p->id, v->id_data, ID_LEN);
416 strim(p->id); 415 strim(p->id);
417 memcpy(p->ec, vpd + ec, EC_LEN); 416 memcpy(p->ec, vpd + ec, EC_LEN);
418 strim(p->ec); 417 strim(p->ec);
@@ -919,7 +918,7 @@ static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
919 */ 918 */
920static void pcie_intr_handler(struct adapter *adapter) 919static void pcie_intr_handler(struct adapter *adapter)
921{ 920{
922 static struct intr_info sysbus_intr_info[] = { 921 static const struct intr_info sysbus_intr_info[] = {
923 { RNPP, "RXNP array parity error", -1, 1 }, 922 { RNPP, "RXNP array parity error", -1, 1 },
924 { RPCP, "RXPC array parity error", -1, 1 }, 923 { RPCP, "RXPC array parity error", -1, 1 },
925 { RCIP, "RXCIF array parity error", -1, 1 }, 924 { RCIP, "RXCIF array parity error", -1, 1 },
@@ -927,7 +926,7 @@ static void pcie_intr_handler(struct adapter *adapter)
927 { RFTP, "RXFT array parity error", -1, 1 }, 926 { RFTP, "RXFT array parity error", -1, 1 },
928 { 0 } 927 { 0 }
929 }; 928 };
930 static struct intr_info pcie_port_intr_info[] = { 929 static const struct intr_info pcie_port_intr_info[] = {
931 { TPCP, "TXPC array parity error", -1, 1 }, 930 { TPCP, "TXPC array parity error", -1, 1 },
932 { TNPP, "TXNP array parity error", -1, 1 }, 931 { TNPP, "TXNP array parity error", -1, 1 },
933 { TFTP, "TXFT array parity error", -1, 1 }, 932 { TFTP, "TXFT array parity error", -1, 1 },
@@ -939,7 +938,7 @@ static void pcie_intr_handler(struct adapter *adapter)
939 { TDUE, "Tx uncorrectable data error", -1, 1 }, 938 { TDUE, "Tx uncorrectable data error", -1, 1 },
940 { 0 } 939 { 0 }
941 }; 940 };
942 static struct intr_info pcie_intr_info[] = { 941 static const struct intr_info pcie_intr_info[] = {
943 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 }, 942 { MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
944 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 }, 943 { MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
945 { MSIDATAPERR, "MSI data parity error", -1, 1 }, 944 { MSIDATAPERR, "MSI data parity error", -1, 1 },
@@ -991,7 +990,7 @@ static void pcie_intr_handler(struct adapter *adapter)
991 */ 990 */
992static void tp_intr_handler(struct adapter *adapter) 991static void tp_intr_handler(struct adapter *adapter)
993{ 992{
994 static struct intr_info tp_intr_info[] = { 993 static const struct intr_info tp_intr_info[] = {
995 { 0x3fffffff, "TP parity error", -1, 1 }, 994 { 0x3fffffff, "TP parity error", -1, 1 },
996 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 }, 995 { FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
997 { 0 } 996 { 0 }
@@ -1008,7 +1007,7 @@ static void sge_intr_handler(struct adapter *adapter)
1008{ 1007{
1009 u64 v; 1008 u64 v;
1010 1009
1011 static struct intr_info sge_intr_info[] = { 1010 static const struct intr_info sge_intr_info[] = {
1012 { ERR_CPL_EXCEED_IQE_SIZE, 1011 { ERR_CPL_EXCEED_IQE_SIZE,
1013 "SGE received CPL exceeding IQE size", -1, 1 }, 1012 "SGE received CPL exceeding IQE size", -1, 1 },
1014 { ERR_INVALID_CIDX_INC, 1013 { ERR_INVALID_CIDX_INC,
@@ -1053,7 +1052,7 @@ static void sge_intr_handler(struct adapter *adapter)
1053 */ 1052 */
1054static void cim_intr_handler(struct adapter *adapter) 1053static void cim_intr_handler(struct adapter *adapter)
1055{ 1054{
1056 static struct intr_info cim_intr_info[] = { 1055 static const struct intr_info cim_intr_info[] = {
1057 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 }, 1056 { PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
1058 { OBQPARERR, "CIM OBQ parity error", -1, 1 }, 1057 { OBQPARERR, "CIM OBQ parity error", -1, 1 },
1059 { IBQPARERR, "CIM IBQ parity error", -1, 1 }, 1058 { IBQPARERR, "CIM IBQ parity error", -1, 1 },
@@ -1063,7 +1062,7 @@ static void cim_intr_handler(struct adapter *adapter)
1063 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 }, 1062 { TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
1064 { 0 } 1063 { 0 }
1065 }; 1064 };
1066 static struct intr_info cim_upintr_info[] = { 1065 static const struct intr_info cim_upintr_info[] = {
1067 { RSVDSPACEINT, "CIM reserved space access", -1, 1 }, 1066 { RSVDSPACEINT, "CIM reserved space access", -1, 1 },
1068 { ILLTRANSINT, "CIM illegal transaction", -1, 1 }, 1067 { ILLTRANSINT, "CIM illegal transaction", -1, 1 },
1069 { ILLWRINT, "CIM illegal write", -1, 1 }, 1068 { ILLWRINT, "CIM illegal write", -1, 1 },
@@ -1110,7 +1109,7 @@ static void cim_intr_handler(struct adapter *adapter)
1110 */ 1109 */
1111static void ulprx_intr_handler(struct adapter *adapter) 1110static void ulprx_intr_handler(struct adapter *adapter)
1112{ 1111{
1113 static struct intr_info ulprx_intr_info[] = { 1112 static const struct intr_info ulprx_intr_info[] = {
1114 { 0x1800000, "ULPRX context error", -1, 1 }, 1113 { 0x1800000, "ULPRX context error", -1, 1 },
1115 { 0x7fffff, "ULPRX parity error", -1, 1 }, 1114 { 0x7fffff, "ULPRX parity error", -1, 1 },
1116 { 0 } 1115 { 0 }
@@ -1125,7 +1124,7 @@ static void ulprx_intr_handler(struct adapter *adapter)
1125 */ 1124 */
1126static void ulptx_intr_handler(struct adapter *adapter) 1125static void ulptx_intr_handler(struct adapter *adapter)
1127{ 1126{
1128 static struct intr_info ulptx_intr_info[] = { 1127 static const struct intr_info ulptx_intr_info[] = {
1129 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1, 1128 { PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
1130 0 }, 1129 0 },
1131 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1, 1130 { PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
@@ -1147,7 +1146,7 @@ static void ulptx_intr_handler(struct adapter *adapter)
1147 */ 1146 */
1148static void pmtx_intr_handler(struct adapter *adapter) 1147static void pmtx_intr_handler(struct adapter *adapter)
1149{ 1148{
1150 static struct intr_info pmtx_intr_info[] = { 1149 static const struct intr_info pmtx_intr_info[] = {
1151 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 }, 1150 { PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
1152 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 }, 1151 { PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
1153 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 }, 1152 { PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
@@ -1169,7 +1168,7 @@ static void pmtx_intr_handler(struct adapter *adapter)
1169 */ 1168 */
1170static void pmrx_intr_handler(struct adapter *adapter) 1169static void pmrx_intr_handler(struct adapter *adapter)
1171{ 1170{
1172 static struct intr_info pmrx_intr_info[] = { 1171 static const struct intr_info pmrx_intr_info[] = {
1173 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 }, 1172 { ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
1174 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 }, 1173 { PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
1175 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 }, 1174 { OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
@@ -1188,7 +1187,7 @@ static void pmrx_intr_handler(struct adapter *adapter)
1188 */ 1187 */
1189static void cplsw_intr_handler(struct adapter *adapter) 1188static void cplsw_intr_handler(struct adapter *adapter)
1190{ 1189{
1191 static struct intr_info cplsw_intr_info[] = { 1190 static const struct intr_info cplsw_intr_info[] = {
1192 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 }, 1191 { CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
1193 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 }, 1192 { CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
1194 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 }, 1193 { TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
@@ -1207,7 +1206,7 @@ static void cplsw_intr_handler(struct adapter *adapter)
1207 */ 1206 */
1208static void le_intr_handler(struct adapter *adap) 1207static void le_intr_handler(struct adapter *adap)
1209{ 1208{
1210 static struct intr_info le_intr_info[] = { 1209 static const struct intr_info le_intr_info[] = {
1211 { LIPMISS, "LE LIP miss", -1, 0 }, 1210 { LIPMISS, "LE LIP miss", -1, 0 },
1212 { LIP0, "LE 0 LIP error", -1, 0 }, 1211 { LIP0, "LE 0 LIP error", -1, 0 },
1213 { PARITYERR, "LE parity error", -1, 1 }, 1212 { PARITYERR, "LE parity error", -1, 1 },
@@ -1225,11 +1224,11 @@ static void le_intr_handler(struct adapter *adap)
1225 */ 1224 */
1226static void mps_intr_handler(struct adapter *adapter) 1225static void mps_intr_handler(struct adapter *adapter)
1227{ 1226{
1228 static struct intr_info mps_rx_intr_info[] = { 1227 static const struct intr_info mps_rx_intr_info[] = {
1229 { 0xffffff, "MPS Rx parity error", -1, 1 }, 1228 { 0xffffff, "MPS Rx parity error", -1, 1 },
1230 { 0 } 1229 { 0 }
1231 }; 1230 };
1232 static struct intr_info mps_tx_intr_info[] = { 1231 static const struct intr_info mps_tx_intr_info[] = {
1233 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 }, 1232 { TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
1234 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 }, 1233 { NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
1235 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 }, 1234 { TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
@@ -1239,25 +1238,25 @@ static void mps_intr_handler(struct adapter *adapter)
1239 { FRMERR, "MPS Tx framing error", -1, 1 }, 1238 { FRMERR, "MPS Tx framing error", -1, 1 },
1240 { 0 } 1239 { 0 }
1241 }; 1240 };
1242 static struct intr_info mps_trc_intr_info[] = { 1241 static const struct intr_info mps_trc_intr_info[] = {
1243 { FILTMEM, "MPS TRC filter parity error", -1, 1 }, 1242 { FILTMEM, "MPS TRC filter parity error", -1, 1 },
1244 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 }, 1243 { PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
1245 { MISCPERR, "MPS TRC misc parity error", -1, 1 }, 1244 { MISCPERR, "MPS TRC misc parity error", -1, 1 },
1246 { 0 } 1245 { 0 }
1247 }; 1246 };
1248 static struct intr_info mps_stat_sram_intr_info[] = { 1247 static const struct intr_info mps_stat_sram_intr_info[] = {
1249 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 }, 1248 { 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
1250 { 0 } 1249 { 0 }
1251 }; 1250 };
1252 static struct intr_info mps_stat_tx_intr_info[] = { 1251 static const struct intr_info mps_stat_tx_intr_info[] = {
1253 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 }, 1252 { 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
1254 { 0 } 1253 { 0 }
1255 }; 1254 };
1256 static struct intr_info mps_stat_rx_intr_info[] = { 1255 static const struct intr_info mps_stat_rx_intr_info[] = {
1257 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 }, 1256 { 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
1258 { 0 } 1257 { 0 }
1259 }; 1258 };
1260 static struct intr_info mps_cls_intr_info[] = { 1259 static const struct intr_info mps_cls_intr_info[] = {
1261 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 }, 1260 { MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
1262 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 }, 1261 { MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
1263 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 }, 1262 { HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
@@ -1356,7 +1355,7 @@ static void ma_intr_handler(struct adapter *adap)
1356 */ 1355 */
1357static void smb_intr_handler(struct adapter *adap) 1356static void smb_intr_handler(struct adapter *adap)
1358{ 1357{
1359 static struct intr_info smb_intr_info[] = { 1358 static const struct intr_info smb_intr_info[] = {
1360 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 }, 1359 { MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
1361 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 }, 1360 { MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
1362 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 }, 1361 { SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
@@ -1372,7 +1371,7 @@ static void smb_intr_handler(struct adapter *adap)
1372 */ 1371 */
1373static void ncsi_intr_handler(struct adapter *adap) 1372static void ncsi_intr_handler(struct adapter *adap)
1374{ 1373{
1375 static struct intr_info ncsi_intr_info[] = { 1374 static const struct intr_info ncsi_intr_info[] = {
1376 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 }, 1375 { CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
1377 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 }, 1376 { MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
1378 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 }, 1377 { TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
@@ -1410,7 +1409,7 @@ static void xgmac_intr_handler(struct adapter *adap, int port)
1410 */ 1409 */
1411static void pl_intr_handler(struct adapter *adap) 1410static void pl_intr_handler(struct adapter *adap)
1412{ 1411{
1413 static struct intr_info pl_intr_info[] = { 1412 static const struct intr_info pl_intr_info[] = {
1414 { FATALPERR, "T4 fatal parity error", -1, 1 }, 1413 { FATALPERR, "T4 fatal parity error", -1, 1 },
1415 { PERRVFID, "PL VFID_MAP parity error", -1, 1 }, 1414 { PERRVFID, "PL VFID_MAP parity error", -1, 1 },
1416 { 0 } 1415 { 0 }
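get_vpd_params() stops assuming a fixed t4_vpd_hdr layout and walks the VPD image with the generic PCI helpers instead. A trimmed sketch of that lookup, using only the calls that appear in the hunk; error reporting and the keyword-header offset adjustment are omitted:

static int ex_find_vpd_keyword(const u8 *vpd, unsigned int len,
			       const char *kw)
{
	unsigned int vpdr_len, kw_offset;
	int i;

	i = pci_vpd_find_tag(vpd, 0, len, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		return i;			/* no VPD-R section */

	vpdr_len = pci_vpd_lrdt_size(&vpd[i]);
	kw_offset = i + PCI_VPD_LRDT_TAG_SIZE;

	/* returns the offset of the keyword header, or a negative errno */
	return pci_vpd_find_info_keyword(vpd, kw_offset, vpdr_len, kw);
}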
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
index 940584a8a640..edcfd7ec7802 100644
--- a/drivers/net/cxgb4/t4fw_api.h
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -1239,6 +1239,7 @@ enum fw_port_type {
1239 FW_PORT_TYPE_KR, 1239 FW_PORT_TYPE_KR,
1240 FW_PORT_TYPE_SFP, 1240 FW_PORT_TYPE_SFP,
1241 FW_PORT_TYPE_BP_AP, 1241 FW_PORT_TYPE_BP_AP,
1242 FW_PORT_TYPE_BP4_AP,
1242 1243
1243 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK 1244 FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
1244}; 1245};
diff --git a/drivers/net/cxgb4vf/adapter.h b/drivers/net/cxgb4vf/adapter.h
index 8ea01962e045..4766b4116b41 100644
--- a/drivers/net/cxgb4vf/adapter.h
+++ b/drivers/net/cxgb4vf/adapter.h
@@ -60,7 +60,7 @@ enum {
60 * MSI-X interrupt index usage. 60 * MSI-X interrupt index usage.
61 */ 61 */
62 MSIX_FW = 0, /* MSI-X index for firmware Q */ 62 MSIX_FW = 0, /* MSI-X index for firmware Q */
63 MSIX_NIQFLINT = 1, /* MSI-X index base for Ingress Qs */ 63 MSIX_IQFLINT = 1, /* MSI-X index base for Ingress Qs */
64 MSIX_EXTRAS = 1, 64 MSIX_EXTRAS = 1,
65 MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS, 65 MSIX_ENTRIES = MAX_ETH_QSETS + MSIX_EXTRAS,
66 66
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 6bf464afa90e..56166ae2059f 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -280,9 +280,7 @@ static void name_msix_vecs(struct adapter *adapter)
280 const struct port_info *pi = netdev_priv(dev); 280 const struct port_info *pi = netdev_priv(dev);
281 int qs, msi; 281 int qs, msi;
282 282
283 for (qs = 0, msi = MSIX_NIQFLINT; 283 for (qs = 0, msi = MSIX_IQFLINT; qs < pi->nqsets; qs++, msi++) {
284 qs < pi->nqsets;
285 qs++, msi++) {
286 snprintf(adapter->msix_info[msi].desc, namelen, 284 snprintf(adapter->msix_info[msi].desc, namelen,
287 "%s-%d", dev->name, qs); 285 "%s-%d", dev->name, qs);
288 adapter->msix_info[msi].desc[namelen] = 0; 286 adapter->msix_info[msi].desc[namelen] = 0;
@@ -309,7 +307,7 @@ static int request_msix_queue_irqs(struct adapter *adapter)
309 /* 307 /*
310 * Ethernet queues. 308 * Ethernet queues.
311 */ 309 */
312 msi = MSIX_NIQFLINT; 310 msi = MSIX_IQFLINT;
313 for_each_ethrxq(s, rxq) { 311 for_each_ethrxq(s, rxq) {
314 err = request_irq(adapter->msix_info[msi].vec, 312 err = request_irq(adapter->msix_info[msi].vec,
315 t4vf_sge_intr_msix, 0, 313 t4vf_sge_intr_msix, 0,
@@ -337,7 +335,7 @@ static void free_msix_queue_irqs(struct adapter *adapter)
337 int rxq, msi; 335 int rxq, msi;
338 336
339 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq); 337 free_irq(adapter->msix_info[MSIX_FW].vec, &s->fw_evtq);
340 msi = MSIX_NIQFLINT; 338 msi = MSIX_IQFLINT;
341 for_each_ethrxq(s, rxq) 339 for_each_ethrxq(s, rxq)
342 free_irq(adapter->msix_info[msi++].vec, 340 free_irq(adapter->msix_info[msi++].vec,
343 &s->ethrxq[rxq].rspq); 341 &s->ethrxq[rxq].rspq);
@@ -527,7 +525,7 @@ static int setup_sge_queues(struct adapter *adapter)
527 * brought up at which point lots of things get nailed down 525 * brought up at which point lots of things get nailed down
528 * permanently ... 526 * permanently ...
529 */ 527 */
530 msix = MSIX_NIQFLINT; 528 msix = MSIX_IQFLINT;
531 for_each_port(adapter, pidx) { 529 for_each_port(adapter, pidx) {
532 struct net_device *dev = adapter->port[pidx]; 530 struct net_device *dev = adapter->port[pidx];
533 struct port_info *pi = netdev_priv(dev); 531 struct port_info *pi = netdev_priv(dev);
@@ -751,13 +749,19 @@ static int cxgb4vf_open(struct net_device *dev)
751 netif_set_real_num_tx_queues(dev, pi->nqsets); 749 netif_set_real_num_tx_queues(dev, pi->nqsets);
752 err = netif_set_real_num_rx_queues(dev, pi->nqsets); 750 err = netif_set_real_num_rx_queues(dev, pi->nqsets);
753 if (err) 751 if (err)
754 return err; 752 goto err_unwind;
755 set_bit(pi->port_id, &adapter->open_device_map);
756 err = link_start(dev); 753 err = link_start(dev);
757 if (err) 754 if (err)
758 return err; 755 goto err_unwind;
756
759 netif_tx_start_all_queues(dev); 757 netif_tx_start_all_queues(dev);
758 set_bit(pi->port_id, &adapter->open_device_map);
760 return 0; 759 return 0;
760
761err_unwind:
762 if (adapter->open_device_map == 0)
763 adapter_down(adapter);
764 return err;
761} 765}
762 766
763/* 767/*
@@ -766,13 +770,12 @@ static int cxgb4vf_open(struct net_device *dev)
766 */ 770 */
767static int cxgb4vf_stop(struct net_device *dev) 771static int cxgb4vf_stop(struct net_device *dev)
768{ 772{
769 int ret;
770 struct port_info *pi = netdev_priv(dev); 773 struct port_info *pi = netdev_priv(dev);
771 struct adapter *adapter = pi->adapter; 774 struct adapter *adapter = pi->adapter;
772 775
773 netif_tx_stop_all_queues(dev); 776 netif_tx_stop_all_queues(dev);
774 netif_carrier_off(dev); 777 netif_carrier_off(dev);
775 ret = t4vf_enable_vi(adapter, pi->viid, false, false); 778 t4vf_enable_vi(adapter, pi->viid, false, false);
776 pi->link_cfg.link_ok = 0; 779 pi->link_cfg.link_ok = 0;
777 780
778 clear_bit(pi->port_id, &adapter->open_device_map); 781 clear_bit(pi->port_id, &adapter->open_device_map);
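Aside: the cxgb4vf_open() change above replaces the early returns with a single unwind label, so a partially brought-up interface tears the adapter back down when no other port is holding it open. A minimal sketch of that goto-unwind idiom, using hypothetical helpers (my_setup_irqs, my_start_link and my_adapter_down are stand-ins for illustration, not the driver's functions):

	#include <stdio.h>

	/* Hypothetical device state and helpers, for illustration only. */
	struct my_dev { int open_count; int irqs_ok; };

	static int my_setup_irqs(struct my_dev *d) { d->irqs_ok = 1; return 0; }
	static int my_start_link(struct my_dev *d) { return -1; }	/* pretend this step fails */
	static void my_adapter_down(struct my_dev *d) { d->irqs_ok = 0; printf("unwound\n"); }

	static int my_open(struct my_dev *d)
	{
		int err;

		err = my_setup_irqs(d);
		if (err)
			goto err_unwind;

		err = my_start_link(d);
		if (err)
			goto err_unwind;

		d->open_count++;		/* mark "in use" only after everything succeeded */
		return 0;

	err_unwind:
		if (d->open_count == 0)		/* nothing else keeps the adapter up */
			my_adapter_down(d);
		return err;
	}

	int main(void)
	{
		struct my_dev d = { 0, 0 };
		printf("open -> %d\n", my_open(&d));
		return 0;
	}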
@@ -1365,6 +1368,8 @@ struct queue_port_stats {
1365 u64 rx_csum; 1368 u64 rx_csum;
1366 u64 vlan_ex; 1369 u64 vlan_ex;
1367 u64 vlan_ins; 1370 u64 vlan_ins;
1371 u64 lro_pkts;
1372 u64 lro_merged;
1368}; 1373};
1369 1374
1370/* 1375/*
@@ -1402,6 +1407,8 @@ static const char stats_strings[][ETH_GSTRING_LEN] = {
1402 "RxCsumGood ", 1407 "RxCsumGood ",
1403 "VLANextractions ", 1408 "VLANextractions ",
1404 "VLANinsertions ", 1409 "VLANinsertions ",
1410 "GROPackets ",
1411 "GROMerged ",
1405}; 1412};
1406 1413
1407/* 1414/*
@@ -1451,6 +1458,8 @@ static void collect_sge_port_stats(const struct adapter *adapter,
1451 stats->rx_csum += rxq->stats.rx_cso; 1458 stats->rx_csum += rxq->stats.rx_cso;
1452 stats->vlan_ex += rxq->stats.vlan_ex; 1459 stats->vlan_ex += rxq->stats.vlan_ex;
1453 stats->vlan_ins += txq->vlan_ins; 1460 stats->vlan_ins += txq->vlan_ins;
1461 stats->lro_pkts += rxq->stats.lro_pkts;
1462 stats->lro_merged += rxq->stats.lro_merged;
1454 } 1463 }
1455} 1464}
1456 1465
@@ -1547,14 +1556,19 @@ static void cxgb4vf_get_wol(struct net_device *dev,
1547} 1556}
1548 1557
1549/* 1558/*
1559 * TCP Segmentation Offload flags which we support.
1560 */
1561#define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
1562
1563/*
1550 * Set TCP Segmentation Offloading feature capabilities. 1564 * Set TCP Segmentation Offloading feature capabilities.
1551 */ 1565 */
1552static int cxgb4vf_set_tso(struct net_device *dev, u32 tso) 1566static int cxgb4vf_set_tso(struct net_device *dev, u32 tso)
1553{ 1567{
1554 if (tso) 1568 if (tso)
1555 dev->features |= NETIF_F_TSO | NETIF_F_TSO6; 1569 dev->features |= TSO_FLAGS;
1556 else 1570 else
1557 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6); 1571 dev->features &= ~TSO_FLAGS;
1558 return 0; 1572 return 0;
1559} 1573}
1560 1574
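Aside: collecting the TSO feature bits behind one TSO_FLAGS mask keeps the enable and disable paths in sync. A small standalone sketch of the same set/clear-by-mask idiom (the flag values here are made up, not NETIF_F_* values):

	#include <stdio.h>

	#define F_TSO      (1u << 0)
	#define F_TSO6     (1u << 1)
	#define F_TSO_ECN  (1u << 2)
	#define TSO_MASK   (F_TSO | F_TSO6 | F_TSO_ECN)

	int main(void)
	{
		unsigned int features = 0;

		features |= TSO_MASK;		/* enable every TSO variant at once */
		printf("on:  %#x\n", features);

		features &= ~TSO_MASK;		/* disable them all with the same mask */
		printf("off: %#x\n", features);
		return 0;
	}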
@@ -2045,7 +2059,7 @@ static int __devinit setup_debugfs(struct adapter *adapter)
2045 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave 2059 * Tear down the /sys/kernel/debug/cxgb4vf sub-nodes created above. We leave
2046 * it to our caller to tear down the directory (debugfs_root). 2060 * it to our caller to tear down the directory (debugfs_root).
2047 */ 2061 */
2048static void __devexit cleanup_debugfs(struct adapter *adapter) 2062static void cleanup_debugfs(struct adapter *adapter)
2049{ 2063{
2050 BUG_ON(adapter->debugfs_root == NULL); 2064 BUG_ON(adapter->debugfs_root == NULL);
2051 2065
@@ -2063,7 +2077,7 @@ static void __devexit cleanup_debugfs(struct adapter *adapter)
2063 * adapter parameters we're going to be using and initialize basic adapter 2077 * adapter parameters we're going to be using and initialize basic adapter
2064 * hardware support. 2078 * hardware support.
2065 */ 2079 */
2066static int adap_init0(struct adapter *adapter) 2080static int __devinit adap_init0(struct adapter *adapter)
2067{ 2081{
2068 struct vf_resources *vfres = &adapter->params.vfres; 2082 struct vf_resources *vfres = &adapter->params.vfres;
2069 struct sge_params *sge_params = &adapter->params.sge; 2083 struct sge_params *sge_params = &adapter->params.sge;
@@ -2494,7 +2508,6 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2494 version_printed = 1; 2508 version_printed = 1;
2495 } 2509 }
2496 2510
2497
2498 /* 2511 /*
2499 * Initialize generic PCI device state. 2512 * Initialize generic PCI device state.
2500 */ 2513 */
@@ -2631,7 +2644,7 @@ static int __devinit cxgb4vf_pci_probe(struct pci_dev *pdev,
2631 netif_carrier_off(netdev); 2644 netif_carrier_off(netdev);
2632 netdev->irq = pdev->irq; 2645 netdev->irq = pdev->irq;
2633 2646
2634 netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | 2647 netdev->features = (NETIF_F_SG | TSO_FLAGS |
2635 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 2648 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2636 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | 2649 NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
2637 NETIF_F_GRO); 2650 NETIF_F_GRO);
diff --git a/drivers/net/cxgb4vf/sge.c b/drivers/net/cxgb4vf/sge.c
index ecf0770bf0ff..e0b3d1bc2fdf 100644
--- a/drivers/net/cxgb4vf/sge.c
+++ b/drivers/net/cxgb4vf/sge.c
@@ -1568,6 +1568,9 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1568 } else 1568 } else
1569 skb_checksum_none_assert(skb); 1569 skb_checksum_none_assert(skb);
1570 1570
1571 /*
1572 * Deliver the packet to the stack.
1573 */
1571 if (unlikely(pkt->vlan_ex)) { 1574 if (unlikely(pkt->vlan_ex)) {
1572 struct vlan_group *grp = pi->vlan_grp; 1575 struct vlan_group *grp = pi->vlan_grp;
1573 1576
@@ -2143,7 +2146,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2143 2146
2144 /* 2147 /*
2145 * Calculate the size of the hardware free list ring plus 2148 * Calculate the size of the hardware free list ring plus
2146 * status page (which the SGE will place at the end of the 2149 * Status Page (which the SGE will place after the end of the
2147 * free list ring) in Egress Queue Units. 2150 * free list ring) in Egress Queue Units.
2148 */ 2151 */
2149 flsz = (fl->size / FL_PER_EQ_UNIT + 2152 flsz = (fl->size / FL_PER_EQ_UNIT +
@@ -2240,8 +2243,8 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2240 struct port_info *pi = netdev_priv(dev); 2243 struct port_info *pi = netdev_priv(dev);
2241 2244
2242 /* 2245 /*
2243 * Calculate the size of the hardware TX Queue (including the 2246 * Calculate the size of the hardware TX Queue (including the Status
2244 * status age on the end) in units of TX Descriptors. 2247 * Page on the end of the TX Queue) in units of TX Descriptors.
2245 */ 2248 */
2246 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2249 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
2247 2250
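Aside: both sizing comments above make the same point: the ring length handed to the hardware must include room for the Status Page the SGE writes after the last descriptor, expressed in descriptor units. A worked example with assumed sizes (the descriptor size and Status Page length below are placeholders, not the chip's real STAT_LEN or sizeof(struct tx_desc)):

	#include <stdio.h>

	int main(void)
	{
		unsigned int ring_entries = 1024;	/* requested TX descriptors      */
		unsigned int desc_size    = 64;		/* assumed size of one descriptor */
		unsigned int stat_len     = 128;	/* assumed Status Page length    */

		/* total ring length = descriptors + room for the Status Page */
		unsigned int nentries = ring_entries + stat_len / desc_size;

		printf("hardware ring length: %u entries\n", nentries);	/* prints 1026 */
		return 0;
	}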
diff --git a/drivers/net/cxgb4vf/t4vf_hw.c b/drivers/net/cxgb4vf/t4vf_hw.c
index 19520afe1a12..0f51c80475ce 100644
--- a/drivers/net/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/cxgb4vf/t4vf_hw.c
@@ -116,7 +116,7 @@ static void dump_mbox(struct adapter *adapter, const char *tag, u32 mbox_data)
116int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size, 116int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
117 void *rpl, bool sleep_ok) 117 void *rpl, bool sleep_ok)
118{ 118{
119 static int delay[] = { 119 static const int delay[] = {
120 1, 1, 3, 5, 10, 10, 20, 50, 100 120 1, 1, 3, 5, 10, 10, 20, 50, 100
121 }; 121 };
122 122
@@ -147,9 +147,20 @@ int t4vf_wr_mbox_core(struct adapter *adapter, const void *cmd, int size,
147 /* 147 /*
148 * Write the command array into the Mailbox Data register array and 148 * Write the command array into the Mailbox Data register array and
149 * transfer ownership of the mailbox to the firmware. 149 * transfer ownership of the mailbox to the firmware.
150 *
151 * For the VFs, the Mailbox Data "registers" are actually backed by
152 * T4's "MA" interface rather than PL Registers (as is the case for
153 * the PFs). Because these are in different coherency domains, the
154 * write to the VF's PL-register-backed Mailbox Control can race in
155 * front of the writes to the MA-backed VF Mailbox Data "registers".
156 * So we need to do a read-back on at least one byte of the VF Mailbox
157 * Data registers before doing the write to the VF Mailbox Control
158 * register.
150 */ 159 */
151 for (i = 0, p = cmd; i < size; i += 8) 160 for (i = 0, p = cmd; i < size; i += 8)
152 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++)); 161 t4_write_reg64(adapter, mbox_data + i, be64_to_cpu(*p++));
162 t4_read_reg(adapter, mbox_data); /* flush write */
163
153 t4_write_reg(adapter, mbox_ctl, 164 t4_write_reg(adapter, mbox_ctl,
154 MBMSGVALID | MBOWNER(MBOX_OWNER_FW)); 165 MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
155 t4_read_reg(adapter, mbox_ctl); /* flush write */ 166 t4_read_reg(adapter, mbox_ctl); /* flush write */
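Aside: the comment added above describes a classic ordering hazard: the mailbox data writes and the mailbox control write land in different coherency domains, so a read-back of the data area is used to make sure the data is visible before ownership is handed to the firmware. A generic MMIO sketch of that read-back barrier, assuming an ioremap()ed window with invented offsets (data area at +0x100, doorbell at +0x0); this is not the driver's own code:

	#include <linux/io.h>
	#include <linux/types.h>

	static void post_mailbox(void __iomem *base, const u32 *cmd, int words)
	{
		int i;

		/* 1. write the message into the mailbox data area */
		for (i = 0; i < words; i++)
			writel(cmd[i], base + 0x100 + 4 * i);

		/* 2. read back one data word so those writes cannot be overtaken
		 *    by the doorbell write below (they sit in another domain)  */
		readl(base + 0x100);

		/* 3. only now ring the doorbell to hand ownership to the firmware */
		writel(1, base);
		readl(base);			/* flush the posted doorbell write */
	}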
@@ -1300,7 +1311,7 @@ int t4vf_eth_eq_free(struct adapter *adapter, unsigned int eqid)
1300 */ 1311 */
1301int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl) 1312int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1302{ 1313{
1303 struct fw_cmd_hdr *cmd_hdr = (struct fw_cmd_hdr *)rpl; 1314 const struct fw_cmd_hdr *cmd_hdr = (const struct fw_cmd_hdr *)rpl;
1304 u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi)); 1315 u8 opcode = FW_CMD_OP_GET(be32_to_cpu(cmd_hdr->hi));
1305 1316
1306 switch (opcode) { 1317 switch (opcode) {
@@ -1308,7 +1319,8 @@ int t4vf_handle_fw_rpl(struct adapter *adapter, const __be64 *rpl)
1308 /* 1319 /*
1309 * Link/module state change message. 1320 * Link/module state change message.
1310 */ 1321 */
1311 const struct fw_port_cmd *port_cmd = (void *)rpl; 1322 const struct fw_port_cmd *port_cmd =
1323 (const struct fw_port_cmd *)rpl;
1312 u32 word; 1324 u32 word;
1313 int action, port_id, link_ok, speed, fc, pidx; 1325 int action, port_id, link_ok, speed, fc, pidx;
1314 1326
diff --git a/drivers/net/depca.c b/drivers/net/depca.c
index 91b3846ffc8a..1b48b68ad4fd 100644
--- a/drivers/net/depca.c
+++ b/drivers/net/depca.c
@@ -1513,7 +1513,7 @@ static enum depca_type __init depca_shmem_probe (ulong *mem_start)
1513 return adapter; 1513 return adapter;
1514} 1514}
1515 1515
1516static int __init depca_isa_probe (struct platform_device *device) 1516static int __devinit depca_isa_probe (struct platform_device *device)
1517{ 1517{
1518 struct net_device *dev; 1518 struct net_device *dev;
1519 struct depca_private *lp; 1519 struct depca_private *lp;
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 9f6aeefa06bf..2d4c4fc1d900 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -1675,7 +1675,7 @@ dm9000_drv_remove(struct platform_device *pdev)
1675 platform_set_drvdata(pdev, NULL); 1675 platform_set_drvdata(pdev, NULL);
1676 1676
1677 unregister_netdev(ndev); 1677 unregister_netdev(ndev);
1678 dm9000_release_board(pdev, (board_info_t *) netdev_priv(ndev)); 1678 dm9000_release_board(pdev, netdev_priv(ndev));
1679 free_netdev(ndev); /* free device structure */ 1679 free_netdev(ndev); /* free device structure */
1680 1680
1681 dev_dbg(&pdev->dev, "released and freed device\n"); 1681 dev_dbg(&pdev->dev, "released and freed device\n");
diff --git a/drivers/net/e1000/e1000_hw.c b/drivers/net/e1000/e1000_hw.c
index c7e242b69a18..aed223b1b897 100644
--- a/drivers/net/e1000/e1000_hw.c
+++ b/drivers/net/e1000/e1000_hw.c
@@ -130,10 +130,15 @@ static s32 e1000_set_phy_type(struct e1000_hw *hw)
130 if (hw->mac_type == e1000_82541 || 130 if (hw->mac_type == e1000_82541 ||
131 hw->mac_type == e1000_82541_rev_2 || 131 hw->mac_type == e1000_82541_rev_2 ||
132 hw->mac_type == e1000_82547 || 132 hw->mac_type == e1000_82547 ||
133 hw->mac_type == e1000_82547_rev_2) { 133 hw->mac_type == e1000_82547_rev_2)
134 hw->phy_type = e1000_phy_igp; 134 hw->phy_type = e1000_phy_igp;
135 break; 135 break;
136 } 136 case RTL8211B_PHY_ID:
137 hw->phy_type = e1000_phy_8211;
138 break;
139 case RTL8201N_PHY_ID:
140 hw->phy_type = e1000_phy_8201;
141 break;
137 default: 142 default:
138 /* Should never have loaded on this device */ 143 /* Should never have loaded on this device */
139 hw->phy_type = e1000_phy_undefined; 144 hw->phy_type = e1000_phy_undefined;
@@ -318,6 +323,9 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
318 case E1000_DEV_ID_82547GI: 323 case E1000_DEV_ID_82547GI:
319 hw->mac_type = e1000_82547_rev_2; 324 hw->mac_type = e1000_82547_rev_2;
320 break; 325 break;
326 case E1000_DEV_ID_INTEL_CE4100_GBE:
327 hw->mac_type = e1000_ce4100;
328 break;
321 default: 329 default:
322 /* Should never have loaded on this device */ 330 /* Should never have loaded on this device */
323 return -E1000_ERR_MAC_TYPE; 331 return -E1000_ERR_MAC_TYPE;
@@ -372,6 +380,9 @@ void e1000_set_media_type(struct e1000_hw *hw)
372 case e1000_82542_rev2_1: 380 case e1000_82542_rev2_1:
373 hw->media_type = e1000_media_type_fiber; 381 hw->media_type = e1000_media_type_fiber;
374 break; 382 break;
383 case e1000_ce4100:
384 hw->media_type = e1000_media_type_copper;
385 break;
375 default: 386 default:
376 status = er32(STATUS); 387 status = er32(STATUS);
377 if (status & E1000_STATUS_TBIMODE) { 388 if (status & E1000_STATUS_TBIMODE) {
@@ -460,6 +471,7 @@ s32 e1000_reset_hw(struct e1000_hw *hw)
460 /* Reset is performed on a shadow of the control register */ 471 /* Reset is performed on a shadow of the control register */
461 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST)); 472 ew32(CTRL_DUP, (ctrl | E1000_CTRL_RST));
462 break; 473 break;
474 case e1000_ce4100:
463 default: 475 default:
464 ew32(CTRL, (ctrl | E1000_CTRL_RST)); 476 ew32(CTRL, (ctrl | E1000_CTRL_RST));
465 break; 477 break;
@@ -952,6 +964,67 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
952} 964}
953 965
954/** 966/**
967 * e1000_copper_link_rtl_setup - Copper link setup for e1000_phy_rtl series.
968 * @hw: Struct containing variables accessed by shared code
969 *
970 * Commits changes to PHY configuration by calling e1000_phy_reset().
971 */
972static s32 e1000_copper_link_rtl_setup(struct e1000_hw *hw)
973{
974 s32 ret_val;
975
976 /* SW reset the PHY so all changes take effect */
977 ret_val = e1000_phy_reset(hw);
978 if (ret_val) {
979 e_dbg("Error Resetting the PHY\n");
980 return ret_val;
981 }
982
983 return E1000_SUCCESS;
984}
985
986static s32 gbe_dhg_phy_setup(struct e1000_hw *hw)
987{
988 s32 ret_val;
989 u32 ctrl_aux;
990
991 switch (hw->phy_type) {
992 case e1000_phy_8211:
993 ret_val = e1000_copper_link_rtl_setup(hw);
994 if (ret_val) {
995 e_dbg("e1000_copper_link_rtl_setup failed!\n");
996 return ret_val;
997 }
998 break;
999 case e1000_phy_8201:
1000 /* Set RMII mode */
1001 ctrl_aux = er32(CTL_AUX);
1002 ctrl_aux |= E1000_CTL_AUX_RMII;
1003 ew32(CTL_AUX, ctrl_aux);
1004 E1000_WRITE_FLUSH();
1005
1006 /* Disable the J/K bits required for receive */
1007 ctrl_aux = er32(CTL_AUX);
1008 ctrl_aux |= 0x4;
1009 ctrl_aux &= ~0x2;
1010 ew32(CTL_AUX, ctrl_aux);
1011 E1000_WRITE_FLUSH();
1012 ret_val = e1000_copper_link_rtl_setup(hw);
1013
1014 if (ret_val) {
1015 e_dbg("e1000_copper_link_rtl_setup failed!\n");
1016 return ret_val;
1017 }
1018 break;
1019 default:
1020 e_dbg("Error Resetting the PHY\n");
1021 return E1000_ERR_PHY_TYPE;
1022 }
1023
1024 return E1000_SUCCESS;
1025}
1026
1027/**
955 * e1000_copper_link_preconfig - early configuration for copper 1028 * e1000_copper_link_preconfig - early configuration for copper
956 * @hw: Struct containing variables accessed by shared code 1029 * @hw: Struct containing variables accessed by shared code
957 * 1030 *
@@ -1286,6 +1359,10 @@ static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
1286 if (hw->autoneg_advertised == 0) 1359 if (hw->autoneg_advertised == 0)
1287 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT; 1360 hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
1288 1361
1362 /* IFE/RTL8201N PHY only supports 10/100 */
1363 if (hw->phy_type == e1000_phy_8201)
1364 hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
1365
1289 e_dbg("Reconfiguring auto-neg advertisement params\n"); 1366 e_dbg("Reconfiguring auto-neg advertisement params\n");
1290 ret_val = e1000_phy_setup_autoneg(hw); 1367 ret_val = e1000_phy_setup_autoneg(hw);
1291 if (ret_val) { 1368 if (ret_val) {
@@ -1341,7 +1418,7 @@ static s32 e1000_copper_link_postconfig(struct e1000_hw *hw)
1341 s32 ret_val; 1418 s32 ret_val;
1342 e_dbg("e1000_copper_link_postconfig"); 1419 e_dbg("e1000_copper_link_postconfig");
1343 1420
1344 if (hw->mac_type >= e1000_82544) { 1421 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100)) {
1345 e1000_config_collision_dist(hw); 1422 e1000_config_collision_dist(hw);
1346 } else { 1423 } else {
1347 ret_val = e1000_config_mac_to_phy(hw); 1424 ret_val = e1000_config_mac_to_phy(hw);
@@ -1395,6 +1472,12 @@ static s32 e1000_setup_copper_link(struct e1000_hw *hw)
1395 ret_val = e1000_copper_link_mgp_setup(hw); 1472 ret_val = e1000_copper_link_mgp_setup(hw);
1396 if (ret_val) 1473 if (ret_val)
1397 return ret_val; 1474 return ret_val;
1475 } else {
1476 ret_val = gbe_dhg_phy_setup(hw);
1477 if (ret_val) {
1478 e_dbg("gbe_dhg_phy_setup failed!\n");
1479 return ret_val;
1480 }
1398 } 1481 }
1399 1482
1400 if (hw->autoneg) { 1483 if (hw->autoneg) {
@@ -1461,10 +1544,11 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1461 return ret_val; 1544 return ret_val;
1462 1545
1463 /* Read the MII 1000Base-T Control Register (Address 9). */ 1546 /* Read the MII 1000Base-T Control Register (Address 9). */
1464 ret_val = 1547 ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1465 e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
1466 if (ret_val) 1548 if (ret_val)
1467 return ret_val; 1549 return ret_val;
1550 else if (hw->phy_type == e1000_phy_8201)
1551 mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
1468 1552
1469 /* Need to parse both autoneg_advertised and fc and set up 1553 /* Need to parse both autoneg_advertised and fc and set up
1470 * the appropriate PHY registers. First we will parse for 1554 * the appropriate PHY registers. First we will parse for
@@ -1577,9 +1661,14 @@ s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1577 1661
1578 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1662 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1579 1663
1580 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1664 if (hw->phy_type == e1000_phy_8201) {
1581 if (ret_val) 1665 mii_1000t_ctrl_reg = 0;
1582 return ret_val; 1666 } else {
1667 ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL,
1668 mii_1000t_ctrl_reg);
1669 if (ret_val)
1670 return ret_val;
1671 }
1583 1672
1584 return E1000_SUCCESS; 1673 return E1000_SUCCESS;
1585} 1674}
@@ -1860,7 +1949,7 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
1860 1949
1861 /* 82544 or newer MAC, Auto Speed Detection takes care of 1950 /* 82544 or newer MAC, Auto Speed Detection takes care of
1862 * MAC speed/duplex configuration.*/ 1951 * MAC speed/duplex configuration.*/
1863 if (hw->mac_type >= e1000_82544) 1952 if ((hw->mac_type >= e1000_82544) && (hw->mac_type != e1000_ce4100))
1864 return E1000_SUCCESS; 1953 return E1000_SUCCESS;
1865 1954
1866 /* Read the Device Control Register and set the bits to Force Speed 1955 /* Read the Device Control Register and set the bits to Force Speed
@@ -1870,27 +1959,49 @@ static s32 e1000_config_mac_to_phy(struct e1000_hw *hw)
1870 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX); 1959 ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
1871 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS); 1960 ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
1872 1961
1873 /* Set up duplex in the Device Control and Transmit Control 1962 switch (hw->phy_type) {
1874 * registers depending on negotiated values. 1963 case e1000_phy_8201:
1875 */ 1964 ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
1876 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data); 1965 if (ret_val)
1877 if (ret_val) 1966 return ret_val;
1878 return ret_val;
1879 1967
1880 if (phy_data & M88E1000_PSSR_DPLX) 1968 if (phy_data & RTL_PHY_CTRL_FD)
1881 ctrl |= E1000_CTRL_FD; 1969 ctrl |= E1000_CTRL_FD;
1882 else 1970 else
1883 ctrl &= ~E1000_CTRL_FD; 1971 ctrl &= ~E1000_CTRL_FD;
1884 1972
1885 e1000_config_collision_dist(hw); 1973 if (phy_data & RTL_PHY_CTRL_SPD_100)
1974 ctrl |= E1000_CTRL_SPD_100;
1975 else
1976 ctrl |= E1000_CTRL_SPD_10;
1886 1977
1887 /* Set up speed in the Device Control register depending on 1978 e1000_config_collision_dist(hw);
1888 * negotiated values. 1979 break;
1889 */ 1980 default:
1890 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) 1981 /* Set up duplex in the Device Control and Transmit Control
1891 ctrl |= E1000_CTRL_SPD_1000; 1982 * registers depending on negotiated values.
1892 else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS) 1983 */
1893 ctrl |= E1000_CTRL_SPD_100; 1984 ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
1985 &phy_data);
1986 if (ret_val)
1987 return ret_val;
1988
1989 if (phy_data & M88E1000_PSSR_DPLX)
1990 ctrl |= E1000_CTRL_FD;
1991 else
1992 ctrl &= ~E1000_CTRL_FD;
1993
1994 e1000_config_collision_dist(hw);
1995
1996 /* Set up speed in the Device Control register depending on
1997 * negotiated values.
1998 */
1999 if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
2000 ctrl |= E1000_CTRL_SPD_1000;
2001 else if ((phy_data & M88E1000_PSSR_SPEED) ==
2002 M88E1000_PSSR_100MBS)
2003 ctrl |= E1000_CTRL_SPD_100;
2004 }
1894 2005
1895 /* Write the configured values back to the Device Control Reg. */ 2006 /* Write the configured values back to the Device Control Reg. */
1896 ew32(CTRL, ctrl); 2007 ew32(CTRL, ctrl);
@@ -2401,7 +2512,8 @@ s32 e1000_check_for_link(struct e1000_hw *hw)
2401 * speed/duplex on the MAC to the current PHY speed/duplex 2512 * speed/duplex on the MAC to the current PHY speed/duplex
2402 * settings. 2513 * settings.
2403 */ 2514 */
2404 if (hw->mac_type >= e1000_82544) 2515 if ((hw->mac_type >= e1000_82544) &&
2516 (hw->mac_type != e1000_ce4100))
2405 e1000_config_collision_dist(hw); 2517 e1000_config_collision_dist(hw);
2406 else { 2518 else {
2407 ret_val = e1000_config_mac_to_phy(hw); 2519 ret_val = e1000_config_mac_to_phy(hw);
@@ -2738,7 +2850,7 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2738{ 2850{
2739 u32 i; 2851 u32 i;
2740 u32 mdic = 0; 2852 u32 mdic = 0;
2741 const u32 phy_addr = 1; 2853 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
2742 2854
2743 e_dbg("e1000_read_phy_reg_ex"); 2855 e_dbg("e1000_read_phy_reg_ex");
2744 2856
@@ -2752,28 +2864,61 @@ static s32 e1000_read_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2752 * Control register. The MAC will take care of interfacing with the 2864 * Control register. The MAC will take care of interfacing with the
2753 * PHY to retrieve the desired data. 2865 * PHY to retrieve the desired data.
2754 */ 2866 */
2755 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) | 2867 if (hw->mac_type == e1000_ce4100) {
2756 (phy_addr << E1000_MDIC_PHY_SHIFT) | 2868 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
2757 (E1000_MDIC_OP_READ)); 2869 (phy_addr << E1000_MDIC_PHY_SHIFT) |
2870 (INTEL_CE_GBE_MDIC_OP_READ) |
2871 (INTEL_CE_GBE_MDIC_GO));
2758 2872
2759 ew32(MDIC, mdic); 2873 writel(mdic, E1000_MDIO_CMD);
2760 2874
2761 /* Poll the ready bit to see if the MDI read completed */ 2875 /* Poll the ready bit to see if the MDI read
2762 for (i = 0; i < 64; i++) { 2876 * completed
2763 udelay(50); 2877 */
2764 mdic = er32(MDIC); 2878 for (i = 0; i < 64; i++) {
2765 if (mdic & E1000_MDIC_READY) 2879 udelay(50);
2766 break; 2880 mdic = readl(E1000_MDIO_CMD);
2767 } 2881 if (!(mdic & INTEL_CE_GBE_MDIC_GO))
2768 if (!(mdic & E1000_MDIC_READY)) { 2882 break;
2769 e_dbg("MDI Read did not complete\n"); 2883 }
2770 return -E1000_ERR_PHY; 2884
2771 } 2885 if (mdic & INTEL_CE_GBE_MDIC_GO) {
2772 if (mdic & E1000_MDIC_ERROR) { 2886 e_dbg("MDI Read did not complete\n");
2773 e_dbg("MDI Error\n"); 2887 return -E1000_ERR_PHY;
2774 return -E1000_ERR_PHY; 2888 }
2889
2890 mdic = readl(E1000_MDIO_STS);
2891 if (mdic & INTEL_CE_GBE_MDIC_READ_ERROR) {
2892 e_dbg("MDI Read Error\n");
2893 return -E1000_ERR_PHY;
2894 }
2895 *phy_data = (u16) mdic;
2896 } else {
2897 mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
2898 (phy_addr << E1000_MDIC_PHY_SHIFT) |
2899 (E1000_MDIC_OP_READ));
2900
2901 ew32(MDIC, mdic);
2902
2903 /* Poll the ready bit to see if the MDI read
2904 * completed
2905 */
2906 for (i = 0; i < 64; i++) {
2907 udelay(50);
2908 mdic = er32(MDIC);
2909 if (mdic & E1000_MDIC_READY)
2910 break;
2911 }
2912 if (!(mdic & E1000_MDIC_READY)) {
2913 e_dbg("MDI Read did not complete\n");
2914 return -E1000_ERR_PHY;
2915 }
2916 if (mdic & E1000_MDIC_ERROR) {
2917 e_dbg("MDI Error\n");
2918 return -E1000_ERR_PHY;
2919 }
2920 *phy_data = (u16) mdic;
2775 } 2921 }
2776 *phy_data = (u16) mdic;
2777 } else { 2922 } else {
2778 /* We must first send a preamble through the MDIO pin to signal the 2923 /* We must first send a preamble through the MDIO pin to signal the
2779 * beginning of an MII instruction. This is done by sending 32 2924 * beginning of an MII instruction. This is done by sending 32
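Aside: both MDIC paths in the hunk above follow the same shape: write a command word, poll a completion bit a bounded number of times with a small delay, and treat "still busy after the loop" as an error. A minimal kernel-style version of that bounded-poll idiom (the register layout and the GO bit value are placeholders, not the CE4100's real ones):

	#include <linux/io.h>
	#include <linux/delay.h>
	#include <linux/errno.h>
	#include <linux/types.h>

	#define MY_MDIO_GO	0x80000000u	/* assumed "operation in progress" bit */

	/* Returns 0 once the operation completes, -ETIMEDOUT if GO never clears. */
	static int my_mdio_wait(void __iomem *cmd_reg)
	{
		u32 val;
		int i;

		for (i = 0; i < 64; i++) {
			udelay(50);			/* give the PHY time to respond   */
			val = readl(cmd_reg);
			if (!(val & MY_MDIO_GO))	/* hardware cleared GO: finished  */
				return 0;
		}
		return -ETIMEDOUT;			/* still busy after 64 * 50 us    */
	}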
@@ -2840,7 +2985,7 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2840{ 2985{
2841 u32 i; 2986 u32 i;
2842 u32 mdic = 0; 2987 u32 mdic = 0;
2843 const u32 phy_addr = 1; 2988 const u32 phy_addr = (hw->mac_type == e1000_ce4100) ? hw->phy_addr : 1;
2844 2989
2845 e_dbg("e1000_write_phy_reg_ex"); 2990 e_dbg("e1000_write_phy_reg_ex");
2846 2991
@@ -2850,27 +2995,54 @@ static s32 e1000_write_phy_reg_ex(struct e1000_hw *hw, u32 reg_addr,
2850 } 2995 }
2851 2996
2852 if (hw->mac_type > e1000_82543) { 2997 if (hw->mac_type > e1000_82543) {
2853 /* Set up Op-code, Phy Address, register address, and data intended 2998 /* Set up Op-code, Phy Address, register address, and data
2854 * for the PHY register in the MDI Control register. The MAC will take 2999 * intended for the PHY register in the MDI Control register.
2855 * care of interfacing with the PHY to send the desired data. 3000 * The MAC will take care of interfacing with the PHY to send
3001 * the desired data.
2856 */ 3002 */
2857 mdic = (((u32) phy_data) | 3003 if (hw->mac_type == e1000_ce4100) {
2858 (reg_addr << E1000_MDIC_REG_SHIFT) | 3004 mdic = (((u32) phy_data) |
2859 (phy_addr << E1000_MDIC_PHY_SHIFT) | 3005 (reg_addr << E1000_MDIC_REG_SHIFT) |
2860 (E1000_MDIC_OP_WRITE)); 3006 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3007 (INTEL_CE_GBE_MDIC_OP_WRITE) |
3008 (INTEL_CE_GBE_MDIC_GO));
2861 3009
2862 ew32(MDIC, mdic); 3010 writel(mdic, E1000_MDIO_CMD);
2863 3011
2864 /* Poll the ready bit to see if the MDI read completed */ 3012 /* Poll the ready bit to see if the MDI read
2865 for (i = 0; i < 641; i++) { 3013 * completed
2866 udelay(5); 3014 */
2867 mdic = er32(MDIC); 3015 for (i = 0; i < 640; i++) {
2868 if (mdic & E1000_MDIC_READY) 3016 udelay(5);
2869 break; 3017 mdic = readl(E1000_MDIO_CMD);
2870 } 3018 if (!(mdic & INTEL_CE_GBE_MDIC_GO))
2871 if (!(mdic & E1000_MDIC_READY)) { 3019 break;
2872 e_dbg("MDI Write did not complete\n"); 3020 }
2873 return -E1000_ERR_PHY; 3021 if (mdic & INTEL_CE_GBE_MDIC_GO) {
3022 e_dbg("MDI Write did not complete\n");
3023 return -E1000_ERR_PHY;
3024 }
3025 } else {
3026 mdic = (((u32) phy_data) |
3027 (reg_addr << E1000_MDIC_REG_SHIFT) |
3028 (phy_addr << E1000_MDIC_PHY_SHIFT) |
3029 (E1000_MDIC_OP_WRITE));
3030
3031 ew32(MDIC, mdic);
3032
3033 /* Poll the ready bit to see if the MDI read
3034 * completed
3035 */
3036 for (i = 0; i < 641; i++) {
3037 udelay(5);
3038 mdic = er32(MDIC);
3039 if (mdic & E1000_MDIC_READY)
3040 break;
3041 }
3042 if (!(mdic & E1000_MDIC_READY)) {
3043 e_dbg("MDI Write did not complete\n");
3044 return -E1000_ERR_PHY;
3045 }
2874 } 3046 }
2875 } else { 3047 } else {
2876 /* We'll need to use the SW defined pins to shift the write command 3048 /* We'll need to use the SW defined pins to shift the write command
@@ -3048,6 +3220,11 @@ static s32 e1000_detect_gig_phy(struct e1000_hw *hw)
3048 if (hw->phy_id == M88E1011_I_PHY_ID) 3220 if (hw->phy_id == M88E1011_I_PHY_ID)
3049 match = true; 3221 match = true;
3050 break; 3222 break;
3223 case e1000_ce4100:
3224 if ((hw->phy_id == RTL8211B_PHY_ID) ||
3225 (hw->phy_id == RTL8201N_PHY_ID))
3226 match = true;
3227 break;
3051 case e1000_82541: 3228 case e1000_82541:
3052 case e1000_82541_rev_2: 3229 case e1000_82541_rev_2:
3053 case e1000_82547: 3230 case e1000_82547:
@@ -3291,6 +3468,9 @@ s32 e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info)
3291 3468
3292 if (hw->phy_type == e1000_phy_igp) 3469 if (hw->phy_type == e1000_phy_igp)
3293 return e1000_phy_igp_get_info(hw, phy_info); 3470 return e1000_phy_igp_get_info(hw, phy_info);
3471 else if ((hw->phy_type == e1000_phy_8211) ||
3472 (hw->phy_type == e1000_phy_8201))
3473 return E1000_SUCCESS;
3294 else 3474 else
3295 return e1000_phy_m88_get_info(hw, phy_info); 3475 return e1000_phy_m88_get_info(hw, phy_info);
3296} 3476}
@@ -3742,6 +3922,12 @@ static s32 e1000_do_read_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3742 3922
3743 e_dbg("e1000_read_eeprom"); 3923 e_dbg("e1000_read_eeprom");
3744 3924
3925 if (hw->mac_type == e1000_ce4100) {
3926 GBE_CONFIG_FLASH_READ(GBE_CONFIG_BASE_VIRT, offset, words,
3927 data);
3928 return E1000_SUCCESS;
3929 }
3930
3745 /* If eeprom is not yet detected, do so now */ 3931 /* If eeprom is not yet detected, do so now */
3746 if (eeprom->word_size == 0) 3932 if (eeprom->word_size == 0)
3747 e1000_init_eeprom_params(hw); 3933 e1000_init_eeprom_params(hw);
@@ -3904,6 +4090,12 @@ static s32 e1000_do_write_eeprom(struct e1000_hw *hw, u16 offset, u16 words,
3904 4090
3905 e_dbg("e1000_write_eeprom"); 4091 e_dbg("e1000_write_eeprom");
3906 4092
4093 if (hw->mac_type == e1000_ce4100) {
4094 GBE_CONFIG_FLASH_WRITE(GBE_CONFIG_BASE_VIRT, offset, words,
4095 data);
4096 return E1000_SUCCESS;
4097 }
4098
3907 /* If eeprom is not yet detected, do so now */ 4099 /* If eeprom is not yet detected, do so now */
3908 if (eeprom->word_size == 0) 4100 if (eeprom->word_size == 0)
3909 e1000_init_eeprom_params(hw); 4101 e1000_init_eeprom_params(hw);
@@ -4892,11 +5084,11 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
4892 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */ 5084 } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
4893 u16 cur_agc_value; 5085 u16 cur_agc_value;
4894 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE; 5086 u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
4895 u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 5087 static const u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
4896 { IGP01E1000_PHY_AGC_A, 5088 IGP01E1000_PHY_AGC_A,
4897 IGP01E1000_PHY_AGC_B, 5089 IGP01E1000_PHY_AGC_B,
4898 IGP01E1000_PHY_AGC_C, 5090 IGP01E1000_PHY_AGC_C,
4899 IGP01E1000_PHY_AGC_D 5091 IGP01E1000_PHY_AGC_D
4900 }; 5092 };
4901 /* Read the AGC registers for all channels */ 5093 /* Read the AGC registers for all channels */
4902 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) { 5094 for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
@@ -5071,11 +5263,11 @@ static s32 e1000_config_dsp_after_link_change(struct e1000_hw *hw, bool link_up)
5071{ 5263{
5072 s32 ret_val; 5264 s32 ret_val;
5073 u16 phy_data, phy_saved_data, speed, duplex, i; 5265 u16 phy_data, phy_saved_data, speed, duplex, i;
5074 u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = 5266 static const u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] = {
5075 { IGP01E1000_PHY_AGC_PARAM_A, 5267 IGP01E1000_PHY_AGC_PARAM_A,
5076 IGP01E1000_PHY_AGC_PARAM_B, 5268 IGP01E1000_PHY_AGC_PARAM_B,
5077 IGP01E1000_PHY_AGC_PARAM_C, 5269 IGP01E1000_PHY_AGC_PARAM_C,
5078 IGP01E1000_PHY_AGC_PARAM_D 5270 IGP01E1000_PHY_AGC_PARAM_D
5079 }; 5271 };
5080 u16 min_length, max_length; 5272 u16 min_length, max_length;
5081 5273
diff --git a/drivers/net/e1000/e1000_hw.h b/drivers/net/e1000/e1000_hw.h
index 5e820f4e68b9..196eeda2dd6c 100644
--- a/drivers/net/e1000/e1000_hw.h
+++ b/drivers/net/e1000/e1000_hw.h
@@ -52,6 +52,7 @@ typedef enum {
52 e1000_82545, 52 e1000_82545,
53 e1000_82545_rev_3, 53 e1000_82545_rev_3,
54 e1000_82546, 54 e1000_82546,
55 e1000_ce4100,
55 e1000_82546_rev_3, 56 e1000_82546_rev_3,
56 e1000_82541, 57 e1000_82541,
57 e1000_82541_rev_2, 58 e1000_82541_rev_2,
@@ -209,9 +210,11 @@ typedef enum {
209} e1000_1000t_rx_status; 210} e1000_1000t_rx_status;
210 211
211typedef enum { 212typedef enum {
212 e1000_phy_m88 = 0, 213 e1000_phy_m88 = 0,
213 e1000_phy_igp, 214 e1000_phy_igp,
214 e1000_phy_undefined = 0xFF 215 e1000_phy_8211,
216 e1000_phy_8201,
217 e1000_phy_undefined = 0xFF
215} e1000_phy_type; 218} e1000_phy_type;
216 219
217typedef enum { 220typedef enum {
@@ -442,6 +445,7 @@ void e1000_io_write(struct e1000_hw *hw, unsigned long port, u32 value);
442#define E1000_DEV_ID_82547EI 0x1019 445#define E1000_DEV_ID_82547EI 0x1019
443#define E1000_DEV_ID_82547EI_MOBILE 0x101A 446#define E1000_DEV_ID_82547EI_MOBILE 0x101A
444#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5 447#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
448#define E1000_DEV_ID_INTEL_CE4100_GBE 0x2E6E
445 449
446#define NODE_ADDRESS_SIZE 6 450#define NODE_ADDRESS_SIZE 6
447#define ETH_LENGTH_OF_ADDRESS 6 451#define ETH_LENGTH_OF_ADDRESS 6
@@ -808,6 +812,16 @@ struct e1000_ffvt_entry {
808#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */ 812#define E1000_CTRL_EXT 0x00018 /* Extended Device Control - RW */
809#define E1000_FLA 0x0001C /* Flash Access - RW */ 813#define E1000_FLA 0x0001C /* Flash Access - RW */
810#define E1000_MDIC 0x00020 /* MDI Control - RW */ 814#define E1000_MDIC 0x00020 /* MDI Control - RW */
815
816extern void __iomem *ce4100_gbe_mdio_base_virt;
817#define INTEL_CE_GBE_MDIO_RCOMP_BASE (ce4100_gbe_mdio_base_virt)
818#define E1000_MDIO_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0)
819#define E1000_MDIO_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 4)
820#define E1000_MDIO_DRV (INTEL_CE_GBE_MDIO_RCOMP_BASE + 8)
821#define E1000_MDC_CMD (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0xC)
822#define E1000_RCOMP_CTL (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x20)
823#define E1000_RCOMP_STS (INTEL_CE_GBE_MDIO_RCOMP_BASE + 0x24)
824
811#define E1000_SCTL 0x00024 /* SerDes Control - RW */ 825#define E1000_SCTL 0x00024 /* SerDes Control - RW */
812#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */ 826#define E1000_FEXTNVM 0x00028 /* Future Extended NVM register */
813#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */ 827#define E1000_FCAL 0x00028 /* Flow Control Address Low - RW */
@@ -820,6 +834,34 @@ struct e1000_ffvt_entry {
820#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */ 834#define E1000_IMS 0x000D0 /* Interrupt Mask Set - RW */
821#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */ 835#define E1000_IMC 0x000D8 /* Interrupt Mask Clear - WO */
822#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */ 836#define E1000_IAM 0x000E0 /* Interrupt Acknowledge Auto Mask */
837
838/* Auxiliary Control Register. This register is CE4100 specific,
839 * RMII/RGMII function is switched by this register - RW
840 * Following are bits definitions of the Auxiliary Control Register
841 */
842#define E1000_CTL_AUX 0x000E0
843#define E1000_CTL_AUX_END_SEL_SHIFT 10
844#define E1000_CTL_AUX_ENDIANESS_SHIFT 8
845#define E1000_CTL_AUX_RGMII_RMII_SHIFT 0
846
847/* descriptor and packet transfer use CTL_AUX.ENDIANESS */
848#define E1000_CTL_AUX_DES_PKT (0x0 << E1000_CTL_AUX_END_SEL_SHIFT)
849/* descriptor use CTL_AUX.ENDIANESS, packet use default */
850#define E1000_CTL_AUX_DES (0x1 << E1000_CTL_AUX_END_SEL_SHIFT)
851/* descriptor use default, packet use CTL_AUX.ENDIANESS */
852#define E1000_CTL_AUX_PKT (0x2 << E1000_CTL_AUX_END_SEL_SHIFT)
853/* all use CTL_AUX.ENDIANESS */
854#define E1000_CTL_AUX_ALL (0x3 << E1000_CTL_AUX_END_SEL_SHIFT)
855
856#define E1000_CTL_AUX_RGMII (0x0 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
857#define E1000_CTL_AUX_RMII (0x1 << E1000_CTL_AUX_RGMII_RMII_SHIFT)
858
859/* LW little endian, Byte big endian */
860#define E1000_CTL_AUX_LWLE_BBE (0x0 << E1000_CTL_AUX_ENDIANESS_SHIFT)
861#define E1000_CTL_AUX_LWLE_BLE (0x1 << E1000_CTL_AUX_ENDIANESS_SHIFT)
862#define E1000_CTL_AUX_LWBE_BBE (0x2 << E1000_CTL_AUX_ENDIANESS_SHIFT)
863#define E1000_CTL_AUX_LWBE_BLE (0x3 << E1000_CTL_AUX_ENDIANESS_SHIFT)
864
823#define E1000_RCTL 0x00100 /* RX Control - RW */ 865#define E1000_RCTL 0x00100 /* RX Control - RW */
824#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */ 866#define E1000_RDTR1 0x02820 /* RX Delay Timer (1) - RW */
825#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */ 867#define E1000_RDBAL1 0x02900 /* RX Descriptor Base Address Low (1) - RW */
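Aside: the E1000_CTL_AUX_* definitions added above follow the usual shift-then-OR pattern for multi-bit register fields. A tiny standalone example of composing such a value (the shift positions mirror the defines above, but the chosen field combination is arbitrary and only for illustration):

	#include <stdio.h>

	#define END_SEL_SHIFT     10
	#define ENDIANESS_SHIFT    8
	#define RGMII_RMII_SHIFT   0

	#define AUX_DES_PKT   (0x0u << END_SEL_SHIFT)     /* descriptors and packets use ENDIANESS */
	#define AUX_LWLE_BBE  (0x0u << ENDIANESS_SHIFT)   /* LW little endian, byte big endian     */
	#define AUX_RMII      (0x1u << RGMII_RMII_SHIFT)  /* select RMII instead of RGMII          */

	int main(void)
	{
		unsigned int ctl_aux = AUX_DES_PKT | AUX_LWLE_BBE | AUX_RMII;

		printf("CTL_AUX value to program: %#x\n", ctl_aux);	/* prints 0x1 */
		return 0;
	}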
@@ -1011,6 +1053,7 @@ struct e1000_ffvt_entry {
1011 * in more current versions of the 8254x. Despite the difference in location, 1053 * in more current versions of the 8254x. Despite the difference in location,
1012 * the registers function in the same manner. 1054 * the registers function in the same manner.
1013 */ 1055 */
1056#define E1000_82542_CTL_AUX E1000_CTL_AUX
1014#define E1000_82542_CTRL E1000_CTRL 1057#define E1000_82542_CTRL E1000_CTRL
1015#define E1000_82542_CTRL_DUP E1000_CTRL_DUP 1058#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
1016#define E1000_82542_STATUS E1000_STATUS 1059#define E1000_82542_STATUS E1000_STATUS
@@ -1571,6 +1614,11 @@ struct e1000_hw {
1571#define E1000_MDIC_INT_EN 0x20000000 1614#define E1000_MDIC_INT_EN 0x20000000
1572#define E1000_MDIC_ERROR 0x40000000 1615#define E1000_MDIC_ERROR 0x40000000
1573 1616
1617#define INTEL_CE_GBE_MDIC_OP_WRITE 0x04000000
1618#define INTEL_CE_GBE_MDIC_OP_READ 0x00000000
1619#define INTEL_CE_GBE_MDIC_GO 0x80000000
1620#define INTEL_CE_GBE_MDIC_READ_ERROR 0x80000000
1621
1574#define E1000_KUMCTRLSTA_MASK 0x0000FFFF 1622#define E1000_KUMCTRLSTA_MASK 0x0000FFFF
1575#define E1000_KUMCTRLSTA_OFFSET 0x001F0000 1623#define E1000_KUMCTRLSTA_OFFSET 0x001F0000
1576#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16 1624#define E1000_KUMCTRLSTA_OFFSET_SHIFT 16
@@ -2871,6 +2919,11 @@ struct e1000_host_command_info {
2871#define M88E1111_I_PHY_ID 0x01410CC0 2919#define M88E1111_I_PHY_ID 0x01410CC0
2872#define L1LXT971A_PHY_ID 0x001378E0 2920#define L1LXT971A_PHY_ID 0x001378E0
2873 2921
2922#define RTL8211B_PHY_ID 0x001CC910
2923#define RTL8201N_PHY_ID 0x8200
2924#define RTL_PHY_CTRL_FD 0x0100 /* Full duplex.0=half; 1=full */
2925#define RTL_PHY_CTRL_SPD_100 0x200000 /* Force 100Mb */
2926
2874/* Bits... 2927/* Bits...
2875 * 15-5: page 2928 * 15-5: page
2876 * 4-0: register offset 2929 * 4-0: register offset
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index e02c5d17af07..de69c54301c1 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -28,6 +28,12 @@
28 28
29#include "e1000.h" 29#include "e1000.h"
30#include <net/ip6_checksum.h> 30#include <net/ip6_checksum.h>
31#include <linux/io.h>
32
33/* Intel Media SOC GbE MDIO physical base address */
34static unsigned long ce4100_gbe_mdio_base_phy;
35/* Intel Media SOC GbE MDIO virtual base address */
36void __iomem *ce4100_gbe_mdio_base_virt;
31 37
32char e1000_driver_name[] = "e1000"; 38char e1000_driver_name[] = "e1000";
33static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver"; 39static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
@@ -79,6 +85,7 @@ static DEFINE_PCI_DEVICE_TABLE(e1000_pci_tbl) = {
79 INTEL_E1000_ETHERNET_DEVICE(0x108A), 85 INTEL_E1000_ETHERNET_DEVICE(0x108A),
80 INTEL_E1000_ETHERNET_DEVICE(0x1099), 86 INTEL_E1000_ETHERNET_DEVICE(0x1099),
81 INTEL_E1000_ETHERNET_DEVICE(0x10B5), 87 INTEL_E1000_ETHERNET_DEVICE(0x10B5),
88 INTEL_E1000_ETHERNET_DEVICE(0x2E6E),
82 /* required last entry */ 89 /* required last entry */
83 {0,} 90 {0,}
84}; 91};
@@ -459,6 +466,7 @@ static void e1000_power_down_phy(struct e1000_adapter *adapter)
459 case e1000_82545: 466 case e1000_82545:
460 case e1000_82545_rev_3: 467 case e1000_82545_rev_3:
461 case e1000_82546: 468 case e1000_82546:
469 case e1000_ce4100:
462 case e1000_82546_rev_3: 470 case e1000_82546_rev_3:
463 case e1000_82541: 471 case e1000_82541:
464 case e1000_82541_rev_2: 472 case e1000_82541_rev_2:
@@ -573,6 +581,7 @@ void e1000_reset(struct e1000_adapter *adapter)
573 case e1000_82545: 581 case e1000_82545:
574 case e1000_82545_rev_3: 582 case e1000_82545_rev_3:
575 case e1000_82546: 583 case e1000_82546:
584 case e1000_ce4100:
576 case e1000_82546_rev_3: 585 case e1000_82546_rev_3:
577 pba = E1000_PBA_48K; 586 pba = E1000_PBA_48K;
578 break; 587 break;
@@ -894,6 +903,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
894 static int global_quad_port_a = 0; /* global ksp3 port a indication */ 903 static int global_quad_port_a = 0; /* global ksp3 port a indication */
895 int i, err, pci_using_dac; 904 int i, err, pci_using_dac;
896 u16 eeprom_data = 0; 905 u16 eeprom_data = 0;
906 u16 tmp = 0;
897 u16 eeprom_apme_mask = E1000_EEPROM_APME; 907 u16 eeprom_apme_mask = E1000_EEPROM_APME;
898 int bars, need_ioport; 908 int bars, need_ioport;
899 909
@@ -971,11 +981,13 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
971 */ 981 */
972 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); 982 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
973 pci_using_dac = 1; 983 pci_using_dac = 1;
974 } else if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
975 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
976 } else { 984 } else {
977 pr_err("No usable DMA config, aborting\n"); 985 err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
978 goto err_dma; 986 if (err) {
987 pr_err("No usable DMA config, aborting\n");
988 goto err_dma;
989 }
990 dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
979 } 991 }
980 992
981 netdev->netdev_ops = &e1000_netdev_ops; 993 netdev->netdev_ops = &e1000_netdev_ops;
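Aside: the probe change above makes the 32-bit mask an explicit fallback when 64-bit DMA addressing is refused, and only aborts when neither works. A hedged sketch of that try-64-then-32 pattern as a helper (my_set_dma_mask is a made-up name; the dma_set_mask/dma_set_coherent_mask calls themselves are the standard kernel API):

	#include <linux/dma-mapping.h>
	#include <linux/pci.h>

	/* Returns 0 and sets *using_dac when a usable DMA mask was configured. */
	static int my_set_dma_mask(struct pci_dev *pdev, int *using_dac)
	{
		int err;

		if (!dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) {
			dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
			*using_dac = 1;			/* 64-bit addressing available    */
			return 0;
		}

		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
		if (err)
			return err;			/* no usable DMA configuration    */

		dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
		*using_dac = 0;				/* fall back to 32-bit addressing */
		return 0;
	}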
@@ -994,6 +1006,14 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
994 goto err_sw_init; 1006 goto err_sw_init;
995 1007
996 err = -EIO; 1008 err = -EIO;
1009 if (hw->mac_type == e1000_ce4100) {
1010 ce4100_gbe_mdio_base_phy = pci_resource_start(pdev, BAR_1);
1011 ce4100_gbe_mdio_base_virt = ioremap(ce4100_gbe_mdio_base_phy,
1012 pci_resource_len(pdev, BAR_1));
1013
1014 if (!ce4100_gbe_mdio_base_virt)
1015 goto err_mdio_ioremap;
1016 }
997 1017
998 if (hw->mac_type >= e1000_82543) { 1018 if (hw->mac_type >= e1000_82543) {
999 netdev->features = NETIF_F_SG | 1019 netdev->features = NETIF_F_SG |
@@ -1133,6 +1153,20 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
1133 adapter->wol = adapter->eeprom_wol; 1153 adapter->wol = adapter->eeprom_wol;
1134 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1154 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1135 1155
1156 /* Auto detect PHY address */
1157 if (hw->mac_type == e1000_ce4100) {
1158 for (i = 0; i < 32; i++) {
1159 hw->phy_addr = i;
1160 e1000_read_phy_reg(hw, PHY_ID2, &tmp);
1161 if (tmp == 0 || tmp == 0xFF) {
1162 if (i == 31)
1163 goto err_eeprom;
1164 continue;
1165 } else
1166 break;
1167 }
1168 }
1169
1136 /* reset the hardware with the new settings */ 1170 /* reset the hardware with the new settings */
1137 e1000_reset(adapter); 1171 e1000_reset(adapter);
1138 1172
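Aside: the CE4100 path above probes MDIO addresses 0-31 and accepts the first address whose PHY ID2 register reads as something other than all-zeros or all-ones (a floating bus). A standalone sketch of that scan; my_read_phy_id2() is a stand-in for the real MDIO register read, and the "PHY answers at address 5" behaviour is invented:

	#include <stdio.h>

	/* Stand-in for an MDIO read of the PHY ID2 register at 'addr'. */
	static unsigned int my_read_phy_id2(unsigned int addr)
	{
		return (addr == 5) ? 0xC910 : 0xFFFF;	/* pretend a PHY answers at 5 */
	}

	int main(void)
	{
		unsigned int addr, id = 0xFFFF;
		int found = -1;

		for (addr = 0; addr < 32; addr++) {
			id = my_read_phy_id2(addr);
			if (id == 0x0000 || id == 0xFFFF)	/* bus floats: no PHY here */
				continue;
			found = (int)addr;
			break;
		}

		if (found < 0)
			printf("no PHY found on the MDIO bus\n");
		else
			printf("PHY at address %d, ID2 = 0x%04X\n", found, id);
		return 0;
	}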
@@ -1169,6 +1203,8 @@ err_eeprom:
1169 kfree(adapter->rx_ring); 1203 kfree(adapter->rx_ring);
1170err_dma: 1204err_dma:
1171err_sw_init: 1205err_sw_init:
1206err_mdio_ioremap:
1207 iounmap(ce4100_gbe_mdio_base_virt);
1172 iounmap(hw->hw_addr); 1208 iounmap(hw->hw_addr);
1173err_ioremap: 1209err_ioremap:
1174 free_netdev(netdev); 1210 free_netdev(netdev);
@@ -1407,6 +1443,7 @@ static bool e1000_check_64k_bound(struct e1000_adapter *adapter, void *start,
1407 /* First rev 82545 and 82546 need to not allow any memory 1443 /* First rev 82545 and 82546 need to not allow any memory
1408 * write location to cross 64k boundary due to errata 23 */ 1444 * write location to cross 64k boundary due to errata 23 */
1409 if (hw->mac_type == e1000_82545 || 1445 if (hw->mac_type == e1000_82545 ||
1446 hw->mac_type == e1000_ce4100 ||
1410 hw->mac_type == e1000_82546) { 1447 hw->mac_type == e1000_82546) {
1411 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true; 1448 return ((begin ^ (end - 1)) >> 16) != 0 ? false : true;
1412 } 1449 }
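Aside: the errata-23 check above relies on an XOR trick: if the first and last byte addresses of a buffer differ anywhere at or above bit 16, the buffer spans two 64 KB regions. A standalone demonstration with made-up addresses:

	#include <stdio.h>

	/* Returns 1 when [begin, begin + len) stays inside one 64 KB region. */
	static int within_64k(unsigned long begin, unsigned long len)
	{
		unsigned long end = begin + len;

		/* XOR of the first and last byte address: any bit >= 16 set
		 * means the two addresses live in different 64 KB regions. */
		return (((begin ^ (end - 1)) >> 16) == 0);
	}

	int main(void)
	{
		printf("%d\n", within_64k(0x12340000UL, 0x1000UL));	/* 1: fits in one region */
		printf("%d\n", within_64k(0x1234F800UL, 0x1000UL));	/* 0: crosses a boundary */
		return 0;
	}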
@@ -1429,13 +1466,12 @@ static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
1429 int size; 1466 int size;
1430 1467
1431 size = sizeof(struct e1000_buffer) * txdr->count; 1468 size = sizeof(struct e1000_buffer) * txdr->count;
1432 txdr->buffer_info = vmalloc(size); 1469 txdr->buffer_info = vzalloc(size);
1433 if (!txdr->buffer_info) { 1470 if (!txdr->buffer_info) {
1434 e_err(probe, "Unable to allocate memory for the Tx descriptor " 1471 e_err(probe, "Unable to allocate memory for the Tx descriptor "
1435 "ring\n"); 1472 "ring\n");
1436 return -ENOMEM; 1473 return -ENOMEM;
1437 } 1474 }
1438 memset(txdr->buffer_info, 0, size);
1439 1475
1440 /* round up to nearest 4K */ 1476 /* round up to nearest 4K */
1441 1477
@@ -1625,13 +1661,12 @@ static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
1625 int size, desc_len; 1661 int size, desc_len;
1626 1662
1627 size = sizeof(struct e1000_buffer) * rxdr->count; 1663 size = sizeof(struct e1000_buffer) * rxdr->count;
1628 rxdr->buffer_info = vmalloc(size); 1664 rxdr->buffer_info = vzalloc(size);
1629 if (!rxdr->buffer_info) { 1665 if (!rxdr->buffer_info) {
1630 e_err(probe, "Unable to allocate memory for the Rx descriptor " 1666 e_err(probe, "Unable to allocate memory for the Rx descriptor "
1631 "ring\n"); 1667 "ring\n");
1632 return -ENOMEM; 1668 return -ENOMEM;
1633 } 1669 }
1634 memset(rxdr->buffer_info, 0, size);
1635 1670
1636 desc_len = sizeof(struct e1000_rx_desc); 1671 desc_len = sizeof(struct e1000_rx_desc);
1637 1672
@@ -2726,7 +2761,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter,
2726 break; 2761 break;
2727 } 2762 }
2728 2763
2729 css = skb_transport_offset(skb); 2764 css = skb_checksum_start_offset(skb);
2730 2765
2731 i = tx_ring->next_to_use; 2766 i = tx_ring->next_to_use;
2732 buffer_info = &tx_ring->buffer_info[i]; 2767 buffer_info = &tx_ring->buffer_info[i];
diff --git a/drivers/net/e1000/e1000_osdep.h b/drivers/net/e1000/e1000_osdep.h
index edd1c75aa895..55c1711f1688 100644
--- a/drivers/net/e1000/e1000_osdep.h
+++ b/drivers/net/e1000/e1000_osdep.h
@@ -34,12 +34,21 @@
34#ifndef _E1000_OSDEP_H_ 34#ifndef _E1000_OSDEP_H_
35#define _E1000_OSDEP_H_ 35#define _E1000_OSDEP_H_
36 36
37#include <linux/types.h>
38#include <linux/pci.h>
39#include <linux/delay.h>
40#include <asm/io.h> 37#include <asm/io.h>
41#include <linux/interrupt.h> 38
42#include <linux/sched.h> 39#define CONFIG_RAM_BASE 0x60000
40#define GBE_CONFIG_OFFSET 0x0
41
42#define GBE_CONFIG_RAM_BASE \
43 ((unsigned int)(CONFIG_RAM_BASE + GBE_CONFIG_OFFSET))
44
45#define GBE_CONFIG_BASE_VIRT phys_to_virt(GBE_CONFIG_RAM_BASE)
46
47#define GBE_CONFIG_FLASH_WRITE(base, offset, count, data) \
48 (iowrite16_rep(base + offset, data, count))
49
50#define GBE_CONFIG_FLASH_READ(base, offset, count, data) \
51 (ioread16_rep(base + (offset << 1), data, count))
43 52
44#define er32(reg) \ 53#define er32(reg) \
45 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \ 54 (readl(hw->hw_addr + ((hw->mac_type >= e1000_82543) \
diff --git a/drivers/net/e1000/e1000_param.c b/drivers/net/e1000/e1000_param.c
index 10d8d98bb797..1301eba8b57a 100644
--- a/drivers/net/e1000/e1000_param.c
+++ b/drivers/net/e1000/e1000_param.c
@@ -352,12 +352,13 @@ void __devinit e1000_check_options(struct e1000_adapter *adapter)
352 } 352 }
353 { /* Flow Control */ 353 { /* Flow Control */
354 354
355 struct e1000_opt_list fc_list[] = 355 static const struct e1000_opt_list fc_list[] = {
356 {{ E1000_FC_NONE, "Flow Control Disabled" }, 356 { E1000_FC_NONE, "Flow Control Disabled" },
357 { E1000_FC_RX_PAUSE,"Flow Control Receive Only" }, 357 { E1000_FC_RX_PAUSE, "Flow Control Receive Only" },
358 { E1000_FC_TX_PAUSE,"Flow Control Transmit Only" }, 358 { E1000_FC_TX_PAUSE, "Flow Control Transmit Only" },
359 { E1000_FC_FULL, "Flow Control Enabled" }, 359 { E1000_FC_FULL, "Flow Control Enabled" },
360 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }}; 360 { E1000_FC_DEFAULT, "Flow Control Hardware Default" }
361 };
361 362
362 opt = (struct e1000_option) { 363 opt = (struct e1000_option) {
363 .type = list_option, 364 .type = list_option,
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index a655beb69320..1397da118f0d 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -52,6 +52,7 @@
52 (ID_LED_DEF1_DEF2)) 52 (ID_LED_DEF1_DEF2))
53 53
54#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000 54#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
55#define AN_RETRY_COUNT 5 /* Autoneg Retry Count value */
55#define E1000_BASE1000T_STATUS 10 56#define E1000_BASE1000T_STATUS 10
56#define E1000_IDLE_ERROR_COUNT_MASK 0xFF 57#define E1000_IDLE_ERROR_COUNT_MASK 0xFF
57#define E1000_RECEIVE_ERROR_COUNTER 21 58#define E1000_RECEIVE_ERROR_COUNTER 21
@@ -74,6 +75,11 @@ static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
74static s32 e1000_led_on_82574(struct e1000_hw *hw); 75static s32 e1000_led_on_82574(struct e1000_hw *hw);
75static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw); 76static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
76static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw); 77static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
78static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
79static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
80static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
81static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
82static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
77 83
78/** 84/**
79 * e1000_init_phy_params_82571 - Init PHY func ptrs. 85 * e1000_init_phy_params_82571 - Init PHY func ptrs.
@@ -107,6 +113,10 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
107 case e1000_82574: 113 case e1000_82574:
108 case e1000_82583: 114 case e1000_82583:
109 phy->type = e1000_phy_bm; 115 phy->type = e1000_phy_bm;
116 phy->ops.acquire = e1000_get_hw_semaphore_82574;
117 phy->ops.release = e1000_put_hw_semaphore_82574;
118 phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
119 phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
110 break; 120 break;
111 default: 121 default:
112 return -E1000_ERR_PHY; 122 return -E1000_ERR_PHY;
@@ -115,29 +125,36 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
115 125
116 /* This can only be done after all function pointers are setup. */ 126 /* This can only be done after all function pointers are setup. */
117 ret_val = e1000_get_phy_id_82571(hw); 127 ret_val = e1000_get_phy_id_82571(hw);
128 if (ret_val) {
129 e_dbg("Error getting PHY ID\n");
130 return ret_val;
131 }
118 132
119 /* Verify phy id */ 133 /* Verify phy id */
120 switch (hw->mac.type) { 134 switch (hw->mac.type) {
121 case e1000_82571: 135 case e1000_82571:
122 case e1000_82572: 136 case e1000_82572:
123 if (phy->id != IGP01E1000_I_PHY_ID) 137 if (phy->id != IGP01E1000_I_PHY_ID)
124 return -E1000_ERR_PHY; 138 ret_val = -E1000_ERR_PHY;
125 break; 139 break;
126 case e1000_82573: 140 case e1000_82573:
127 if (phy->id != M88E1111_I_PHY_ID) 141 if (phy->id != M88E1111_I_PHY_ID)
128 return -E1000_ERR_PHY; 142 ret_val = -E1000_ERR_PHY;
129 break; 143 break;
130 case e1000_82574: 144 case e1000_82574:
131 case e1000_82583: 145 case e1000_82583:
132 if (phy->id != BME1000_E_PHY_ID_R2) 146 if (phy->id != BME1000_E_PHY_ID_R2)
133 return -E1000_ERR_PHY; 147 ret_val = -E1000_ERR_PHY;
134 break; 148 break;
135 default: 149 default:
136 return -E1000_ERR_PHY; 150 ret_val = -E1000_ERR_PHY;
137 break; 151 break;
138 } 152 }
139 153
140 return 0; 154 if (ret_val)
155 e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
156
157 return ret_val;
141} 158}
142 159
143/** 160/**
@@ -200,6 +217,17 @@ static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
200 break; 217 break;
201 } 218 }
202 219
220 /* Function Pointers */
221 switch (hw->mac.type) {
222 case e1000_82574:
223 case e1000_82583:
224 nvm->ops.acquire = e1000_get_hw_semaphore_82574;
225 nvm->ops.release = e1000_put_hw_semaphore_82574;
226 break;
227 default:
228 break;
229 }
230
203 return 0; 231 return 0;
204} 232}
205 233
@@ -542,6 +570,146 @@ static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
542 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI); 570 swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
543 ew32(SWSM, swsm); 571 ew32(SWSM, swsm);
544} 572}
573/**
574 * e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
575 * @hw: pointer to the HW structure
576 *
577 * Acquire the HW semaphore during reset.
578 *
579 **/
580static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
581{
582 u32 extcnf_ctrl;
583 s32 ret_val = 0;
584 s32 i = 0;
585
586 extcnf_ctrl = er32(EXTCNF_CTRL);
587 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
588 do {
589 ew32(EXTCNF_CTRL, extcnf_ctrl);
590 extcnf_ctrl = er32(EXTCNF_CTRL);
591
592 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
593 break;
594
595 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
596
597 msleep(2);
598 i++;
599 } while (i < MDIO_OWNERSHIP_TIMEOUT);
600
601 if (i == MDIO_OWNERSHIP_TIMEOUT) {
602 /* Release semaphores */
603 e1000_put_hw_semaphore_82573(hw);
604 e_dbg("Driver can't access the PHY\n");
605 ret_val = -E1000_ERR_PHY;
606 goto out;
607 }
608
609out:
610 return ret_val;
611}
612
613/**
614 * e1000_put_hw_semaphore_82573 - Release hardware semaphore
615 * @hw: pointer to the HW structure
616 *
617 * Release hardware semaphore used during reset.
618 *
619 **/
620static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
621{
622 u32 extcnf_ctrl;
623
624 extcnf_ctrl = er32(EXTCNF_CTRL);
625 extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
626 ew32(EXTCNF_CTRL, extcnf_ctrl);
627}
628
629static DEFINE_MUTEX(swflag_mutex);
630
631/**
632 * e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
633 * @hw: pointer to the HW structure
634 *
635 * Acquire the HW semaphore to access the PHY or NVM.
636 *
637 **/
638static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
639{
640 s32 ret_val;
641
642 mutex_lock(&swflag_mutex);
643 ret_val = e1000_get_hw_semaphore_82573(hw);
644 if (ret_val)
645 mutex_unlock(&swflag_mutex);
646 return ret_val;
647}
648
649/**
650 * e1000_put_hw_semaphore_82574 - Release hardware semaphore
651 * @hw: pointer to the HW structure
652 *
653 * Release hardware semaphore used to access the PHY or NVM
654 *
655 **/
656static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
657{
658 e1000_put_hw_semaphore_82573(hw);
659 mutex_unlock(&swflag_mutex);
660}
661
662/**
663 * e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
664 * @hw: pointer to the HW structure
665 * @active: true to enable LPLU, false to disable
666 *
667 * Sets the LPLU D0 state according to the active flag.
668 * LPLU will not be activated unless the
669 * device autonegotiation advertisement meets standards of
670 * either 10 or 10/100 or 10/100/1000 at all duplexes.
671 * This is a function pointer entry point only called by
672 * PHY setup routines.
673 **/
674static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
675{
676 u16 data = er32(POEMB);
677
678 if (active)
679 data |= E1000_PHY_CTRL_D0A_LPLU;
680 else
681 data &= ~E1000_PHY_CTRL_D0A_LPLU;
682
683 ew32(POEMB, data);
684 return 0;
685}
686
687/**
688 * e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
689 * @hw: pointer to the HW structure
690 * @active: boolean used to enable/disable lplu
691 *
692 * The low power link up (lplu) state is set to the power management level D3
693 * when active is true, else clear lplu for D3. LPLU
694 * is used during Dx states where the power conservation is most important.
695 * During driver activity, SmartSpeed should be enabled so performance is
696 * maintained.
697 **/
698static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
699{
700 u16 data = er32(POEMB);
701
702 if (!active) {
703 data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
704 } else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
705 (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
706 (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
707 data |= E1000_PHY_CTRL_NOND0A_LPLU;
708 }
709
710 ew32(POEMB, data);
711 return 0;
712}
545 713
546/** 714/**
547 * e1000_acquire_nvm_82571 - Request for access to the EEPROM 715 * e1000_acquire_nvm_82571 - Request for access to the EEPROM
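Aside: the 82574 acquire/release pair added above layers a kernel mutex over the hardware ownership flag, so only one kernel context at a time even attempts the register handshake, and the mutex is dropped again if the handshake fails. A minimal sketch of that wrapping pattern; my_hw_handshake() and my_hw_release() are hypothetical stand-ins for the EXTCNF_CTRL ownership dance, stubbed out here so the snippet is self-contained:

	#include <linux/mutex.h>

	static DEFINE_MUTEX(my_hw_lock);

	/* Stubs standing in for the low-level hardware ownership handshake. */
	static int my_hw_handshake(void) { return 0; }	/* pretend we always win the flag */
	static void my_hw_release(void) { }

	static int my_acquire(void)
	{
		int ret;

		mutex_lock(&my_hw_lock);		/* serialize software contexts first */
		ret = my_hw_handshake();		/* then try to win the hardware flag */
		if (ret)
			mutex_unlock(&my_hw_lock);	/* failed: do not keep the mutex     */
		return ret;
	}

	static void my_release(void)
	{
		my_hw_release();			/* hand the flag back to hw/firmware */
		mutex_unlock(&my_hw_lock);		/* then let the next context try     */
	}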
@@ -562,8 +730,6 @@ static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
562 730
563 switch (hw->mac.type) { 731 switch (hw->mac.type) {
564 case e1000_82573: 732 case e1000_82573:
565 case e1000_82574:
566 case e1000_82583:
567 break; 733 break;
568 default: 734 default:
569 ret_val = e1000e_acquire_nvm(hw); 735 ret_val = e1000e_acquire_nvm(hw);
@@ -853,9 +1019,8 @@ static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
853 **/ 1019 **/
854static s32 e1000_reset_hw_82571(struct e1000_hw *hw) 1020static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
855{ 1021{
856 u32 ctrl, extcnf_ctrl, ctrl_ext, icr; 1022 u32 ctrl, ctrl_ext;
857 s32 ret_val; 1023 s32 ret_val;
858 u16 i = 0;
859 1024
860 /* 1025 /*
861 * Prevent the PCI-E bus from sticking if there is no TLP connection 1026 * Prevent the PCI-E bus from sticking if there is no TLP connection
@@ -880,33 +1045,33 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
880 */ 1045 */
881 switch (hw->mac.type) { 1046 switch (hw->mac.type) {
882 case e1000_82573: 1047 case e1000_82573:
1048 ret_val = e1000_get_hw_semaphore_82573(hw);
1049 break;
883 case e1000_82574: 1050 case e1000_82574:
884 case e1000_82583: 1051 case e1000_82583:
885 extcnf_ctrl = er32(EXTCNF_CTRL); 1052 ret_val = e1000_get_hw_semaphore_82574(hw);
886 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
887
888 do {
889 ew32(EXTCNF_CTRL, extcnf_ctrl);
890 extcnf_ctrl = er32(EXTCNF_CTRL);
891
892 if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
893 break;
894
895 extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
896
897 msleep(2);
898 i++;
899 } while (i < MDIO_OWNERSHIP_TIMEOUT);
900 break; 1053 break;
901 default: 1054 default:
902 break; 1055 break;
903 } 1056 }
1057 if (ret_val)
1058 e_dbg("Cannot acquire MDIO ownership\n");
904 1059
905 ctrl = er32(CTRL); 1060 ctrl = er32(CTRL);
906 1061
907 e_dbg("Issuing a global reset to MAC\n"); 1062 e_dbg("Issuing a global reset to MAC\n");
908 ew32(CTRL, ctrl | E1000_CTRL_RST); 1063 ew32(CTRL, ctrl | E1000_CTRL_RST);
909 1064
1065 /* Must release MDIO ownership and mutex after MAC reset. */
1066 switch (hw->mac.type) {
1067 case e1000_82574:
1068 case e1000_82583:
1069 e1000_put_hw_semaphore_82574(hw);
1070 break;
1071 default:
1072 break;
1073 }
1074
910 if (hw->nvm.type == e1000_nvm_flash_hw) { 1075 if (hw->nvm.type == e1000_nvm_flash_hw) {
911 udelay(10); 1076 udelay(10);
912 ctrl_ext = er32(CTRL_EXT); 1077 ctrl_ext = er32(CTRL_EXT);
@@ -938,7 +1103,7 @@ static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
938 1103
939 /* Clear any pending interrupt events. */ 1104 /* Clear any pending interrupt events. */
940 ew32(IMC, 0xffffffff); 1105 ew32(IMC, 0xffffffff);
941 icr = er32(ICR); 1106 er32(ICR);
942 1107
943 if (hw->mac.type == e1000_82571) { 1108 if (hw->mac.type == e1000_82571) {
944 /* Install any alternate MAC address into RAR0 */ 1109 /* Install any alternate MAC address into RAR0 */
@@ -1402,6 +1567,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1402 u32 rxcw; 1567 u32 rxcw;
1403 u32 ctrl; 1568 u32 ctrl;
1404 u32 status; 1569 u32 status;
1570 u32 txcw;
1571 u32 i;
1405 s32 ret_val = 0; 1572 s32 ret_val = 0;
1406 1573
1407 ctrl = er32(CTRL); 1574 ctrl = er32(CTRL);
@@ -1422,8 +1589,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1422 e1000_serdes_link_autoneg_progress; 1589 e1000_serdes_link_autoneg_progress;
1423 mac->serdes_has_link = false; 1590 mac->serdes_has_link = false;
1424 e_dbg("AN_UP -> AN_PROG\n"); 1591 e_dbg("AN_UP -> AN_PROG\n");
1592 } else {
1593 mac->serdes_has_link = true;
1425 } 1594 }
1426 break; 1595 break;
1427 1596
1428 case e1000_serdes_link_forced_up: 1597 case e1000_serdes_link_forced_up:
1429 /* 1598 /*
@@ -1431,8 +1600,10 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1431 * auto-negotiation in the TXCW register and disable 1600 * auto-negotiation in the TXCW register and disable
1432 * forced link in the Device Control register in an 1601 * forced link in the Device Control register in an
1433 * attempt to auto-negotiate with our link partner. 1602 * attempt to auto-negotiate with our link partner.
1603 * If the partner code word is null, stop forcing
1604 * and restart auto-negotiation.
1434 */ 1605 */
1435 if (rxcw & E1000_RXCW_C) { 1606 if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
1436 /* Enable autoneg, and unforce link up */ 1607 /* Enable autoneg, and unforce link up */
1437 ew32(TXCW, mac->txcw); 1608 ew32(TXCW, mac->txcw);
1438 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 1609 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
@@ -1440,6 +1611,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1440 e1000_serdes_link_autoneg_progress; 1611 e1000_serdes_link_autoneg_progress;
1441 mac->serdes_has_link = false; 1612 mac->serdes_has_link = false;
1442 e_dbg("FORCED_UP -> AN_PROG\n"); 1613 e_dbg("FORCED_UP -> AN_PROG\n");
1614 } else {
1615 mac->serdes_has_link = true;
1443 } 1616 }
1444 break; 1617 break;
1445 1618
@@ -1495,6 +1668,7 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1495 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU)); 1668 ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
1496 mac->serdes_link_state = 1669 mac->serdes_link_state =
1497 e1000_serdes_link_autoneg_progress; 1670 e1000_serdes_link_autoneg_progress;
1671 mac->serdes_has_link = false;
1498 e_dbg("DOWN -> AN_PROG\n"); 1672 e_dbg("DOWN -> AN_PROG\n");
1499 break; 1673 break;
1500 } 1674 }
@@ -1505,16 +1679,32 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
1505 e_dbg("ANYSTATE -> DOWN\n"); 1679 e_dbg("ANYSTATE -> DOWN\n");
1506 } else { 1680 } else {
1507 /* 1681 /*
1508 * We have sync, and can tolerate one invalid (IV) 1682 * Check several times; if Sync and Config
1509 * codeword before declaring link down, so reread 1683 * both are consistently 1, then simply ignore
1510 * to look again. 1684 * the Invalid bit and restart Autoneg
1511 */ 1685 */
1512 udelay(10); 1686 for (i = 0; i < AN_RETRY_COUNT; i++) {
1513 rxcw = er32(RXCW); 1687 udelay(10);
1514 if (rxcw & E1000_RXCW_IV) { 1688 rxcw = er32(RXCW);
1515 mac->serdes_link_state = e1000_serdes_link_down; 1689 if ((rxcw & E1000_RXCW_IV) &&
1690 !((rxcw & E1000_RXCW_SYNCH) &&
1691 (rxcw & E1000_RXCW_C))) {
1692 mac->serdes_has_link = false;
1693 mac->serdes_link_state =
1694 e1000_serdes_link_down;
1695 e_dbg("ANYSTATE -> DOWN\n");
1696 break;
1697 }
1698 }
1699
1700 if (i == AN_RETRY_COUNT) {
1701 txcw = er32(TXCW);
1702 txcw |= E1000_TXCW_ANE;
1703 ew32(TXCW, txcw);
1704 mac->serdes_link_state =
1705 e1000_serdes_link_autoneg_progress;
1516 mac->serdes_has_link = false; 1706 mac->serdes_has_link = false;
1517 e_dbg("ANYSTATE -> DOWN\n"); 1707 e_dbg("ANYSTATE -> AN_PROG\n");
1518 } 1708 }
1519 } 1709 }
1520 } 1710 }
@@ -1897,7 +2087,7 @@ struct e1000_info e1000_82574_info = {
1897 | FLAG_HAS_AMT 2087 | FLAG_HAS_AMT
1898 | FLAG_HAS_CTRLEXT_ON_LOAD, 2088 | FLAG_HAS_CTRLEXT_ON_LOAD,
1899 .flags2 = FLAG2_CHECK_PHY_HANG, 2089 .flags2 = FLAG2_CHECK_PHY_HANG,
1900 .pba = 36, 2090 .pba = 32,
1901 .max_hw_frame_size = DEFAULT_JUMBO, 2091 .max_hw_frame_size = DEFAULT_JUMBO,
1902 .get_variants = e1000_get_variants_82571, 2092 .get_variants = e1000_get_variants_82571,
1903 .mac_ops = &e82571_mac_ops, 2093 .mac_ops = &e82571_mac_ops,
@@ -1914,7 +2104,7 @@ struct e1000_info e1000_82583_info = {
1914 | FLAG_HAS_SMART_POWER_DOWN 2104 | FLAG_HAS_SMART_POWER_DOWN
1915 | FLAG_HAS_AMT 2105 | FLAG_HAS_AMT
1916 | FLAG_HAS_CTRLEXT_ON_LOAD, 2106 | FLAG_HAS_CTRLEXT_ON_LOAD,
1917 .pba = 36, 2107 .pba = 32,
1918 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN, 2108 .max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN,
1919 .get_variants = e1000_get_variants_82571, 2109 .get_variants = e1000_get_variants_82571,
1920 .mac_ops = &e82571_mac_ops, 2110 .mac_ops = &e82571_mac_ops,
diff --git a/drivers/net/e1000e/defines.h b/drivers/net/e1000e/defines.h
index d3f7a9c3f973..7245dc2e0b7c 100644
--- a/drivers/net/e1000e/defines.h
+++ b/drivers/net/e1000e/defines.h
@@ -488,6 +488,9 @@
488#define E1000_BLK_PHY_RESET 12 488#define E1000_BLK_PHY_RESET 12
489#define E1000_ERR_SWFW_SYNC 13 489#define E1000_ERR_SWFW_SYNC 13
490#define E1000_NOT_IMPLEMENTED 14 490#define E1000_NOT_IMPLEMENTED 14
491#define E1000_ERR_INVALID_ARGUMENT 16
492#define E1000_ERR_NO_SPACE 17
493#define E1000_ERR_NVM_PBA_SECTION 18
491 494
492/* Loop limit on how long we wait for auto-negotiation to complete */ 495/* Loop limit on how long we wait for auto-negotiation to complete */
493#define FIBER_LINK_UP_LIMIT 50 496#define FIBER_LINK_UP_LIMIT 50
@@ -516,6 +519,7 @@
516#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */ 519#define E1000_TXCW_ANE 0x80000000 /* Auto-neg enable */
517 520
518/* Receive Configuration Word */ 521/* Receive Configuration Word */
522#define E1000_RXCW_CW 0x0000ffff /* RxConfigWord mask */
519#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */ 523#define E1000_RXCW_IV 0x08000000 /* Receive config invalid */
520#define E1000_RXCW_C 0x20000000 /* Receive config */ 524#define E1000_RXCW_C 0x20000000 /* Receive config */
521#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */ 525#define E1000_RXCW_SYNCH 0x40000000 /* Receive config synch */
@@ -649,13 +653,16 @@
649/* Mask bits for fields in Word 0x03 of the EEPROM */ 653/* Mask bits for fields in Word 0x03 of the EEPROM */
650#define NVM_COMPAT_LOM 0x0800 654#define NVM_COMPAT_LOM 0x0800
651 655
656/* length of string needed to store PBA number */
657#define E1000_PBANUM_LENGTH 11
658
652/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ 659/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
653#define NVM_SUM 0xBABA 660#define NVM_SUM 0xBABA
654 661
655/* PBA (printed board assembly) number words */ 662/* PBA (printed board assembly) number words */
656#define NVM_PBA_OFFSET_0 8 663#define NVM_PBA_OFFSET_0 8
657#define NVM_PBA_OFFSET_1 9 664#define NVM_PBA_OFFSET_1 9
658 665#define NVM_PBA_PTR_GUARD 0xFAFA
659#define NVM_WORD_SIZE_BASE_SHIFT 6 666#define NVM_WORD_SIZE_BASE_SHIFT 6
660 667
661/* NVM Commands - SPI */ 668/* NVM Commands - SPI */
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index fdc67fead4ea..5255be753746 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -38,6 +38,7 @@
38#include <linux/netdevice.h> 38#include <linux/netdevice.h>
39#include <linux/pci.h> 39#include <linux/pci.h>
40#include <linux/pci-aspm.h> 40#include <linux/pci-aspm.h>
41#include <linux/crc32.h>
41 42
42#include "hw.h" 43#include "hw.h"
43 44
@@ -482,6 +483,7 @@ extern const char e1000e_driver_version[];
482 483
483extern void e1000e_check_options(struct e1000_adapter *adapter); 484extern void e1000e_check_options(struct e1000_adapter *adapter);
484extern void e1000e_set_ethtool_ops(struct net_device *netdev); 485extern void e1000e_set_ethtool_ops(struct net_device *netdev);
486extern void e1000e_led_blink_task(struct work_struct *work);
485 487
486extern int e1000e_up(struct e1000_adapter *adapter); 488extern int e1000e_up(struct e1000_adapter *adapter);
487extern void e1000e_down(struct e1000_adapter *adapter); 489extern void e1000e_down(struct e1000_adapter *adapter);
@@ -495,6 +497,8 @@ extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
495extern void e1000e_update_stats(struct e1000_adapter *adapter); 497extern void e1000e_update_stats(struct e1000_adapter *adapter);
496extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter); 498extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
497extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter); 499extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
500extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
501extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
498extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state); 502extern void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
499 503
500extern unsigned int copybreak; 504extern unsigned int copybreak;
@@ -513,7 +517,8 @@ extern struct e1000_info e1000_pch_info;
513extern struct e1000_info e1000_pch2_info; 517extern struct e1000_info e1000_pch2_info;
514extern struct e1000_info e1000_es2_info; 518extern struct e1000_info e1000_es2_info;
515 519
516extern s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num); 520extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
521 u32 pba_num_size);
517 522
518extern s32 e1000e_commit_phy(struct e1000_hw *hw); 523extern s32 e1000e_commit_phy(struct e1000_hw *hw);
519 524
diff --git a/drivers/net/e1000e/es2lan.c b/drivers/net/e1000e/es2lan.c
index 24f8ac9cf703..e45a61c8930a 100644
--- a/drivers/net/e1000e/es2lan.c
+++ b/drivers/net/e1000e/es2lan.c
@@ -100,8 +100,8 @@
100 * with a lower bound at "index" and the upper bound at 100 * with a lower bound at "index" and the upper bound at
101 * "index + 5". 101 * "index + 5".
102 */ 102 */
103static const u16 e1000_gg82563_cable_length_table[] = 103static const u16 e1000_gg82563_cable_length_table[] = {
104 { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF }; 104 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
105#define GG82563_CABLE_LENGTH_TABLE_SIZE \ 105#define GG82563_CABLE_LENGTH_TABLE_SIZE \
106 ARRAY_SIZE(e1000_gg82563_cable_length_table) 106 ARRAY_SIZE(e1000_gg82563_cable_length_table)
107 107
@@ -426,8 +426,8 @@ static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
426{ 426{
427 u32 swfw_sync; 427 u32 swfw_sync;
428 428
429 while (e1000e_get_hw_semaphore(hw) != 0); 429 while (e1000e_get_hw_semaphore(hw) != 0)
430 /* Empty */ 430 ; /* Empty */
431 431
432 swfw_sync = er32(SW_FW_SYNC); 432 swfw_sync = er32(SW_FW_SYNC);
433 swfw_sync &= ~mask; 433 swfw_sync &= ~mask;
@@ -784,7 +784,7 @@ static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
784 **/ 784 **/
785static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw) 785static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
786{ 786{
787 u32 ctrl, icr; 787 u32 ctrl;
788 s32 ret_val; 788 s32 ret_val;
789 789
790 /* 790 /*
@@ -818,7 +818,7 @@ static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
818 818
819 /* Clear any pending interrupt events. */ 819 /* Clear any pending interrupt events. */
820 ew32(IMC, 0xffffffff); 820 ew32(IMC, 0xffffffff);
821 icr = er32(ICR); 821 er32(ICR);
822 822
823 ret_val = e1000_check_alt_mac_addr_generic(hw); 823 ret_val = e1000_check_alt_mac_addr_generic(hw);
824 824
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 8984d165a39b..f8ed03dab9b1 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -45,63 +45,67 @@ struct e1000_stats {
45 int stat_offset; 45 int stat_offset;
46}; 46};
47 47
48#define E1000_STAT(m) E1000_STATS, \ 48#define E1000_STAT(str, m) { \
49 sizeof(((struct e1000_adapter *)0)->m), \ 49 .stat_string = str, \
50 offsetof(struct e1000_adapter, m) 50 .type = E1000_STATS, \
51#define E1000_NETDEV_STAT(m) NETDEV_STATS, \ 51 .sizeof_stat = sizeof(((struct e1000_adapter *)0)->m), \
52 sizeof(((struct net_device *)0)->m), \ 52 .stat_offset = offsetof(struct e1000_adapter, m) }
53 offsetof(struct net_device, m) 53#define E1000_NETDEV_STAT(str, m) { \
54 .stat_string = str, \
55 .type = NETDEV_STATS, \
56 .sizeof_stat = sizeof(((struct net_device *)0)->m), \
57 .stat_offset = offsetof(struct net_device, m) }
54 58
55static const struct e1000_stats e1000_gstrings_stats[] = { 59static const struct e1000_stats e1000_gstrings_stats[] = {
56 { "rx_packets", E1000_STAT(stats.gprc) }, 60 E1000_STAT("rx_packets", stats.gprc),
57 { "tx_packets", E1000_STAT(stats.gptc) }, 61 E1000_STAT("tx_packets", stats.gptc),
58 { "rx_bytes", E1000_STAT(stats.gorc) }, 62 E1000_STAT("rx_bytes", stats.gorc),
59 { "tx_bytes", E1000_STAT(stats.gotc) }, 63 E1000_STAT("tx_bytes", stats.gotc),
60 { "rx_broadcast", E1000_STAT(stats.bprc) }, 64 E1000_STAT("rx_broadcast", stats.bprc),
61 { "tx_broadcast", E1000_STAT(stats.bptc) }, 65 E1000_STAT("tx_broadcast", stats.bptc),
62 { "rx_multicast", E1000_STAT(stats.mprc) }, 66 E1000_STAT("rx_multicast", stats.mprc),
63 { "tx_multicast", E1000_STAT(stats.mptc) }, 67 E1000_STAT("tx_multicast", stats.mptc),
64 { "rx_errors", E1000_NETDEV_STAT(stats.rx_errors) }, 68 E1000_NETDEV_STAT("rx_errors", stats.rx_errors),
65 { "tx_errors", E1000_NETDEV_STAT(stats.tx_errors) }, 69 E1000_NETDEV_STAT("tx_errors", stats.tx_errors),
66 { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) }, 70 E1000_NETDEV_STAT("tx_dropped", stats.tx_dropped),
67 { "multicast", E1000_STAT(stats.mprc) }, 71 E1000_STAT("multicast", stats.mprc),
68 { "collisions", E1000_STAT(stats.colc) }, 72 E1000_STAT("collisions", stats.colc),
69 { "rx_length_errors", E1000_NETDEV_STAT(stats.rx_length_errors) }, 73 E1000_NETDEV_STAT("rx_length_errors", stats.rx_length_errors),
70 { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) }, 74 E1000_NETDEV_STAT("rx_over_errors", stats.rx_over_errors),
71 { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, 75 E1000_STAT("rx_crc_errors", stats.crcerrs),
72 { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) }, 76 E1000_NETDEV_STAT("rx_frame_errors", stats.rx_frame_errors),
73 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, 77 E1000_STAT("rx_no_buffer_count", stats.rnbc),
74 { "rx_missed_errors", E1000_STAT(stats.mpc) }, 78 E1000_STAT("rx_missed_errors", stats.mpc),
75 { "tx_aborted_errors", E1000_STAT(stats.ecol) }, 79 E1000_STAT("tx_aborted_errors", stats.ecol),
76 { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, 80 E1000_STAT("tx_carrier_errors", stats.tncrs),
77 { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) }, 81 E1000_NETDEV_STAT("tx_fifo_errors", stats.tx_fifo_errors),
78 { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) }, 82 E1000_NETDEV_STAT("tx_heartbeat_errors", stats.tx_heartbeat_errors),
79 { "tx_window_errors", E1000_STAT(stats.latecol) }, 83 E1000_STAT("tx_window_errors", stats.latecol),
80 { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, 84 E1000_STAT("tx_abort_late_coll", stats.latecol),
81 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 85 E1000_STAT("tx_deferred_ok", stats.dc),
82 { "tx_single_coll_ok", E1000_STAT(stats.scc) }, 86 E1000_STAT("tx_single_coll_ok", stats.scc),
83 { "tx_multi_coll_ok", E1000_STAT(stats.mcc) }, 87 E1000_STAT("tx_multi_coll_ok", stats.mcc),
84 { "tx_timeout_count", E1000_STAT(tx_timeout_count) }, 88 E1000_STAT("tx_timeout_count", tx_timeout_count),
85 { "tx_restart_queue", E1000_STAT(restart_queue) }, 89 E1000_STAT("tx_restart_queue", restart_queue),
86 { "rx_long_length_errors", E1000_STAT(stats.roc) }, 90 E1000_STAT("rx_long_length_errors", stats.roc),
87 { "rx_short_length_errors", E1000_STAT(stats.ruc) }, 91 E1000_STAT("rx_short_length_errors", stats.ruc),
88 { "rx_align_errors", E1000_STAT(stats.algnerrc) }, 92 E1000_STAT("rx_align_errors", stats.algnerrc),
89 { "tx_tcp_seg_good", E1000_STAT(stats.tsctc) }, 93 E1000_STAT("tx_tcp_seg_good", stats.tsctc),
90 { "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) }, 94 E1000_STAT("tx_tcp_seg_failed", stats.tsctfc),
91 { "rx_flow_control_xon", E1000_STAT(stats.xonrxc) }, 95 E1000_STAT("rx_flow_control_xon", stats.xonrxc),
92 { "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) }, 96 E1000_STAT("rx_flow_control_xoff", stats.xoffrxc),
93 { "tx_flow_control_xon", E1000_STAT(stats.xontxc) }, 97 E1000_STAT("tx_flow_control_xon", stats.xontxc),
94 { "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) }, 98 E1000_STAT("tx_flow_control_xoff", stats.xofftxc),
95 { "rx_long_byte_count", E1000_STAT(stats.gorc) }, 99 E1000_STAT("rx_long_byte_count", stats.gorc),
96 { "rx_csum_offload_good", E1000_STAT(hw_csum_good) }, 100 E1000_STAT("rx_csum_offload_good", hw_csum_good),
97 { "rx_csum_offload_errors", E1000_STAT(hw_csum_err) }, 101 E1000_STAT("rx_csum_offload_errors", hw_csum_err),
98 { "rx_header_split", E1000_STAT(rx_hdr_split) }, 102 E1000_STAT("rx_header_split", rx_hdr_split),
99 { "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) }, 103 E1000_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed),
100 { "tx_smbus", E1000_STAT(stats.mgptc) }, 104 E1000_STAT("tx_smbus", stats.mgptc),
101 { "rx_smbus", E1000_STAT(stats.mgprc) }, 105 E1000_STAT("rx_smbus", stats.mgprc),
102 { "dropped_smbus", E1000_STAT(stats.mgpdc) }, 106 E1000_STAT("dropped_smbus", stats.mgpdc),
103 { "rx_dma_failed", E1000_STAT(rx_dma_failed) }, 107 E1000_STAT("rx_dma_failed", rx_dma_failed),
104 { "tx_dma_failed", E1000_STAT(tx_dma_failed) }, 108 E1000_STAT("tx_dma_failed", tx_dma_failed),
105}; 109};
106 110
107#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats) 111#define E1000_GLOBAL_STATS_LEN ARRAY_SIZE(e1000_gstrings_stats)
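For reference, each table entry above now expands into a designated initializer; for example (expansion shown for illustration only):

	/* E1000_STAT("rx_packets", stats.gprc) expands to: */
	{
		.stat_string = "rx_packets",
		.type        = E1000_STATS,
		.sizeof_stat = sizeof(((struct e1000_adapter *)0)->stats.gprc),
		.stat_offset = offsetof(struct e1000_adapter, stats.gprc)
	},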
@@ -194,20 +198,6 @@ static int e1000_get_settings(struct net_device *netdev,
194 return 0; 198 return 0;
195} 199}
196 200
197static u32 e1000_get_link(struct net_device *netdev)
198{
199 struct e1000_adapter *adapter = netdev_priv(netdev);
200 struct e1000_hw *hw = &adapter->hw;
201
202 /*
203 * Avoid touching hardware registers when possible, otherwise
204 * link negotiation can get messed up when user-level scripts
205 * are rapidly polling the driver to see if link is up.
206 */
207 return netif_running(netdev) ? netif_carrier_ok(netdev) :
208 !!(er32(STATUS) & E1000_STATUS_LU);
209}
210
211static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx) 201static int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
212{ 202{
213 struct e1000_mac_info *mac = &adapter->hw.mac; 203 struct e1000_mac_info *mac = &adapter->hw.mac;
@@ -634,20 +624,24 @@ static void e1000_get_drvinfo(struct net_device *netdev,
634 struct e1000_adapter *adapter = netdev_priv(netdev); 624 struct e1000_adapter *adapter = netdev_priv(netdev);
635 char firmware_version[32]; 625 char firmware_version[32];
636 626
637 strncpy(drvinfo->driver, e1000e_driver_name, 32); 627 strncpy(drvinfo->driver, e1000e_driver_name,
638 strncpy(drvinfo->version, e1000e_driver_version, 32); 628 sizeof(drvinfo->driver) - 1);
629 strncpy(drvinfo->version, e1000e_driver_version,
630 sizeof(drvinfo->version) - 1);
639 631
640 /* 632 /*
641 * EEPROM image version # is reported as firmware version # for 633 * EEPROM image version # is reported as firmware version # for
642 * PCI-E controllers 634 * PCI-E controllers
643 */ 635 */
644 sprintf(firmware_version, "%d.%d-%d", 636 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
645 (adapter->eeprom_vers & 0xF000) >> 12, 637 (adapter->eeprom_vers & 0xF000) >> 12,
646 (adapter->eeprom_vers & 0x0FF0) >> 4, 638 (adapter->eeprom_vers & 0x0FF0) >> 4,
647 (adapter->eeprom_vers & 0x000F)); 639 (adapter->eeprom_vers & 0x000F));
648 640
649 strncpy(drvinfo->fw_version, firmware_version, 32); 641 strncpy(drvinfo->fw_version, firmware_version,
650 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 642 sizeof(drvinfo->fw_version) - 1);
643 strncpy(drvinfo->bus_info, pci_name(adapter->pdev),
644 sizeof(drvinfo->bus_info) - 1);
651 drvinfo->regdump_len = e1000_get_regs_len(netdev); 645 drvinfo->regdump_len = e1000_get_regs_len(netdev);
652 drvinfo->eedump_len = e1000_get_eeprom_len(netdev); 646 drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
653} 647}
@@ -763,8 +757,8 @@ static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
763 int reg, int offset, u32 mask, u32 write) 757 int reg, int offset, u32 mask, u32 write)
764{ 758{
765 u32 pat, val; 759 u32 pat, val;
766 static const u32 test[] = 760 static const u32 test[] = {
767 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 761 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
768 for (pat = 0; pat < ARRAY_SIZE(test); pat++) { 762 for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
769 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset, 763 E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
770 (test[pat] & write)); 764 (test[pat] & write));
@@ -1263,6 +1257,7 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1263 u32 ctrl_reg = 0; 1257 u32 ctrl_reg = 0;
1264 u32 stat_reg = 0; 1258 u32 stat_reg = 0;
1265 u16 phy_reg = 0; 1259 u16 phy_reg = 0;
1260 s32 ret_val = 0;
1266 1261
1267 hw->mac.autoneg = 0; 1262 hw->mac.autoneg = 0;
1268 1263
@@ -1322,7 +1317,13 @@ static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
1322 case e1000_phy_82577: 1317 case e1000_phy_82577:
1323 case e1000_phy_82578: 1318 case e1000_phy_82578:
1324 /* Workaround: K1 must be disabled for stable 1Gbps operation */ 1319 /* Workaround: K1 must be disabled for stable 1Gbps operation */
1320 ret_val = hw->phy.ops.acquire(hw);
1321 if (ret_val) {
1322 e_err("Cannot setup 1Gbps loopback.\n");
1323 return ret_val;
1324 }
1325 e1000_configure_k1_ich8lan(hw, false); 1325 e1000_configure_k1_ich8lan(hw, false);
1326 hw->phy.ops.release(hw);
1326 break; 1327 break;
1327 case e1000_phy_82579: 1328 case e1000_phy_82579:
1328 /* Disable PHY energy detect power down */ 1329 /* Disable PHY energy detect power down */
@@ -1707,6 +1708,19 @@ static void e1000_diag_test(struct net_device *netdev,
1707 bool if_running = netif_running(netdev); 1708 bool if_running = netif_running(netdev);
1708 1709
1709 set_bit(__E1000_TESTING, &adapter->state); 1710 set_bit(__E1000_TESTING, &adapter->state);
1711
1712 if (!if_running) {
1713 /* Get control of and reset hardware */
1714 if (adapter->flags & FLAG_HAS_AMT)
1715 e1000e_get_hw_control(adapter);
1716
1717 e1000e_power_up_phy(adapter);
1718
1719 adapter->hw.phy.autoneg_wait_to_complete = 1;
1720 e1000e_reset(adapter);
1721 adapter->hw.phy.autoneg_wait_to_complete = 0;
1722 }
1723
1710 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 1724 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
1711 /* Offline tests */ 1725 /* Offline tests */
1712 1726
@@ -1720,8 +1734,6 @@ static void e1000_diag_test(struct net_device *netdev,
1720 if (if_running) 1734 if (if_running)
1721 /* indicate we're in test mode */ 1735 /* indicate we're in test mode */
1722 dev_close(netdev); 1736 dev_close(netdev);
1723 else
1724 e1000e_reset(adapter);
1725 1737
1726 if (e1000_reg_test(adapter, &data[0])) 1738 if (e1000_reg_test(adapter, &data[0]))
1727 eth_test->flags |= ETH_TEST_FL_FAILED; 1739 eth_test->flags |= ETH_TEST_FL_FAILED;
@@ -1735,8 +1747,6 @@ static void e1000_diag_test(struct net_device *netdev,
1735 eth_test->flags |= ETH_TEST_FL_FAILED; 1747 eth_test->flags |= ETH_TEST_FL_FAILED;
1736 1748
1737 e1000e_reset(adapter); 1749 e1000e_reset(adapter);
1738 /* make sure the phy is powered up */
1739 e1000e_power_up_phy(adapter);
1740 if (e1000_loopback_test(adapter, &data[3])) 1750 if (e1000_loopback_test(adapter, &data[3]))
1741 eth_test->flags |= ETH_TEST_FL_FAILED; 1751 eth_test->flags |= ETH_TEST_FL_FAILED;
1742 1752
@@ -1758,28 +1768,29 @@ static void e1000_diag_test(struct net_device *netdev,
1758 if (if_running) 1768 if (if_running)
1759 dev_open(netdev); 1769 dev_open(netdev);
1760 } else { 1770 } else {
1761 if (!if_running && (adapter->flags & FLAG_HAS_AMT)) { 1771 /* Online tests */
1762 clear_bit(__E1000_TESTING, &adapter->state);
1763 dev_open(netdev);
1764 set_bit(__E1000_TESTING, &adapter->state);
1765 }
1766 1772
1767 e_info("online testing starting\n"); 1773 e_info("online testing starting\n");
1768 /* Online tests */
1769 if (e1000_link_test(adapter, &data[4]))
1770 eth_test->flags |= ETH_TEST_FL_FAILED;
1771 1774
1772 /* Online tests aren't run; pass by default */ 1775 /* register, eeprom, intr and loopback tests not run online */
1773 data[0] = 0; 1776 data[0] = 0;
1774 data[1] = 0; 1777 data[1] = 0;
1775 data[2] = 0; 1778 data[2] = 0;
1776 data[3] = 0; 1779 data[3] = 0;
1777 1780
1778 if (!if_running && (adapter->flags & FLAG_HAS_AMT)) 1781 if (e1000_link_test(adapter, &data[4]))
1779 dev_close(netdev); 1782 eth_test->flags |= ETH_TEST_FL_FAILED;
1780 1783
1781 clear_bit(__E1000_TESTING, &adapter->state); 1784 clear_bit(__E1000_TESTING, &adapter->state);
1782 } 1785 }
1786
1787 if (!if_running) {
1788 e1000e_reset(adapter);
1789
1790 if (adapter->flags & FLAG_HAS_AMT)
1791 e1000e_release_hw_control(adapter);
1792 }
1793
1783 msleep_interruptible(4 * 1000); 1794 msleep_interruptible(4 * 1000);
1784} 1795}
1785 1796
@@ -1860,7 +1871,7 @@ static int e1000_set_wol(struct net_device *netdev,
1860/* bit defines for adapter->led_status */ 1871/* bit defines for adapter->led_status */
1861#define E1000_LED_ON 0 1872#define E1000_LED_ON 0
1862 1873
1863static void e1000e_led_blink_task(struct work_struct *work) 1874void e1000e_led_blink_task(struct work_struct *work)
1864{ 1875{
1865 struct e1000_adapter *adapter = container_of(work, 1876 struct e1000_adapter *adapter = container_of(work,
1866 struct e1000_adapter, led_blink_task); 1877 struct e1000_adapter, led_blink_task);
@@ -1892,7 +1903,6 @@ static int e1000_phys_id(struct net_device *netdev, u32 data)
1892 (hw->mac.type == e1000_pch2lan) || 1903 (hw->mac.type == e1000_pch2lan) ||
1893 (hw->mac.type == e1000_82583) || 1904 (hw->mac.type == e1000_82583) ||
1894 (hw->mac.type == e1000_82574)) { 1905 (hw->mac.type == e1000_82574)) {
1895 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
1896 if (!adapter->blink_timer.function) { 1906 if (!adapter->blink_timer.function) {
1897 init_timer(&adapter->blink_timer); 1907 init_timer(&adapter->blink_timer);
1898 adapter->blink_timer.function = 1908 adapter->blink_timer.function =
@@ -1986,6 +1996,9 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1986 p = (char *) adapter + 1996 p = (char *) adapter +
1987 e1000_gstrings_stats[i].stat_offset; 1997 e1000_gstrings_stats[i].stat_offset;
1988 break; 1998 break;
1999 default:
2000 data[i] = 0;
2001 continue;
1989 } 2002 }
1990 2003
1991 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 2004 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
@@ -2024,7 +2037,7 @@ static const struct ethtool_ops e1000_ethtool_ops = {
2024 .get_msglevel = e1000_get_msglevel, 2037 .get_msglevel = e1000_get_msglevel,
2025 .set_msglevel = e1000_set_msglevel, 2038 .set_msglevel = e1000_set_msglevel,
2026 .nway_reset = e1000_nway_reset, 2039 .nway_reset = e1000_nway_reset,
2027 .get_link = e1000_get_link, 2040 .get_link = ethtool_op_get_link,
2028 .get_eeprom_len = e1000_get_eeprom_len, 2041 .get_eeprom_len = e1000_get_eeprom_len,
2029 .get_eeprom = e1000_get_eeprom, 2042 .get_eeprom = e1000_get_eeprom,
2030 .set_eeprom = e1000_set_eeprom, 2043 .set_eeprom = e1000_set_eeprom,
diff --git a/drivers/net/e1000e/hw.h b/drivers/net/e1000e/hw.h
index ba302a5c2c30..e774380c7cec 100644
--- a/drivers/net/e1000e/hw.h
+++ b/drivers/net/e1000e/hw.h
@@ -83,6 +83,7 @@ enum e1e_registers {
83 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */ 83 E1000_EXTCNF_CTRL = 0x00F00, /* Extended Configuration Control */
84 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */ 84 E1000_EXTCNF_SIZE = 0x00F08, /* Extended Configuration Size */
85 E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */ 85 E1000_PHY_CTRL = 0x00F10, /* PHY Control Register in CSR */
86#define E1000_POEMB E1000_PHY_CTRL /* PHY OEM Bits */
86 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */ 87 E1000_PBA = 0x01000, /* Packet Buffer Allocation - RW */
87 E1000_PBS = 0x01008, /* Packet Buffer Size */ 88 E1000_PBS = 0x01008, /* Packet Buffer Size */
88 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */ 89 E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
diff --git a/drivers/net/e1000e/ich8lan.c b/drivers/net/e1000e/ich8lan.c
index 38c84ba3e3c1..5bb65b7382db 100644
--- a/drivers/net/e1000e/ich8lan.c
+++ b/drivers/net/e1000e/ich8lan.c
@@ -338,12 +338,17 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
338 } 338 }
339 339
340 phy->id = e1000_phy_unknown; 340 phy->id = e1000_phy_unknown;
341 ret_val = e1000e_get_phy_id(hw); 341 switch (hw->mac.type) {
342 if (ret_val) 342 default:
343 goto out; 343 ret_val = e1000e_get_phy_id(hw);
344 if ((phy->id == 0) || (phy->id == PHY_REVISION_MASK)) { 344 if (ret_val)
345 goto out;
346 if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
347 break;
348 /* fall-through */
349 case e1000_pch2lan:
345 /* 350 /*
346 * In case the PHY needs to be in mdio slow mode (eg. 82577), 351 * In case the PHY needs to be in mdio slow mode,
347 * set slow mode and try to get the PHY id again. 352 * set slow mode and try to get the PHY id again.
348 */ 353 */
349 ret_val = e1000_set_mdio_slow_mode_hv(hw); 354 ret_val = e1000_set_mdio_slow_mode_hv(hw);
@@ -352,6 +357,7 @@ static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
352 ret_val = e1000e_get_phy_id(hw); 357 ret_val = e1000e_get_phy_id(hw);
353 if (ret_val) 358 if (ret_val)
354 goto out; 359 goto out;
360 break;
355 } 361 }
356 phy->type = e1000e_get_phy_type_from_id(phy->id); 362 phy->type = e1000e_get_phy_type_from_id(phy->id);
357 363
@@ -1389,22 +1395,6 @@ void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
1389 } 1395 }
1390} 1396}
1391 1397
1392static u32 e1000_calc_rx_da_crc(u8 mac[])
1393{
1394 u32 poly = 0xEDB88320; /* Polynomial for 802.3 CRC calculation */
1395 u32 i, j, mask, crc;
1396
1397 crc = 0xffffffff;
1398 for (i = 0; i < 6; i++) {
1399 crc = crc ^ mac[i];
1400 for (j = 8; j > 0; j--) {
1401 mask = (crc & 1) * (-1);
1402 crc = (crc >> 1) ^ (poly & mask);
1403 }
1404 }
1405 return ~crc;
1406}
1407
1408/** 1398/**
1409 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation 1399 * e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
1410 * with 82579 PHY 1400 * with 82579 PHY
@@ -1447,8 +1437,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
1447 mac_addr[4] = (addr_high & 0xFF); 1437 mac_addr[4] = (addr_high & 0xFF);
1448 mac_addr[5] = ((addr_high >> 8) & 0xFF); 1438 mac_addr[5] = ((addr_high >> 8) & 0xFF);
1449 1439
1450 ew32(PCH_RAICC(i), 1440 ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
1451 e1000_calc_rx_da_crc(mac_addr));
1452 } 1441 }
1453 1442
1454 /* Write Rx addresses to the PHY */ 1443 /* Write Rx addresses to the PHY */
@@ -2303,11 +2292,10 @@ static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
2303 */ 2292 */
2304 if (ret_val == 0) { 2293 if (ret_val == 0) {
2305 flash_data = er32flash(ICH_FLASH_FDATA0); 2294 flash_data = er32flash(ICH_FLASH_FDATA0);
2306 if (size == 1) { 2295 if (size == 1)
2307 *data = (u8)(flash_data & 0x000000FF); 2296 *data = (u8)(flash_data & 0x000000FF);
2308 } else if (size == 2) { 2297 else if (size == 2)
2309 *data = (u16)(flash_data & 0x0000FFFF); 2298 *data = (u16)(flash_data & 0x0000FFFF);
2310 }
2311 break; 2299 break;
2312 } else { 2300 } else {
2313 /* 2301 /*
@@ -2972,7 +2960,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
2972{ 2960{
2973 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan; 2961 struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
2974 u16 reg; 2962 u16 reg;
2975 u32 ctrl, icr, kab; 2963 u32 ctrl, kab;
2976 s32 ret_val; 2964 s32 ret_val;
2977 2965
2978 /* 2966 /*
@@ -3062,7 +3050,7 @@ static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
3062 ew32(CRC_OFFSET, 0x65656565); 3050 ew32(CRC_OFFSET, 0x65656565);
3063 3051
3064 ew32(IMC, 0xffffffff); 3052 ew32(IMC, 0xffffffff);
3065 icr = er32(ICR); 3053 er32(ICR);
3066 3054
3067 kab = er32(KABGTXD); 3055 kab = er32(KABGTXD);
3068 kab |= E1000_KABGTXD_BGSQLBIAS; 3056 kab |= E1000_KABGTXD_BGSQLBIAS;
@@ -3113,7 +3101,7 @@ static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
3113 * Reset the phy after disabling host wakeup to reset the Rx buffer. 3101 * Reset the phy after disabling host wakeup to reset the Rx buffer.
3114 */ 3102 */
3115 if (hw->phy.type == e1000_phy_82578) { 3103 if (hw->phy.type == e1000_phy_82578) {
3116 hw->phy.ops.read_reg(hw, BM_WUC, &i); 3104 e1e_rphy(hw, BM_WUC, &i);
3117 ret_val = e1000_phy_hw_reset_ich8lan(hw); 3105 ret_val = e1000_phy_hw_reset_ich8lan(hw);
3118 if (ret_val) 3106 if (ret_val)
3119 return ret_val; 3107 return ret_val;
@@ -3271,9 +3259,8 @@ static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
3271 (hw->phy.type == e1000_phy_82577)) { 3259 (hw->phy.type == e1000_phy_82577)) {
3272 ew32(FCRTV_PCH, hw->fc.refresh_time); 3260 ew32(FCRTV_PCH, hw->fc.refresh_time);
3273 3261
3274 ret_val = hw->phy.ops.write_reg(hw, 3262 ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
3275 PHY_REG(BM_PORT_CTRL_PAGE, 27), 3263 hw->fc.pause_time);
3276 hw->fc.pause_time);
3277 if (ret_val) 3264 if (ret_val)
3278 return ret_val; 3265 return ret_val;
3279 } 3266 }
@@ -3337,8 +3324,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3337 return ret_val; 3324 return ret_val;
3338 break; 3325 break;
3339 case e1000_phy_ife: 3326 case e1000_phy_ife:
3340 ret_val = hw->phy.ops.read_reg(hw, IFE_PHY_MDIX_CONTROL, 3327 ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
3341 &reg_data);
3342 if (ret_val) 3328 if (ret_val)
3343 return ret_val; 3329 return ret_val;
3344 3330
@@ -3356,8 +3342,7 @@ static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
3356 reg_data |= IFE_PMC_AUTO_MDIX; 3342 reg_data |= IFE_PMC_AUTO_MDIX;
3357 break; 3343 break;
3358 } 3344 }
3359 ret_val = hw->phy.ops.write_reg(hw, IFE_PHY_MDIX_CONTROL, 3345 ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
3360 reg_data);
3361 if (ret_val) 3346 if (ret_val)
3362 return ret_val; 3347 return ret_val;
3363 break; 3348 break;
@@ -3591,7 +3576,7 @@ void e1000e_disable_gig_wol_ich8lan(struct e1000_hw *hw)
3591 ew32(PHY_CTRL, phy_ctrl); 3576 ew32(PHY_CTRL, phy_ctrl);
3592 3577
3593 if (hw->mac.type >= e1000_pchlan) { 3578 if (hw->mac.type >= e1000_pchlan) {
3594 e1000_oem_bits_config_ich8lan(hw, true); 3579 e1000_oem_bits_config_ich8lan(hw, false);
3595 ret_val = hw->phy.ops.acquire(hw); 3580 ret_val = hw->phy.ops.acquire(hw);
3596 if (ret_val) 3581 if (ret_val)
3597 return; 3582 return;
@@ -3641,7 +3626,8 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3641{ 3626{
3642 if (hw->phy.type == e1000_phy_ife) 3627 if (hw->phy.type == e1000_phy_ife)
3643 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 3628 return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
3644 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF)); 3629 (IFE_PSCL_PROBE_MODE |
3630 IFE_PSCL_PROBE_LEDS_OFF));
3645 3631
3646 ew32(LEDCTL, hw->mac.ledctl_mode1); 3632 ew32(LEDCTL, hw->mac.ledctl_mode1);
3647 return 0; 3633 return 0;
@@ -3655,8 +3641,7 @@ static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
3655 **/ 3641 **/
3656static s32 e1000_setup_led_pchlan(struct e1000_hw *hw) 3642static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3657{ 3643{
3658 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, 3644 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
3659 (u16)hw->mac.ledctl_mode1);
3660} 3645}
3661 3646
3662/** 3647/**
@@ -3667,8 +3652,7 @@ static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
3667 **/ 3652 **/
3668static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw) 3653static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
3669{ 3654{
3670 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, 3655 return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
3671 (u16)hw->mac.ledctl_default);
3672} 3656}
3673 3657
3674/** 3658/**
@@ -3699,7 +3683,7 @@ static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
3699 } 3683 }
3700 } 3684 }
3701 3685
3702 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); 3686 return e1e_wphy(hw, HV_LED_CONFIG, data);
3703} 3687}
3704 3688
3705/** 3689/**
@@ -3730,7 +3714,7 @@ static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
3730 } 3714 }
3731 } 3715 }
3732 3716
3733 return hw->phy.ops.write_reg(hw, HV_LED_CONFIG, data); 3717 return e1e_wphy(hw, HV_LED_CONFIG, data);
3734} 3718}
3735 3719
3736/** 3720/**
@@ -3839,20 +3823,20 @@ static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
3839 if ((hw->phy.type == e1000_phy_82578) || 3823 if ((hw->phy.type == e1000_phy_82578) ||
3840 (hw->phy.type == e1000_phy_82579) || 3824 (hw->phy.type == e1000_phy_82579) ||
3841 (hw->phy.type == e1000_phy_82577)) { 3825 (hw->phy.type == e1000_phy_82577)) {
3842 hw->phy.ops.read_reg(hw, HV_SCC_UPPER, &phy_data); 3826 e1e_rphy(hw, HV_SCC_UPPER, &phy_data);
3843 hw->phy.ops.read_reg(hw, HV_SCC_LOWER, &phy_data); 3827 e1e_rphy(hw, HV_SCC_LOWER, &phy_data);
3844 hw->phy.ops.read_reg(hw, HV_ECOL_UPPER, &phy_data); 3828 e1e_rphy(hw, HV_ECOL_UPPER, &phy_data);
3845 hw->phy.ops.read_reg(hw, HV_ECOL_LOWER, &phy_data); 3829 e1e_rphy(hw, HV_ECOL_LOWER, &phy_data);
3846 hw->phy.ops.read_reg(hw, HV_MCC_UPPER, &phy_data); 3830 e1e_rphy(hw, HV_MCC_UPPER, &phy_data);
3847 hw->phy.ops.read_reg(hw, HV_MCC_LOWER, &phy_data); 3831 e1e_rphy(hw, HV_MCC_LOWER, &phy_data);
3848 hw->phy.ops.read_reg(hw, HV_LATECOL_UPPER, &phy_data); 3832 e1e_rphy(hw, HV_LATECOL_UPPER, &phy_data);
3849 hw->phy.ops.read_reg(hw, HV_LATECOL_LOWER, &phy_data); 3833 e1e_rphy(hw, HV_LATECOL_LOWER, &phy_data);
3850 hw->phy.ops.read_reg(hw, HV_COLC_UPPER, &phy_data); 3834 e1e_rphy(hw, HV_COLC_UPPER, &phy_data);
3851 hw->phy.ops.read_reg(hw, HV_COLC_LOWER, &phy_data); 3835 e1e_rphy(hw, HV_COLC_LOWER, &phy_data);
3852 hw->phy.ops.read_reg(hw, HV_DC_UPPER, &phy_data); 3836 e1e_rphy(hw, HV_DC_UPPER, &phy_data);
3853 hw->phy.ops.read_reg(hw, HV_DC_LOWER, &phy_data); 3837 e1e_rphy(hw, HV_DC_LOWER, &phy_data);
3854 hw->phy.ops.read_reg(hw, HV_TNCRS_UPPER, &phy_data); 3838 e1e_rphy(hw, HV_TNCRS_UPPER, &phy_data);
3855 hw->phy.ops.read_reg(hw, HV_TNCRS_LOWER, &phy_data); 3839 e1e_rphy(hw, HV_TNCRS_LOWER, &phy_data);
3856 } 3840 }
3857} 3841}
3858 3842
diff --git a/drivers/net/e1000e/lib.c b/drivers/net/e1000e/lib.c
index 0fd4eb5ac5fb..ff2872153b21 100644
--- a/drivers/net/e1000e/lib.c
+++ b/drivers/net/e1000e/lib.c
@@ -493,9 +493,8 @@ s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
493 * different link partner. 493 * different link partner.
494 */ 494 */
495 ret_val = e1000e_config_fc_after_link_up(hw); 495 ret_val = e1000e_config_fc_after_link_up(hw);
496 if (ret_val) { 496 if (ret_val)
497 e_dbg("Error configuring flow control\n"); 497 e_dbg("Error configuring flow control\n");
498 }
499 498
500 return ret_val; 499 return ret_val;
501} 500}
@@ -1136,7 +1135,8 @@ s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
1136 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg); 1135 ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
1137 if (ret_val) 1136 if (ret_val)
1138 return ret_val; 1137 return ret_val;
1139 ret_val = e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg); 1138 ret_val =
1139 e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
1140 if (ret_val) 1140 if (ret_val)
1141 return ret_val; 1141 return ret_val;
1142 1142
@@ -1496,9 +1496,8 @@ s32 e1000e_setup_led_generic(struct e1000_hw *hw)
1496{ 1496{
1497 u32 ledctl; 1497 u32 ledctl;
1498 1498
1499 if (hw->mac.ops.setup_led != e1000e_setup_led_generic) { 1499 if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
1500 return -E1000_ERR_CONFIG; 1500 return -E1000_ERR_CONFIG;
1501 }
1502 1501
1503 if (hw->phy.media_type == e1000_media_type_fiber) { 1502 if (hw->phy.media_type == e1000_media_type_fiber) {
1504 ledctl = er32(LEDCTL); 1503 ledctl = er32(LEDCTL);
@@ -2139,6 +2138,119 @@ s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
2139} 2138}
2140 2139
2141/** 2140/**
2141 * e1000_read_pba_string_generic - Read device part number
2142 * @hw: pointer to the HW structure
2143 * @pba_num: pointer to device part number
2144 * @pba_num_size: size of part number buffer
2145 *
2146 * Reads the product board assembly (PBA) number from the EEPROM and stores
2147 * the value in pba_num.
2148 **/
2149s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
2150 u32 pba_num_size)
2151{
2152 s32 ret_val;
2153 u16 nvm_data;
2154 u16 pba_ptr;
2155 u16 offset;
2156 u16 length;
2157
2158 if (pba_num == NULL) {
2159 e_dbg("PBA string buffer was null\n");
2160 ret_val = E1000_ERR_INVALID_ARGUMENT;
2161 goto out;
2162 }
2163
2164 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2165 if (ret_val) {
2166 e_dbg("NVM Read Error\n");
2167 goto out;
2168 }
2169
2170 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
2171 if (ret_val) {
2172 e_dbg("NVM Read Error\n");
2173 goto out;
2174 }
2175
2176 /*
2177 * if nvm_data is not the pointer guard, the PBA must be in legacy format,
2178 * which means pba_ptr is actually our second data word for the PBA number
2179 * and we can decode it into an ASCII string (worked example follows this function)
2180 */
2181 if (nvm_data != NVM_PBA_PTR_GUARD) {
2182 e_dbg("NVM PBA number is not stored as string\n");
2183
2184 /* we will need 11 characters to store the PBA */
2185 if (pba_num_size < 11) {
2186 e_dbg("PBA string buffer too small\n");
2187 return E1000_ERR_NO_SPACE;
2188 }
2189
2190 /* extract hex string from data and pba_ptr */
2191 pba_num[0] = (nvm_data >> 12) & 0xF;
2192 pba_num[1] = (nvm_data >> 8) & 0xF;
2193 pba_num[2] = (nvm_data >> 4) & 0xF;
2194 pba_num[3] = nvm_data & 0xF;
2195 pba_num[4] = (pba_ptr >> 12) & 0xF;
2196 pba_num[5] = (pba_ptr >> 8) & 0xF;
2197 pba_num[6] = '-';
2198 pba_num[7] = 0; /* becomes the character '0' in the hex-char loop below */
2199 pba_num[8] = (pba_ptr >> 4) & 0xF;
2200 pba_num[9] = pba_ptr & 0xF;
2201
2202 /* put a null character on the end of our string */
2203 pba_num[10] = '\0';
2204
2205 /* switch all the data but the '-' to hex char */
2206 for (offset = 0; offset < 10; offset++) {
2207 if (pba_num[offset] < 0xA)
2208 pba_num[offset] += '0';
2209 else if (pba_num[offset] < 0x10)
2210 pba_num[offset] += 'A' - 0xA;
2211 }
2212
2213 goto out;
2214 }
2215
2216 ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
2217 if (ret_val) {
2218 e_dbg("NVM Read Error\n");
2219 goto out;
2220 }
2221
2222 if (length == 0xFFFF || length == 0) {
2223 e_dbg("NVM PBA number section invalid length\n");
2224 ret_val = E1000_ERR_NVM_PBA_SECTION;
2225 goto out;
2226 }
2227 /* check if pba_num buffer is big enough */
2228 if (pba_num_size < (((u32)length * 2) - 1)) {
2229 e_dbg("PBA string buffer too small\n");
2230 ret_val = E1000_ERR_NO_SPACE;
2231 goto out;
2232 }
2233
2234 /* trim pba length from start of string */
2235 pba_ptr++;
2236 length--;
2237
2238 for (offset = 0; offset < length; offset++) {
2239 ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
2240 if (ret_val) {
2241 e_dbg("NVM Read Error\n");
2242 goto out;
2243 }
2244 pba_num[offset * 2] = (u8)(nvm_data >> 8);
2245 pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
2246 }
2247 pba_num[offset * 2] = '\0';
2248
2249out:
2250 return ret_val;
2251}
2252
2253/**
2142 * e1000_read_mac_addr_generic - Read device MAC address 2254 * e1000_read_mac_addr_generic - Read device MAC address
2143 * @hw: pointer to the HW structure 2255 * @hw: pointer to the HW structure
2144 * 2256 *
@@ -2579,25 +2691,3 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
2579out: 2691out:
2580 return ret_val; 2692 return ret_val;
2581} 2693}
2582
2583s32 e1000e_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
2584{
2585 s32 ret_val;
2586 u16 nvm_data;
2587
2588 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
2589 if (ret_val) {
2590 e_dbg("NVM Read Error\n");
2591 return ret_val;
2592 }
2593 *pba_num = (u32)(nvm_data << 16);
2594
2595 ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
2596 if (ret_val) {
2597 e_dbg("NVM Read Error\n");
2598 return ret_val;
2599 }
2600 *pba_num |= nvm_data;
2601
2602 return 0;
2603}
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index c4ca1629f532..fa5b60452547 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -54,7 +54,7 @@
54 54
55#define DRV_EXTRAVERSION "-k2" 55#define DRV_EXTRAVERSION "-k2"
56 56
57#define DRV_VERSION "1.2.7" DRV_EXTRAVERSION 57#define DRV_VERSION "1.2.20" DRV_EXTRAVERSION
58char e1000e_driver_name[] = "e1000e"; 58char e1000e_driver_name[] = "e1000e";
59const char e1000e_driver_version[] = DRV_VERSION; 59const char e1000e_driver_version[] = DRV_VERSION;
60 60
@@ -1325,7 +1325,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
1325 goto next_desc; 1325 goto next_desc;
1326 } 1326 }
1327 1327
1328#define rxtop rx_ring->rx_skb_top 1328#define rxtop (rx_ring->rx_skb_top)
1329 if (!(status & E1000_RXD_STAT_EOP)) { 1329 if (!(status & E1000_RXD_STAT_EOP)) {
1330 /* this descriptor is only the beginning (or middle) */ 1330 /* this descriptor is only the beginning (or middle) */
1331 if (!rxtop) { 1331 if (!rxtop) {
@@ -1806,9 +1806,8 @@ void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
1806 err = pci_enable_msix(adapter->pdev, 1806 err = pci_enable_msix(adapter->pdev,
1807 adapter->msix_entries, 1807 adapter->msix_entries,
1808 adapter->num_vectors); 1808 adapter->num_vectors);
1809 if (err == 0) { 1809 if (err == 0)
1810 return; 1810 return;
1811 }
1812 } 1811 }
1813 /* MSI-X failed, so fall through and try MSI */ 1812 /* MSI-X failed, so fall through and try MSI */
1814 e_err("Failed to initialize MSI-X interrupts. " 1813 e_err("Failed to initialize MSI-X interrupts. "
@@ -1981,15 +1980,15 @@ static void e1000_irq_enable(struct e1000_adapter *adapter)
1981} 1980}
1982 1981
1983/** 1982/**
1984 * e1000_get_hw_control - get control of the h/w from f/w 1983 * e1000e_get_hw_control - get control of the h/w from f/w
1985 * @adapter: address of board private structure 1984 * @adapter: address of board private structure
1986 * 1985 *
1987 * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit. 1986 * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
1988 * For ASF and Pass Through versions of f/w this means that 1987 * For ASF and Pass Through versions of f/w this means that
1989 * the driver is loaded. For AMT version (only with 82573) 1988 * the driver is loaded. For AMT version (only with 82573)
1990 * of the f/w this means that the network i/f is open. 1989 * of the f/w this means that the network i/f is open.
1991 **/ 1990 **/
1992static void e1000_get_hw_control(struct e1000_adapter *adapter) 1991void e1000e_get_hw_control(struct e1000_adapter *adapter)
1993{ 1992{
1994 struct e1000_hw *hw = &adapter->hw; 1993 struct e1000_hw *hw = &adapter->hw;
1995 u32 ctrl_ext; 1994 u32 ctrl_ext;
@@ -2006,16 +2005,16 @@ static void e1000_get_hw_control(struct e1000_adapter *adapter)
2006} 2005}
2007 2006
2008/** 2007/**
2009 * e1000_release_hw_control - release control of the h/w to f/w 2008 * e1000e_release_hw_control - release control of the h/w to f/w
2010 * @adapter: address of board private structure 2009 * @adapter: address of board private structure
2011 * 2010 *
2012 * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit. 2011 * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
2013 * For ASF and Pass Through versions of f/w this means that the 2012 * For ASF and Pass Through versions of f/w this means that the
2014 * driver is no longer loaded. For AMT version (only with 82573) 2013 * driver is no longer loaded. For AMT version (only with 82573)
2015 * of the f/w this means that the network i/f is closed. 2014 * of the f/w this means that the network i/f is closed.
2016 * 2015 *
2017 **/ 2016 **/
2018static void e1000_release_hw_control(struct e1000_adapter *adapter) 2017void e1000e_release_hw_control(struct e1000_adapter *adapter)
2019{ 2018{
2020 struct e1000_hw *hw = &adapter->hw; 2019 struct e1000_hw *hw = &adapter->hw;
2021 u32 ctrl_ext; 2020 u32 ctrl_ext;
@@ -2059,10 +2058,9 @@ int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
2059 int err = -ENOMEM, size; 2058 int err = -ENOMEM, size;
2060 2059
2061 size = sizeof(struct e1000_buffer) * tx_ring->count; 2060 size = sizeof(struct e1000_buffer) * tx_ring->count;
2062 tx_ring->buffer_info = vmalloc(size); 2061 tx_ring->buffer_info = vzalloc(size);
2063 if (!tx_ring->buffer_info) 2062 if (!tx_ring->buffer_info)
2064 goto err; 2063 goto err;
2065 memset(tx_ring->buffer_info, 0, size);
2066 2064
2067 /* round up to nearest 4K */ 2065 /* round up to nearest 4K */
2068 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc); 2066 tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
@@ -2095,10 +2093,9 @@ int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
2095 int i, size, desc_len, err = -ENOMEM; 2093 int i, size, desc_len, err = -ENOMEM;
2096 2094
2097 size = sizeof(struct e1000_buffer) * rx_ring->count; 2095 size = sizeof(struct e1000_buffer) * rx_ring->count;
2098 rx_ring->buffer_info = vmalloc(size); 2096 rx_ring->buffer_info = vzalloc(size);
2099 if (!rx_ring->buffer_info) 2097 if (!rx_ring->buffer_info)
2100 goto err; 2098 goto err;
2101 memset(rx_ring->buffer_info, 0, size);
2102 2099
2103 for (i = 0; i < rx_ring->count; i++) { 2100 for (i = 0; i < rx_ring->count; i++) {
2104 buffer_info = &rx_ring->buffer_info[i]; 2101 buffer_info = &rx_ring->buffer_info[i];
@@ -2132,7 +2129,7 @@ err_pages:
2132 } 2129 }
2133err: 2130err:
2134 vfree(rx_ring->buffer_info); 2131 vfree(rx_ring->buffer_info);
2135 e_err("Unable to allocate memory for the transmit descriptor ring\n"); 2132 e_err("Unable to allocate memory for the receive descriptor ring\n");
2136 return err; 2133 return err;
2137} 2134}
2138 2135
@@ -2200,9 +2197,8 @@ void e1000e_free_rx_resources(struct e1000_adapter *adapter)
2200 2197
2201 e1000_clean_rx_ring(adapter); 2198 e1000_clean_rx_ring(adapter);
2202 2199
2203 for (i = 0; i < rx_ring->count; i++) { 2200 for (i = 0; i < rx_ring->count; i++)
2204 kfree(rx_ring->buffer_info[i].ps_pages); 2201 kfree(rx_ring->buffer_info[i].ps_pages);
2205 }
2206 2202
2207 vfree(rx_ring->buffer_info); 2203 vfree(rx_ring->buffer_info);
2208 rx_ring->buffer_info = NULL; 2204 rx_ring->buffer_info = NULL;
@@ -2242,20 +2238,18 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2242 /* handle TSO and jumbo frames */ 2238 /* handle TSO and jumbo frames */
2243 if (bytes/packets > 8000) 2239 if (bytes/packets > 8000)
2244 retval = bulk_latency; 2240 retval = bulk_latency;
2245 else if ((packets < 5) && (bytes > 512)) { 2241 else if ((packets < 5) && (bytes > 512))
2246 retval = low_latency; 2242 retval = low_latency;
2247 }
2248 break; 2243 break;
2249 case low_latency: /* 50 usec aka 20000 ints/s */ 2244 case low_latency: /* 50 usec aka 20000 ints/s */
2250 if (bytes > 10000) { 2245 if (bytes > 10000) {
2251 /* this if handles the TSO accounting */ 2246 /* this if handles the TSO accounting */
2252 if (bytes/packets > 8000) { 2247 if (bytes/packets > 8000)
2253 retval = bulk_latency; 2248 retval = bulk_latency;
2254 } else if ((packets < 10) || ((bytes/packets) > 1200)) { 2249 else if ((packets < 10) || ((bytes/packets) > 1200))
2255 retval = bulk_latency; 2250 retval = bulk_latency;
2256 } else if ((packets > 35)) { 2251 else if ((packets > 35))
2257 retval = lowest_latency; 2252 retval = lowest_latency;
2258 }
2259 } else if (bytes/packets > 2000) { 2253 } else if (bytes/packets > 2000) {
2260 retval = bulk_latency; 2254 retval = bulk_latency;
2261 } else if (packets <= 2 && bytes < 512) { 2255 } else if (packets <= 2 && bytes < 512) {
@@ -2264,9 +2258,8 @@ static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
2264 break; 2258 break;
2265 case bulk_latency: /* 250 usec aka 4000 ints/s */ 2259 case bulk_latency: /* 250 usec aka 4000 ints/s */
2266 if (bytes > 25000) { 2260 if (bytes > 25000) {
2267 if (packets > 35) { 2261 if (packets > 35)
2268 retval = low_latency; 2262 retval = low_latency;
2269 }
2270 } else if (bytes < 6000) { 2263 } else if (bytes < 6000) {
2271 retval = low_latency; 2264 retval = low_latency;
2272 } 2265 }
@@ -2452,7 +2445,7 @@ static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
2452 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 2445 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
2453 (vid == adapter->mng_vlan_id)) { 2446 (vid == adapter->mng_vlan_id)) {
2454 /* release control to f/w */ 2447 /* release control to f/w */
2455 e1000_release_hw_control(adapter); 2448 e1000e_release_hw_control(adapter);
2456 return; 2449 return;
2457 } 2450 }
2458 2451
@@ -2741,6 +2734,9 @@ static void e1000_setup_rctl(struct e1000_adapter *adapter)
2741 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true); 2734 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
2742 else 2735 else
2743 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false); 2736 ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
2737
2738 if (ret_val)
2739 e_dbg("failed to enable jumbo frame workaround mode\n");
2744 } 2740 }
2745 2741
2746 /* Program MC offset vector base */ 2742 /* Program MC offset vector base */
@@ -3191,7 +3187,6 @@ void e1000e_reset(struct e1000_adapter *adapter)
3191 ew32(PBA, pba); 3187 ew32(PBA, pba);
3192 } 3188 }
3193 3189
3194
3195 /* 3190 /*
3196 * flow control settings 3191 * flow control settings
3197 * 3192 *
@@ -3279,7 +3274,7 @@ void e1000e_reset(struct e1000_adapter *adapter)
3279 * that the network interface is in control 3274 * that the network interface is in control
3280 */ 3275 */
3281 if (adapter->flags & FLAG_HAS_AMT) 3276 if (adapter->flags & FLAG_HAS_AMT)
3282 e1000_get_hw_control(adapter); 3277 e1000e_get_hw_control(adapter);
3283 3278
3284 ew32(WUC, 0); 3279 ew32(WUC, 0);
3285 3280
@@ -3292,6 +3287,13 @@ void e1000e_reset(struct e1000_adapter *adapter)
3292 ew32(VET, ETH_P_8021Q); 3287 ew32(VET, ETH_P_8021Q);
3293 3288
3294 e1000e_reset_adaptive(hw); 3289 e1000e_reset_adaptive(hw);
3290
3291 if (!netif_running(adapter->netdev) &&
3292 !test_bit(__E1000_TESTING, &adapter->state)) {
3293 e1000_power_down_phy(adapter);
3294 return;
3295 }
3296
3295 e1000_get_phy_info(hw); 3297 e1000_get_phy_info(hw);
3296 3298
3297 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) && 3299 if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
@@ -3577,7 +3579,7 @@ static int e1000_open(struct net_device *netdev)
3577 * interface is now open and reset the part to a known state. 3579 * interface is now open and reset the part to a known state.
3578 */ 3580 */
3579 if (adapter->flags & FLAG_HAS_AMT) { 3581 if (adapter->flags & FLAG_HAS_AMT) {
3580 e1000_get_hw_control(adapter); 3582 e1000e_get_hw_control(adapter);
3581 e1000e_reset(adapter); 3583 e1000e_reset(adapter);
3582 } 3584 }
3583 3585
@@ -3641,7 +3643,7 @@ static int e1000_open(struct net_device *netdev)
3641 return 0; 3643 return 0;
3642 3644
3643err_req_irq: 3645err_req_irq:
3644 e1000_release_hw_control(adapter); 3646 e1000e_release_hw_control(adapter);
3645 e1000_power_down_phy(adapter); 3647 e1000_power_down_phy(adapter);
3646 e1000e_free_rx_resources(adapter); 3648 e1000e_free_rx_resources(adapter);
3647err_setup_rx: 3649err_setup_rx:
@@ -3696,8 +3698,9 @@ static int e1000_close(struct net_device *netdev)
3696 * If AMT is enabled, let the firmware know that the network 3698 * If AMT is enabled, let the firmware know that the network
3697 * interface is now closed 3699 * interface is now closed
3698 */ 3700 */
3699 if (adapter->flags & FLAG_HAS_AMT) 3701 if ((adapter->flags & FLAG_HAS_AMT) &&
3700 e1000_release_hw_control(adapter); 3702 !test_bit(__E1000_TESTING, &adapter->state))
3703 e1000e_release_hw_control(adapter);
3701 3704
3702 if ((adapter->flags & FLAG_HAS_ERT) || 3705 if ((adapter->flags & FLAG_HAS_ERT) ||
3703 (adapter->hw.mac.type == e1000_pch2lan)) 3706 (adapter->hw.mac.type == e1000_pch2lan))
@@ -4475,7 +4478,7 @@ static bool e1000_tx_csum(struct e1000_adapter *adapter, struct sk_buff *skb)
4475 break; 4478 break;
4476 } 4479 }
4477 4480
4478 css = skb_transport_offset(skb); 4481 css = skb_checksum_start_offset(skb);
4479 4482
4480 i = tx_ring->next_to_use; 4483 i = tx_ring->next_to_use;
4481 buffer_info = &tx_ring->buffer_info[i]; 4484 buffer_info = &tx_ring->buffer_info[i];
@@ -4595,7 +4598,7 @@ dma_error:
4595 i += tx_ring->count; 4598 i += tx_ring->count;
4596 i--; 4599 i--;
4597 buffer_info = &tx_ring->buffer_info[i]; 4600 buffer_info = &tx_ring->buffer_info[i];
4598 e1000_put_txbuf(adapter, buffer_info);; 4601 e1000_put_txbuf(adapter, buffer_info);
4599 } 4602 }
4600 4603
4601 return 0; 4604 return 0;
@@ -4631,7 +4634,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
4631 4634
4632 i = tx_ring->next_to_use; 4635 i = tx_ring->next_to_use;
4633 4636
4634 while (count--) { 4637 do {
4635 buffer_info = &tx_ring->buffer_info[i]; 4638 buffer_info = &tx_ring->buffer_info[i];
4636 tx_desc = E1000_TX_DESC(*tx_ring, i); 4639 tx_desc = E1000_TX_DESC(*tx_ring, i);
4637 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 4640 tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
@@ -4642,7 +4645,7 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
4642 i++; 4645 i++;
4643 if (i == tx_ring->count) 4646 if (i == tx_ring->count)
4644 i = 0; 4647 i = 0;
4645 } 4648 } while (--count > 0);
4646 4649
4647 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd); 4650 tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
4648 4651
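
The change above swaps while (count--) for do { ... } while (--count > 0). The two forms behave identically only when count is at least 1 on entry; with the do-while form the descriptor body is guaranteed to run at least once, so the tx_desc pointer dereferenced right after the loop (visible in the context line above) has always been assigned. A trivial stand-alone comparison of the two loop shapes (not driver code):

#include <stdio.h>

/* Both helpers "process" count items; they differ only when count == 0. */

static int run_while(int count)
{
	int iterations = 0;

	while (count--)		/* body runs exactly 'count' times, possibly 0 */
		iterations++;
	return iterations;
}

static int run_do_while(int count)
{
	int iterations = 0;

	do {			/* body runs at least once */
		iterations++;
	} while (--count > 0);
	return iterations;
}

int main(void)
{
	printf("while:    count=0 -> %d, count=3 -> %d\n",
	       run_while(0), run_while(3));
	printf("do-while: count=0 -> %d, count=3 -> %d\n",
	       run_do_while(0), run_do_while(3));
	return 0;
}
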
@@ -5216,7 +5219,7 @@ static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
5216 * Release control of h/w to f/w. If f/w is AMT enabled, this 5219 * Release control of h/w to f/w. If f/w is AMT enabled, this
5217 * would have already happened in close and is redundant. 5220 * would have already happened in close and is redundant.
5218 */ 5221 */
5219 e1000_release_hw_control(adapter); 5222 e1000e_release_hw_control(adapter);
5220 5223
5221 pci_disable_device(pdev); 5224 pci_disable_device(pdev);
5222 5225
@@ -5373,7 +5376,7 @@ static int __e1000_resume(struct pci_dev *pdev)
5373 * under the control of the driver. 5376 * under the control of the driver.
5374 */ 5377 */
5375 if (!(adapter->flags & FLAG_HAS_AMT)) 5378 if (!(adapter->flags & FLAG_HAS_AMT))
5376 e1000_get_hw_control(adapter); 5379 e1000e_get_hw_control(adapter);
5377 5380
5378 return 0; 5381 return 0;
5379} 5382}
@@ -5465,6 +5468,36 @@ static void e1000_shutdown(struct pci_dev *pdev)
5465} 5468}
5466 5469
5467#ifdef CONFIG_NET_POLL_CONTROLLER 5470#ifdef CONFIG_NET_POLL_CONTROLLER
5471
5472static irqreturn_t e1000_intr_msix(int irq, void *data)
5473{
5474 struct net_device *netdev = data;
5475 struct e1000_adapter *adapter = netdev_priv(netdev);
5476 int vector, msix_irq;
5477
5478 if (adapter->msix_entries) {
5479 vector = 0;
5480 msix_irq = adapter->msix_entries[vector].vector;
5481 disable_irq(msix_irq);
5482 e1000_intr_msix_rx(msix_irq, netdev);
5483 enable_irq(msix_irq);
5484
5485 vector++;
5486 msix_irq = adapter->msix_entries[vector].vector;
5487 disable_irq(msix_irq);
5488 e1000_intr_msix_tx(msix_irq, netdev);
5489 enable_irq(msix_irq);
5490
5491 vector++;
5492 msix_irq = adapter->msix_entries[vector].vector;
5493 disable_irq(msix_irq);
5494 e1000_msix_other(msix_irq, netdev);
5495 enable_irq(msix_irq);
5496 }
5497
5498 return IRQ_HANDLED;
5499}
5500
5468/* 5501/*
5469 * Polling 'interrupt' - used by things like netconsole to send skbs 5502 * Polling 'interrupt' - used by things like netconsole to send skbs
5470 * without having to re-enable interrupts. It's not called while 5503 * without having to re-enable interrupts. It's not called while
@@ -5474,10 +5507,21 @@ static void e1000_netpoll(struct net_device *netdev)
5474{ 5507{
5475 struct e1000_adapter *adapter = netdev_priv(netdev); 5508 struct e1000_adapter *adapter = netdev_priv(netdev);
5476 5509
5477 disable_irq(adapter->pdev->irq); 5510 switch (adapter->int_mode) {
5478 e1000_intr(adapter->pdev->irq, netdev); 5511 case E1000E_INT_MODE_MSIX:
5479 5512 e1000_intr_msix(adapter->pdev->irq, netdev);
5480 enable_irq(adapter->pdev->irq); 5513 break;
5514 case E1000E_INT_MODE_MSI:
5515 disable_irq(adapter->pdev->irq);
5516 e1000_intr_msi(adapter->pdev->irq, netdev);
5517 enable_irq(adapter->pdev->irq);
5518 break;
5519 default: /* E1000E_INT_MODE_LEGACY */
5520 disable_irq(adapter->pdev->irq);
5521 e1000_intr(adapter->pdev->irq, netdev);
5522 enable_irq(adapter->pdev->irq);
5523 break;
5524 }
5481} 5525}
5482#endif 5526#endif
5483 5527
@@ -5579,7 +5623,7 @@ static void e1000_io_resume(struct pci_dev *pdev)
5579 * under the control of the driver. 5623 * under the control of the driver.
5580 */ 5624 */
5581 if (!(adapter->flags & FLAG_HAS_AMT)) 5625 if (!(adapter->flags & FLAG_HAS_AMT))
5582 e1000_get_hw_control(adapter); 5626 e1000e_get_hw_control(adapter);
5583 5627
5584} 5628}
5585 5629
@@ -5587,7 +5631,8 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
5587{ 5631{
5588 struct e1000_hw *hw = &adapter->hw; 5632 struct e1000_hw *hw = &adapter->hw;
5589 struct net_device *netdev = adapter->netdev; 5633 struct net_device *netdev = adapter->netdev;
5590 u32 pba_num; 5634 u32 ret_val;
5635 u8 pba_str[E1000_PBANUM_LENGTH];
5591 5636
5592 /* print bus type/speed/width info */ 5637 /* print bus type/speed/width info */
5593 e_info("(PCI Express:2.5GB/s:%s) %pM\n", 5638 e_info("(PCI Express:2.5GB/s:%s) %pM\n",
@@ -5598,9 +5643,12 @@ static void e1000_print_device_info(struct e1000_adapter *adapter)
5598 netdev->dev_addr); 5643 netdev->dev_addr);
5599 e_info("Intel(R) PRO/%s Network Connection\n", 5644 e_info("Intel(R) PRO/%s Network Connection\n",
5600 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000"); 5645 (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
5601 e1000e_read_pba_num(hw, &pba_num); 5646 ret_val = e1000_read_pba_string_generic(hw, pba_str,
5602 e_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 5647 E1000_PBANUM_LENGTH);
5603 hw->mac.type, hw->phy.type, (pba_num >> 8), (pba_num & 0xff)); 5648 if (ret_val)
5649 strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
5650 e_info("MAC: %d, PHY: %d, PBA No: %s\n",
5651 hw->mac.type, hw->phy.type, pba_str);
5604} 5652}
5605 5653
5606static void e1000_eeprom_checks(struct e1000_adapter *adapter) 5654static void e1000_eeprom_checks(struct e1000_adapter *adapter)
@@ -5864,6 +5912,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5864 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround); 5912 INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
5865 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task); 5913 INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
5866 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang); 5914 INIT_WORK(&adapter->print_hang_task, e1000_print_hw_hang);
5915 INIT_WORK(&adapter->led_blink_task, e1000e_led_blink_task);
5867 5916
5868 /* Initialize link parameters. User can change them with ethtool */ 5917 /* Initialize link parameters. User can change them with ethtool */
5869 adapter->hw.mac.autoneg = 1; 5918 adapter->hw.mac.autoneg = 1;
@@ -5924,9 +5973,9 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5924 * under the control of the driver. 5973 * under the control of the driver.
5925 */ 5974 */
5926 if (!(adapter->flags & FLAG_HAS_AMT)) 5975 if (!(adapter->flags & FLAG_HAS_AMT))
5927 e1000_get_hw_control(adapter); 5976 e1000e_get_hw_control(adapter);
5928 5977
5929 strcpy(netdev->name, "eth%d"); 5978 strncpy(netdev->name, "eth%d", sizeof(netdev->name) - 1);
5930 err = register_netdev(netdev); 5979 err = register_netdev(netdev);
5931 if (err) 5980 if (err)
5932 goto err_register; 5981 goto err_register;
@@ -5943,12 +5992,11 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
5943 5992
5944err_register: 5993err_register:
5945 if (!(adapter->flags & FLAG_HAS_AMT)) 5994 if (!(adapter->flags & FLAG_HAS_AMT))
5946 e1000_release_hw_control(adapter); 5995 e1000e_release_hw_control(adapter);
5947err_eeprom: 5996err_eeprom:
5948 if (!e1000_check_reset_block(&adapter->hw)) 5997 if (!e1000_check_reset_block(&adapter->hw))
5949 e1000_phy_hw_reset(&adapter->hw); 5998 e1000_phy_hw_reset(&adapter->hw);
5950err_hw_init: 5999err_hw_init:
5951
5952 kfree(adapter->tx_ring); 6000 kfree(adapter->tx_ring);
5953 kfree(adapter->rx_ring); 6001 kfree(adapter->rx_ring);
5954err_sw_init: 6002err_sw_init:
@@ -5984,8 +6032,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5984 bool down = test_bit(__E1000_DOWN, &adapter->state); 6032 bool down = test_bit(__E1000_DOWN, &adapter->state);
5985 6033
5986 /* 6034 /*
5987 * flush_scheduled work may reschedule our watchdog task, so 6035 * The timers may be rescheduled, so explicitly disable them
5988 * explicitly disable watchdog tasks from being rescheduled 6036 * from being rescheduled.
5989 */ 6037 */
5990 if (!down) 6038 if (!down)
5991 set_bit(__E1000_DOWN, &adapter->state); 6039 set_bit(__E1000_DOWN, &adapter->state);
@@ -5996,8 +6044,8 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
5996 cancel_work_sync(&adapter->watchdog_task); 6044 cancel_work_sync(&adapter->watchdog_task);
5997 cancel_work_sync(&adapter->downshift_task); 6045 cancel_work_sync(&adapter->downshift_task);
5998 cancel_work_sync(&adapter->update_phy_task); 6046 cancel_work_sync(&adapter->update_phy_task);
6047 cancel_work_sync(&adapter->led_blink_task);
5999 cancel_work_sync(&adapter->print_hang_task); 6048 cancel_work_sync(&adapter->print_hang_task);
6000 flush_scheduled_work();
6001 6049
6002 if (!(netdev->flags & IFF_UP)) 6050 if (!(netdev->flags & IFF_UP))
6003 e1000_power_down_phy(adapter); 6051 e1000_power_down_phy(adapter);
@@ -6014,7 +6062,7 @@ static void __devexit e1000_remove(struct pci_dev *pdev)
6014 * Release control of h/w to f/w. If f/w is AMT enabled, this 6062 * Release control of h/w to f/w. If f/w is AMT enabled, this
6015 * would have already happened in close and is redundant. 6063 * would have already happened in close and is redundant.
6016 */ 6064 */
6017 e1000_release_hw_control(adapter); 6065 e1000e_release_hw_control(adapter);
6018 6066
6019 e1000e_reset_interrupt_capability(adapter); 6067 e1000e_reset_interrupt_capability(adapter);
6020 kfree(adapter->tx_ring); 6068 kfree(adapter->tx_ring);
diff --git a/drivers/net/e1000e/param.c b/drivers/net/e1000e/param.c
index 3d36911f77f3..a9612b0e4bca 100644
--- a/drivers/net/e1000e/param.c
+++ b/drivers/net/e1000e/param.c
@@ -421,7 +421,7 @@ void __devinit e1000e_check_options(struct e1000_adapter *adapter)
421 static const struct e1000_option opt = { 421 static const struct e1000_option opt = {
422 .type = enable_option, 422 .type = enable_option,
423 .name = "CRC Stripping", 423 .name = "CRC Stripping",
424 .err = "defaulting to enabled", 424 .err = "defaulting to Enabled",
425 .def = OPTION_ENABLED 425 .def = OPTION_ENABLED
426 }; 426 };
427 427
diff --git a/drivers/net/e1000e/phy.c b/drivers/net/e1000e/phy.c
index b9bff5ba009f..00f89e8a9fa0 100644
--- a/drivers/net/e1000e/phy.c
+++ b/drivers/net/e1000e/phy.c
@@ -42,20 +42,20 @@ static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
42 u16 *data, bool read); 42 u16 *data, bool read);
43 43
44/* Cable length tables */ 44/* Cable length tables */
45static const u16 e1000_m88_cable_length_table[] = 45static const u16 e1000_m88_cable_length_table[] = {
46 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; 46 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
47#define M88E1000_CABLE_LENGTH_TABLE_SIZE \ 47#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
48 ARRAY_SIZE(e1000_m88_cable_length_table) 48 ARRAY_SIZE(e1000_m88_cable_length_table)
49 49
50static const u16 e1000_igp_2_cable_length_table[] = 50static const u16 e1000_igp_2_cable_length_table[] = {
51 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3, 51 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
52 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22, 52 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
53 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40, 53 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
54 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61, 54 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
55 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82, 55 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
56 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95, 56 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
57 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121, 57 100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
58 124}; 58 124};
59#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \ 59#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
60 ARRAY_SIZE(e1000_igp_2_cable_length_table) 60 ARRAY_SIZE(e1000_igp_2_cable_length_table)
61 61
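
Here, and in the irqlist/irqmap changes for e2100.c, eepro.c and eexpress.c further down, lookup tables become static const: one read-only copy eligible for .rodata instead of data that may be rebuilt on the stack at every call, and accidental writes are rejected by the compiler. A minimal illustration with a hypothetical table (not from the driver):

#include <stdio.h>

/*
 * Plain local array: the initializer may have to be copied onto the
 * stack every time the function is entered.
 */
static int lookup_copy(int i)
{
	int table[] = { 10, 20, 30, 40 };

	return table[i & 3];
}

/*
 * static const: a single read-only instance shared by all calls.
 */
static int lookup_rodata(int i)
{
	static const int table[] = { 10, 20, 30, 40 };

	return table[i & 3];
}

int main(void)
{
	printf("%d %d\n", lookup_copy(2), lookup_rodata(2));
	return 0;
}
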
@@ -226,6 +226,13 @@ s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
226 } 226 }
227 *data = (u16) mdic; 227 *data = (u16) mdic;
228 228
229 /*
230 * Allow some time after each MDIC transaction to avoid
231 * reading duplicate data in the next MDIC transaction.
232 */
233 if (hw->mac.type == e1000_pch2lan)
234 udelay(100);
235
229 return 0; 236 return 0;
230} 237}
231 238
@@ -279,6 +286,13 @@ s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
279 return -E1000_ERR_PHY; 286 return -E1000_ERR_PHY;
280 } 287 }
281 288
289 /*
290 * Allow some time after each MDIC transaction to avoid
291 * reading duplicate data in the next MDIC transaction.
292 */
293 if (hw->mac.type == e1000_pch2lan)
294 udelay(100);
295
282 return 0; 296 return 0;
283} 297}
284 298
@@ -623,12 +637,11 @@ s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
623 **/ 637 **/
624s32 e1000_copper_link_setup_82577(struct e1000_hw *hw) 638s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
625{ 639{
626 struct e1000_phy_info *phy = &hw->phy;
627 s32 ret_val; 640 s32 ret_val;
628 u16 phy_data; 641 u16 phy_data;
629 642
630 /* Enable CRS on TX. This must be set for half-duplex operation. */ 643 /* Enable CRS on TX. This must be set for half-duplex operation. */
631 ret_val = phy->ops.read_reg(hw, I82577_CFG_REG, &phy_data); 644 ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
632 if (ret_val) 645 if (ret_val)
633 goto out; 646 goto out;
634 647
@@ -637,7 +650,7 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
637 /* Enable downshift */ 650 /* Enable downshift */
638 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT; 651 phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
639 652
640 ret_val = phy->ops.write_reg(hw, I82577_CFG_REG, phy_data); 653 ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
641 654
642out: 655out:
643 return ret_val; 656 return ret_val;
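
The remaining phy.c hunks replace open-coded phy->ops.read_reg()/write_reg() calls with the driver's e1e_rphy()/e1e_wphy() shorthands, which also lets unused local struct e1000_phy_info pointers disappear. Assuming the shorthands are thin wrappers that forward to the same ops table (their real definitions live in the e1000e headers, not in this diff), the relationship looks roughly like this toy sketch:

#include <stdio.h>

typedef unsigned short u16;
typedef unsigned int u32;
typedef int s32;

struct e1000_hw;	/* toy stand-ins, not the driver's real layout */

struct e1000_phy_ops {
	s32 (*read_reg)(struct e1000_hw *hw, u32 offset, u16 *data);
	s32 (*write_reg)(struct e1000_hw *hw, u32 offset, u16 data);
};

struct e1000_hw {
	struct {
		struct e1000_phy_ops ops;
	} phy;
};

/* Assumed shape of the shorthands: forward straight to the ops table. */
#define e1e_rphy(hw, reg, data)	((hw)->phy.ops.read_reg((hw), (reg), (data)))
#define e1e_wphy(hw, reg, data)	((hw)->phy.ops.write_reg((hw), (reg), (data)))

static s32 dummy_read(struct e1000_hw *hw, u32 offset, u16 *data)
{
	(void)hw; (void)offset;
	*data = 0x1234;
	return 0;
}

static s32 dummy_write(struct e1000_hw *hw, u32 offset, u16 data)
{
	(void)hw; (void)offset; (void)data;
	return 0;
}

int main(void)
{
	struct e1000_hw hw;
	u16 val = 0;

	hw.phy.ops.read_reg = dummy_read;
	hw.phy.ops.write_reg = dummy_write;

	e1e_rphy(&hw, 0x10, &val);	/* same call, shorter spelling */
	e1e_wphy(&hw, 0x10, val);
	printf("0x%04x\n", val);
	return 0;
}
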
@@ -760,16 +773,14 @@ s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
760 } 773 }
761 774
762 if (phy->type == e1000_phy_82578) { 775 if (phy->type == e1000_phy_82578) {
763 ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 776 ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
764 &phy_data);
765 if (ret_val) 777 if (ret_val)
766 return ret_val; 778 return ret_val;
767 779
768 /* 82578 PHY - set the downshift count to 1x. */ 780 /* 82578 PHY - set the downshift count to 1x. */
769 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE; 781 phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
770 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK; 782 phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
771 ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, 783 ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
772 phy_data);
773 if (ret_val) 784 if (ret_val)
774 return ret_val; 785 return ret_val;
775 } 786 }
@@ -1043,9 +1054,8 @@ static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
1043 1054
1044 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg); 1055 e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
1045 1056
1046 if (phy->autoneg_mask & ADVERTISE_1000_FULL) { 1057 if (phy->autoneg_mask & ADVERTISE_1000_FULL)
1047 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg); 1058 ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
1048 }
1049 1059
1050 return ret_val; 1060 return ret_val;
1051} 1061}
@@ -1306,9 +1316,8 @@ s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
1306 * We didn't get link. 1316 * We didn't get link.
1307 * Reset the DSP and cross our fingers. 1317 * Reset the DSP and cross our fingers.
1308 */ 1318 */
1309 ret_val = e1e_wphy(hw, 1319 ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
1310 M88E1000_PHY_PAGE_SELECT, 1320 0x001d);
1311 0x001d);
1312 if (ret_val) 1321 if (ret_val)
1313 return ret_val; 1322 return ret_val;
1314 ret_val = e1000e_phy_reset_dsp(hw); 1323 ret_val = e1000e_phy_reset_dsp(hw);
@@ -1840,11 +1849,12 @@ s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
1840 u16 phy_data, i, agc_value = 0; 1849 u16 phy_data, i, agc_value = 0;
1841 u16 cur_agc_index, max_agc_index = 0; 1850 u16 cur_agc_index, max_agc_index = 0;
1842 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; 1851 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1843 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = 1852 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1844 {IGP02E1000_PHY_AGC_A, 1853 IGP02E1000_PHY_AGC_A,
1845 IGP02E1000_PHY_AGC_B, 1854 IGP02E1000_PHY_AGC_B,
1846 IGP02E1000_PHY_AGC_C, 1855 IGP02E1000_PHY_AGC_C,
1847 IGP02E1000_PHY_AGC_D}; 1856 IGP02E1000_PHY_AGC_D
1857 };
1848 1858
1849 /* Read the AGC registers for all channels */ 1859 /* Read the AGC registers for all channels */
1850 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { 1860 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
@@ -3057,12 +3067,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
3057 goto out; 3067 goto out;
3058 3068
3059 /* Do not apply workaround if in PHY loopback bit 14 set */ 3069 /* Do not apply workaround if in PHY loopback bit 14 set */
3060 hw->phy.ops.read_reg(hw, PHY_CONTROL, &data); 3070 e1e_rphy(hw, PHY_CONTROL, &data);
3061 if (data & PHY_CONTROL_LB) 3071 if (data & PHY_CONTROL_LB)
3062 goto out; 3072 goto out;
3063 3073
3064 /* check if link is up and at 1Gbps */ 3074 /* check if link is up and at 1Gbps */
3065 ret_val = hw->phy.ops.read_reg(hw, BM_CS_STATUS, &data); 3075 ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
3066 if (ret_val) 3076 if (ret_val)
3067 goto out; 3077 goto out;
3068 3078
@@ -3078,14 +3088,12 @@ s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
3078 mdelay(200); 3088 mdelay(200);
3079 3089
3080 /* flush the packets in the fifo buffer */ 3090 /* flush the packets in the fifo buffer */
3081 ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, 3091 ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
3082 HV_MUX_DATA_CTRL_GEN_TO_MAC | 3092 HV_MUX_DATA_CTRL_FORCE_SPEED);
3083 HV_MUX_DATA_CTRL_FORCE_SPEED);
3084 if (ret_val) 3093 if (ret_val)
3085 goto out; 3094 goto out;
3086 3095
3087 ret_val = hw->phy.ops.write_reg(hw, HV_MUX_DATA_CTRL, 3096 ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
3088 HV_MUX_DATA_CTRL_GEN_TO_MAC);
3089 3097
3090out: 3098out:
3091 return ret_val; 3099 return ret_val;
@@ -3105,7 +3113,7 @@ s32 e1000_check_polarity_82577(struct e1000_hw *hw)
3105 s32 ret_val; 3113 s32 ret_val;
3106 u16 data; 3114 u16 data;
3107 3115
3108 ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); 3116 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
3109 3117
3110 if (!ret_val) 3118 if (!ret_val)
3111 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY) 3119 phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
@@ -3128,13 +3136,13 @@ s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
3128 u16 phy_data; 3136 u16 phy_data;
3129 bool link; 3137 bool link;
3130 3138
3131 ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data); 3139 ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
3132 if (ret_val) 3140 if (ret_val)
3133 goto out; 3141 goto out;
3134 3142
3135 e1000e_phy_force_speed_duplex_setup(hw, &phy_data); 3143 e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
3136 3144
3137 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); 3145 ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
3138 if (ret_val) 3146 if (ret_val)
3139 goto out; 3147 goto out;
3140 3148
@@ -3198,7 +3206,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3198 if (ret_val) 3206 if (ret_val)
3199 goto out; 3207 goto out;
3200 3208
3201 ret_val = phy->ops.read_reg(hw, I82577_PHY_STATUS_2, &data); 3209 ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
3202 if (ret_val) 3210 if (ret_val)
3203 goto out; 3211 goto out;
3204 3212
@@ -3210,7 +3218,7 @@ s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
3210 if (ret_val) 3218 if (ret_val)
3211 goto out; 3219 goto out;
3212 3220
3213 ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data); 3221 ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
3214 if (ret_val) 3222 if (ret_val)
3215 goto out; 3223 goto out;
3216 3224
@@ -3244,7 +3252,7 @@ s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
3244 s32 ret_val; 3252 s32 ret_val;
3245 u16 phy_data, length; 3253 u16 phy_data, length;
3246 3254
3247 ret_val = phy->ops.read_reg(hw, I82577_PHY_DIAG_STATUS, &phy_data); 3255 ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
3248 if (ret_val) 3256 if (ret_val)
3249 goto out; 3257 goto out;
3250 3258
diff --git a/drivers/net/e2100.c b/drivers/net/e2100.c
index 06e72fbef862..94ec973b2bdc 100644
--- a/drivers/net/e2100.c
+++ b/drivers/net/e2100.c
@@ -216,7 +216,7 @@ static int __init e21_probe1(struct net_device *dev, int ioaddr)
216 printk(" %02X", station_addr[i]); 216 printk(" %02X", station_addr[i]);
217 217
218 if (dev->irq < 2) { 218 if (dev->irq < 2) {
219 int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4}; 219 static const int irqlist[] = {15, 11, 10, 12, 5, 9, 3, 4};
220 for (i = 0; i < ARRAY_SIZE(irqlist); i++) 220 for (i = 0; i < ARRAY_SIZE(irqlist); i++)
221 if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) { 221 if (request_irq (irqlist[i], NULL, 0, "bogus", NULL) != -EBUSY) {
222 dev->irq = irqlist[i]; 222 dev->irq = irqlist[i];
diff --git a/drivers/net/eepro.c b/drivers/net/eepro.c
index dbaec546c428..eb35951a2442 100644
--- a/drivers/net/eepro.c
+++ b/drivers/net/eepro.c
@@ -302,7 +302,7 @@ struct eepro_local {
302#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */ 302#define ee_id_eepro10p0 0x10 /* ID for eepro/10+ */
303#define ee_id_eepro10p1 0x31 303#define ee_id_eepro10p1 0x31
304 304
305#define TX_TIMEOUT 40 305#define TX_TIMEOUT ((4*HZ)/10)
306 306
307/* Index to functions, as function prototypes. */ 307/* Index to functions, as function prototypes. */
308 308
@@ -891,12 +891,13 @@ err:
891 there is non-reboot way to recover if something goes wrong. 891 there is non-reboot way to recover if something goes wrong.
892 */ 892 */
893 893
894static char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1}; 894static const char irqrmap[] = {-1,-1,0,1,-1,2,-1,-1,-1,0,3,4,-1,-1,-1,-1};
895static char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1}; 895static const char irqrmap2[] = {-1,-1,4,0,1,2,-1,3,-1,4,5,6,7,-1,-1,-1};
896static int eepro_grab_irq(struct net_device *dev) 896static int eepro_grab_irq(struct net_device *dev)
897{ 897{
898 int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 }; 898 static const int irqlist[] = { 3, 4, 5, 7, 9, 10, 11, 12, 0 };
899 int *irqp = irqlist, temp_reg, ioaddr = dev->base_addr; 899 const int *irqp = irqlist;
900 int temp_reg, ioaddr = dev->base_addr;
900 901
901 eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */ 902 eepro_sw2bank1(ioaddr); /* be CAREFUL, BANK 1 now */
902 903
diff --git a/drivers/net/eexpress.c b/drivers/net/eexpress.c
index 12c37d264108..48ee51bb9e50 100644
--- a/drivers/net/eexpress.c
+++ b/drivers/net/eexpress.c
@@ -1103,7 +1103,7 @@ static int __init eexp_hw_probe(struct net_device *dev, unsigned short ioaddr)
1103 dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i]; 1103 dev->dev_addr[i] = ((unsigned char *)hw_addr)[5-i];
1104 1104
1105 { 1105 {
1106 static char irqmap[]={0, 9, 3, 4, 5, 10, 11, 0}; 1106 static const char irqmap[] = { 0, 9, 3, 4, 5, 10, 11, 0 };
1107 unsigned short setupval = eexp_hw_readeeprom(ioaddr,0); 1107 unsigned short setupval = eexp_hw_readeeprom(ioaddr,0);
1108 1108
1109 /* Use the IRQ from EEPROM if none was given */ 1109 /* Use the IRQ from EEPROM if none was given */
diff --git a/drivers/net/ehea/ehea.h b/drivers/net/ehea/ehea.h
index 8e745e74828d..6c7257bd73fc 100644
--- a/drivers/net/ehea/ehea.h
+++ b/drivers/net/ehea/ehea.h
@@ -40,7 +40,7 @@
40#include <asm/io.h> 40#include <asm/io.h>
41 41
42#define DRV_NAME "ehea" 42#define DRV_NAME "ehea"
43#define DRV_VERSION "EHEA_0106" 43#define DRV_VERSION "EHEA_0107"
44 44
45/* eHEA capability flags */ 45/* eHEA capability flags */
46#define DLPAR_PORT_ADD_REM 1 46#define DLPAR_PORT_ADD_REM 1
@@ -130,19 +130,6 @@
130 130
131/* utility functions */ 131/* utility functions */
132 132
133#define ehea_info(fmt, args...) \
134 printk(KERN_INFO DRV_NAME ": " fmt "\n", ## args)
135
136#define ehea_error(fmt, args...) \
137 printk(KERN_ERR DRV_NAME ": Error in %s: " fmt "\n", __func__, ## args)
138
139#ifdef DEBUG
140#define ehea_debug(fmt, args...) \
141 printk(KERN_DEBUG DRV_NAME ": " fmt, ## args)
142#else
143#define ehea_debug(fmt, args...) do {} while (0)
144#endif
145
146void ehea_dump(void *adr, int len, char *msg); 133void ehea_dump(void *adr, int len, char *msg);
147 134
148#define EHEA_BMASK(pos, length) (((pos) << 16) + (length)) 135#define EHEA_BMASK(pos, length) (((pos) << 16) + (length))
@@ -515,6 +502,4 @@ void ehea_set_ethtool_ops(struct net_device *netdev);
515int ehea_sense_port_attr(struct ehea_port *port); 502int ehea_sense_port_attr(struct ehea_port *port);
516int ehea_set_portspeed(struct ehea_port *port, u32 port_speed); 503int ehea_set_portspeed(struct ehea_port *port, u32 port_speed);
517 504
518extern struct work_struct ehea_rereg_mr_task;
519
520#endif /* __EHEA_H__ */ 505#endif /* __EHEA_H__ */
diff --git a/drivers/net/ehea/ehea_ethtool.c b/drivers/net/ehea/ehea_ethtool.c
index 1f37ee6b2a26..3e2e734fecb7 100644
--- a/drivers/net/ehea/ehea_ethtool.c
+++ b/drivers/net/ehea/ehea_ethtool.c
@@ -26,6 +26,8 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ehea.h" 31#include "ehea.h"
30#include "ehea_phyp.h" 32#include "ehea_phyp.h"
31 33
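
This hunk, together with the matching one in ehea_main.c below, switches the driver to the generic kernel logging helpers; most of the remaining ehea changes then mechanically replace the private ehea_info/ehea_error/ehea_debug macros (deleted from ehea.h above) with pr_err()/pr_info()/pr_debug() for device-less messages and netdev_err()/netdev_info()/netif_info() where a specific interface is known. A compressed kernel-side sketch of the pattern (illustrative, not copied from the driver):

/* pr_fmt() must be defined before the first include so that every
 * pr_*() call below is automatically prefixed with the module name. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/netdevice.h>

static void example_log(struct net_device *dev)
{
	/* was: ehea_error("no mem for cb2"); */
	pr_err("no mem for cb2\n");

	/* was: ehea_info("%s: Logical port up", dev->name);
	 * netdev_*() identifies the device in the message prefix itself. */
	netdev_info(dev, "Logical port up\n");
}
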
@@ -118,10 +120,10 @@ doit:
118 ret = ehea_set_portspeed(port, sp); 120 ret = ehea_set_portspeed(port, sp);
119 121
120 if (!ret) 122 if (!ret)
121 ehea_info("%s: Port speed successfully set: %dMbps " 123 netdev_info(dev,
122 "%s Duplex", 124 "Port speed successfully set: %dMbps %s Duplex\n",
123 port->netdev->name, port->port_speed, 125 port->port_speed,
124 port->full_duplex == 1 ? "Full" : "Half"); 126 port->full_duplex == 1 ? "Full" : "Half");
125out: 127out:
126 return ret; 128 return ret;
127} 129}
@@ -134,10 +136,10 @@ static int ehea_nway_reset(struct net_device *dev)
134 ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG); 136 ret = ehea_set_portspeed(port, EHEA_SPEED_AUTONEG);
135 137
136 if (!ret) 138 if (!ret)
137 ehea_info("%s: Port speed successfully set: %dMbps " 139 netdev_info(port->netdev,
138 "%s Duplex", 140 "Port speed successfully set: %dMbps %s Duplex\n",
139 port->netdev->name, port->port_speed, 141 port->port_speed,
140 port->full_duplex == 1 ? "Full" : "Half"); 142 port->full_duplex == 1 ? "Full" : "Half");
141 return ret; 143 return ret;
142} 144}
143 145
@@ -263,6 +265,13 @@ static void ehea_get_ethtool_stats(struct net_device *dev,
263 265
264static int ehea_set_flags(struct net_device *dev, u32 data) 266static int ehea_set_flags(struct net_device *dev, u32 data)
265{ 267{
268 /* Avoid changing the VLAN flags */
269 if ((data & (ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN)) !=
270 (ethtool_op_get_flags(dev) & (ETH_FLAG_RXVLAN |
271 ETH_FLAG_TXVLAN))){
272 return -EINVAL;
273 }
274
266 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO 275 return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO
267 | ETH_FLAG_TXVLAN 276 | ETH_FLAG_TXVLAN
268 | ETH_FLAG_RXVLAN); 277 | ETH_FLAG_RXVLAN);
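
The added check rejects ethtool flag updates that would toggle hardware VLAN acceleration while still letting LRO be switched: only a request whose ETH_FLAG_RXVLAN/ETH_FLAG_TXVLAN bits differ from the device's current state returns -EINVAL. A condensed sketch of that guard pattern (the FIXED_FLAGS name and the function are made up for the example):

#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

/* Flags userspace may see but must not be able to toggle at runtime. */
#define FIXED_FLAGS (ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN)

static int example_set_flags(struct net_device *dev, u32 data)
{
	/* Any attempt to change a fixed flag is refused outright. */
	if ((data & FIXED_FLAGS) != (ethtool_op_get_flags(dev) & FIXED_FLAGS))
		return -EINVAL;

	/* Everything else (here: LRO) goes through the generic helper. */
	return ethtool_op_set_flags(dev, data, ETH_FLAG_LRO | FIXED_FLAGS);
}
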
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index b95f087cd5a9..f75d3144b8a5 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -26,6 +26,8 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/in.h> 31#include <linux/in.h>
30#include <linux/ip.h> 32#include <linux/ip.h>
31#include <linux/tcp.h> 33#include <linux/tcp.h>
@@ -101,7 +103,6 @@ MODULE_PARM_DESC(use_lro, " Large Receive Offload, 1: enable, 0: disable, "
101static int port_name_cnt; 103static int port_name_cnt;
102static LIST_HEAD(adapter_list); 104static LIST_HEAD(adapter_list);
103static unsigned long ehea_driver_flags; 105static unsigned long ehea_driver_flags;
104struct work_struct ehea_rereg_mr_task;
105static DEFINE_MUTEX(dlpar_mem_lock); 106static DEFINE_MUTEX(dlpar_mem_lock);
106struct ehea_fw_handle_array ehea_fw_handles; 107struct ehea_fw_handle_array ehea_fw_handles;
107struct ehea_bcmc_reg_array ehea_bcmc_regs; 108struct ehea_bcmc_reg_array ehea_bcmc_regs;
@@ -136,8 +137,8 @@ void ehea_dump(void *adr, int len, char *msg)
136 int x; 137 int x;
137 unsigned char *deb = adr; 138 unsigned char *deb = adr;
138 for (x = 0; x < len; x += 16) { 139 for (x = 0; x < len; x += 16) {
139 printk(DRV_NAME " %s adr=%p ofs=%04x %016llx %016llx\n", msg, 140 pr_info("%s adr=%p ofs=%04x %016llx %016llx\n",
140 deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8])); 141 msg, deb, x, *((u64 *)&deb[0]), *((u64 *)&deb[8]));
141 deb += 16; 142 deb += 16;
142 } 143 }
143} 144}
@@ -337,7 +338,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
337 338
338 cb2 = (void *)get_zeroed_page(GFP_KERNEL); 339 cb2 = (void *)get_zeroed_page(GFP_KERNEL);
339 if (!cb2) { 340 if (!cb2) {
340 ehea_error("no mem for cb2"); 341 netdev_err(dev, "no mem for cb2\n");
341 goto out; 342 goto out;
342 } 343 }
343 344
@@ -345,7 +346,7 @@ static struct net_device_stats *ehea_get_stats(struct net_device *dev)
345 port->logical_port_id, 346 port->logical_port_id,
346 H_PORT_CB2, H_PORT_CB2_ALL, cb2); 347 H_PORT_CB2, H_PORT_CB2_ALL, cb2);
347 if (hret != H_SUCCESS) { 348 if (hret != H_SUCCESS) {
348 ehea_error("query_ehea_port failed"); 349 netdev_err(dev, "query_ehea_port failed\n");
349 goto out_herr; 350 goto out_herr;
350 } 351 }
351 352
@@ -400,7 +401,7 @@ static void ehea_refill_rq1(struct ehea_port_res *pr, int index, int nr_of_wqes)
400 skb_arr_rq1[index] = netdev_alloc_skb(dev, 401 skb_arr_rq1[index] = netdev_alloc_skb(dev,
401 EHEA_L_PKT_SIZE); 402 EHEA_L_PKT_SIZE);
402 if (!skb_arr_rq1[index]) { 403 if (!skb_arr_rq1[index]) {
403 ehea_info("Unable to allocate enough skb in the array\n"); 404 netdev_info(dev, "Unable to allocate enough skb in the array\n");
404 pr->rq1_skba.os_skbs = fill_wqes - i; 405 pr->rq1_skba.os_skbs = fill_wqes - i;
405 break; 406 break;
406 } 407 }
@@ -424,19 +425,19 @@ static void ehea_init_fill_rq1(struct ehea_port_res *pr, int nr_rq1a)
424 int i; 425 int i;
425 426
426 if (nr_rq1a > pr->rq1_skba.len) { 427 if (nr_rq1a > pr->rq1_skba.len) {
427 ehea_error("NR_RQ1A bigger than skb array len\n"); 428 netdev_err(dev, "NR_RQ1A bigger than skb array len\n");
428 return; 429 return;
429 } 430 }
430 431
431 for (i = 0; i < nr_rq1a; i++) { 432 for (i = 0; i < nr_rq1a; i++) {
432 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE); 433 skb_arr_rq1[i] = netdev_alloc_skb(dev, EHEA_L_PKT_SIZE);
433 if (!skb_arr_rq1[i]) { 434 if (!skb_arr_rq1[i]) {
434 ehea_info("No enough memory to allocate skb array\n"); 435 netdev_info(dev, "Not enough memory to allocate skb array\n");
435 break; 436 break;
436 } 437 }
437 } 438 }
438 /* Ring doorbell */ 439 /* Ring doorbell */
439 ehea_update_rq1a(pr->qp, i); 440 ehea_update_rq1a(pr->qp, i - 1);
440} 441}
441 442
442static int ehea_refill_rq_def(struct ehea_port_res *pr, 443static int ehea_refill_rq_def(struct ehea_port_res *pr,
@@ -469,8 +470,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
469 if (!skb) { 470 if (!skb) {
470 q_skba->os_skbs = fill_wqes - i; 471 q_skba->os_skbs = fill_wqes - i;
471 if (q_skba->os_skbs == q_skba->len - 2) { 472 if (q_skba->os_skbs == q_skba->len - 2) {
472 ehea_info("%s: rq%i ran dry - no mem for skb", 473 netdev_info(pr->port->netdev,
473 pr->port->netdev->name, rq_nr); 474 "rq%i ran dry - no mem for skb\n",
475 rq_nr);
474 ret = -ENOMEM; 476 ret = -ENOMEM;
475 } 477 }
476 break; 478 break;
@@ -635,8 +637,8 @@ static int ehea_treat_poll_error(struct ehea_port_res *pr, int rq,
635 637
636 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) { 638 if (cqe->status & EHEA_CQE_STAT_FAT_ERR_MASK) {
637 if (netif_msg_rx_err(pr->port)) { 639 if (netif_msg_rx_err(pr->port)) {
638 ehea_error("Critical receive error for QP %d. " 640 pr_err("Critical receive error for QP %d. Resetting port.\n",
639 "Resetting port.", pr->qp->init_attr.qp_nr); 641 pr->qp->init_attr.qp_nr);
640 ehea_dump(cqe, sizeof(*cqe), "CQE"); 642 ehea_dump(cqe, sizeof(*cqe), "CQE");
641 } 643 }
642 ehea_schedule_port_reset(pr->port); 644 ehea_schedule_port_reset(pr->port);
@@ -738,13 +740,13 @@ static int ehea_proc_rwqes(struct net_device *dev,
738 skb_arr_rq1_len, 740 skb_arr_rq1_len,
739 wqe_index); 741 wqe_index);
740 if (unlikely(!skb)) { 742 if (unlikely(!skb)) {
741 if (netif_msg_rx_err(port)) 743 netif_info(port, rx_err, dev,
742 ehea_error("LL rq1: skb=NULL"); 744 "LL rq1: skb=NULL\n");
743 745
744 skb = netdev_alloc_skb(dev, 746 skb = netdev_alloc_skb(dev,
745 EHEA_L_PKT_SIZE); 747 EHEA_L_PKT_SIZE);
746 if (!skb) { 748 if (!skb) {
747 ehea_info("Not enough memory to allocate skb\n"); 749 netdev_err(dev, "Not enough memory to allocate skb\n");
748 break; 750 break;
749 } 751 }
750 } 752 }
@@ -756,8 +758,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
756 skb = get_skb_by_index(skb_arr_rq2, 758 skb = get_skb_by_index(skb_arr_rq2,
757 skb_arr_rq2_len, cqe); 759 skb_arr_rq2_len, cqe);
758 if (unlikely(!skb)) { 760 if (unlikely(!skb)) {
759 if (netif_msg_rx_err(port)) 761 netif_err(port, rx_err, dev,
760 ehea_error("rq2: skb=NULL"); 762 "rq2: skb=NULL\n");
761 break; 763 break;
762 } 764 }
763 ehea_fill_skb(dev, skb, cqe); 765 ehea_fill_skb(dev, skb, cqe);
@@ -767,8 +769,8 @@ static int ehea_proc_rwqes(struct net_device *dev,
767 skb = get_skb_by_index(skb_arr_rq3, 769 skb = get_skb_by_index(skb_arr_rq3,
768 skb_arr_rq3_len, cqe); 770 skb_arr_rq3_len, cqe);
769 if (unlikely(!skb)) { 771 if (unlikely(!skb)) {
770 if (netif_msg_rx_err(port)) 772 netif_err(port, rx_err, dev,
771 ehea_error("rq3: skb=NULL"); 773 "rq3: skb=NULL\n");
772 break; 774 break;
773 } 775 }
774 ehea_fill_skb(dev, skb, cqe); 776 ehea_fill_skb(dev, skb, cqe);
@@ -840,7 +842,7 @@ static void check_sqs(struct ehea_port *port)
840 msecs_to_jiffies(100)); 842 msecs_to_jiffies(100));
841 843
842 if (!ret) { 844 if (!ret) {
843 ehea_error("HW/SW queues out of sync"); 845 pr_err("HW/SW queues out of sync\n");
844 ehea_schedule_port_reset(pr->port); 846 ehea_schedule_port_reset(pr->port);
845 return; 847 return;
846 } 848 }
@@ -873,14 +875,14 @@ static struct ehea_cqe *ehea_proc_cqes(struct ehea_port_res *pr, int my_quota)
873 } 875 }
874 876
875 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) { 877 if (cqe->status & EHEA_CQE_STAT_ERR_MASK) {
876 ehea_error("Bad send completion status=0x%04X", 878 pr_err("Bad send completion status=0x%04X\n",
877 cqe->status); 879 cqe->status);
878 880
879 if (netif_msg_tx_err(pr->port)) 881 if (netif_msg_tx_err(pr->port))
880 ehea_dump(cqe, sizeof(*cqe), "Send CQE"); 882 ehea_dump(cqe, sizeof(*cqe), "Send CQE");
881 883
882 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) { 884 if (cqe->status & EHEA_CQE_STAT_RESET_MASK) {
883 ehea_error("Resetting port"); 885 pr_err("Resetting port\n");
884 ehea_schedule_port_reset(pr->port); 886 ehea_schedule_port_reset(pr->port);
885 break; 887 break;
886 } 888 }
@@ -998,8 +1000,8 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
998 1000
999 while (eqe) { 1001 while (eqe) {
1000 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry); 1002 qp_token = EHEA_BMASK_GET(EHEA_EQE_QP_TOKEN, eqe->entry);
1001 ehea_error("QP aff_err: entry=0x%llx, token=0x%x", 1003 pr_err("QP aff_err: entry=0x%llx, token=0x%x\n",
1002 eqe->entry, qp_token); 1004 eqe->entry, qp_token);
1003 1005
1004 qp = port->port_res[qp_token].qp; 1006 qp = port->port_res[qp_token].qp;
1005 1007
@@ -1017,7 +1019,7 @@ static irqreturn_t ehea_qp_aff_irq_handler(int irq, void *param)
1017 } 1019 }
1018 1020
1019 if (reset_port) { 1021 if (reset_port) {
1020 ehea_error("Resetting port"); 1022 pr_err("Resetting port\n");
1021 ehea_schedule_port_reset(port); 1023 ehea_schedule_port_reset(port);
1022 } 1024 }
1023 1025
@@ -1045,7 +1047,7 @@ int ehea_sense_port_attr(struct ehea_port *port)
1045 /* may be called via ehea_neq_tasklet() */ 1047 /* may be called via ehea_neq_tasklet() */
1046 cb0 = (void *)get_zeroed_page(GFP_ATOMIC); 1048 cb0 = (void *)get_zeroed_page(GFP_ATOMIC);
1047 if (!cb0) { 1049 if (!cb0) {
1048 ehea_error("no mem for cb0"); 1050 pr_err("no mem for cb0\n");
1049 ret = -ENOMEM; 1051 ret = -ENOMEM;
1050 goto out; 1052 goto out;
1051 } 1053 }
@@ -1137,7 +1139,7 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1137 1139
1138 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 1140 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
1139 if (!cb4) { 1141 if (!cb4) {
1140 ehea_error("no mem for cb4"); 1142 pr_err("no mem for cb4\n");
1141 ret = -ENOMEM; 1143 ret = -ENOMEM;
1142 goto out; 1144 goto out;
1143 } 1145 }
@@ -1188,16 +1190,16 @@ int ehea_set_portspeed(struct ehea_port *port, u32 port_speed)
1188 break; 1190 break;
1189 } 1191 }
1190 } else { 1192 } else {
1191 ehea_error("Failed sensing port speed"); 1193 pr_err("Failed sensing port speed\n");
1192 ret = -EIO; 1194 ret = -EIO;
1193 } 1195 }
1194 } else { 1196 } else {
1195 if (hret == H_AUTHORITY) { 1197 if (hret == H_AUTHORITY) {
1196 ehea_info("Hypervisor denied setting port speed"); 1198 pr_info("Hypervisor denied setting port speed\n");
1197 ret = -EPERM; 1199 ret = -EPERM;
1198 } else { 1200 } else {
1199 ret = -EIO; 1201 ret = -EIO;
1200 ehea_error("Failed setting port speed"); 1202 pr_err("Failed setting port speed\n");
1201 } 1203 }
1202 } 1204 }
1203 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP)) 1205 if (!prop_carrier_state || (port->phy_link == EHEA_PHY_LINK_UP))
@@ -1214,80 +1216,78 @@ static void ehea_parse_eqe(struct ehea_adapter *adapter, u64 eqe)
1214 u8 ec; 1216 u8 ec;
1215 u8 portnum; 1217 u8 portnum;
1216 struct ehea_port *port; 1218 struct ehea_port *port;
1219 struct net_device *dev;
1217 1220
1218 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe); 1221 ec = EHEA_BMASK_GET(NEQE_EVENT_CODE, eqe);
1219 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe); 1222 portnum = EHEA_BMASK_GET(NEQE_PORTNUM, eqe);
1220 port = ehea_get_port(adapter, portnum); 1223 port = ehea_get_port(adapter, portnum);
1224 dev = port->netdev;
1221 1225
1222 switch (ec) { 1226 switch (ec) {
1223 case EHEA_EC_PORTSTATE_CHG: /* port state change */ 1227 case EHEA_EC_PORTSTATE_CHG: /* port state change */
1224 1228
1225 if (!port) { 1229 if (!port) {
1226 ehea_error("unknown portnum %x", portnum); 1230 netdev_err(dev, "unknown portnum %x\n", portnum);
1227 break; 1231 break;
1228 } 1232 }
1229 1233
1230 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) { 1234 if (EHEA_BMASK_GET(NEQE_PORT_UP, eqe)) {
1231 if (!netif_carrier_ok(port->netdev)) { 1235 if (!netif_carrier_ok(dev)) {
1232 ret = ehea_sense_port_attr(port); 1236 ret = ehea_sense_port_attr(port);
1233 if (ret) { 1237 if (ret) {
1234 ehea_error("failed resensing port " 1238 netdev_err(dev, "failed resensing port attributes\n");
1235 "attributes");
1236 break; 1239 break;
1237 } 1240 }
1238 1241
1239 if (netif_msg_link(port)) 1242 netif_info(port, link, dev,
1240 ehea_info("%s: Logical port up: %dMbps " 1243 "Logical port up: %dMbps %s Duplex\n",
1241 "%s Duplex", 1244 port->port_speed,
1242 port->netdev->name, 1245 port->full_duplex == 1 ?
1243 port->port_speed, 1246 "Full" : "Half");
1244 port->full_duplex ==
1245 1 ? "Full" : "Half");
1246 1247
1247 netif_carrier_on(port->netdev); 1248 netif_carrier_on(dev);
1248 netif_wake_queue(port->netdev); 1249 netif_wake_queue(dev);
1249 } 1250 }
1250 } else 1251 } else
1251 if (netif_carrier_ok(port->netdev)) { 1252 if (netif_carrier_ok(dev)) {
1252 if (netif_msg_link(port)) 1253 netif_info(port, link, dev,
1253 ehea_info("%s: Logical port down", 1254 "Logical port down\n");
1254 port->netdev->name); 1255 netif_carrier_off(dev);
1255 netif_carrier_off(port->netdev); 1256 netif_stop_queue(dev);
1256 netif_stop_queue(port->netdev);
1257 } 1257 }
1258 1258
1259 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) { 1259 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PORT_UP, eqe)) {
1260 port->phy_link = EHEA_PHY_LINK_UP; 1260 port->phy_link = EHEA_PHY_LINK_UP;
1261 if (netif_msg_link(port)) 1261 netif_info(port, link, dev,
1262 ehea_info("%s: Physical port up", 1262 "Physical port up\n");
1263 port->netdev->name);
1264 if (prop_carrier_state) 1263 if (prop_carrier_state)
1265 netif_carrier_on(port->netdev); 1264 netif_carrier_on(dev);
1266 } else { 1265 } else {
1267 port->phy_link = EHEA_PHY_LINK_DOWN; 1266 port->phy_link = EHEA_PHY_LINK_DOWN;
1268 if (netif_msg_link(port)) 1267 netif_info(port, link, dev,
1269 ehea_info("%s: Physical port down", 1268 "Physical port down\n");
1270 port->netdev->name);
1271 if (prop_carrier_state) 1269 if (prop_carrier_state)
1272 netif_carrier_off(port->netdev); 1270 netif_carrier_off(dev);
1273 } 1271 }
1274 1272
1275 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe)) 1273 if (EHEA_BMASK_GET(NEQE_EXTSWITCH_PRIMARY, eqe))
1276 ehea_info("External switch port is primary port"); 1274 netdev_info(dev,
1275 "External switch port is primary port\n");
1277 else 1276 else
1278 ehea_info("External switch port is backup port"); 1277 netdev_info(dev,
1278 "External switch port is backup port\n");
1279 1279
1280 break; 1280 break;
1281 case EHEA_EC_ADAPTER_MALFUNC: 1281 case EHEA_EC_ADAPTER_MALFUNC:
1282 ehea_error("Adapter malfunction"); 1282 netdev_err(dev, "Adapter malfunction\n");
1283 break; 1283 break;
1284 case EHEA_EC_PORT_MALFUNC: 1284 case EHEA_EC_PORT_MALFUNC:
1285 ehea_info("Port malfunction: Device: %s", port->netdev->name); 1285 netdev_info(dev, "Port malfunction\n");
1286 netif_carrier_off(port->netdev); 1286 netif_carrier_off(dev);
1287 netif_stop_queue(port->netdev); 1287 netif_stop_queue(dev);
1288 break; 1288 break;
1289 default: 1289 default:
1290 ehea_error("unknown event code %x, eqe=0x%llX", ec, eqe); 1290 netdev_err(dev, "unknown event code %x, eqe=0x%llX\n", ec, eqe);
1291 break; 1291 break;
1292 } 1292 }
1293} 1293}
@@ -1299,13 +1299,13 @@ static void ehea_neq_tasklet(unsigned long data)
1299 u64 event_mask; 1299 u64 event_mask;
1300 1300
1301 eqe = ehea_poll_eq(adapter->neq); 1301 eqe = ehea_poll_eq(adapter->neq);
1302 ehea_debug("eqe=%p", eqe); 1302 pr_debug("eqe=%p\n", eqe);
1303 1303
1304 while (eqe) { 1304 while (eqe) {
1305 ehea_debug("*eqe=%lx", eqe->entry); 1305 pr_debug("*eqe=%lx\n", (unsigned long) eqe->entry);
1306 ehea_parse_eqe(adapter, eqe->entry); 1306 ehea_parse_eqe(adapter, eqe->entry);
1307 eqe = ehea_poll_eq(adapter->neq); 1307 eqe = ehea_poll_eq(adapter->neq);
1308 ehea_debug("next eqe=%p", eqe); 1308 pr_debug("next eqe=%p\n", eqe);
1309 } 1309 }
1310 1310
1311 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1) 1311 event_mask = EHEA_BMASK_SET(NELR_PORTSTATE_CHG, 1)
@@ -1329,9 +1329,7 @@ static int ehea_fill_port_res(struct ehea_port_res *pr)
1329 int ret; 1329 int ret;
1330 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr; 1330 struct ehea_qp_init_attr *init_attr = &pr->qp->init_attr;
1331 1331
1332 ehea_init_fill_rq1(pr, init_attr->act_nr_rwqes_rq1 1332 ehea_init_fill_rq1(pr, pr->rq1_skba.len);
1333 - init_attr->act_nr_rwqes_rq2
1334 - init_attr->act_nr_rwqes_rq3 - 1);
1335 1333
1336 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1); 1334 ret = ehea_refill_rq2(pr, init_attr->act_nr_rwqes_rq2 - 1);
1337 1335
@@ -1354,14 +1352,14 @@ static int ehea_reg_interrupts(struct net_device *dev)
1354 ehea_qp_aff_irq_handler, 1352 ehea_qp_aff_irq_handler,
1355 IRQF_DISABLED, port->int_aff_name, port); 1353 IRQF_DISABLED, port->int_aff_name, port);
1356 if (ret) { 1354 if (ret) {
1357 ehea_error("failed registering irq for qp_aff_irq_handler:" 1355 netdev_err(dev, "failed registering irq for qp_aff_irq_handler:ist=%X\n",
1358 "ist=%X", port->qp_eq->attr.ist1); 1356 port->qp_eq->attr.ist1);
1359 goto out_free_qpeq; 1357 goto out_free_qpeq;
1360 } 1358 }
1361 1359
1362 if (netif_msg_ifup(port)) 1360 netif_info(port, ifup, dev,
1363 ehea_info("irq_handle 0x%X for function qp_aff_irq_handler " 1361 "irq_handle 0x%X for function qp_aff_irq_handler registered\n",
1364 "registered", port->qp_eq->attr.ist1); 1362 port->qp_eq->attr.ist1);
1365 1363
1366 1364
1367 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1365 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
@@ -1373,14 +1371,13 @@ static int ehea_reg_interrupts(struct net_device *dev)
1373 IRQF_DISABLED, pr->int_send_name, 1371 IRQF_DISABLED, pr->int_send_name,
1374 pr); 1372 pr);
1375 if (ret) { 1373 if (ret) {
1376 ehea_error("failed registering irq for ehea_queue " 1374 netdev_err(dev, "failed registering irq for ehea_queue port_res_nr:%d, ist=%X\n",
1377 "port_res_nr:%d, ist=%X", i, 1375 i, pr->eq->attr.ist1);
1378 pr->eq->attr.ist1);
1379 goto out_free_req; 1376 goto out_free_req;
1380 } 1377 }
1381 if (netif_msg_ifup(port)) 1378 netif_info(port, ifup, dev,
1382 ehea_info("irq_handle 0x%X for function ehea_queue_int " 1379 "irq_handle 0x%X for function ehea_queue_int %d registered\n",
1383 "%d registered", pr->eq->attr.ist1, i); 1380 pr->eq->attr.ist1, i);
1384 } 1381 }
1385out: 1382out:
1386 return ret; 1383 return ret;
@@ -1411,16 +1408,16 @@ static void ehea_free_interrupts(struct net_device *dev)
1411 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 1408 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
1412 pr = &port->port_res[i]; 1409 pr = &port->port_res[i];
1413 ibmebus_free_irq(pr->eq->attr.ist1, pr); 1410 ibmebus_free_irq(pr->eq->attr.ist1, pr);
1414 if (netif_msg_intr(port)) 1411 netif_info(port, intr, dev,
1415 ehea_info("free send irq for res %d with handle 0x%X", 1412 "free send irq for res %d with handle 0x%X\n",
1416 i, pr->eq->attr.ist1); 1413 i, pr->eq->attr.ist1);
1417 } 1414 }
1418 1415
1419 /* associated events */ 1416 /* associated events */
1420 ibmebus_free_irq(port->qp_eq->attr.ist1, port); 1417 ibmebus_free_irq(port->qp_eq->attr.ist1, port);
1421 if (netif_msg_intr(port)) 1418 netif_info(port, intr, dev,
1422 ehea_info("associated event interrupt for handle 0x%X freed", 1419 "associated event interrupt for handle 0x%X freed\n",
1423 port->qp_eq->attr.ist1); 1420 port->qp_eq->attr.ist1);
1424} 1421}
1425 1422
1426static int ehea_configure_port(struct ehea_port *port) 1423static int ehea_configure_port(struct ehea_port *port)
@@ -1489,7 +1486,7 @@ int ehea_gen_smrs(struct ehea_port_res *pr)
1489out_free: 1486out_free:
1490 ehea_rem_mr(&pr->send_mr); 1487 ehea_rem_mr(&pr->send_mr);
1491out: 1488out:
1492 ehea_error("Generating SMRS failed\n"); 1489 pr_err("Generating SMRS failed\n");
1493 return -EIO; 1490 return -EIO;
1494} 1491}
1495 1492
@@ -1506,12 +1503,10 @@ static int ehea_init_q_skba(struct ehea_q_skb_arr *q_skba, int max_q_entries)
1506{ 1503{
1507 int arr_size = sizeof(void *) * max_q_entries; 1504 int arr_size = sizeof(void *) * max_q_entries;
1508 1505
1509 q_skba->arr = vmalloc(arr_size); 1506 q_skba->arr = vzalloc(arr_size);
1510 if (!q_skba->arr) 1507 if (!q_skba->arr)
1511 return -ENOMEM; 1508 return -ENOMEM;
1512 1509
1513 memset(q_skba->arr, 0, arr_size);
1514
1515 q_skba->len = max_q_entries; 1510 q_skba->len = max_q_entries;
1516 q_skba->index = 0; 1511 q_skba->index = 0;
1517 q_skba->os_skbs = 0; 1512 q_skba->os_skbs = 0;
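
The hunk above collapses vmalloc() followed by memset(0) into a single vzalloc() call; the allocation is still virtually contiguous, it simply comes back already zeroed. An equivalent kernel-side sketch (the helper names are made up):

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Before: allocate, then clear by hand. */
static void *skb_array_alloc_old(unsigned long size)
{
	void *p = vmalloc(size);

	if (p)
		memset(p, 0, size);
	return p;
}

/* After: vzalloc() returns zeroed, virtually contiguous memory. */
static void *skb_array_alloc_new(unsigned long size)
{
	return vzalloc(size);
}
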
@@ -1546,7 +1541,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1546 1541
1547 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0); 1542 pr->eq = ehea_create_eq(adapter, eq_type, EHEA_MAX_ENTRIES_EQ, 0);
1548 if (!pr->eq) { 1543 if (!pr->eq) {
1549 ehea_error("create_eq failed (eq)"); 1544 pr_err("create_eq failed (eq)\n");
1550 goto out_free; 1545 goto out_free;
1551 } 1546 }
1552 1547
@@ -1554,7 +1549,7 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1554 pr->eq->fw_handle, 1549 pr->eq->fw_handle,
1555 port->logical_port_id); 1550 port->logical_port_id);
1556 if (!pr->recv_cq) { 1551 if (!pr->recv_cq) {
1557 ehea_error("create_cq failed (cq_recv)"); 1552 pr_err("create_cq failed (cq_recv)\n");
1558 goto out_free; 1553 goto out_free;
1559 } 1554 }
1560 1555
@@ -1562,19 +1557,19 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1562 pr->eq->fw_handle, 1557 pr->eq->fw_handle,
1563 port->logical_port_id); 1558 port->logical_port_id);
1564 if (!pr->send_cq) { 1559 if (!pr->send_cq) {
1565 ehea_error("create_cq failed (cq_send)"); 1560 pr_err("create_cq failed (cq_send)\n");
1566 goto out_free; 1561 goto out_free;
1567 } 1562 }
1568 1563
1569 if (netif_msg_ifup(port)) 1564 if (netif_msg_ifup(port))
1570 ehea_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d", 1565 pr_info("Send CQ: act_nr_cqes=%d, Recv CQ: act_nr_cqes=%d\n",
1571 pr->send_cq->attr.act_nr_of_cqes, 1566 pr->send_cq->attr.act_nr_of_cqes,
1572 pr->recv_cq->attr.act_nr_of_cqes); 1567 pr->recv_cq->attr.act_nr_of_cqes);
1573 1568
1574 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL); 1569 init_attr = kzalloc(sizeof(*init_attr), GFP_KERNEL);
1575 if (!init_attr) { 1570 if (!init_attr) {
1576 ret = -ENOMEM; 1571 ret = -ENOMEM;
1577 ehea_error("no mem for ehea_qp_init_attr"); 1572 pr_err("no mem for ehea_qp_init_attr\n");
1578 goto out_free; 1573 goto out_free;
1579 } 1574 }
1580 1575
@@ -1599,18 +1594,18 @@ static int ehea_init_port_res(struct ehea_port *port, struct ehea_port_res *pr,
1599 1594
1600 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr); 1595 pr->qp = ehea_create_qp(adapter, adapter->pd, init_attr);
1601 if (!pr->qp) { 1596 if (!pr->qp) {
1602 ehea_error("create_qp failed"); 1597 pr_err("create_qp failed\n");
1603 ret = -EIO; 1598 ret = -EIO;
1604 goto out_free; 1599 goto out_free;
1605 } 1600 }
1606 1601
1607 if (netif_msg_ifup(port)) 1602 if (netif_msg_ifup(port))
1608 ehea_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n " 1603 pr_info("QP: qp_nr=%d\n act_nr_snd_wqe=%d\n nr_rwqe_rq1=%d\n nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d\n",
1609 "nr_rwqe_rq2=%d\n nr_rwqe_rq3=%d", init_attr->qp_nr, 1604 init_attr->qp_nr,
1610 init_attr->act_nr_send_wqes, 1605 init_attr->act_nr_send_wqes,
1611 init_attr->act_nr_rwqes_rq1, 1606 init_attr->act_nr_rwqes_rq1,
1612 init_attr->act_nr_rwqes_rq2, 1607 init_attr->act_nr_rwqes_rq2,
1613 init_attr->act_nr_rwqes_rq3); 1608 init_attr->act_nr_rwqes_rq3);
1614 1609
1615 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1; 1610 pr->sq_skba_size = init_attr->act_nr_send_wqes + 1;
1616 1611
@@ -1761,7 +1756,7 @@ static void write_swqe2_TSO(struct sk_buff *skb,
1761 swqe->descriptors++; 1756 swqe->descriptors++;
1762 } 1757 }
1763 } else 1758 } else
1764 ehea_error("cannot handle fragmented headers"); 1759 pr_err("cannot handle fragmented headers\n");
1765} 1760}
1766 1761
1767static void write_swqe2_nonTSO(struct sk_buff *skb, 1762static void write_swqe2_nonTSO(struct sk_buff *skb,
@@ -1857,8 +1852,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1857 port->logical_port_id, 1852 port->logical_port_id,
1858 reg_type, port->mac_addr, 0, hcallid); 1853 reg_type, port->mac_addr, 0, hcallid);
1859 if (hret != H_SUCCESS) { 1854 if (hret != H_SUCCESS) {
1860 ehea_error("%sregistering bc address failed (tagged)", 1855 pr_err("%sregistering bc address failed (tagged)\n",
1861 hcallid == H_REG_BCMC ? "" : "de"); 1856 hcallid == H_REG_BCMC ? "" : "de");
1862 ret = -EIO; 1857 ret = -EIO;
1863 goto out_herr; 1858 goto out_herr;
1864 } 1859 }
@@ -1869,8 +1864,8 @@ static int ehea_broadcast_reg_helper(struct ehea_port *port, u32 hcallid)
1869 port->logical_port_id, 1864 port->logical_port_id,
1870 reg_type, port->mac_addr, 0, hcallid); 1865 reg_type, port->mac_addr, 0, hcallid);
1871 if (hret != H_SUCCESS) { 1866 if (hret != H_SUCCESS) {
1872 ehea_error("%sregistering bc address failed (vlan)", 1867 pr_err("%sregistering bc address failed (vlan)\n",
1873 hcallid == H_REG_BCMC ? "" : "de"); 1868 hcallid == H_REG_BCMC ? "" : "de");
1874 ret = -EIO; 1869 ret = -EIO;
1875 } 1870 }
1876out_herr: 1871out_herr:
@@ -1892,7 +1887,7 @@ static int ehea_set_mac_addr(struct net_device *dev, void *sa)
1892 1887
1893 cb0 = (void *)get_zeroed_page(GFP_KERNEL); 1888 cb0 = (void *)get_zeroed_page(GFP_KERNEL);
1894 if (!cb0) { 1889 if (!cb0) {
1895 ehea_error("no mem for cb0"); 1890 pr_err("no mem for cb0\n");
1896 ret = -ENOMEM; 1891 ret = -ENOMEM;
1897 goto out; 1892 goto out;
1898 } 1893 }
@@ -1940,11 +1935,11 @@ out:
1940static void ehea_promiscuous_error(u64 hret, int enable) 1935static void ehea_promiscuous_error(u64 hret, int enable)
1941{ 1936{
1942 if (hret == H_AUTHORITY) 1937 if (hret == H_AUTHORITY)
1943 ehea_info("Hypervisor denied %sabling promiscuous mode", 1938 pr_info("Hypervisor denied %sabling promiscuous mode\n",
1944 enable == 1 ? "en" : "dis"); 1939 enable == 1 ? "en" : "dis");
1945 else 1940 else
1946 ehea_error("failed %sabling promiscuous mode", 1941 pr_err("failed %sabling promiscuous mode\n",
1947 enable == 1 ? "en" : "dis"); 1942 enable == 1 ? "en" : "dis");
1948} 1943}
1949 1944
1950static void ehea_promiscuous(struct net_device *dev, int enable) 1945static void ehea_promiscuous(struct net_device *dev, int enable)
@@ -1958,7 +1953,7 @@ static void ehea_promiscuous(struct net_device *dev, int enable)
1958 1953
1959 cb7 = (void *)get_zeroed_page(GFP_ATOMIC); 1954 cb7 = (void *)get_zeroed_page(GFP_ATOMIC);
1960 if (!cb7) { 1955 if (!cb7) {
1961 ehea_error("no mem for cb7"); 1956 pr_err("no mem for cb7\n");
1962 goto out; 1957 goto out;
1963 } 1958 }
1964 1959
@@ -2018,7 +2013,7 @@ static int ehea_drop_multicast_list(struct net_device *dev)
2018 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr, 2013 hret = ehea_multicast_reg_helper(port, mc_entry->macaddr,
2019 H_DEREG_BCMC); 2014 H_DEREG_BCMC);
2020 if (hret) { 2015 if (hret) {
2021 ehea_error("failed deregistering mcast MAC"); 2016 pr_err("failed deregistering mcast MAC\n");
2022 ret = -EIO; 2017 ret = -EIO;
2023 } 2018 }
2024 2019
@@ -2041,7 +2036,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
2041 if (!hret) 2036 if (!hret)
2042 port->allmulti = 1; 2037 port->allmulti = 1;
2043 else 2038 else
2044 ehea_error("failed enabling IFF_ALLMULTI"); 2039 netdev_err(dev,
2040 "failed enabling IFF_ALLMULTI\n");
2045 } 2041 }
2046 } else 2042 } else
2047 if (!enable) { 2043 if (!enable) {
@@ -2050,7 +2046,8 @@ static void ehea_allmulti(struct net_device *dev, int enable)
2050 if (!hret) 2046 if (!hret)
2051 port->allmulti = 0; 2047 port->allmulti = 0;
2052 else 2048 else
2053 ehea_error("failed disabling IFF_ALLMULTI"); 2049 netdev_err(dev,
2050 "failed disabling IFF_ALLMULTI\n");
2054 } 2051 }
2055} 2052}
2056 2053
@@ -2061,7 +2058,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
2061 2058
2062 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC); 2059 ehea_mcl_entry = kzalloc(sizeof(*ehea_mcl_entry), GFP_ATOMIC);
2063 if (!ehea_mcl_entry) { 2060 if (!ehea_mcl_entry) {
2064 ehea_error("no mem for mcl_entry"); 2061 pr_err("no mem for mcl_entry\n");
2065 return; 2062 return;
2066 } 2063 }
2067 2064
@@ -2074,7 +2071,7 @@ static void ehea_add_multicast_entry(struct ehea_port *port, u8 *mc_mac_addr)
2074 if (!hret) 2071 if (!hret)
2075 list_add(&ehea_mcl_entry->list, &port->mc_list->list); 2072 list_add(&ehea_mcl_entry->list, &port->mc_list->list);
2076 else { 2073 else {
2077 ehea_error("failed registering mcast MAC"); 2074 pr_err("failed registering mcast MAC\n");
2078 kfree(ehea_mcl_entry); 2075 kfree(ehea_mcl_entry);
2079 } 2076 }
2080} 2077}
@@ -2107,9 +2104,8 @@ static void ehea_set_multicast_list(struct net_device *dev)
2107 } 2104 }
2108 2105
2109 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) { 2106 if (netdev_mc_count(dev) > port->adapter->max_mc_mac) {
2110 ehea_info("Mcast registration limit reached (0x%llx). " 2107 pr_info("Mcast registration limit reached (0x%llx). Use ALLMULTI!\n",
2111 "Use ALLMULTI!", 2108 port->adapter->max_mc_mac);
2112 port->adapter->max_mc_mac);
2113 goto out; 2109 goto out;
2114 } 2110 }
2115 2111
@@ -2315,10 +2311,10 @@ static int ehea_start_xmit(struct sk_buff *skb, struct net_device *dev)
2315 } 2311 }
2316 pr->swqe_id_counter += 1; 2312 pr->swqe_id_counter += 1;
2317 2313
2318 if (netif_msg_tx_queued(port)) { 2314 netif_info(port, tx_queued, dev,
2319 ehea_info("post swqe on QP %d", pr->qp->init_attr.qp_nr); 2315 "post swqe on QP %d\n", pr->qp->init_attr.qp_nr);
2316 if (netif_msg_tx_queued(port))
2320 ehea_dump(swqe, 512, "swqe"); 2317 ehea_dump(swqe, 512, "swqe");
2321 }
2322 2318
2323 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) { 2319 if (unlikely(test_bit(__EHEA_STOP_XFER, &ehea_driver_flags))) {
2324 netif_stop_queue(dev); 2320 netif_stop_queue(dev);
@@ -2354,14 +2350,14 @@ static void ehea_vlan_rx_register(struct net_device *dev,
2354 2350
2355 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2351 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2356 if (!cb1) { 2352 if (!cb1) {
2357 ehea_error("no mem for cb1"); 2353 pr_err("no mem for cb1\n");
2358 goto out; 2354 goto out;
2359 } 2355 }
2360 2356
2361 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2357 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2362 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2358 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2363 if (hret != H_SUCCESS) 2359 if (hret != H_SUCCESS)
2364 ehea_error("modify_ehea_port failed"); 2360 pr_err("modify_ehea_port failed\n");
2365 2361
2366 free_page((unsigned long)cb1); 2362 free_page((unsigned long)cb1);
2367out: 2363out:
@@ -2378,14 +2374,14 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2378 2374
2379 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2375 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2380 if (!cb1) { 2376 if (!cb1) {
2381 ehea_error("no mem for cb1"); 2377 pr_err("no mem for cb1\n");
2382 goto out; 2378 goto out;
2383 } 2379 }
2384 2380
2385 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2381 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2386 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2382 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2387 if (hret != H_SUCCESS) { 2383 if (hret != H_SUCCESS) {
2388 ehea_error("query_ehea_port failed"); 2384 pr_err("query_ehea_port failed\n");
2389 goto out; 2385 goto out;
2390 } 2386 }
2391 2387
@@ -2395,7 +2391,7 @@ static void ehea_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
2395 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2391 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2396 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2392 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2397 if (hret != H_SUCCESS) 2393 if (hret != H_SUCCESS)
2398 ehea_error("modify_ehea_port failed"); 2394 pr_err("modify_ehea_port failed\n");
2399out: 2395out:
2400 free_page((unsigned long)cb1); 2396 free_page((unsigned long)cb1);
2401 return; 2397 return;
@@ -2413,14 +2409,14 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2413 2409
2414 cb1 = (void *)get_zeroed_page(GFP_KERNEL); 2410 cb1 = (void *)get_zeroed_page(GFP_KERNEL);
2415 if (!cb1) { 2411 if (!cb1) {
2416 ehea_error("no mem for cb1"); 2412 pr_err("no mem for cb1\n");
2417 goto out; 2413 goto out;
2418 } 2414 }
2419 2415
2420 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id, 2416 hret = ehea_h_query_ehea_port(adapter->handle, port->logical_port_id,
2421 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2417 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2422 if (hret != H_SUCCESS) { 2418 if (hret != H_SUCCESS) {
2423 ehea_error("query_ehea_port failed"); 2419 pr_err("query_ehea_port failed\n");
2424 goto out; 2420 goto out;
2425 } 2421 }
2426 2422
@@ -2430,7 +2426,7 @@ static void ehea_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
2430 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id, 2426 hret = ehea_h_modify_ehea_port(adapter->handle, port->logical_port_id,
2431 H_PORT_CB1, H_PORT_CB1_ALL, cb1); 2427 H_PORT_CB1, H_PORT_CB1_ALL, cb1);
2432 if (hret != H_SUCCESS) 2428 if (hret != H_SUCCESS)
2433 ehea_error("modify_ehea_port failed"); 2429 pr_err("modify_ehea_port failed\n");
2434out: 2430out:
2435 free_page((unsigned long)cb1); 2431 free_page((unsigned long)cb1);
2436} 2432}
@@ -2452,7 +2448,7 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2452 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2448 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2453 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2449 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2454 if (hret != H_SUCCESS) { 2450 if (hret != H_SUCCESS) {
2455 ehea_error("query_ehea_qp failed (1)"); 2451 pr_err("query_ehea_qp failed (1)\n");
2456 goto out; 2452 goto out;
2457 } 2453 }
2458 2454
@@ -2461,14 +2457,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2461 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2457 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2462 &dummy64, &dummy64, &dummy16, &dummy16); 2458 &dummy64, &dummy64, &dummy16, &dummy16);
2463 if (hret != H_SUCCESS) { 2459 if (hret != H_SUCCESS) {
2464 ehea_error("modify_ehea_qp failed (1)"); 2460 pr_err("modify_ehea_qp failed (1)\n");
2465 goto out; 2461 goto out;
2466 } 2462 }
2467 2463
2468 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2464 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2469 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2465 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2470 if (hret != H_SUCCESS) { 2466 if (hret != H_SUCCESS) {
2471 ehea_error("query_ehea_qp failed (2)"); 2467 pr_err("query_ehea_qp failed (2)\n");
2472 goto out; 2468 goto out;
2473 } 2469 }
2474 2470
@@ -2477,14 +2473,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2477 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2473 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2478 &dummy64, &dummy64, &dummy16, &dummy16); 2474 &dummy64, &dummy64, &dummy16, &dummy16);
2479 if (hret != H_SUCCESS) { 2475 if (hret != H_SUCCESS) {
2480 ehea_error("modify_ehea_qp failed (2)"); 2476 pr_err("modify_ehea_qp failed (2)\n");
2481 goto out; 2477 goto out;
2482 } 2478 }
2483 2479
2484 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2480 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2485 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2481 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2486 if (hret != H_SUCCESS) { 2482 if (hret != H_SUCCESS) {
2487 ehea_error("query_ehea_qp failed (3)"); 2483 pr_err("query_ehea_qp failed (3)\n");
2488 goto out; 2484 goto out;
2489 } 2485 }
2490 2486
@@ -2493,14 +2489,14 @@ int ehea_activate_qp(struct ehea_adapter *adapter, struct ehea_qp *qp)
2493 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0, 2489 EHEA_BMASK_SET(H_QPCB0_QP_CTL_REG, 1), cb0,
2494 &dummy64, &dummy64, &dummy16, &dummy16); 2490 &dummy64, &dummy64, &dummy16, &dummy16);
2495 if (hret != H_SUCCESS) { 2491 if (hret != H_SUCCESS) {
2496 ehea_error("modify_ehea_qp failed (3)"); 2492 pr_err("modify_ehea_qp failed (3)\n");
2497 goto out; 2493 goto out;
2498 } 2494 }
2499 2495
2500 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle, 2496 hret = ehea_h_query_ehea_qp(adapter->handle, 0, qp->fw_handle,
2501 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0); 2497 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), cb0);
2502 if (hret != H_SUCCESS) { 2498 if (hret != H_SUCCESS) {
2503 ehea_error("query_ehea_qp failed (4)"); 2499 pr_err("query_ehea_qp failed (4)\n");
2504 goto out; 2500 goto out;
2505 } 2501 }
2506 2502
@@ -2521,7 +2517,7 @@ static int ehea_port_res_setup(struct ehea_port *port, int def_qps,
2521 EHEA_MAX_ENTRIES_EQ, 1); 2517 EHEA_MAX_ENTRIES_EQ, 1);
2522 if (!port->qp_eq) { 2518 if (!port->qp_eq) {
2523 ret = -EINVAL; 2519 ret = -EINVAL;
2524 ehea_error("ehea_create_eq failed (qp_eq)"); 2520 pr_err("ehea_create_eq failed (qp_eq)\n");
2525 goto out_kill_eq; 2521 goto out_kill_eq;
2526 } 2522 }
2527 2523
@@ -2602,27 +2598,27 @@ static int ehea_up(struct net_device *dev)
2602 ret = ehea_port_res_setup(port, port->num_def_qps, 2598 ret = ehea_port_res_setup(port, port->num_def_qps,
2603 port->num_add_tx_qps); 2599 port->num_add_tx_qps);
2604 if (ret) { 2600 if (ret) {
2605 ehea_error("port_res_failed"); 2601 netdev_err(dev, "port_res_failed\n");
2606 goto out; 2602 goto out;
2607 } 2603 }
2608 2604
2609 /* Set default QP for this port */ 2605 /* Set default QP for this port */
2610 ret = ehea_configure_port(port); 2606 ret = ehea_configure_port(port);
2611 if (ret) { 2607 if (ret) {
2612 ehea_error("ehea_configure_port failed. ret:%d", ret); 2608 netdev_err(dev, "ehea_configure_port failed. ret:%d\n", ret);
2613 goto out_clean_pr; 2609 goto out_clean_pr;
2614 } 2610 }
2615 2611
2616 ret = ehea_reg_interrupts(dev); 2612 ret = ehea_reg_interrupts(dev);
2617 if (ret) { 2613 if (ret) {
2618 ehea_error("reg_interrupts failed. ret:%d", ret); 2614 netdev_err(dev, "reg_interrupts failed. ret:%d\n", ret);
2619 goto out_clean_pr; 2615 goto out_clean_pr;
2620 } 2616 }
2621 2617
2622 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) { 2618 for (i = 0; i < port->num_def_qps + port->num_add_tx_qps; i++) {
2623 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp); 2619 ret = ehea_activate_qp(port->adapter, port->port_res[i].qp);
2624 if (ret) { 2620 if (ret) {
2625 ehea_error("activate_qp failed"); 2621 netdev_err(dev, "activate_qp failed\n");
2626 goto out_free_irqs; 2622 goto out_free_irqs;
2627 } 2623 }
2628 } 2624 }
@@ -2630,7 +2626,7 @@ static int ehea_up(struct net_device *dev)
2630 for (i = 0; i < port->num_def_qps; i++) { 2626 for (i = 0; i < port->num_def_qps; i++) {
2631 ret = ehea_fill_port_res(&port->port_res[i]); 2627 ret = ehea_fill_port_res(&port->port_res[i]);
2632 if (ret) { 2628 if (ret) {
2633 ehea_error("out_free_irqs"); 2629 netdev_err(dev, "out_free_irqs\n");
2634 goto out_free_irqs; 2630 goto out_free_irqs;
2635 } 2631 }
2636 } 2632 }
@@ -2653,7 +2649,7 @@ out_clean_pr:
2653 ehea_clean_all_portres(port); 2649 ehea_clean_all_portres(port);
2654out: 2650out:
2655 if (ret) 2651 if (ret)
2656 ehea_info("Failed starting %s. ret=%i", dev->name, ret); 2652 netdev_info(dev, "Failed starting. ret=%i\n", ret);
2657 2653
2658 ehea_update_bcmc_registrations(); 2654 ehea_update_bcmc_registrations();
2659 ehea_update_firmware_handles(); 2655 ehea_update_firmware_handles();
@@ -2684,8 +2680,7 @@ static int ehea_open(struct net_device *dev)
2684 2680
2685 mutex_lock(&port->port_lock); 2681 mutex_lock(&port->port_lock);
2686 2682
2687 if (netif_msg_ifup(port)) 2683 netif_info(port, ifup, dev, "enabling port\n");
2688 ehea_info("enabling port %s", dev->name);
2689 2684
2690 ret = ehea_up(dev); 2685 ret = ehea_up(dev);
2691 if (!ret) { 2686 if (!ret) {
@@ -2720,8 +2715,7 @@ static int ehea_down(struct net_device *dev)
2720 2715
2721 ret = ehea_clean_all_portres(port); 2716 ret = ehea_clean_all_portres(port);
2722 if (ret) 2717 if (ret)
2723 ehea_info("Failed freeing resources for %s. ret=%i", 2718 netdev_info(dev, "Failed freeing resources. ret=%i\n", ret);
2724 dev->name, ret);
2725 2719
2726 ehea_update_firmware_handles(); 2720 ehea_update_firmware_handles();
2727 2721
@@ -2733,8 +2727,7 @@ static int ehea_stop(struct net_device *dev)
2733 int ret; 2727 int ret;
2734 struct ehea_port *port = netdev_priv(dev); 2728 struct ehea_port *port = netdev_priv(dev);
2735 2729
2736 if (netif_msg_ifdown(port)) 2730 netif_info(port, ifdown, dev, "disabling port\n");
2737 ehea_info("disabling port %s", dev->name);
2738 2731
2739 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags); 2732 set_bit(__EHEA_DISABLE_PORT_RESET, &port->flags);
2740 cancel_work_sync(&port->reset_task); 2733 cancel_work_sync(&port->reset_task);
@@ -2775,7 +2768,7 @@ static void ehea_flush_sq(struct ehea_port *port)
2775 msecs_to_jiffies(100)); 2768 msecs_to_jiffies(100));
2776 2769
2777 if (!ret) { 2770 if (!ret) {
2778 ehea_error("WARNING: sq not flushed completely"); 2771 pr_err("WARNING: sq not flushed completely\n");
2779 break; 2772 break;
2780 } 2773 }
2781 } 2774 }
@@ -2811,7 +2804,7 @@ int ehea_stop_qps(struct net_device *dev)
2811 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2804 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2812 cb0); 2805 cb0);
2813 if (hret != H_SUCCESS) { 2806 if (hret != H_SUCCESS) {
2814 ehea_error("query_ehea_qp failed (1)"); 2807 pr_err("query_ehea_qp failed (1)\n");
2815 goto out; 2808 goto out;
2816 } 2809 }
2817 2810
@@ -2823,7 +2816,7 @@ int ehea_stop_qps(struct net_device *dev)
2823 1), cb0, &dummy64, 2816 1), cb0, &dummy64,
2824 &dummy64, &dummy16, &dummy16); 2817 &dummy64, &dummy16, &dummy16);
2825 if (hret != H_SUCCESS) { 2818 if (hret != H_SUCCESS) {
2826 ehea_error("modify_ehea_qp failed (1)"); 2819 pr_err("modify_ehea_qp failed (1)\n");
2827 goto out; 2820 goto out;
2828 } 2821 }
2829 2822
@@ -2831,14 +2824,14 @@ int ehea_stop_qps(struct net_device *dev)
2831 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2824 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2832 cb0); 2825 cb0);
2833 if (hret != H_SUCCESS) { 2826 if (hret != H_SUCCESS) {
2834 ehea_error("query_ehea_qp failed (2)"); 2827 pr_err("query_ehea_qp failed (2)\n");
2835 goto out; 2828 goto out;
2836 } 2829 }
2837 2830
2838 /* deregister shared memory regions */ 2831 /* deregister shared memory regions */
2839 dret = ehea_rem_smrs(pr); 2832 dret = ehea_rem_smrs(pr);
2840 if (dret) { 2833 if (dret) {
2841 ehea_error("unreg shared memory region failed"); 2834 pr_err("unreg shared memory region failed\n");
2842 goto out; 2835 goto out;
2843 } 2836 }
2844 } 2837 }
@@ -2907,7 +2900,7 @@ int ehea_restart_qps(struct net_device *dev)
2907 2900
2908 ret = ehea_gen_smrs(pr); 2901 ret = ehea_gen_smrs(pr);
2909 if (ret) { 2902 if (ret) {
2910 ehea_error("creation of shared memory regions failed"); 2903 netdev_err(dev, "creation of shared memory regions failed\n");
2911 goto out; 2904 goto out;
2912 } 2905 }
2913 2906
@@ -2918,7 +2911,7 @@ int ehea_restart_qps(struct net_device *dev)
2918 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2911 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2919 cb0); 2912 cb0);
2920 if (hret != H_SUCCESS) { 2913 if (hret != H_SUCCESS) {
2921 ehea_error("query_ehea_qp failed (1)"); 2914 netdev_err(dev, "query_ehea_qp failed (1)\n");
2922 goto out; 2915 goto out;
2923 } 2916 }
2924 2917
@@ -2930,7 +2923,7 @@ int ehea_restart_qps(struct net_device *dev)
2930 1), cb0, &dummy64, 2923 1), cb0, &dummy64,
2931 &dummy64, &dummy16, &dummy16); 2924 &dummy64, &dummy16, &dummy16);
2932 if (hret != H_SUCCESS) { 2925 if (hret != H_SUCCESS) {
2933 ehea_error("modify_ehea_qp failed (1)"); 2926 netdev_err(dev, "modify_ehea_qp failed (1)\n");
2934 goto out; 2927 goto out;
2935 } 2928 }
2936 2929
@@ -2938,7 +2931,7 @@ int ehea_restart_qps(struct net_device *dev)
2938 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF), 2931 EHEA_BMASK_SET(H_QPCB0_ALL, 0xFFFF),
2939 cb0); 2932 cb0);
2940 if (hret != H_SUCCESS) { 2933 if (hret != H_SUCCESS) {
2941 ehea_error("query_ehea_qp failed (2)"); 2934 netdev_err(dev, "query_ehea_qp failed (2)\n");
2942 goto out; 2935 goto out;
2943 } 2936 }
2944 2937
@@ -2975,8 +2968,7 @@ static void ehea_reset_port(struct work_struct *work)
2975 2968
2976 ehea_set_multicast_list(dev); 2969 ehea_set_multicast_list(dev);
2977 2970
2978 if (netif_msg_timer(port)) 2971 netif_info(port, timer, dev, "reset successful\n");
2979 ehea_info("Device %s resetted successfully", dev->name);
2980 2972
2981 port_napi_enable(port); 2973 port_napi_enable(port);
2982 2974
@@ -2986,12 +2978,12 @@ out:
2986 mutex_unlock(&dlpar_mem_lock); 2978 mutex_unlock(&dlpar_mem_lock);
2987} 2979}
2988 2980
2989static void ehea_rereg_mrs(struct work_struct *work) 2981static void ehea_rereg_mrs(void)
2990{ 2982{
2991 int ret, i; 2983 int ret, i;
2992 struct ehea_adapter *adapter; 2984 struct ehea_adapter *adapter;
2993 2985
2994 ehea_info("LPAR memory changed - re-initializing driver"); 2986 pr_info("LPAR memory changed - re-initializing driver\n");
2995 2987
2996 list_for_each_entry(adapter, &adapter_list, list) 2988 list_for_each_entry(adapter, &adapter_list, list)
2997 if (adapter->active_ports) { 2989 if (adapter->active_ports) {
@@ -3023,8 +3015,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
3023 /* Unregister old memory region */ 3015 /* Unregister old memory region */
3024 ret = ehea_rem_mr(&adapter->mr); 3016 ret = ehea_rem_mr(&adapter->mr);
3025 if (ret) { 3017 if (ret) {
3026 ehea_error("unregister MR failed - driver" 3018 pr_err("unregister MR failed - driver inoperable!\n");
3027 " inoperable!");
3028 goto out; 3019 goto out;
3029 } 3020 }
3030 } 3021 }
@@ -3036,8 +3027,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
3036 /* Register new memory region */ 3027 /* Register new memory region */
3037 ret = ehea_reg_kernel_mr(adapter, &adapter->mr); 3028 ret = ehea_reg_kernel_mr(adapter, &adapter->mr);
3038 if (ret) { 3029 if (ret) {
3039 ehea_error("register MR failed - driver" 3030 pr_err("register MR failed - driver inoperable!\n");
3040 " inoperable!");
3041 goto out; 3031 goto out;
3042 } 3032 }
3043 3033
@@ -3060,7 +3050,7 @@ static void ehea_rereg_mrs(struct work_struct *work)
3060 } 3050 }
3061 } 3051 }
3062 } 3052 }
3063 ehea_info("re-initializing driver complete"); 3053 pr_info("re-initializing driver complete\n");
3064out: 3054out:
3065 return; 3055 return;
3066} 3056}
@@ -3113,7 +3103,7 @@ int ehea_get_jumboframe_status(struct ehea_port *port, int *jumbo)
3113 /* (Try to) enable *jumbo frames */ 3103 /* (Try to) enable *jumbo frames */
3114 cb4 = (void *)get_zeroed_page(GFP_KERNEL); 3104 cb4 = (void *)get_zeroed_page(GFP_KERNEL);
3115 if (!cb4) { 3105 if (!cb4) {
3116 ehea_error("no mem for cb4"); 3106 pr_err("no mem for cb4\n");
3117 ret = -ENOMEM; 3107 ret = -ENOMEM;
3118 goto out; 3108 goto out;
3119 } else { 3109 } else {
@@ -3175,13 +3165,13 @@ static struct device *ehea_register_port(struct ehea_port *port,
3175 3165
3176 ret = of_device_register(&port->ofdev); 3166 ret = of_device_register(&port->ofdev);
3177 if (ret) { 3167 if (ret) {
3178 ehea_error("failed to register device. ret=%d", ret); 3168 pr_err("failed to register device. ret=%d\n", ret);
3179 goto out; 3169 goto out;
3180 } 3170 }
3181 3171
3182 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id); 3172 ret = device_create_file(&port->ofdev.dev, &dev_attr_log_port_id);
3183 if (ret) { 3173 if (ret) {
3184 ehea_error("failed to register attributes, ret=%d", ret); 3174 pr_err("failed to register attributes, ret=%d\n", ret);
3185 goto out_unreg_of_dev; 3175 goto out_unreg_of_dev;
3186 } 3176 }
3187 3177
@@ -3231,7 +3221,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3231 dev = alloc_etherdev(sizeof(struct ehea_port)); 3221 dev = alloc_etherdev(sizeof(struct ehea_port));
3232 3222
3233 if (!dev) { 3223 if (!dev) {
3234 ehea_error("no mem for net_device"); 3224 pr_err("no mem for net_device\n");
3235 ret = -ENOMEM; 3225 ret = -ENOMEM;
3236 goto out_err; 3226 goto out_err;
3237 } 3227 }
@@ -3285,7 +3275,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3285 3275
3286 ret = register_netdev(dev); 3276 ret = register_netdev(dev);
3287 if (ret) { 3277 if (ret) {
3288 ehea_error("register_netdev failed. ret=%d", ret); 3278 pr_err("register_netdev failed. ret=%d\n", ret);
3289 goto out_unreg_port; 3279 goto out_unreg_port;
3290 } 3280 }
3291 3281
@@ -3293,11 +3283,10 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter,
3293 3283
3294 ret = ehea_get_jumboframe_status(port, &jumbo); 3284 ret = ehea_get_jumboframe_status(port, &jumbo);
3295 if (ret) 3285 if (ret)
3296 ehea_error("failed determining jumbo frame status for %s", 3286 netdev_err(dev, "failed determining jumbo frame status\n");
3297 port->netdev->name);
3298 3287
3299 ehea_info("%s: Jumbo frames are %sabled", dev->name, 3288 netdev_info(dev, "Jumbo frames are %sabled\n",
3300 jumbo == 1 ? "en" : "dis"); 3289 jumbo == 1 ? "en" : "dis");
3301 3290
3302 adapter->active_ports++; 3291 adapter->active_ports++;
3303 3292
@@ -3313,14 +3302,16 @@ out_free_ethdev:
3313 free_netdev(dev); 3302 free_netdev(dev);
3314 3303
3315out_err: 3304out_err:
3316 ehea_error("setting up logical port with id=%d failed, ret=%d", 3305 pr_err("setting up logical port with id=%d failed, ret=%d\n",
3317 logical_port_id, ret); 3306 logical_port_id, ret);
3318 return NULL; 3307 return NULL;
3319} 3308}
3320 3309
3321static void ehea_shutdown_single_port(struct ehea_port *port) 3310static void ehea_shutdown_single_port(struct ehea_port *port)
3322{ 3311{
3323 struct ehea_adapter *adapter = port->adapter; 3312 struct ehea_adapter *adapter = port->adapter;
3313
3314 cancel_work_sync(&port->reset_task);
3324 unregister_netdev(port->netdev); 3315 unregister_netdev(port->netdev);
3325 ehea_unregister_port(port); 3316 ehea_unregister_port(port);
3326 kfree(port->mc_list); 3317 kfree(port->mc_list);
@@ -3342,13 +3333,13 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3342 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no", 3333 dn_log_port_id = of_get_property(eth_dn, "ibm,hea-port-no",
3343 NULL); 3334 NULL);
3344 if (!dn_log_port_id) { 3335 if (!dn_log_port_id) {
3345 ehea_error("bad device node: eth_dn name=%s", 3336 pr_err("bad device node: eth_dn name=%s\n",
3346 eth_dn->full_name); 3337 eth_dn->full_name);
3347 continue; 3338 continue;
3348 } 3339 }
3349 3340
3350 if (ehea_add_adapter_mr(adapter)) { 3341 if (ehea_add_adapter_mr(adapter)) {
3351 ehea_error("creating MR failed"); 3342 pr_err("creating MR failed\n");
3352 of_node_put(eth_dn); 3343 of_node_put(eth_dn);
3353 return -EIO; 3344 return -EIO;
3354 } 3345 }
@@ -3357,9 +3348,8 @@ static int ehea_setup_ports(struct ehea_adapter *adapter)
3357 *dn_log_port_id, 3348 *dn_log_port_id,
3358 eth_dn); 3349 eth_dn);
3359 if (adapter->port[i]) 3350 if (adapter->port[i])
3360 ehea_info("%s -> logical port id #%d", 3351 netdev_info(adapter->port[i]->netdev,
3361 adapter->port[i]->netdev->name, 3352 "logical port id #%d\n", *dn_log_port_id);
3362 *dn_log_port_id);
3363 else 3353 else
3364 ehea_remove_adapter_mr(adapter); 3354 ehea_remove_adapter_mr(adapter);
3365 3355
@@ -3404,21 +3394,20 @@ static ssize_t ehea_probe_port(struct device *dev,
3404 port = ehea_get_port(adapter, logical_port_id); 3394 port = ehea_get_port(adapter, logical_port_id);
3405 3395
3406 if (port) { 3396 if (port) {
3407 ehea_info("adding port with logical port id=%d failed. port " 3397 netdev_info(port->netdev, "adding port with logical port id=%d failed: port already configured\n",
3408 "already configured as %s.", logical_port_id, 3398 logical_port_id);
3409 port->netdev->name);
3410 return -EINVAL; 3399 return -EINVAL;
3411 } 3400 }
3412 3401
3413 eth_dn = ehea_get_eth_dn(adapter, logical_port_id); 3402 eth_dn = ehea_get_eth_dn(adapter, logical_port_id);
3414 3403
3415 if (!eth_dn) { 3404 if (!eth_dn) {
3416 ehea_info("no logical port with id %d found", logical_port_id); 3405 pr_info("no logical port with id %d found\n", logical_port_id);
3417 return -EINVAL; 3406 return -EINVAL;
3418 } 3407 }
3419 3408
3420 if (ehea_add_adapter_mr(adapter)) { 3409 if (ehea_add_adapter_mr(adapter)) {
3421 ehea_error("creating MR failed"); 3410 pr_err("creating MR failed\n");
3422 return -EIO; 3411 return -EIO;
3423 } 3412 }
3424 3413
@@ -3433,8 +3422,8 @@ static ssize_t ehea_probe_port(struct device *dev,
3433 break; 3422 break;
3434 } 3423 }
3435 3424
3436 ehea_info("added %s (logical port id=%d)", port->netdev->name, 3425 netdev_info(port->netdev, "added: (logical port id=%d)\n",
3437 logical_port_id); 3426 logical_port_id);
3438 } else { 3427 } else {
3439 ehea_remove_adapter_mr(adapter); 3428 ehea_remove_adapter_mr(adapter);
3440 return -EIO; 3429 return -EIO;
@@ -3457,8 +3446,8 @@ static ssize_t ehea_remove_port(struct device *dev,
3457 port = ehea_get_port(adapter, logical_port_id); 3446 port = ehea_get_port(adapter, logical_port_id);
3458 3447
3459 if (port) { 3448 if (port) {
3460 ehea_info("removed %s (logical port id=%d)", port->netdev->name, 3449 netdev_info(port->netdev, "removed: (logical port id=%d)\n",
3461 logical_port_id); 3450 logical_port_id);
3462 3451
3463 ehea_shutdown_single_port(port); 3452 ehea_shutdown_single_port(port);
3464 3453
@@ -3468,8 +3457,8 @@ static ssize_t ehea_remove_port(struct device *dev,
3468 break; 3457 break;
3469 } 3458 }
3470 } else { 3459 } else {
3471 ehea_error("removing port with logical port id=%d failed. port " 3460 pr_err("removing port with logical port id=%d failed. port not configured.\n",
3472 "not configured.", logical_port_id); 3461 logical_port_id);
3473 return -EINVAL; 3462 return -EINVAL;
3474 } 3463 }
3475 3464
@@ -3506,7 +3495,7 @@ static int __devinit ehea_probe_adapter(struct platform_device *dev,
3506 int ret; 3495 int ret;
3507 3496
3508 if (!dev || !dev->dev.of_node) { 3497 if (!dev || !dev->dev.of_node) {
3509 ehea_error("Invalid ibmebus device probed"); 3498 pr_err("Invalid ibmebus device probed\n");
3510 return -EINVAL; 3499 return -EINVAL;
3511 } 3500 }
3512 3501
@@ -3610,8 +3599,6 @@ static int __devexit ehea_remove(struct platform_device *dev)
3610 3599
3611 ehea_remove_device_sysfs(dev); 3600 ehea_remove_device_sysfs(dev);
3612 3601
3613 flush_scheduled_work();
3614
3615 ibmebus_free_irq(adapter->neq->attr.ist1, adapter); 3602 ibmebus_free_irq(adapter->neq->attr.ist1, adapter);
3616 tasklet_kill(&adapter->neq_tasklet); 3603 tasklet_kill(&adapter->neq_tasklet);
3617 3604
@@ -3654,21 +3641,21 @@ static int ehea_mem_notifier(struct notifier_block *nb,
3654 3641
3655 switch (action) { 3642 switch (action) {
3656 case MEM_CANCEL_OFFLINE: 3643 case MEM_CANCEL_OFFLINE:
3657 ehea_info("memory offlining canceled"); 3644 pr_info("memory offlining canceled");
3658 /* Readd canceled memory block */ 3645 /* Readd canceled memory block */
3659 case MEM_ONLINE: 3646 case MEM_ONLINE:
3660 ehea_info("memory is going online"); 3647 pr_info("memory is going online");
3661 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3648 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3662 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages)) 3649 if (ehea_add_sect_bmap(arg->start_pfn, arg->nr_pages))
3663 goto out_unlock; 3650 goto out_unlock;
3664 ehea_rereg_mrs(NULL); 3651 ehea_rereg_mrs();
3665 break; 3652 break;
3666 case MEM_GOING_OFFLINE: 3653 case MEM_GOING_OFFLINE:
3667 ehea_info("memory is going offline"); 3654 pr_info("memory is going offline");
3668 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags); 3655 set_bit(__EHEA_STOP_XFER, &ehea_driver_flags);
3669 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages)) 3656 if (ehea_rem_sect_bmap(arg->start_pfn, arg->nr_pages))
3670 goto out_unlock; 3657 goto out_unlock;
3671 ehea_rereg_mrs(NULL); 3658 ehea_rereg_mrs();
3672 break; 3659 break;
3673 default: 3660 default:
3674 break; 3661 break;
@@ -3690,7 +3677,7 @@ static int ehea_reboot_notifier(struct notifier_block *nb,
3690 unsigned long action, void *unused) 3677 unsigned long action, void *unused)
3691{ 3678{
3692 if (action == SYS_RESTART) { 3679 if (action == SYS_RESTART) {
3693 ehea_info("Reboot: freeing all eHEA resources"); 3680 pr_info("Reboot: freeing all eHEA resources\n");
3694 ibmebus_unregister_driver(&ehea_driver); 3681 ibmebus_unregister_driver(&ehea_driver);
3695 } 3682 }
3696 return NOTIFY_DONE; 3683 return NOTIFY_DONE;
@@ -3706,22 +3693,22 @@ static int check_module_parm(void)
3706 3693
3707 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) || 3694 if ((rq1_entries < EHEA_MIN_ENTRIES_QP) ||
3708 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) { 3695 (rq1_entries > EHEA_MAX_ENTRIES_RQ1)) {
3709 ehea_info("Bad parameter: rq1_entries"); 3696 pr_info("Bad parameter: rq1_entries\n");
3710 ret = -EINVAL; 3697 ret = -EINVAL;
3711 } 3698 }
3712 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) || 3699 if ((rq2_entries < EHEA_MIN_ENTRIES_QP) ||
3713 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) { 3700 (rq2_entries > EHEA_MAX_ENTRIES_RQ2)) {
3714 ehea_info("Bad parameter: rq2_entries"); 3701 pr_info("Bad parameter: rq2_entries\n");
3715 ret = -EINVAL; 3702 ret = -EINVAL;
3716 } 3703 }
3717 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) || 3704 if ((rq3_entries < EHEA_MIN_ENTRIES_QP) ||
3718 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) { 3705 (rq3_entries > EHEA_MAX_ENTRIES_RQ3)) {
3719 ehea_info("Bad parameter: rq3_entries"); 3706 pr_info("Bad parameter: rq3_entries\n");
3720 ret = -EINVAL; 3707 ret = -EINVAL;
3721 } 3708 }
3722 if ((sq_entries < EHEA_MIN_ENTRIES_QP) || 3709 if ((sq_entries < EHEA_MIN_ENTRIES_QP) ||
3723 (sq_entries > EHEA_MAX_ENTRIES_SQ)) { 3710 (sq_entries > EHEA_MAX_ENTRIES_SQ)) {
3724 ehea_info("Bad parameter: sq_entries"); 3711 pr_info("Bad parameter: sq_entries\n");
3725 ret = -EINVAL; 3712 ret = -EINVAL;
3726 } 3713 }
3727 3714
@@ -3741,11 +3728,8 @@ int __init ehea_module_init(void)
3741{ 3728{
3742 int ret; 3729 int ret;
3743 3730
3744 printk(KERN_INFO "IBM eHEA ethernet device driver (Release %s)\n", 3731 pr_info("IBM eHEA ethernet device driver (Release %s)\n", DRV_VERSION);
3745 DRV_VERSION);
3746
3747 3732
3748 INIT_WORK(&ehea_rereg_mr_task, ehea_rereg_mrs);
3749 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles)); 3733 memset(&ehea_fw_handles, 0, sizeof(ehea_fw_handles));
3750 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs)); 3734 memset(&ehea_bcmc_regs, 0, sizeof(ehea_bcmc_regs));
3751 3735
@@ -3762,27 +3746,27 @@ int __init ehea_module_init(void)
3762 3746
3763 ret = register_reboot_notifier(&ehea_reboot_nb); 3747 ret = register_reboot_notifier(&ehea_reboot_nb);
3764 if (ret) 3748 if (ret)
3765 ehea_info("failed registering reboot notifier"); 3749 pr_info("failed registering reboot notifier\n");
3766 3750
3767 ret = register_memory_notifier(&ehea_mem_nb); 3751 ret = register_memory_notifier(&ehea_mem_nb);
3768 if (ret) 3752 if (ret)
3769 ehea_info("failed registering memory remove notifier"); 3753 pr_info("failed registering memory remove notifier\n");
3770 3754
3771 ret = crash_shutdown_register(ehea_crash_handler); 3755 ret = crash_shutdown_register(ehea_crash_handler);
3772 if (ret) 3756 if (ret)
3773 ehea_info("failed registering crash handler"); 3757 pr_info("failed registering crash handler\n");
3774 3758
3775 ret = ibmebus_register_driver(&ehea_driver); 3759 ret = ibmebus_register_driver(&ehea_driver);
3776 if (ret) { 3760 if (ret) {
3777 ehea_error("failed registering eHEA device driver on ebus"); 3761 pr_err("failed registering eHEA device driver on ebus\n");
3778 goto out2; 3762 goto out2;
3779 } 3763 }
3780 3764
3781 ret = driver_create_file(&ehea_driver.driver, 3765 ret = driver_create_file(&ehea_driver.driver,
3782 &driver_attr_capabilities); 3766 &driver_attr_capabilities);
3783 if (ret) { 3767 if (ret) {
3784 ehea_error("failed to register capabilities attribute, ret=%d", 3768 pr_err("failed to register capabilities attribute, ret=%d\n",
3785 ret); 3769 ret);
3786 goto out3; 3770 goto out3;
3787 } 3771 }
3788 3772
@@ -3802,13 +3786,12 @@ static void __exit ehea_module_exit(void)
3802{ 3786{
3803 int ret; 3787 int ret;
3804 3788
3805 flush_scheduled_work();
3806 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities); 3789 driver_remove_file(&ehea_driver.driver, &driver_attr_capabilities);
3807 ibmebus_unregister_driver(&ehea_driver); 3790 ibmebus_unregister_driver(&ehea_driver);
3808 unregister_reboot_notifier(&ehea_reboot_nb); 3791 unregister_reboot_notifier(&ehea_reboot_nb);
3809 ret = crash_shutdown_unregister(ehea_crash_handler); 3792 ret = crash_shutdown_unregister(ehea_crash_handler);
3810 if (ret) 3793 if (ret)
3811 ehea_info("failed unregistering crash handler"); 3794 pr_info("failed unregistering crash handler\n");
3812 unregister_memory_notifier(&ehea_mem_nb); 3795 unregister_memory_notifier(&ehea_mem_nb);
3813 kfree(ehea_fw_handles.arr); 3796 kfree(ehea_fw_handles.arr);
3814 kfree(ehea_bcmc_regs.arr); 3797 kfree(ehea_bcmc_regs.arr);
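The ehea_main.c hunks above convert the driver's private ehea_error()/ehea_info() wrappers to the standard pr_err()/pr_info() calls and to the netdev_*()/netif_*() helpers, with a pr_fmt() define supplying the module-name prefix. A minimal sketch of that pattern follows; the struct and function names are illustrative only and are not part of the patch:

	/* Sketch only: pr_fmt() must be defined before the first #include so
	 * that every pr_*() call in the file is prefixed with the module name. */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/kernel.h>
	#include <linux/netdevice.h>

	struct demo_priv {		/* hypothetical private struct */
		u32 msg_enable;		/* required by the netif_*() macros */
	};

	static void demo_report(struct net_device *dev, int err)
	{
		struct demo_priv *priv = netdev_priv(dev);

		if (err)
			netdev_err(dev, "operation failed, err=%d\n", err);
		else
			netif_info(priv, ifup, dev, "interface is up\n");

		pr_info("module-level message, no netdev context needed\n");
	}

netdev_*() ties a message to a specific interface, netif_*() additionally honours the per-device msg_enable bitmap (the ethtool msglvl setting), and pr_*() covers messages that have no netdev context, which is why the patch uses all three depending on what is available at the call site.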
diff --git a/drivers/net/ehea/ehea_phyp.c b/drivers/net/ehea/ehea_phyp.c
index 8fe9dcaa7538..0506967b9044 100644
--- a/drivers/net/ehea/ehea_phyp.c
+++ b/drivers/net/ehea/ehea_phyp.c
@@ -26,6 +26,8 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include "ehea_phyp.h" 31#include "ehea_phyp.h"
30 32
31 33
@@ -67,12 +69,11 @@ static long ehea_plpar_hcall_norets(unsigned long opcode,
67 } 69 }
68 70
69 if (ret < H_SUCCESS) 71 if (ret < H_SUCCESS)
70 ehea_error("opcode=%lx ret=%lx" 72 pr_err("opcode=%lx ret=%lx"
71 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" 73 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
72 " arg5=%lx arg6=%lx arg7=%lx ", 74 " arg5=%lx arg6=%lx arg7=%lx\n",
73 opcode, ret, 75 opcode, ret,
74 arg1, arg2, arg3, arg4, arg5, 76 arg1, arg2, arg3, arg4, arg5, arg6, arg7);
75 arg6, arg7);
76 77
77 return ret; 78 return ret;
78 } 79 }
@@ -114,19 +115,18 @@ static long ehea_plpar_hcall9(unsigned long opcode,
114 && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO) 115 && (((cb_cat == H_PORT_CB4) && ((arg3 == H_PORT_CB4_JUMBO)
115 || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7) 116 || (arg3 == H_PORT_CB4_SPEED))) || ((cb_cat == H_PORT_CB7)
116 && (arg3 == H_PORT_CB7_DUCQPN))))) 117 && (arg3 == H_PORT_CB7_DUCQPN)))))
117 ehea_error("opcode=%lx ret=%lx" 118 pr_err("opcode=%lx ret=%lx"
118 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx" 119 " arg1=%lx arg2=%lx arg3=%lx arg4=%lx"
119 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx" 120 " arg5=%lx arg6=%lx arg7=%lx arg8=%lx"
120 " arg9=%lx" 121 " arg9=%lx"
121 " out1=%lx out2=%lx out3=%lx out4=%lx" 122 " out1=%lx out2=%lx out3=%lx out4=%lx"
122 " out5=%lx out6=%lx out7=%lx out8=%lx" 123 " out5=%lx out6=%lx out7=%lx out8=%lx"
123 " out9=%lx", 124 " out9=%lx\n",
124 opcode, ret, 125 opcode, ret,
125 arg1, arg2, arg3, arg4, arg5, 126 arg1, arg2, arg3, arg4, arg5,
126 arg6, arg7, arg8, arg9, 127 arg6, arg7, arg8, arg9,
127 outs[0], outs[1], outs[2], outs[3], 128 outs[0], outs[1], outs[2], outs[3], outs[4],
128 outs[4], outs[5], outs[6], outs[7], 129 outs[5], outs[6], outs[7], outs[8]);
129 outs[8]);
130 return ret; 130 return ret;
131 } 131 }
132 132
@@ -515,7 +515,7 @@ u64 ehea_h_register_rpage_mr(const u64 adapter_handle, const u64 mr_handle,
515 const u64 log_pageaddr, const u64 count) 515 const u64 log_pageaddr, const u64 count)
516{ 516{
517 if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) { 517 if ((count > 1) && (log_pageaddr & ~PAGE_MASK)) {
518 ehea_error("not on pageboundary"); 518 pr_err("not on pageboundary\n");
519 return H_PARAMETER; 519 return H_PARAMETER;
520 } 520 }
521 521
diff --git a/drivers/net/ehea/ehea_qmr.c b/drivers/net/ehea/ehea_qmr.c
index 89128b6373e3..cd44bb8017d9 100644
--- a/drivers/net/ehea/ehea_qmr.c
+++ b/drivers/net/ehea/ehea_qmr.c
@@ -26,6 +26,8 @@
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
27 */ 27 */
28 28
29#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
30
29#include <linux/mm.h> 31#include <linux/mm.h>
30#include <linux/slab.h> 32#include <linux/slab.h>
31#include "ehea.h" 33#include "ehea.h"
@@ -45,7 +47,7 @@ static void *hw_qpageit_get_inc(struct hw_queue *queue)
45 queue->current_q_offset -= queue->pagesize; 47 queue->current_q_offset -= queue->pagesize;
46 retvalue = NULL; 48 retvalue = NULL;
47 } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) { 49 } else if (((u64) retvalue) & (EHEA_PAGESIZE-1)) {
48 ehea_error("not on pageboundary"); 50 pr_err("not on pageboundary\n");
49 retvalue = NULL; 51 retvalue = NULL;
50 } 52 }
51 return retvalue; 53 return retvalue;
@@ -58,15 +60,15 @@ static int hw_queue_ctor(struct hw_queue *queue, const u32 nr_of_pages,
58 int i, k; 60 int i, k;
59 61
60 if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) { 62 if ((pagesize > PAGE_SIZE) || (!pages_per_kpage)) {
61 ehea_error("pagesize conflict! kernel pagesize=%d, " 63 pr_err("pagesize conflict! kernel pagesize=%d, ehea pagesize=%d\n",
62 "ehea pagesize=%d", (int)PAGE_SIZE, (int)pagesize); 64 (int)PAGE_SIZE, (int)pagesize);
63 return -EINVAL; 65 return -EINVAL;
64 } 66 }
65 67
66 queue->queue_length = nr_of_pages * pagesize; 68 queue->queue_length = nr_of_pages * pagesize;
67 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL); 69 queue->queue_pages = kmalloc(nr_of_pages * sizeof(void *), GFP_KERNEL);
68 if (!queue->queue_pages) { 70 if (!queue->queue_pages) {
69 ehea_error("no mem for queue_pages"); 71 pr_err("no mem for queue_pages\n");
70 return -ENOMEM; 72 return -ENOMEM;
71 } 73 }
72 74
@@ -130,7 +132,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
130 132
131 cq = kzalloc(sizeof(*cq), GFP_KERNEL); 133 cq = kzalloc(sizeof(*cq), GFP_KERNEL);
132 if (!cq) { 134 if (!cq) {
133 ehea_error("no mem for cq"); 135 pr_err("no mem for cq\n");
134 goto out_nomem; 136 goto out_nomem;
135 } 137 }
136 138
@@ -147,7 +149,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
147 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr, 149 hret = ehea_h_alloc_resource_cq(adapter->handle, &cq->attr,
148 &cq->fw_handle, &cq->epas); 150 &cq->fw_handle, &cq->epas);
149 if (hret != H_SUCCESS) { 151 if (hret != H_SUCCESS) {
150 ehea_error("alloc_resource_cq failed"); 152 pr_err("alloc_resource_cq failed\n");
151 goto out_freemem; 153 goto out_freemem;
152 } 154 }
153 155
@@ -159,7 +161,7 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
159 for (counter = 0; counter < cq->attr.nr_pages; counter++) { 161 for (counter = 0; counter < cq->attr.nr_pages; counter++) {
160 vpage = hw_qpageit_get_inc(&cq->hw_queue); 162 vpage = hw_qpageit_get_inc(&cq->hw_queue);
161 if (!vpage) { 163 if (!vpage) {
162 ehea_error("hw_qpageit_get_inc failed"); 164 pr_err("hw_qpageit_get_inc failed\n");
163 goto out_kill_hwq; 165 goto out_kill_hwq;
164 } 166 }
165 167
@@ -168,9 +170,8 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
168 0, EHEA_CQ_REGISTER_ORIG, 170 0, EHEA_CQ_REGISTER_ORIG,
169 cq->fw_handle, rpage, 1); 171 cq->fw_handle, rpage, 1);
170 if (hret < H_SUCCESS) { 172 if (hret < H_SUCCESS) {
171 ehea_error("register_rpage_cq failed ehea_cq=%p " 173 pr_err("register_rpage_cq failed ehea_cq=%p hret=%llx counter=%i act_pages=%i\n",
172 "hret=%llx counter=%i act_pages=%i", 174 cq, hret, counter, cq->attr.nr_pages);
173 cq, hret, counter, cq->attr.nr_pages);
174 goto out_kill_hwq; 175 goto out_kill_hwq;
175 } 176 }
176 177
@@ -178,14 +179,14 @@ struct ehea_cq *ehea_create_cq(struct ehea_adapter *adapter,
178 vpage = hw_qpageit_get_inc(&cq->hw_queue); 179 vpage = hw_qpageit_get_inc(&cq->hw_queue);
179 180
180 if ((hret != H_SUCCESS) || (vpage)) { 181 if ((hret != H_SUCCESS) || (vpage)) {
181 ehea_error("registration of pages not " 182 pr_err("registration of pages not complete hret=%llx\n",
182 "complete hret=%llx\n", hret); 183 hret);
183 goto out_kill_hwq; 184 goto out_kill_hwq;
184 } 185 }
185 } else { 186 } else {
186 if (hret != H_PAGE_REGISTERED) { 187 if (hret != H_PAGE_REGISTERED) {
187 ehea_error("CQ: registration of page failed " 188 pr_err("CQ: registration of page failed hret=%llx\n",
188 "hret=%llx\n", hret); 189 hret);
189 goto out_kill_hwq; 190 goto out_kill_hwq;
190 } 191 }
191 } 192 }
@@ -241,7 +242,7 @@ int ehea_destroy_cq(struct ehea_cq *cq)
241 } 242 }
242 243
243 if (hret != H_SUCCESS) { 244 if (hret != H_SUCCESS) {
244 ehea_error("destroy CQ failed"); 245 pr_err("destroy CQ failed\n");
245 return -EIO; 246 return -EIO;
246 } 247 }
247 248
@@ -259,7 +260,7 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
259 260
260 eq = kzalloc(sizeof(*eq), GFP_KERNEL); 261 eq = kzalloc(sizeof(*eq), GFP_KERNEL);
261 if (!eq) { 262 if (!eq) {
262 ehea_error("no mem for eq"); 263 pr_err("no mem for eq\n");
263 return NULL; 264 return NULL;
264 } 265 }
265 266
@@ -272,21 +273,21 @@ struct ehea_eq *ehea_create_eq(struct ehea_adapter *adapter,
272 hret = ehea_h_alloc_resource_eq(adapter->handle, 273 hret = ehea_h_alloc_resource_eq(adapter->handle,
273 &eq->attr, &eq->fw_handle); 274 &eq->attr, &eq->fw_handle);
274 if (hret != H_SUCCESS) { 275 if (hret != H_SUCCESS) {
275 ehea_error("alloc_resource_eq failed"); 276 pr_err("alloc_resource_eq failed\n");
276 goto out_freemem; 277 goto out_freemem;
277 } 278 }
278 279
279 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages, 280 ret = hw_queue_ctor(&eq->hw_queue, eq->attr.nr_pages,
280 EHEA_PAGESIZE, sizeof(struct ehea_eqe)); 281 EHEA_PAGESIZE, sizeof(struct ehea_eqe));
281 if (ret) { 282 if (ret) {
282 ehea_error("can't allocate eq pages"); 283 pr_err("can't allocate eq pages\n");
283 goto out_freeres; 284 goto out_freeres;
284 } 285 }
285 286
286 for (i = 0; i < eq->attr.nr_pages; i++) { 287 for (i = 0; i < eq->attr.nr_pages; i++) {
287 vpage = hw_qpageit_get_inc(&eq->hw_queue); 288 vpage = hw_qpageit_get_inc(&eq->hw_queue);
288 if (!vpage) { 289 if (!vpage) {
289 ehea_error("hw_qpageit_get_inc failed"); 290 pr_err("hw_qpageit_get_inc failed\n");
290 hret = H_RESOURCE; 291 hret = H_RESOURCE;
291 goto out_kill_hwq; 292 goto out_kill_hwq;
292 } 293 }
@@ -370,7 +371,7 @@ int ehea_destroy_eq(struct ehea_eq *eq)
370 } 371 }
371 372
372 if (hret != H_SUCCESS) { 373 if (hret != H_SUCCESS) {
373 ehea_error("destroy EQ failed"); 374 pr_err("destroy EQ failed\n");
374 return -EIO; 375 return -EIO;
375 } 376 }
376 377
@@ -395,7 +396,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
395 for (cnt = 0; cnt < nr_pages; cnt++) { 396 for (cnt = 0; cnt < nr_pages; cnt++) {
396 vpage = hw_qpageit_get_inc(hw_queue); 397 vpage = hw_qpageit_get_inc(hw_queue);
397 if (!vpage) { 398 if (!vpage) {
398 ehea_error("hw_qpageit_get_inc failed"); 399 pr_err("hw_qpageit_get_inc failed\n");
399 goto out_kill_hwq; 400 goto out_kill_hwq;
400 } 401 }
401 rpage = virt_to_abs(vpage); 402 rpage = virt_to_abs(vpage);
@@ -403,7 +404,7 @@ int ehea_qp_alloc_register(struct ehea_qp *qp, struct hw_queue *hw_queue,
403 0, h_call_q_selector, 404 0, h_call_q_selector,
404 qp->fw_handle, rpage, 1); 405 qp->fw_handle, rpage, 1);
405 if (hret < H_SUCCESS) { 406 if (hret < H_SUCCESS) {
406 ehea_error("register_rpage_qp failed"); 407 pr_err("register_rpage_qp failed\n");
407 goto out_kill_hwq; 408 goto out_kill_hwq;
408 } 409 }
409 } 410 }
@@ -432,7 +433,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
432 433
433 qp = kzalloc(sizeof(*qp), GFP_KERNEL); 434 qp = kzalloc(sizeof(*qp), GFP_KERNEL);
434 if (!qp) { 435 if (!qp) {
435 ehea_error("no mem for qp"); 436 pr_err("no mem for qp\n");
436 return NULL; 437 return NULL;
437 } 438 }
438 439
@@ -441,7 +442,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
441 hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd, 442 hret = ehea_h_alloc_resource_qp(adapter->handle, init_attr, pd,
442 &qp->fw_handle, &qp->epas); 443 &qp->fw_handle, &qp->epas);
443 if (hret != H_SUCCESS) { 444 if (hret != H_SUCCESS) {
444 ehea_error("ehea_h_alloc_resource_qp failed"); 445 pr_err("ehea_h_alloc_resource_qp failed\n");
445 goto out_freemem; 446 goto out_freemem;
446 } 447 }
447 448
@@ -455,7 +456,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
455 init_attr->act_wqe_size_enc_sq, adapter, 456 init_attr->act_wqe_size_enc_sq, adapter,
456 0); 457 0);
457 if (ret) { 458 if (ret) {
458 ehea_error("can't register for sq ret=%x", ret); 459 pr_err("can't register for sq ret=%x\n", ret);
459 goto out_freeres; 460 goto out_freeres;
460 } 461 }
461 462
@@ -465,7 +466,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
465 init_attr->act_wqe_size_enc_rq1, 466 init_attr->act_wqe_size_enc_rq1,
466 adapter, 1); 467 adapter, 1);
467 if (ret) { 468 if (ret) {
468 ehea_error("can't register for rq1 ret=%x", ret); 469 pr_err("can't register for rq1 ret=%x\n", ret);
469 goto out_kill_hwsq; 470 goto out_kill_hwsq;
470 } 471 }
471 472
@@ -476,7 +477,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
476 init_attr->act_wqe_size_enc_rq2, 477 init_attr->act_wqe_size_enc_rq2,
477 adapter, 2); 478 adapter, 2);
478 if (ret) { 479 if (ret) {
479 ehea_error("can't register for rq2 ret=%x", ret); 480 pr_err("can't register for rq2 ret=%x\n", ret);
480 goto out_kill_hwr1q; 481 goto out_kill_hwr1q;
481 } 482 }
482 } 483 }
@@ -488,7 +489,7 @@ struct ehea_qp *ehea_create_qp(struct ehea_adapter *adapter,
488 init_attr->act_wqe_size_enc_rq3, 489 init_attr->act_wqe_size_enc_rq3,
489 adapter, 3); 490 adapter, 3);
490 if (ret) { 491 if (ret) {
491 ehea_error("can't register for rq3 ret=%x", ret); 492 pr_err("can't register for rq3 ret=%x\n", ret);
492 goto out_kill_hwr2q; 493 goto out_kill_hwr2q;
493 } 494 }
494 } 495 }
@@ -553,7 +554,7 @@ int ehea_destroy_qp(struct ehea_qp *qp)
553 } 554 }
554 555
555 if (hret != H_SUCCESS) { 556 if (hret != H_SUCCESS) {
556 ehea_error("destroy QP failed"); 557 pr_err("destroy QP failed\n");
557 return -EIO; 558 return -EIO;
558 } 559 }
559 560
@@ -842,7 +843,7 @@ static u64 ehea_reg_mr_section(int top, int dir, int idx, u64 *pt,
842 (hret != H_PAGE_REGISTERED)) { 843 (hret != H_PAGE_REGISTERED)) {
843 ehea_h_free_resource(adapter->handle, mr->handle, 844 ehea_h_free_resource(adapter->handle, mr->handle,
844 FORCE_FREE); 845 FORCE_FREE);
845 ehea_error("register_rpage_mr failed"); 846 pr_err("register_rpage_mr failed\n");
846 return hret; 847 return hret;
847 } 848 }
848 } 849 }
@@ -896,7 +897,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
896 897
897 pt = (void *)get_zeroed_page(GFP_KERNEL); 898 pt = (void *)get_zeroed_page(GFP_KERNEL);
898 if (!pt) { 899 if (!pt) {
899 ehea_error("no mem"); 900 pr_err("no mem\n");
900 ret = -ENOMEM; 901 ret = -ENOMEM;
901 goto out; 902 goto out;
902 } 903 }
@@ -906,14 +907,14 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
906 &mr->handle, &mr->lkey); 907 &mr->handle, &mr->lkey);
907 908
908 if (hret != H_SUCCESS) { 909 if (hret != H_SUCCESS) {
909 ehea_error("alloc_resource_mr failed"); 910 pr_err("alloc_resource_mr failed\n");
910 ret = -EIO; 911 ret = -EIO;
911 goto out; 912 goto out;
912 } 913 }
913 914
914 if (!ehea_bmap) { 915 if (!ehea_bmap) {
915 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); 916 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
916 ehea_error("no busmap available"); 917 pr_err("no busmap available\n");
917 ret = -EIO; 918 ret = -EIO;
918 goto out; 919 goto out;
919 } 920 }
@@ -929,7 +930,7 @@ int ehea_reg_kernel_mr(struct ehea_adapter *adapter, struct ehea_mr *mr)
929 930
930 if (hret != H_SUCCESS) { 931 if (hret != H_SUCCESS) {
931 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE); 932 ehea_h_free_resource(adapter->handle, mr->handle, FORCE_FREE);
932 ehea_error("registering mr failed"); 933 pr_err("registering mr failed\n");
933 ret = -EIO; 934 ret = -EIO;
934 goto out; 935 goto out;
935 } 936 }
@@ -952,7 +953,7 @@ int ehea_rem_mr(struct ehea_mr *mr)
952 hret = ehea_h_free_resource(mr->adapter->handle, mr->handle, 953 hret = ehea_h_free_resource(mr->adapter->handle, mr->handle,
953 FORCE_FREE); 954 FORCE_FREE);
954 if (hret != H_SUCCESS) { 955 if (hret != H_SUCCESS) {
955 ehea_error("destroy MR failed"); 956 pr_err("destroy MR failed\n");
956 return -EIO; 957 return -EIO;
957 } 958 }
958 959
@@ -987,14 +988,14 @@ void print_error_data(u64 *data)
987 length = EHEA_PAGESIZE; 988 length = EHEA_PAGESIZE;
988 989
989 if (type == EHEA_AER_RESTYPE_QP) 990 if (type == EHEA_AER_RESTYPE_QP)
990 ehea_error("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, " 991 pr_err("QP (resource=%llX) state: AER=0x%llX, AERR=0x%llX, port=%llX\n",
991 "port=%llX", resource, data[6], data[12], data[22]); 992 resource, data[6], data[12], data[22]);
992 else if (type == EHEA_AER_RESTYPE_CQ) 993 else if (type == EHEA_AER_RESTYPE_CQ)
993 ehea_error("CQ (resource=%llX) state: AER=0x%llX", resource, 994 pr_err("CQ (resource=%llX) state: AER=0x%llX\n",
994 data[6]); 995 resource, data[6]);
995 else if (type == EHEA_AER_RESTYPE_EQ) 996 else if (type == EHEA_AER_RESTYPE_EQ)
996 ehea_error("EQ (resource=%llX) state: AER=0x%llX", resource, 997 pr_err("EQ (resource=%llX) state: AER=0x%llX\n",
997 data[6]); 998 resource, data[6]);
998 999
999 ehea_dump(data, length, "error data"); 1000 ehea_dump(data, length, "error data");
1000} 1001}
@@ -1008,7 +1009,7 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1008 1009
1009 rblock = (void *)get_zeroed_page(GFP_KERNEL); 1010 rblock = (void *)get_zeroed_page(GFP_KERNEL);
1010 if (!rblock) { 1011 if (!rblock) {
1011 ehea_error("Cannot allocate rblock memory."); 1012 pr_err("Cannot allocate rblock memory\n");
1012 goto out; 1013 goto out;
1013 } 1014 }
1014 1015
@@ -1020,9 +1021,9 @@ u64 ehea_error_data(struct ehea_adapter *adapter, u64 res_handle,
1020 *aerr = rblock[12]; 1021 *aerr = rblock[12];
1021 print_error_data(rblock); 1022 print_error_data(rblock);
1022 } else if (ret == H_R_STATE) { 1023 } else if (ret == H_R_STATE) {
1023 ehea_error("No error data available: %llX.", res_handle); 1024 pr_err("No error data available: %llX\n", res_handle);
1024 } else 1025 } else
1025 ehea_error("Error data could not be fetched: %llX", res_handle); 1026 pr_err("Error data could not be fetched: %llX\n", res_handle);
1026 1027
1027 free_page((unsigned long)rblock); 1028 free_page((unsigned long)rblock);
1028out: 1029out:
diff --git a/drivers/net/enic/enic.h b/drivers/net/enic/enic.h
index c91d364c5527..a937f49d9db7 100644
--- a/drivers/net/enic/enic.h
+++ b/drivers/net/enic/enic.h
@@ -32,7 +32,7 @@
32 32
33#define DRV_NAME "enic" 33#define DRV_NAME "enic"
34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver" 34#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
35#define DRV_VERSION "1.4.1.6" 35#define DRV_VERSION "1.4.1.10"
36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc" 36#define DRV_COPYRIGHT "Copyright 2008-2010 Cisco Systems, Inc"
37 37
38#define ENIC_BARS_MAX 6 38#define ENIC_BARS_MAX 6
@@ -61,6 +61,8 @@ struct enic_port_profile {
61 char name[PORT_PROFILE_MAX]; 61 char name[PORT_PROFILE_MAX];
62 u8 instance_uuid[PORT_UUID_MAX]; 62 u8 instance_uuid[PORT_UUID_MAX];
63 u8 host_uuid[PORT_UUID_MAX]; 63 u8 host_uuid[PORT_UUID_MAX];
64 u8 vf_mac[ETH_ALEN];
65 u8 mac_addr[ETH_ALEN];
64}; 66};
65 67
66/* Per-instance private data structure */ 68/* Per-instance private data structure */
@@ -78,8 +80,10 @@ struct enic {
78 spinlock_t devcmd_lock; 80 spinlock_t devcmd_lock;
79 u8 mac_addr[ETH_ALEN]; 81 u8 mac_addr[ETH_ALEN];
80 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 82 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
83 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
81 unsigned int flags; 84 unsigned int flags;
82 unsigned int mc_count; 85 unsigned int mc_count;
86 unsigned int uc_count;
83 int csum_rx_enabled; 87 int csum_rx_enabled;
84 u32 port_mtu; 88 u32 port_mtu;
85 u32 rx_coalesce_usecs; 89 u32 rx_coalesce_usecs;
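The enic_main.c changes below split the old enic_set_multicast_list() into enic_add_multicast_addr_list(), a new enic_add_unicast_addr_list() and enic_set_rx_mode(); each list helper diffs the requested addresses against the copy saved from the previous call so only changed entries are added to or removed from the adapter's perfect filters. A small stand-alone sketch of that list-diffing idea, with illustrative names that are not the driver's code:

	#include <stdio.h>
	#include <string.h>

	#define ETH_ALEN	6
	#define MAX_FILTERS	4	/* stand-in for the perfect-filter limit */

	static unsigned char saved[MAX_FILTERS][ETH_ALEN];
	static unsigned int saved_count;

	/* placeholders for the device add/del filter calls */
	static void dev_del_addr(const unsigned char *a) { printf("del ..%02x\n", a[ETH_ALEN - 1]); }
	static void dev_add_addr(const unsigned char *a) { printf("add ..%02x\n", a[ETH_ALEN - 1]); }

	static void sync_addr_list(unsigned char list[][ETH_ALEN], unsigned int count)
	{
		unsigned int i, j;

		if (count > MAX_FILTERS)	/* truncate, as the driver does */
			count = MAX_FILTERS;

		/* delete addresses that are no longer requested */
		for (i = 0; i < saved_count; i++) {
			for (j = 0; j < count; j++)
				if (!memcmp(saved[i], list[j], ETH_ALEN))
					break;
			if (j == count)
				dev_del_addr(saved[i]);
		}

		/* add addresses that were not present last time */
		for (i = 0; i < count; i++) {
			for (j = 0; j < saved_count; j++)
				if (!memcmp(list[i], saved[j], ETH_ALEN))
					break;
			if (j == saved_count)
				dev_add_addr(list[i]);
		}

		/* save the new list for the next call */
		memcpy(saved, list, (size_t)count * ETH_ALEN);
		saved_count = count;
	}

	int main(void)
	{
		unsigned char first[2][ETH_ALEN]  = { { 0, 0, 0, 0, 0, 1 }, { 0, 0, 0, 0, 0, 2 } };
		unsigned char second[2][ETH_ALEN] = { { 0, 0, 0, 0, 0, 2 }, { 0, 0, 0, 0, 0, 3 } };

		sync_addr_list(first, 2);	/* adds ..01 and ..02 */
		sync_addr_list(second, 2);	/* deletes ..01, adds ..03 */
		return 0;
	}

The quadratic compare is acceptable here because the perfect-filter tables are small; when the requested list overflows them, enic_set_rx_mode() falls back to promiscuous or all-multicast mode instead.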
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index aa28b270c045..a0af48c51fb3 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -702,7 +702,7 @@ static inline void enic_queue_wq_skb_csum_l4(struct enic *enic,
702{ 702{
703 unsigned int head_len = skb_headlen(skb); 703 unsigned int head_len = skb_headlen(skb);
704 unsigned int len_left = skb->len - head_len; 704 unsigned int len_left = skb->len - head_len;
705 unsigned int hdr_len = skb_transport_offset(skb); 705 unsigned int hdr_len = skb_checksum_start_offset(skb);
706 unsigned int csum_offset = hdr_len + skb->csum_offset; 706 unsigned int csum_offset = hdr_len + skb->csum_offset;
707 int eop = (len_left == 0); 707 int eop = (len_left == 0);
708 708
@@ -1002,7 +1002,7 @@ static int enic_dev_packet_filter(struct enic *enic, int directed,
1002 return err; 1002 return err;
1003} 1003}
1004 1004
1005static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr) 1005static int enic_dev_add_addr(struct enic *enic, u8 *addr)
1006{ 1006{
1007 int err; 1007 int err;
1008 1008
@@ -1013,7 +1013,7 @@ static int enic_dev_add_multicast_addr(struct enic *enic, u8 *addr)
1013 return err; 1013 return err;
1014} 1014}
1015 1015
1016static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr) 1016static int enic_dev_del_addr(struct enic *enic, u8 *addr)
1017{ 1017{
1018 int err; 1018 int err;
1019 1019
@@ -1024,29 +1024,19 @@ static int enic_dev_del_multicast_addr(struct enic *enic, u8 *addr)
1024 return err; 1024 return err;
1025} 1025}
1026 1026
1027/* netif_tx_lock held, BHs disabled */ 1027static void enic_add_multicast_addr_list(struct enic *enic)
1028static void enic_set_multicast_list(struct net_device *netdev)
1029{ 1028{
1030 struct enic *enic = netdev_priv(netdev); 1029 struct net_device *netdev = enic->netdev;
1031 struct netdev_hw_addr *ha; 1030 struct netdev_hw_addr *ha;
1032 int directed = 1;
1033 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
1034 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
1035 int promisc = (netdev->flags & IFF_PROMISC) ? 1 : 0;
1036 unsigned int mc_count = netdev_mc_count(netdev); 1031 unsigned int mc_count = netdev_mc_count(netdev);
1037 int allmulti = (netdev->flags & IFF_ALLMULTI) ||
1038 mc_count > ENIC_MULTICAST_PERFECT_FILTERS;
1039 unsigned int flags = netdev->flags | (allmulti ? IFF_ALLMULTI : 0);
1040 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN]; 1032 u8 mc_addr[ENIC_MULTICAST_PERFECT_FILTERS][ETH_ALEN];
1041 unsigned int i, j; 1033 unsigned int i, j;
1042 1034
1043 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) 1035 if (mc_count > ENIC_MULTICAST_PERFECT_FILTERS) {
1036 netdev_warn(netdev, "Registering only %d out of %d "
1037 "multicast addresses\n",
1038 ENIC_MULTICAST_PERFECT_FILTERS, mc_count);
1044 mc_count = ENIC_MULTICAST_PERFECT_FILTERS; 1039 mc_count = ENIC_MULTICAST_PERFECT_FILTERS;
1045
1046 if (enic->flags != flags) {
1047 enic->flags = flags;
1048 enic_dev_packet_filter(enic, directed,
1049 multicast, broadcast, promisc, allmulti);
1050 } 1040 }
1051 1041
1052 /* Is there an easier way? Trying to minimize to 1042 /* Is there an easier way? Trying to minimize to
@@ -1068,7 +1058,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
1068 mc_addr[j]) == 0) 1058 mc_addr[j]) == 0)
1069 break; 1059 break;
1070 if (j == mc_count) 1060 if (j == mc_count)
1071 enic_dev_del_multicast_addr(enic, enic->mc_addr[i]); 1061 enic_dev_del_addr(enic, enic->mc_addr[i]);
1072 } 1062 }
1073 1063
1074 for (i = 0; i < mc_count; i++) { 1064 for (i = 0; i < mc_count; i++) {
@@ -1077,7 +1067,7 @@ static void enic_set_multicast_list(struct net_device *netdev)
1077 enic->mc_addr[j]) == 0) 1067 enic->mc_addr[j]) == 0)
1078 break; 1068 break;
1079 if (j == enic->mc_count) 1069 if (j == enic->mc_count)
1080 enic_dev_add_multicast_addr(enic, mc_addr[i]); 1070 enic_dev_add_addr(enic, mc_addr[i]);
1081 } 1071 }
1082 1072
1083 /* Save the list to compare against next time 1073 /* Save the list to compare against next time
@@ -1089,6 +1079,89 @@ static void enic_set_multicast_list(struct net_device *netdev)
1089 enic->mc_count = mc_count; 1079 enic->mc_count = mc_count;
1090} 1080}
1091 1081
1082static void enic_add_unicast_addr_list(struct enic *enic)
1083{
1084 struct net_device *netdev = enic->netdev;
1085 struct netdev_hw_addr *ha;
1086 unsigned int uc_count = netdev_uc_count(netdev);
1087 u8 uc_addr[ENIC_UNICAST_PERFECT_FILTERS][ETH_ALEN];
1088 unsigned int i, j;
1089
1090 if (uc_count > ENIC_UNICAST_PERFECT_FILTERS) {
1091 netdev_warn(netdev, "Registering only %d out of %d "
1092 "unicast addresses\n",
1093 ENIC_UNICAST_PERFECT_FILTERS, uc_count);
1094 uc_count = ENIC_UNICAST_PERFECT_FILTERS;
1095 }
1096
1097 /* Is there an easier way? Trying to minimize to
1098 * calls to add/del unicast addrs. We keep the
1099 * addrs from the last call in enic->uc_addr and
1100 * look for changes to add/del.
1101 */
1102
1103 i = 0;
1104 netdev_for_each_uc_addr(ha, netdev) {
1105 if (i == uc_count)
1106 break;
1107 memcpy(uc_addr[i++], ha->addr, ETH_ALEN);
1108 }
1109
1110 for (i = 0; i < enic->uc_count; i++) {
1111 for (j = 0; j < uc_count; j++)
1112 if (compare_ether_addr(enic->uc_addr[i],
1113 uc_addr[j]) == 0)
1114 break;
1115 if (j == uc_count)
1116 enic_dev_del_addr(enic, enic->uc_addr[i]);
1117 }
1118
1119 for (i = 0; i < uc_count; i++) {
1120 for (j = 0; j < enic->uc_count; j++)
1121 if (compare_ether_addr(uc_addr[i],
1122 enic->uc_addr[j]) == 0)
1123 break;
1124 if (j == enic->uc_count)
1125 enic_dev_add_addr(enic, uc_addr[i]);
1126 }
1127
1128 /* Save the list to compare against next time
1129 */
1130
1131 for (i = 0; i < uc_count; i++)
1132 memcpy(enic->uc_addr[i], uc_addr[i], ETH_ALEN);
1133
1134 enic->uc_count = uc_count;
1135}
1136
1137/* netif_tx_lock held, BHs disabled */
1138static void enic_set_rx_mode(struct net_device *netdev)
1139{
1140 struct enic *enic = netdev_priv(netdev);
1141 int directed = 1;
1142 int multicast = (netdev->flags & IFF_MULTICAST) ? 1 : 0;
1143 int broadcast = (netdev->flags & IFF_BROADCAST) ? 1 : 0;
1144 int promisc = (netdev->flags & IFF_PROMISC) ||
1145 netdev_uc_count(netdev) > ENIC_UNICAST_PERFECT_FILTERS;
1146 int allmulti = (netdev->flags & IFF_ALLMULTI) ||
1147 netdev_mc_count(netdev) > ENIC_MULTICAST_PERFECT_FILTERS;
1148 unsigned int flags = netdev->flags |
1149 (allmulti ? IFF_ALLMULTI : 0) |
1150 (promisc ? IFF_PROMISC : 0);
1151
1152 if (enic->flags != flags) {
1153 enic->flags = flags;
1154 enic_dev_packet_filter(enic, directed,
1155 multicast, broadcast, promisc, allmulti);
1156 }
1157
1158 if (!promisc) {
1159 enic_add_unicast_addr_list(enic);
1160 if (!allmulti)
1161 enic_add_multicast_addr_list(enic);
1162 }
1163}
1164
1092/* rtnl lock is held */ 1165/* rtnl lock is held */
1093static void enic_vlan_rx_register(struct net_device *netdev, 1166static void enic_vlan_rx_register(struct net_device *netdev,
1094 struct vlan_group *vlan_group) 1167 struct vlan_group *vlan_group)
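
The hunks above split the old enic_set_multicast_list() into enic_set_rx_mode() plus two helpers that reprogram the perfect filters by diffing the list programmed last time against the list requested now. A minimal standalone sketch of that two-pass diff, with add_addr()/del_addr() standing in for the enic_dev_add_addr()/enic_dev_del_addr() firmware calls (everything else is plain C and not from the driver):

/* Sketch of the list sync used by enic_add_multicast_addr_list() and
 * enic_add_unicast_addr_list(): delete entries that dropped out of the
 * new list, then add entries that are not programmed yet.
 */
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void add_addr(const unsigned char *a) { printf("add ...%02x\n", a[5]); }
static void del_addr(const unsigned char *a) { printf("del ...%02x\n", a[5]); }

static void sync_addr_list(unsigned char old[][ETH_ALEN], unsigned int old_n,
                           unsigned char new[][ETH_ALEN], unsigned int new_n)
{
        unsigned int i, j;

        /* pass 1: programmed last time but missing now -> delete */
        for (i = 0; i < old_n; i++) {
                for (j = 0; j < new_n; j++)
                        if (!memcmp(old[i], new[j], ETH_ALEN))
                                break;
                if (j == new_n)
                        del_addr(old[i]);
        }

        /* pass 2: requested now but not programmed yet -> add */
        for (i = 0; i < new_n; i++) {
                for (j = 0; j < old_n; j++)
                        if (!memcmp(new[i], old[j], ETH_ALEN))
                                break;
                if (j == old_n)
                        add_addr(new[i]);
        }

        /* the driver then copies new[] into enic->mc_addr / uc_addr so the
         * next call has something to compare against */
}
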
@@ -1158,11 +1231,31 @@ static int enic_dev_init_done(struct enic *enic, int *done, int *error)
1158 return err; 1231 return err;
1159} 1232}
1160 1233
1234static int enic_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
1235{
1236 struct enic *enic = netdev_priv(netdev);
1237
1238 if (vf != PORT_SELF_VF)
1239 return -EOPNOTSUPP;
1240
1241 /* Ignore the vf argument for now. We can assume the request
1242 * is coming on a vf.
1243 */
1244 if (is_valid_ether_addr(mac)) {
1245 memcpy(enic->pp.vf_mac, mac, ETH_ALEN);
1246 return 0;
1247 } else
1248 return -EINVAL;
1249}
1250
1161static int enic_set_port_profile(struct enic *enic, u8 *mac) 1251static int enic_set_port_profile(struct enic *enic, u8 *mac)
1162{ 1252{
1163 struct vic_provinfo *vp; 1253 struct vic_provinfo *vp;
1164 u8 oui[3] = VIC_PROVINFO_CISCO_OUI; 1254 u8 oui[3] = VIC_PROVINFO_CISCO_OUI;
1255 u16 os_type = VIC_GENERIC_PROV_OS_TYPE_LINUX;
1165 char uuid_str[38]; 1256 char uuid_str[38];
1257 char client_mac_str[18];
1258 u8 *client_mac;
1166 int err; 1259 int err;
1167 1260
1168 err = enic_vnic_dev_deinit(enic); 1261 err = enic_vnic_dev_deinit(enic);
@@ -1180,46 +1273,63 @@ static int enic_set_port_profile(struct enic *enic, u8 *mac)
1180 return -EADDRNOTAVAIL; 1273 return -EADDRNOTAVAIL;
1181 1274
1182 vp = vic_provinfo_alloc(GFP_KERNEL, oui, 1275 vp = vic_provinfo_alloc(GFP_KERNEL, oui,
1183 VIC_PROVINFO_LINUX_TYPE); 1276 VIC_PROVINFO_GENERIC_TYPE);
1184 if (!vp) 1277 if (!vp)
1185 return -ENOMEM; 1278 return -ENOMEM;
1186 1279
1187 vic_provinfo_add_tlv(vp, 1280 vic_provinfo_add_tlv(vp,
1188 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR, 1281 VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR,
1189 strlen(enic->pp.name) + 1, enic->pp.name); 1282 strlen(enic->pp.name) + 1, enic->pp.name);
1190 1283
1284 if (!is_zero_ether_addr(enic->pp.mac_addr))
1285 client_mac = enic->pp.mac_addr;
1286 else
1287 client_mac = mac;
1288
1289 vic_provinfo_add_tlv(vp,
1290 VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR,
1291 ETH_ALEN, client_mac);
1292
1293 sprintf(client_mac_str, "%pM", client_mac);
1191 vic_provinfo_add_tlv(vp, 1294 vic_provinfo_add_tlv(vp,
1192 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR, 1295 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR,
1193 ETH_ALEN, mac); 1296 sizeof(client_mac_str), client_mac_str);
1194 1297
1195 if (enic->pp.set & ENIC_SET_INSTANCE) { 1298 if (enic->pp.set & ENIC_SET_INSTANCE) {
1196 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid); 1299 sprintf(uuid_str, "%pUB", enic->pp.instance_uuid);
1197 vic_provinfo_add_tlv(vp, 1300 vic_provinfo_add_tlv(vp,
1198 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR, 1301 VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR,
1199 sizeof(uuid_str), uuid_str); 1302 sizeof(uuid_str), uuid_str);
1200 } 1303 }
1201 1304
1202 if (enic->pp.set & ENIC_SET_HOST) { 1305 if (enic->pp.set & ENIC_SET_HOST) {
1203 sprintf(uuid_str, "%pUB", enic->pp.host_uuid); 1306 sprintf(uuid_str, "%pUB", enic->pp.host_uuid);
1204 vic_provinfo_add_tlv(vp, 1307 vic_provinfo_add_tlv(vp,
1205 VIC_LINUX_PROV_TLV_HOST_UUID_STR, 1308 VIC_GENERIC_PROV_TLV_HOST_UUID_STR,
1206 sizeof(uuid_str), uuid_str); 1309 sizeof(uuid_str), uuid_str);
1207 } 1310 }
1208 1311
1312 os_type = htons(os_type);
1313 vic_provinfo_add_tlv(vp,
1314 VIC_GENERIC_PROV_TLV_OS_TYPE,
1315 sizeof(os_type), &os_type);
1316
1209 err = enic_dev_init_prov(enic, vp); 1317 err = enic_dev_init_prov(enic, vp);
1210 vic_provinfo_free(vp); 1318 vic_provinfo_free(vp);
1211 if (err) 1319 if (err)
1212 return err; 1320 return err;
1321
1322 enic->pp.set |= ENIC_SET_APPLIED;
1213 break; 1323 break;
1214 1324
1215 case PORT_REQUEST_DISASSOCIATE: 1325 case PORT_REQUEST_DISASSOCIATE:
1326 enic->pp.set &= ~ENIC_SET_APPLIED;
1216 break; 1327 break;
1217 1328
1218 default: 1329 default:
1219 return -EINVAL; 1330 return -EINVAL;
1220 } 1331 }
1221 1332
1222 enic->pp.set |= ENIC_SET_APPLIED;
1223 return 0; 1333 return 0;
1224} 1334}
1225 1335
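
With VIC_PROVINFO_GENERIC_TYPE the associate path now describes the port profile as a series of type/length/value records: profile name, client MAC, cluster-port UUID string, optional client and host UUIDs, and the OS type. A rough sketch of appending such records to a flat buffer; the real struct vic_provinfo layout, field widths and byte order are not shown here, and tlv_append() below is only illustrative, not the actual vic_provinfo_add_tlv():

#include <stdint.h>
#include <string.h>

struct tlv_buf {
        uint8_t data[256];
        size_t  used;
};

/* Illustrative TLV append: one byte of type, one byte of length, then
 * the value bytes.
 */
static int tlv_append(struct tlv_buf *b, uint8_t type, uint8_t len,
                      const void *val)
{
        if (b->used + 2 + len > sizeof(b->data))
                return -1;                      /* out of room */
        b->data[b->used++] = type;
        b->data[b->used++] = len;
        memcpy(&b->data[b->used], val, len);
        b->used += len;
        return 0;
}
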
@@ -1227,29 +1337,31 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1227 struct nlattr *port[]) 1337 struct nlattr *port[])
1228{ 1338{
1229 struct enic *enic = netdev_priv(netdev); 1339 struct enic *enic = netdev_priv(netdev);
1340 struct enic_port_profile new_pp;
1341 int err = 0;
1230 1342
1231 memset(&enic->pp, 0, sizeof(enic->pp)); 1343 memset(&new_pp, 0, sizeof(new_pp));
1232 1344
1233 if (port[IFLA_PORT_REQUEST]) { 1345 if (port[IFLA_PORT_REQUEST]) {
1234 enic->pp.set |= ENIC_SET_REQUEST; 1346 new_pp.set |= ENIC_SET_REQUEST;
1235 enic->pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]); 1347 new_pp.request = nla_get_u8(port[IFLA_PORT_REQUEST]);
1236 } 1348 }
1237 1349
1238 if (port[IFLA_PORT_PROFILE]) { 1350 if (port[IFLA_PORT_PROFILE]) {
1239 enic->pp.set |= ENIC_SET_NAME; 1351 new_pp.set |= ENIC_SET_NAME;
1240 memcpy(enic->pp.name, nla_data(port[IFLA_PORT_PROFILE]), 1352 memcpy(new_pp.name, nla_data(port[IFLA_PORT_PROFILE]),
1241 PORT_PROFILE_MAX); 1353 PORT_PROFILE_MAX);
1242 } 1354 }
1243 1355
1244 if (port[IFLA_PORT_INSTANCE_UUID]) { 1356 if (port[IFLA_PORT_INSTANCE_UUID]) {
1245 enic->pp.set |= ENIC_SET_INSTANCE; 1357 new_pp.set |= ENIC_SET_INSTANCE;
1246 memcpy(enic->pp.instance_uuid, 1358 memcpy(new_pp.instance_uuid,
1247 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX); 1359 nla_data(port[IFLA_PORT_INSTANCE_UUID]), PORT_UUID_MAX);
1248 } 1360 }
1249 1361
1250 if (port[IFLA_PORT_HOST_UUID]) { 1362 if (port[IFLA_PORT_HOST_UUID]) {
1251 enic->pp.set |= ENIC_SET_HOST; 1363 new_pp.set |= ENIC_SET_HOST;
1252 memcpy(enic->pp.host_uuid, 1364 memcpy(new_pp.host_uuid,
1253 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX); 1365 nla_data(port[IFLA_PORT_HOST_UUID]), PORT_UUID_MAX);
1254 } 1366 }
1255 1367
@@ -1257,21 +1369,39 @@ static int enic_set_vf_port(struct net_device *netdev, int vf,
1257 if (vf != PORT_SELF_VF) 1369 if (vf != PORT_SELF_VF)
1258 return -EOPNOTSUPP; 1370 return -EOPNOTSUPP;
1259 1371
1260 if (!(enic->pp.set & ENIC_SET_REQUEST)) 1372 if (!(new_pp.set & ENIC_SET_REQUEST))
1261 return -EOPNOTSUPP; 1373 return -EOPNOTSUPP;
1262 1374
1263 if (enic->pp.request == PORT_REQUEST_ASSOCIATE) { 1375 if (new_pp.request == PORT_REQUEST_ASSOCIATE) {
1264 1376 /* Special case handling */
1265 /* If the interface mac addr hasn't been assigned, 1377 if (!is_zero_ether_addr(enic->pp.vf_mac))
1266 * assign a random mac addr before setting port- 1378 memcpy(new_pp.mac_addr, enic->pp.vf_mac, ETH_ALEN);
1267 * profile.
1268 */
1269 1379
1270 if (is_zero_ether_addr(netdev->dev_addr)) 1380 if (is_zero_ether_addr(netdev->dev_addr))
1271 random_ether_addr(netdev->dev_addr); 1381 random_ether_addr(netdev->dev_addr);
1382 } else if (new_pp.request == PORT_REQUEST_DISASSOCIATE) {
1383 if (!is_zero_ether_addr(enic->pp.mac_addr))
1384 enic_dev_del_addr(enic, enic->pp.mac_addr);
1272 } 1385 }
1273 1386
1274 return enic_set_port_profile(enic, netdev->dev_addr); 1387 memcpy(&enic->pp, &new_pp, sizeof(struct enic_port_profile));
1388
1389 err = enic_set_port_profile(enic, netdev->dev_addr);
1390 if (err)
1391 goto set_port_profile_cleanup;
1392
1393 if (!is_zero_ether_addr(enic->pp.mac_addr))
1394 enic_dev_add_addr(enic, enic->pp.mac_addr);
1395
1396set_port_profile_cleanup:
1397 memset(enic->pp.vf_mac, 0, ETH_ALEN);
1398
1399 if (err || enic->pp.request == PORT_REQUEST_DISASSOCIATE) {
1400 memset(netdev->dev_addr, 0, ETH_ALEN);
1401 memset(enic->pp.mac_addr, 0, ETH_ALEN);
1402 }
1403
1404 return err;
1275} 1405}
1276 1406
1277static int enic_get_vf_port(struct net_device *netdev, int vf, 1407static int enic_get_vf_port(struct net_device *netdev, int vf,
@@ -1851,8 +1981,11 @@ static int enic_open(struct net_device *netdev)
1851 for (i = 0; i < enic->rq_count; i++) 1981 for (i = 0; i < enic->rq_count; i++)
1852 vnic_rq_enable(&enic->rq[i]); 1982 vnic_rq_enable(&enic->rq[i]);
1853 1983
1854 enic_dev_add_station_addr(enic); 1984 if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
1855 enic_set_multicast_list(netdev); 1985 enic_dev_add_addr(enic, enic->pp.mac_addr);
1986 else
1987 enic_dev_add_station_addr(enic);
1988 enic_set_rx_mode(netdev);
1856 1989
1857 netif_wake_queue(netdev); 1990 netif_wake_queue(netdev);
1858 1991
@@ -1899,7 +2032,10 @@ static int enic_stop(struct net_device *netdev)
1899 2032
1900 netif_carrier_off(netdev); 2033 netif_carrier_off(netdev);
1901 netif_tx_disable(netdev); 2034 netif_tx_disable(netdev);
1902 enic_dev_del_station_addr(enic); 2035 if (enic_is_dynamic(enic) && !is_zero_ether_addr(enic->pp.mac_addr))
2036 enic_dev_del_addr(enic, enic->pp.mac_addr);
2037 else
2038 enic_dev_del_station_addr(enic);
1903 2039
1904 for (i = 0; i < enic->wq_count; i++) { 2040 for (i = 0; i < enic->wq_count; i++) {
1905 err = vnic_wq_disable(&enic->wq[i]); 2041 err = vnic_wq_disable(&enic->wq[i]);
@@ -2043,7 +2179,7 @@ static int enic_dev_hang_reset(struct enic *enic)
2043 2179
2044static int enic_set_rsskey(struct enic *enic) 2180static int enic_set_rsskey(struct enic *enic)
2045{ 2181{
2046 u64 rss_key_buf_pa; 2182 dma_addr_t rss_key_buf_pa;
2047 union vnic_rss_key *rss_key_buf_va = NULL; 2183 union vnic_rss_key *rss_key_buf_va = NULL;
2048 union vnic_rss_key rss_key = { 2184 union vnic_rss_key rss_key = {
2049 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101}, 2185 .key[0].b = {85, 67, 83, 97, 119, 101, 115, 111, 109, 101},
@@ -2074,7 +2210,7 @@ static int enic_set_rsskey(struct enic *enic)
2074 2210
2075static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits) 2211static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
2076{ 2212{
2077 u64 rss_cpu_buf_pa; 2213 dma_addr_t rss_cpu_buf_pa;
2078 union vnic_rss_cpu *rss_cpu_buf_va = NULL; 2214 union vnic_rss_cpu *rss_cpu_buf_va = NULL;
2079 unsigned int i; 2215 unsigned int i;
2080 int err; 2216 int err;
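
The two hunks above change the RSS key and RSS CPU table handles from u64 to dma_addr_t. The bus address is written back through a dma_addr_t pointer by the DMA allocation helper, so declaring it as u64 only happens to line up on configurations where dma_addr_t is 64 bits wide. A hedged sketch of the allocation shape, using dma_alloc_coherent() as a stand-in for whatever vnic helper enic actually calls:

#include <linux/dma-mapping.h>

/* Sketch only: the point is the declared type of the handle, not the
 * specific allocator.
 */
static void *alloc_rss_buf(struct device *dev, size_t len, dma_addr_t *pa)
{
        /* pa must really be a dma_addr_t *; passing the address of a u64
         * is wrong whenever sizeof(dma_addr_t) != sizeof(u64) */
        return dma_alloc_coherent(dev, len, pa, GFP_KERNEL);
}
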
@@ -2329,7 +2465,8 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
2329 .ndo_start_xmit = enic_hard_start_xmit, 2465 .ndo_start_xmit = enic_hard_start_xmit,
2330 .ndo_get_stats = enic_get_stats, 2466 .ndo_get_stats = enic_get_stats,
2331 .ndo_validate_addr = eth_validate_addr, 2467 .ndo_validate_addr = eth_validate_addr,
2332 .ndo_set_multicast_list = enic_set_multicast_list, 2468 .ndo_set_rx_mode = enic_set_rx_mode,
2469 .ndo_set_multicast_list = enic_set_rx_mode,
2333 .ndo_set_mac_address = enic_set_mac_address_dynamic, 2470 .ndo_set_mac_address = enic_set_mac_address_dynamic,
2334 .ndo_change_mtu = enic_change_mtu, 2471 .ndo_change_mtu = enic_change_mtu,
2335 .ndo_vlan_rx_register = enic_vlan_rx_register, 2472 .ndo_vlan_rx_register = enic_vlan_rx_register,
@@ -2338,6 +2475,9 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
2338 .ndo_tx_timeout = enic_tx_timeout, 2475 .ndo_tx_timeout = enic_tx_timeout,
2339 .ndo_set_vf_port = enic_set_vf_port, 2476 .ndo_set_vf_port = enic_set_vf_port,
2340 .ndo_get_vf_port = enic_get_vf_port, 2477 .ndo_get_vf_port = enic_get_vf_port,
2478#ifdef IFLA_VF_MAX
2479 .ndo_set_vf_mac = enic_set_vf_mac,
2480#endif
2341#ifdef CONFIG_NET_POLL_CONTROLLER 2481#ifdef CONFIG_NET_POLL_CONTROLLER
2342 .ndo_poll_controller = enic_poll_controller, 2482 .ndo_poll_controller = enic_poll_controller,
2343#endif 2483#endif
@@ -2350,7 +2490,8 @@ static const struct net_device_ops enic_netdev_ops = {
2350 .ndo_get_stats = enic_get_stats, 2490 .ndo_get_stats = enic_get_stats,
2351 .ndo_validate_addr = eth_validate_addr, 2491 .ndo_validate_addr = eth_validate_addr,
2352 .ndo_set_mac_address = enic_set_mac_address, 2492 .ndo_set_mac_address = enic_set_mac_address,
2353 .ndo_set_multicast_list = enic_set_multicast_list, 2493 .ndo_set_rx_mode = enic_set_rx_mode,
2494 .ndo_set_multicast_list = enic_set_rx_mode,
2354 .ndo_change_mtu = enic_change_mtu, 2495 .ndo_change_mtu = enic_change_mtu,
2355 .ndo_vlan_rx_register = enic_vlan_rx_register, 2496 .ndo_vlan_rx_register = enic_vlan_rx_register,
2356 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid, 2497 .ndo_vlan_rx_add_vid = enic_vlan_rx_add_vid,
@@ -2694,7 +2835,7 @@ static void __devexit enic_remove(struct pci_dev *pdev)
2694 if (netdev) { 2835 if (netdev) {
2695 struct enic *enic = netdev_priv(netdev); 2836 struct enic *enic = netdev_priv(netdev);
2696 2837
2697 flush_scheduled_work(); 2838 cancel_work_sync(&enic->reset);
2698 unregister_netdev(netdev); 2839 unregister_netdev(netdev);
2699 enic_dev_deinit(enic); 2840 enic_dev_deinit(enic);
2700 vnic_dev_close(enic->vdev); 2841 vnic_dev_close(enic->vdev);
diff --git a/drivers/net/enic/enic_res.h b/drivers/net/enic/enic_res.h
index 9a103d9ef9e2..25be2734c3fe 100644
--- a/drivers/net/enic/enic_res.h
+++ b/drivers/net/enic/enic_res.h
@@ -34,6 +34,7 @@
34#define ENIC_MAX_MTU 9000 34#define ENIC_MAX_MTU 9000
35 35
36#define ENIC_MULTICAST_PERFECT_FILTERS 32 36#define ENIC_MULTICAST_PERFECT_FILTERS 32
37#define ENIC_UNICAST_PERFECT_FILTERS 32
37 38
38#define ENIC_NON_TSO_MAX_DESC 16 39#define ENIC_NON_TSO_MAX_DESC 16
39 40
diff --git a/drivers/net/enic/vnic_vic.h b/drivers/net/enic/vnic_vic.h
index 7e46e5e8600f..f700f5d9e81d 100644
--- a/drivers/net/enic/vnic_vic.h
+++ b/drivers/net/enic/vnic_vic.h
@@ -24,14 +24,29 @@
24/* Note: String field lengths include null char */ 24/* Note: String field lengths include null char */
25 25
26#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c } 26#define VIC_PROVINFO_CISCO_OUI { 0x00, 0x00, 0x0c }
27#define VIC_PROVINFO_LINUX_TYPE 0x2 27#define VIC_PROVINFO_GENERIC_TYPE 0x4
28 28
29enum vic_linux_prov_tlv_type { 29enum vic_generic_prov_tlv_type {
30 VIC_LINUX_PROV_TLV_PORT_PROFILE_NAME_STR = 0, 30 VIC_GENERIC_PROV_TLV_PORT_PROFILE_NAME_STR = 0,
31 VIC_LINUX_PROV_TLV_CLIENT_MAC_ADDR = 1, /* u8[6] */ 31 VIC_GENERIC_PROV_TLV_CLIENT_MAC_ADDR = 1,
32 VIC_LINUX_PROV_TLV_CLIENT_NAME_STR = 2, 32 VIC_GENERIC_PROV_TLV_CLIENT_NAME_STR = 2,
33 VIC_LINUX_PROV_TLV_HOST_UUID_STR = 8, 33 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_NAME_STR = 3,
34 VIC_LINUX_PROV_TLV_CLIENT_UUID_STR = 9, 34 VIC_GENERIC_PROV_TLV_CLUSTER_PORT_UUID_STR = 4,
35 VIC_GENERIC_PROV_TLV_CLUSTER_UUID_STR = 5,
36 VIC_GENERIC_PROV_TLV_CLUSTER_NAME_STR = 7,
37 VIC_GENERIC_PROV_TLV_HOST_UUID_STR = 8,
38 VIC_GENERIC_PROV_TLV_CLIENT_UUID_STR = 9,
39 VIC_GENERIC_PROV_TLV_INCARNATION_NUMBER = 10,
40 VIC_GENERIC_PROV_TLV_OS_TYPE = 11,
41 VIC_GENERIC_PROV_TLV_OS_VENDOR = 12,
42 VIC_GENERIC_PROV_TLV_CLIENT_TYPE = 15,
43};
44
45enum vic_generic_prov_os_type {
46 VIC_GENERIC_PROV_OS_TYPE_UNKNOWN = 0,
47 VIC_GENERIC_PROV_OS_TYPE_ESX = 1,
48 VIC_GENERIC_PROV_OS_TYPE_LINUX = 2,
49 VIC_GENERIC_PROV_OS_TYPE_WINDOWS = 3,
35}; 50};
36 51
37struct vic_provinfo { 52struct vic_provinfo {
diff --git a/drivers/net/epic100.c b/drivers/net/epic100.c
index aa56963ad558..c353bf3113cc 100644
--- a/drivers/net/epic100.c
+++ b/drivers/net/epic100.c
@@ -935,7 +935,7 @@ static void epic_init_ring(struct net_device *dev)
935 935
936 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 936 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
937 for (i = 0; i < RX_RING_SIZE; i++) { 937 for (i = 0; i < RX_RING_SIZE; i++) {
938 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz); 938 struct sk_buff *skb = dev_alloc_skb(ep->rx_buf_sz + 2);
939 ep->rx_skbuff[i] = skb; 939 ep->rx_skbuff[i] = skb;
940 if (skb == NULL) 940 if (skb == NULL)
941 break; 941 break;
@@ -1233,7 +1233,7 @@ static int epic_rx(struct net_device *dev, int budget)
1233 entry = ep->dirty_rx % RX_RING_SIZE; 1233 entry = ep->dirty_rx % RX_RING_SIZE;
1234 if (ep->rx_skbuff[entry] == NULL) { 1234 if (ep->rx_skbuff[entry] == NULL) {
1235 struct sk_buff *skb; 1235 struct sk_buff *skb;
1236 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz); 1236 skb = ep->rx_skbuff[entry] = dev_alloc_skb(ep->rx_buf_sz + 2);
1237 if (skb == NULL) 1237 if (skb == NULL)
1238 break; 1238 break;
1239 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ 1239 skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */
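
Both epic100 hunks add two bytes to the receive allocation so that the skb_reserve(skb, 2) that follows does not eat into the space needed for a full-sized frame; the reserve itself shifts the buffer so the IP header, which starts 14 bytes in after the Ethernet header, lands on an aligned boundary. A compressed sketch of the pattern; PKT_BUF_SZ is a made-up size, the driver uses ep->rx_buf_sz:

#include <linux/skbuff.h>

#define PKT_BUF_SZ 1536

static struct sk_buff *alloc_rx_skb(void)
{
        /* +2 so the reserve below does not shrink the usable space */
        struct sk_buff *skb = dev_alloc_skb(PKT_BUF_SZ + 2);

        if (skb)
                skb_reserve(skb, 2);    /* 14-byte MAC header + 2 = aligned IP header */
        return skb;
}
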
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index c5a2fe099a8d..b79d7e1555d5 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -19,6 +19,7 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/sched.h> 20#include <linux/sched.h>
21#include <linux/slab.h> 21#include <linux/slab.h>
22#include <linux/of.h>
22#include <net/ethoc.h> 23#include <net/ethoc.h>
23 24
24static int buffer_size = 0x8000; /* 32 KBytes */ 25static int buffer_size = 0x8000; /* 32 KBytes */
@@ -184,7 +185,6 @@ MODULE_PARM_DESC(buffer_size, "DMA buffer allocation size");
184 * @netdev: pointer to network device structure 185 * @netdev: pointer to network device structure
185 * @napi: NAPI structure 186 * @napi: NAPI structure
186 * @msg_enable: device state flags 187 * @msg_enable: device state flags
187 * @rx_lock: receive lock
188 * @lock: device lock 188 * @lock: device lock
189 * @phy: attached PHY 189 * @phy: attached PHY
190 * @mdio: MDIO bus for PHY access 190 * @mdio: MDIO bus for PHY access
@@ -209,7 +209,6 @@ struct ethoc {
209 struct napi_struct napi; 209 struct napi_struct napi;
210 u32 msg_enable; 210 u32 msg_enable;
211 211
212 spinlock_t rx_lock;
213 spinlock_t lock; 212 spinlock_t lock;
214 213
215 struct phy_device *phy; 214 struct phy_device *phy;
@@ -413,10 +412,21 @@ static int ethoc_rx(struct net_device *dev, int limit)
413 unsigned int entry; 412 unsigned int entry;
414 struct ethoc_bd bd; 413 struct ethoc_bd bd;
415 414
416 entry = priv->num_tx + (priv->cur_rx % priv->num_rx); 415 entry = priv->num_tx + priv->cur_rx;
417 ethoc_read_bd(priv, entry, &bd); 416 ethoc_read_bd(priv, entry, &bd);
418 if (bd.stat & RX_BD_EMPTY) 417 if (bd.stat & RX_BD_EMPTY) {
419 break; 418 ethoc_ack_irq(priv, INT_MASK_RX);
419 /* If packet (interrupt) came in between checking
420     * BD_EMPTY and clearing the interrupt source, then we
421 * risk missing the packet as the RX interrupt won't
422 * trigger right away when we reenable it; hence, check
423     * BD_EMPTY here again to make sure there isn't such a
424 * packet waiting for us...
425 */
426 ethoc_read_bd(priv, entry, &bd);
427 if (bd.stat & RX_BD_EMPTY)
428 break;
429 }
420 430
421 if (ethoc_update_rx_stats(priv, &bd) == 0) { 431 if (ethoc_update_rx_stats(priv, &bd) == 0) {
422 int size = bd.stat >> 16; 432 int size = bd.stat >> 16;
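
The empty-descriptor check above gains a second look after acking the RX interrupt: a frame can arrive between the first read of the descriptor and the clearing of the interrupt source, and without the re-check it would sit in the ring until some later, unrelated interrupt. A standalone sketch of that ordering, with desc_empty()/ack_rx_irq() standing in for ethoc_read_bd()/ethoc_ack_irq():

#include <stdbool.h>

bool desc_empty(unsigned int entry);    /* placeholder for ethoc_read_bd() */
void ack_rx_irq(void);                  /* placeholder for ethoc_ack_irq() */

/* "Ack, then look again": only the ordering matters here. */
static bool rx_really_idle(unsigned int entry)
{
        if (!desc_empty(entry))
                return false;           /* work already pending */
        ack_rx_irq();                   /* clear the latched RX source */
        return desc_empty(entry);       /* re-check: did a frame slip in? */
}
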
@@ -446,13 +456,14 @@ static int ethoc_rx(struct net_device *dev, int limit)
446 bd.stat &= ~RX_BD_STATS; 456 bd.stat &= ~RX_BD_STATS;
447 bd.stat |= RX_BD_EMPTY; 457 bd.stat |= RX_BD_EMPTY;
448 ethoc_write_bd(priv, entry, &bd); 458 ethoc_write_bd(priv, entry, &bd);
449 priv->cur_rx++; 459 if (++priv->cur_rx == priv->num_rx)
460 priv->cur_rx = 0;
450 } 461 }
451 462
452 return count; 463 return count;
453} 464}
454 465
455static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd) 466static void ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
456{ 467{
457 struct net_device *netdev = dev->netdev; 468 struct net_device *netdev = dev->netdev;
458 469
@@ -482,32 +493,44 @@ static int ethoc_update_tx_stats(struct ethoc *dev, struct ethoc_bd *bd)
482 netdev->stats.collisions += (bd->stat >> 4) & 0xf; 493 netdev->stats.collisions += (bd->stat >> 4) & 0xf;
483 netdev->stats.tx_bytes += bd->stat >> 16; 494 netdev->stats.tx_bytes += bd->stat >> 16;
484 netdev->stats.tx_packets++; 495 netdev->stats.tx_packets++;
485 return 0;
486} 496}
487 497
488static void ethoc_tx(struct net_device *dev) 498static int ethoc_tx(struct net_device *dev, int limit)
489{ 499{
490 struct ethoc *priv = netdev_priv(dev); 500 struct ethoc *priv = netdev_priv(dev);
501 int count;
502 struct ethoc_bd bd;
491 503
492 spin_lock(&priv->lock); 504 for (count = 0; count < limit; ++count) {
505 unsigned int entry;
493 506
494 while (priv->dty_tx != priv->cur_tx) { 507 entry = priv->dty_tx & (priv->num_tx-1);
495 unsigned int entry = priv->dty_tx % priv->num_tx;
496 struct ethoc_bd bd;
497 508
498 ethoc_read_bd(priv, entry, &bd); 509 ethoc_read_bd(priv, entry, &bd);
499 if (bd.stat & TX_BD_READY)
500 break;
501 510
502 entry = (++priv->dty_tx) % priv->num_tx; 511 if (bd.stat & TX_BD_READY || (priv->dty_tx == priv->cur_tx)) {
503 (void)ethoc_update_tx_stats(priv, &bd); 512 ethoc_ack_irq(priv, INT_MASK_TX);
513 /* If interrupt came in between reading in the BD
514 * and clearing the interrupt source, then we risk
515 * missing the event as the TX interrupt won't trigger
516 * right away when we reenable it; hence, check
517 * BD_EMPTY here again to make sure there isn't such an
518 * event pending...
519 */
520 ethoc_read_bd(priv, entry, &bd);
521 if (bd.stat & TX_BD_READY ||
522 (priv->dty_tx == priv->cur_tx))
523 break;
524 }
525
526 ethoc_update_tx_stats(priv, &bd);
527 priv->dty_tx++;
504 } 528 }
505 529
506 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2)) 530 if ((priv->cur_tx - priv->dty_tx) <= (priv->num_tx / 2))
507 netif_wake_queue(dev); 531 netif_wake_queue(dev);
508 532
509 ethoc_ack_irq(priv, INT_MASK_TX); 533 return count;
510 spin_unlock(&priv->lock);
511} 534}
512 535
513static irqreturn_t ethoc_interrupt(int irq, void *dev_id) 536static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
@@ -515,32 +538,38 @@ static irqreturn_t ethoc_interrupt(int irq, void *dev_id)
515 struct net_device *dev = dev_id; 538 struct net_device *dev = dev_id;
516 struct ethoc *priv = netdev_priv(dev); 539 struct ethoc *priv = netdev_priv(dev);
517 u32 pending; 540 u32 pending;
518 541 u32 mask;
519 ethoc_disable_irq(priv, INT_MASK_ALL); 542
543 /* Figure out what triggered the interrupt...
544 * The tricky bit here is that the interrupt source bits get
545     * set in INT_SOURCE for an event regardless of whether that
546 * event is masked or not. Thus, in order to figure out what
547 * triggered the interrupt, we need to remove the sources
548 * for all events that are currently masked. This behaviour
549 * is not particularly well documented but reasonable...
550 */
551 mask = ethoc_read(priv, INT_MASK);
520 pending = ethoc_read(priv, INT_SOURCE); 552 pending = ethoc_read(priv, INT_SOURCE);
553 pending &= mask;
554
521 if (unlikely(pending == 0)) { 555 if (unlikely(pending == 0)) {
522 ethoc_enable_irq(priv, INT_MASK_ALL);
523 return IRQ_NONE; 556 return IRQ_NONE;
524 } 557 }
525 558
526 ethoc_ack_irq(priv, pending); 559 ethoc_ack_irq(priv, pending);
527 560
561 /* We always handle the dropped packet interrupt */
528 if (pending & INT_MASK_BUSY) { 562 if (pending & INT_MASK_BUSY) {
529 dev_err(&dev->dev, "packet dropped\n"); 563 dev_err(&dev->dev, "packet dropped\n");
530 dev->stats.rx_dropped++; 564 dev->stats.rx_dropped++;
531 } 565 }
532 566
533 if (pending & INT_MASK_RX) { 567 /* Handle receive/transmit event by switching to polling */
534 if (napi_schedule_prep(&priv->napi)) 568 if (pending & (INT_MASK_TX | INT_MASK_RX)) {
535 __napi_schedule(&priv->napi); 569 ethoc_disable_irq(priv, INT_MASK_TX | INT_MASK_RX);
536 } else { 570 napi_schedule(&priv->napi);
537 ethoc_enable_irq(priv, INT_MASK_RX);
538 } 571 }
539 572
540 if (pending & INT_MASK_TX)
541 ethoc_tx(dev);
542
543 ethoc_enable_irq(priv, INT_MASK_ALL & ~INT_MASK_RX);
544 return IRQ_HANDLED; 573 return IRQ_HANDLED;
545} 574}
546 575
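
The reworked ethoc_interrupt() relies on the hardware detail spelled out in the added comment: INT_SOURCE latches every event whether or not it is enabled in INT_MASK, so the handler must intersect the two registers before deciding the interrupt was its own. A small sketch of that masking step, with placeholder register accessors and register ids:

#include <stdint.h>

extern uint32_t read_reg(int reg);                /* placeholder for ethoc_read() */
extern const int INT_MASK_REG, INT_SOURCE_REG;    /* placeholder register ids */

static uint32_t pending_events(void)
{
        uint32_t mask = read_reg(INT_MASK_REG);
        uint32_t pending = read_reg(INT_SOURCE_REG);

        return pending & mask;  /* zero means the interrupt was not ours */
}
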
@@ -566,26 +595,29 @@ static int ethoc_get_mac_address(struct net_device *dev, void *addr)
566static int ethoc_poll(struct napi_struct *napi, int budget) 595static int ethoc_poll(struct napi_struct *napi, int budget)
567{ 596{
568 struct ethoc *priv = container_of(napi, struct ethoc, napi); 597 struct ethoc *priv = container_of(napi, struct ethoc, napi);
569 int work_done = 0; 598 int rx_work_done = 0;
599 int tx_work_done = 0;
600
601 rx_work_done = ethoc_rx(priv->netdev, budget);
602 tx_work_done = ethoc_tx(priv->netdev, budget);
570 603
571 work_done = ethoc_rx(priv->netdev, budget); 604 if (rx_work_done < budget && tx_work_done < budget) {
572 if (work_done < budget) {
573 ethoc_enable_irq(priv, INT_MASK_RX);
574 napi_complete(napi); 605 napi_complete(napi);
606 ethoc_enable_irq(priv, INT_MASK_TX | INT_MASK_RX);
575 } 607 }
576 608
577 return work_done; 609 return rx_work_done;
578} 610}
579 611
580static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg) 612static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
581{ 613{
582 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
583 struct ethoc *priv = bus->priv; 614 struct ethoc *priv = bus->priv;
615 int i;
584 616
585 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 617 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
586 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ); 618 ethoc_write(priv, MIICOMMAND, MIICOMMAND_READ);
587 619
588 while (time_before(jiffies, timeout)) { 620 for (i=0; i < 5; i++) {
589 u32 status = ethoc_read(priv, MIISTATUS); 621 u32 status = ethoc_read(priv, MIISTATUS);
590 if (!(status & MIISTATUS_BUSY)) { 622 if (!(status & MIISTATUS_BUSY)) {
591 u32 data = ethoc_read(priv, MIIRX_DATA); 623 u32 data = ethoc_read(priv, MIIRX_DATA);
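
ethoc_poll() now services both directions and only leaves polled mode when each of them reports less work than the budget, re-enabling the TX and RX interrupt sources at that point. A sketch of the control flow, with placeholders for the driver and NAPI calls:

int rx_work(int budget);        /* stand-in for ethoc_rx()          */
int tx_work(int budget);        /* stand-in for ethoc_tx()          */
void irq_enable_rx_tx(void);    /* stand-in for ethoc_enable_irq()  */
void napi_done(void);           /* stand-in for napi_complete()     */

static int poll_once(int budget)
{
        int rx = rx_work(budget);
        int tx = tx_work(budget);

        if (rx < budget && tx < budget) {
                napi_done();            /* leave polled mode...          */
                irq_enable_rx_tx();     /* ...and go back to interrupts  */
        }
        return rx;                      /* NAPI accounts RX work only    */
}
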
@@ -593,8 +625,7 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
593 ethoc_write(priv, MIICOMMAND, 0); 625 ethoc_write(priv, MIICOMMAND, 0);
594 return data; 626 return data;
595 } 627 }
596 628 usleep_range(100,200);
597 schedule();
598 } 629 }
599 630
600 return -EBUSY; 631 return -EBUSY;
@@ -602,22 +633,21 @@ static int ethoc_mdio_read(struct mii_bus *bus, int phy, int reg)
602 633
603static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val) 634static int ethoc_mdio_write(struct mii_bus *bus, int phy, int reg, u16 val)
604{ 635{
605 unsigned long timeout = jiffies + ETHOC_MII_TIMEOUT;
606 struct ethoc *priv = bus->priv; 636 struct ethoc *priv = bus->priv;
637 int i;
607 638
608 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg)); 639 ethoc_write(priv, MIIADDRESS, MIIADDRESS_ADDR(phy, reg));
609 ethoc_write(priv, MIITX_DATA, val); 640 ethoc_write(priv, MIITX_DATA, val);
610 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE); 641 ethoc_write(priv, MIICOMMAND, MIICOMMAND_WRITE);
611 642
612 while (time_before(jiffies, timeout)) { 643 for (i=0; i < 5; i++) {
613 u32 stat = ethoc_read(priv, MIISTATUS); 644 u32 stat = ethoc_read(priv, MIISTATUS);
614 if (!(stat & MIISTATUS_BUSY)) { 645 if (!(stat & MIISTATUS_BUSY)) {
615 /* reset MII command register */ 646 /* reset MII command register */
616 ethoc_write(priv, MIICOMMAND, 0); 647 ethoc_write(priv, MIICOMMAND, 0);
617 return 0; 648 return 0;
618 } 649 }
619 650 usleep_range(100,200);
620 schedule();
621 } 651 }
622 652
623 return -EBUSY; 653 return -EBUSY;
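
Both MDIO accessors drop the jiffies deadline plus schedule() in favour of a short bounded poll: up to five reads of the BUSY flag with a 100-200 us sleep between them, returning -EBUSY if the bus never goes idle. The same loop in isolation, with a placeholder status read and sleep standing in for ethoc_read(priv, MIISTATUS) and usleep_range():

#include <errno.h>
#include <stdbool.h>

bool mii_busy(void);                                    /* placeholder status poll  */
void short_sleep_us(unsigned int lo, unsigned int hi);  /* placeholder usleep_range */

static int wait_mdio_idle(void)
{
        int i;

        for (i = 0; i < 5; i++) {
                if (!mii_busy())
                        return 0;
                short_sleep_us(100, 200);
        }
        return -EBUSY;          /* bus never went idle */
}
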
@@ -971,9 +1001,17 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
971 /* calculate the number of TX/RX buffers, maximum 128 supported */ 1001 /* calculate the number of TX/RX buffers, maximum 128 supported */
972 num_bd = min_t(unsigned int, 1002 num_bd = min_t(unsigned int,
973 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ); 1003 128, (netdev->mem_end - netdev->mem_start + 1) / ETHOC_BUFSIZ);
974 priv->num_tx = max(2, num_bd / 4); 1004 if (num_bd < 4) {
1005 ret = -ENODEV;
1006 goto error;
1007 }
1008 /* num_tx must be a power of two */
1009 priv->num_tx = rounddown_pow_of_two(num_bd >> 1);
975 priv->num_rx = num_bd - priv->num_tx; 1010 priv->num_rx = num_bd - priv->num_tx;
976 1011
1012 dev_dbg(&pdev->dev, "ethoc: num_tx: %d num_rx: %d\n",
1013 priv->num_tx, priv->num_rx);
1014
977 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL); 1015 priv->vma = devm_kzalloc(&pdev->dev, num_bd*sizeof(void*), GFP_KERNEL);
978 if (!priv->vma) { 1016 if (!priv->vma) {
979 ret = -ENOMEM; 1017 ret = -ENOMEM;
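
The probe now insists on at least four descriptors and rounds the TX share down to a power of two because the completion path indexes the ring with dty_tx & (num_tx - 1), and that mask only wraps correctly for power-of-two sizes. A standalone illustration of the split, with rounddown_pow_of_two() open-coded:

#include <stdio.h>

static unsigned int round_down_pow2(unsigned int n)
{
        unsigned int p = 1;

        while (p * 2 <= n)
                p *= 2;
        return p;
}

int main(void)
{
        unsigned int num_bd = 20;       /* example descriptor count */
        unsigned int num_tx = round_down_pow2(num_bd / 2);      /* -> 8  */
        unsigned int num_rx = num_bd - num_tx;                  /* -> 12 */

        printf("tx=%u rx=%u, index mask=0x%x\n", num_tx, num_rx, num_tx - 1);
        return 0;
}
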
@@ -982,10 +1020,23 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
982 1020
983 /* Allow the platform setup code to pass in a MAC address. */ 1021 /* Allow the platform setup code to pass in a MAC address. */
984 if (pdev->dev.platform_data) { 1022 if (pdev->dev.platform_data) {
985 struct ethoc_platform_data *pdata = 1023 struct ethoc_platform_data *pdata = pdev->dev.platform_data;
986 (struct ethoc_platform_data *)pdev->dev.platform_data;
987 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN); 1024 memcpy(netdev->dev_addr, pdata->hwaddr, IFHWADDRLEN);
988 priv->phy_id = pdata->phy_id; 1025 priv->phy_id = pdata->phy_id;
1026 } else {
1027 priv->phy_id = -1;
1028
1029#ifdef CONFIG_OF
1030 {
1031 const uint8_t* mac;
1032
1033 mac = of_get_property(pdev->dev.of_node,
1034 "local-mac-address",
1035 NULL);
1036 if (mac)
1037 memcpy(netdev->dev_addr, mac, IFHWADDRLEN);
1038 }
1039#endif
989 } 1040 }
990 1041
991 /* Check that the given MAC address is valid. If it isn't, read the 1042 /* Check that the given MAC address is valid. If it isn't, read the
@@ -1046,7 +1097,6 @@ static int __devinit ethoc_probe(struct platform_device *pdev)
1046 /* setup NAPI */ 1097 /* setup NAPI */
1047 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64); 1098 netif_napi_add(netdev, &priv->napi, ethoc_poll, 64);
1048 1099
1049 spin_lock_init(&priv->rx_lock);
1050 spin_lock_init(&priv->lock); 1100 spin_lock_init(&priv->lock);
1051 1101
1052 ret = register_netdev(netdev); 1102 ret = register_netdev(netdev);
@@ -1113,6 +1163,16 @@ static int ethoc_resume(struct platform_device *pdev)
1113# define ethoc_resume NULL 1163# define ethoc_resume NULL
1114#endif 1164#endif
1115 1165
1166#ifdef CONFIG_OF
1167static struct of_device_id ethoc_match[] = {
1168 {
1169 .compatible = "opencores,ethoc",
1170 },
1171 {},
1172};
1173MODULE_DEVICE_TABLE(of, ethoc_match);
1174#endif
1175
1116static struct platform_driver ethoc_driver = { 1176static struct platform_driver ethoc_driver = {
1117 .probe = ethoc_probe, 1177 .probe = ethoc_probe,
1118 .remove = __devexit_p(ethoc_remove), 1178 .remove = __devexit_p(ethoc_remove),
@@ -1120,6 +1180,10 @@ static struct platform_driver ethoc_driver = {
1120 .resume = ethoc_resume, 1180 .resume = ethoc_resume,
1121 .driver = { 1181 .driver = {
1122 .name = "ethoc", 1182 .name = "ethoc",
1183 .owner = THIS_MODULE,
1184#ifdef CONFIG_OF
1185 .of_match_table = ethoc_match,
1186#endif
1123 }, 1187 },
1124}; 1188};
1125 1189
diff --git a/drivers/net/fec.c b/drivers/net/fec.c
index cce32d43175f..2a71373719ae 100644
--- a/drivers/net/fec.c
+++ b/drivers/net/fec.c
@@ -17,6 +17,8 @@
17 * 17 *
18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be) 18 * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
19 * Copyright (c) 2004-2006 Macq Electronique SA. 19 * Copyright (c) 2004-2006 Macq Electronique SA.
20 *
21 * Copyright (C) 2010 Freescale Semiconductor, Inc.
20 */ 22 */
21 23
22#include <linux/module.h> 24#include <linux/module.h>
@@ -45,29 +47,41 @@
45 47
46#include <asm/cacheflush.h> 48#include <asm/cacheflush.h>
47 49
48#ifndef CONFIG_ARCH_MXC 50#ifndef CONFIG_ARM
49#include <asm/coldfire.h> 51#include <asm/coldfire.h>
50#include <asm/mcfsim.h> 52#include <asm/mcfsim.h>
51#endif 53#endif
52 54
53#include "fec.h" 55#include "fec.h"
54 56
55#ifdef CONFIG_ARCH_MXC 57#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
56#include <mach/hardware.h>
57#define FEC_ALIGNMENT 0xf 58#define FEC_ALIGNMENT 0xf
58#else 59#else
59#define FEC_ALIGNMENT 0x3 60#define FEC_ALIGNMENT 0x3
60#endif 61#endif
61 62
62/* 63#define DRIVER_NAME "fec"
63 * Define the fixed address of the FEC hardware. 64
64 */ 65/* Controller is ENET-MAC */
65#if defined(CONFIG_M5272) 66#define FEC_QUIRK_ENET_MAC (1 << 0)
67/* Controller needs driver to swap frame */
68#define FEC_QUIRK_SWAP_FRAME (1 << 1)
66 69
67static unsigned char fec_mac_default[] = { 70static struct platform_device_id fec_devtype[] = {
68 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 71 {
72 .name = DRIVER_NAME,
73 .driver_data = 0,
74 }, {
75 .name = "imx28-fec",
76 .driver_data = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME,
77 }
69}; 78};
70 79
80static unsigned char macaddr[ETH_ALEN];
81module_param_array(macaddr, byte, NULL, 0);
82MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
83
84#if defined(CONFIG_M5272)
71/* 85/*
72 * Some hardware gets it MAC address out of local flash memory. 86 * Some hardware gets it MAC address out of local flash memory.
73 * if this is non-zero then assume it is the address to get MAC from. 87 * if this is non-zero then assume it is the address to get MAC from.
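
The new fec_devtype[] table is how the driver tells the i.MX28 ENET-MAC variant apart from the classic FEC at run time: each platform_device_id carries a quirk bitmask in driver_data, and later hunks test FEC_QUIRK_ENET_MAC / FEC_QUIRK_SWAP_FRAME before taking the variant-specific path. A standalone illustration of that flag lookup, using plain structs rather than the kernel types:

#include <stdio.h>

#define QUIRK_ENET_MAC   (1 << 0)
#define QUIRK_SWAP_FRAME (1 << 1)

struct dev_id { const char *name; unsigned long driver_data; };

static const struct dev_id devtype[] = {
        { "fec",       0 },
        { "imx28-fec", QUIRK_ENET_MAC | QUIRK_SWAP_FRAME },
};

int main(void)
{
        const struct dev_id *id = &devtype[1];  /* pretend the imx28 entry probed */

        if (id->driver_data & QUIRK_SWAP_FRAME)
                printf("%s: driver must byte-swap frames\n", id->name);
        return 0;
}
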
@@ -133,7 +147,8 @@ static unsigned char fec_mac_default[] = {
133 * account when setting it. 147 * account when setting it.
134 */ 148 */
135#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 149#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
136 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) 150 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
151 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
137#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16) 152#define OPT_FRAME_SIZE (PKT_MAXBUF_SIZE << 16)
138#else 153#else
139#define OPT_FRAME_SIZE 0 154#define OPT_FRAME_SIZE 0
@@ -186,7 +201,6 @@ struct fec_enet_private {
186 int mii_timeout; 201 int mii_timeout;
187 uint phy_speed; 202 uint phy_speed;
188 phy_interface_t phy_interface; 203 phy_interface_t phy_interface;
189 int index;
190 int link; 204 int link;
191 int full_duplex; 205 int full_duplex;
192 struct completion mdio_done; 206 struct completion mdio_done;
@@ -213,10 +227,23 @@ static void fec_stop(struct net_device *dev);
213/* Transmitter timeout */ 227/* Transmitter timeout */
214#define TX_TIMEOUT (2 * HZ) 228#define TX_TIMEOUT (2 * HZ)
215 229
230static void *swap_buffer(void *bufaddr, int len)
231{
232 int i;
233 unsigned int *buf = bufaddr;
234
235 for (i = 0; i < (len + 3) / 4; i++, buf++)
236 *buf = cpu_to_be32(*buf);
237
238 return bufaddr;
239}
240
216static netdev_tx_t 241static netdev_tx_t
217fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev) 242fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
218{ 243{
219 struct fec_enet_private *fep = netdev_priv(dev); 244 struct fec_enet_private *fep = netdev_priv(dev);
245 const struct platform_device_id *id_entry =
246 platform_get_device_id(fep->pdev);
220 struct bufdesc *bdp; 247 struct bufdesc *bdp;
221 void *bufaddr; 248 void *bufaddr;
222 unsigned short status; 249 unsigned short status;
@@ -261,6 +288,14 @@ fec_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
261 bufaddr = fep->tx_bounce[index]; 288 bufaddr = fep->tx_bounce[index];
262 } 289 }
263 290
291 /*
292 * Some design made an incorrect assumption on endian mode of
293 * the system that it's running on. As the result, driver has to
294 * swap every frame going to and coming from the controller.
295 */
296 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
297 swap_buffer(bufaddr, skb->len);
298
264 /* Save skb pointer */ 299 /* Save skb pointer */
265 fep->tx_skbuff[fep->skb_cur] = skb; 300 fep->tx_skbuff[fep->skb_cur] = skb;
266 301
@@ -429,6 +464,8 @@ static void
429fec_enet_rx(struct net_device *dev) 464fec_enet_rx(struct net_device *dev)
430{ 465{
431 struct fec_enet_private *fep = netdev_priv(dev); 466 struct fec_enet_private *fep = netdev_priv(dev);
467 const struct platform_device_id *id_entry =
468 platform_get_device_id(fep->pdev);
432 struct bufdesc *bdp; 469 struct bufdesc *bdp;
433 unsigned short status; 470 unsigned short status;
434 struct sk_buff *skb; 471 struct sk_buff *skb;
@@ -492,6 +529,9 @@ fec_enet_rx(struct net_device *dev)
492 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen, 529 dma_unmap_single(NULL, bdp->cbd_bufaddr, bdp->cbd_datlen,
493 DMA_FROM_DEVICE); 530 DMA_FROM_DEVICE);
494 531
532 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
533 swap_buffer(data, pkt_len);
534
495 /* This does 16 byte alignment, exactly what we need. 535 /* This does 16 byte alignment, exactly what we need.
496 * The packet length includes FCS, but we don't want to 536 * The packet length includes FCS, but we don't want to
497 * include that when passing upstream as it messes up 537 * include that when passing upstream as it messes up
@@ -538,37 +578,50 @@ rx_processing_done:
538} 578}
539 579
540/* ------------------------------------------------------------------------- */ 580/* ------------------------------------------------------------------------- */
541#ifdef CONFIG_M5272
542static void __inline__ fec_get_mac(struct net_device *dev) 581static void __inline__ fec_get_mac(struct net_device *dev)
543{ 582{
544 struct fec_enet_private *fep = netdev_priv(dev); 583 struct fec_enet_private *fep = netdev_priv(dev);
584 struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
545 unsigned char *iap, tmpaddr[ETH_ALEN]; 585 unsigned char *iap, tmpaddr[ETH_ALEN];
546 586
547 if (FEC_FLASHMAC) { 587 /*
548 /* 588 * try to get mac address in following order:
549 * Get MAC address from FLASH. 589 *
550 * If it is all 1's or 0's, use the default. 590 * 1) module parameter via kernel command line in form
551 */ 591 * fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
552 iap = (unsigned char *)FEC_FLASHMAC; 592 */
553 if ((iap[0] == 0) && (iap[1] == 0) && (iap[2] == 0) && 593 iap = macaddr;
554 (iap[3] == 0) && (iap[4] == 0) && (iap[5] == 0)) 594
555 iap = fec_mac_default; 595 /*
556 if ((iap[0] == 0xff) && (iap[1] == 0xff) && (iap[2] == 0xff) && 596 * 2) from flash or fuse (via platform data)
557 (iap[3] == 0xff) && (iap[4] == 0xff) && (iap[5] == 0xff)) 597 */
558 iap = fec_mac_default; 598 if (!is_valid_ether_addr(iap)) {
559 } else { 599#ifdef CONFIG_M5272
560 *((unsigned long *) &tmpaddr[0]) = readl(fep->hwp + FEC_ADDR_LOW); 600 if (FEC_FLASHMAC)
561 *((unsigned short *) &tmpaddr[4]) = (readl(fep->hwp + FEC_ADDR_HIGH) >> 16); 601 iap = (unsigned char *)FEC_FLASHMAC;
602#else
603 if (pdata)
604 memcpy(iap, pdata->mac, ETH_ALEN);
605#endif
606 }
607
608 /*
609 * 3) FEC mac registers set by bootloader
610 */
611 if (!is_valid_ether_addr(iap)) {
612 *((unsigned long *) &tmpaddr[0]) =
613 be32_to_cpu(readl(fep->hwp + FEC_ADDR_LOW));
614 *((unsigned short *) &tmpaddr[4]) =
615 be16_to_cpu(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
562 iap = &tmpaddr[0]; 616 iap = &tmpaddr[0];
563 } 617 }
564 618
565 memcpy(dev->dev_addr, iap, ETH_ALEN); 619 memcpy(dev->dev_addr, iap, ETH_ALEN);
566 620
567 /* Adjust MAC if using default MAC address */ 621 /* Adjust MAC if using macaddr */
568 if (iap == fec_mac_default) 622 if (iap == macaddr)
569 dev->dev_addr[ETH_ALEN-1] = fec_mac_default[ETH_ALEN-1] + fep->index; 623 dev->dev_addr[ETH_ALEN-1] = macaddr[ETH_ALEN-1] + fep->pdev->id;
570} 624}
571#endif
572 625
573/* ------------------------------------------------------------------------- */ 626/* ------------------------------------------------------------------------- */
574 627
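
fec_get_mac() now works through a fixed priority list, taking the first valid address: the fec.macaddr module parameter, then platform data or ColdFire flash, then whatever the bootloader left in FEC_ADDR_LOW/HIGH. A compressed sketch of that fallback chain; valid_ether() is a simplified stand-in for is_valid_ether_addr():

#include <stdbool.h>
#include <string.h>

#define ETH_ALEN 6

static bool valid_ether(const unsigned char *a)
{
        static const unsigned char zero[ETH_ALEN];

        return memcmp(a, zero, ETH_ALEN) && !(a[0] & 1);  /* non-zero, unicast */
}

static void pick_mac(unsigned char *dst,
                     const unsigned char *module_param,
                     const unsigned char *platform_data,
                     const unsigned char *hw_regs)
{
        const unsigned char *iap = module_param;        /* 1) fec.macaddr=...      */

        if (!valid_ether(iap))
                iap = platform_data;                    /* 2) flash/fuse via pdata */
        if (!valid_ether(iap))
                iap = hw_regs;                          /* 3) bootloader registers */
        memcpy(dst, iap, ETH_ALEN);
}
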
@@ -651,8 +704,8 @@ static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
651 fep->mii_timeout = 0; 704 fep->mii_timeout = 0;
652 init_completion(&fep->mdio_done); 705 init_completion(&fep->mdio_done);
653 706
654 /* start a read op */ 707 /* start a write op */
655 writel(FEC_MMFR_ST | FEC_MMFR_OP_READ | 708 writel(FEC_MMFR_ST | FEC_MMFR_OP_WRITE |
656 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) | 709 FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
657 FEC_MMFR_TA | FEC_MMFR_DATA(value), 710 FEC_MMFR_TA | FEC_MMFR_DATA(value),
658 fep->hwp + FEC_MII_DATA); 711 fep->hwp + FEC_MII_DATA);
@@ -681,6 +734,7 @@ static int fec_enet_mii_probe(struct net_device *dev)
681 char mdio_bus_id[MII_BUS_ID_SIZE]; 734 char mdio_bus_id[MII_BUS_ID_SIZE];
682 char phy_name[MII_BUS_ID_SIZE + 3]; 735 char phy_name[MII_BUS_ID_SIZE + 3];
683 int phy_id; 736 int phy_id;
737 int dev_id = fep->pdev->id;
684 738
685 fep->phy_dev = NULL; 739 fep->phy_dev = NULL;
686 740
@@ -692,6 +746,8 @@ static int fec_enet_mii_probe(struct net_device *dev)
692 continue; 746 continue;
693 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0) 747 if (fep->mii_bus->phy_map[phy_id]->phy_id == 0)
694 continue; 748 continue;
749 if (dev_id--)
750 continue;
695 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE); 751 strncpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
696 break; 752 break;
697 } 753 }
@@ -729,10 +785,35 @@ static int fec_enet_mii_probe(struct net_device *dev)
729 785
730static int fec_enet_mii_init(struct platform_device *pdev) 786static int fec_enet_mii_init(struct platform_device *pdev)
731{ 787{
788 static struct mii_bus *fec0_mii_bus;
732 struct net_device *dev = platform_get_drvdata(pdev); 789 struct net_device *dev = platform_get_drvdata(pdev);
733 struct fec_enet_private *fep = netdev_priv(dev); 790 struct fec_enet_private *fep = netdev_priv(dev);
791 const struct platform_device_id *id_entry =
792 platform_get_device_id(fep->pdev);
734 int err = -ENXIO, i; 793 int err = -ENXIO, i;
735 794
795 /*
796 * The dual fec interfaces are not equivalent with enet-mac.
797 * Here are the differences:
798 *
799 * - fec0 supports MII & RMII modes while fec1 only supports RMII
800 * - fec0 acts as the 1588 time master while fec1 is slave
801 * - external phys can only be configured by fec0
802 *
803 * That is to say fec1 can not work independently. It only works
804 * when fec0 is working. The reason behind this design is that the
805 * second interface is added primarily for Switch mode.
806 *
807 * Because of the last point above, both phys are attached on fec0
808 * mdio interface in board design, and need to be configured by
809 * fec0 mii_bus.
810 */
811 if ((id_entry->driver_data & FEC_QUIRK_ENET_MAC) && pdev->id) {
812 /* fec1 uses fec0 mii_bus */
813 fep->mii_bus = fec0_mii_bus;
814 return 0;
815 }
816
736 fep->mii_timeout = 0; 817 fep->mii_timeout = 0;
737 818
738 /* 819 /*
@@ -769,6 +850,10 @@ static int fec_enet_mii_init(struct platform_device *pdev)
769 if (mdiobus_register(fep->mii_bus)) 850 if (mdiobus_register(fep->mii_bus))
770 goto err_out_free_mdio_irq; 851 goto err_out_free_mdio_irq;
771 852
853 /* save fec0 mii_bus */
854 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
855 fec0_mii_bus = fep->mii_bus;
856
772 return 0; 857 return 0;
773 858
774err_out_free_mdio_irq: 859err_out_free_mdio_irq:
@@ -1067,9 +1152,8 @@ static const struct net_device_ops fec_netdev_ops = {
1067 /* 1152 /*
1068 * XXX: We need to clean up on failure exits here. 1153 * XXX: We need to clean up on failure exits here.
1069 * 1154 *
1070 * index is only used in legacy code
1071 */ 1155 */
1072static int fec_enet_init(struct net_device *dev, int index) 1156static int fec_enet_init(struct net_device *dev)
1073{ 1157{
1074 struct fec_enet_private *fep = netdev_priv(dev); 1158 struct fec_enet_private *fep = netdev_priv(dev);
1075 struct bufdesc *cbd_base; 1159 struct bufdesc *cbd_base;
@@ -1086,26 +1170,11 @@ static int fec_enet_init(struct net_device *dev, int index)
1086 1170
1087 spin_lock_init(&fep->hw_lock); 1171 spin_lock_init(&fep->hw_lock);
1088 1172
1089 fep->index = index;
1090 fep->hwp = (void __iomem *)dev->base_addr; 1173 fep->hwp = (void __iomem *)dev->base_addr;
1091 fep->netdev = dev; 1174 fep->netdev = dev;
1092 1175
1093 /* Set the Ethernet address */ 1176 /* Get the Ethernet address */
1094#ifdef CONFIG_M5272
1095 fec_get_mac(dev); 1177 fec_get_mac(dev);
1096#else
1097 {
1098 unsigned long l;
1099 l = readl(fep->hwp + FEC_ADDR_LOW);
1100 dev->dev_addr[0] = (unsigned char)((l & 0xFF000000) >> 24);
1101 dev->dev_addr[1] = (unsigned char)((l & 0x00FF0000) >> 16);
1102 dev->dev_addr[2] = (unsigned char)((l & 0x0000FF00) >> 8);
1103 dev->dev_addr[3] = (unsigned char)((l & 0x000000FF) >> 0);
1104 l = readl(fep->hwp + FEC_ADDR_HIGH);
1105 dev->dev_addr[4] = (unsigned char)((l & 0xFF000000) >> 24);
1106 dev->dev_addr[5] = (unsigned char)((l & 0x00FF0000) >> 16);
1107 }
1108#endif
1109 1178
1110 /* Set receive and transmit descriptor base. */ 1179 /* Set receive and transmit descriptor base. */
1111 fep->rx_bd_base = cbd_base; 1180 fep->rx_bd_base = cbd_base;
@@ -1156,12 +1225,25 @@ static void
1156fec_restart(struct net_device *dev, int duplex) 1225fec_restart(struct net_device *dev, int duplex)
1157{ 1226{
1158 struct fec_enet_private *fep = netdev_priv(dev); 1227 struct fec_enet_private *fep = netdev_priv(dev);
1228 const struct platform_device_id *id_entry =
1229 platform_get_device_id(fep->pdev);
1159 int i; 1230 int i;
1231 u32 val, temp_mac[2];
1160 1232
1161 /* Whack a reset. We should wait for this. */ 1233 /* Whack a reset. We should wait for this. */
1162 writel(1, fep->hwp + FEC_ECNTRL); 1234 writel(1, fep->hwp + FEC_ECNTRL);
1163 udelay(10); 1235 udelay(10);
1164 1236
1237 /*
1238 * enet-mac reset will reset mac address registers too,
1239 * so need to reconfigure it.
1240 */
1241 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1242 memcpy(&temp_mac, dev->dev_addr, ETH_ALEN);
1243 writel(cpu_to_be32(temp_mac[0]), fep->hwp + FEC_ADDR_LOW);
1244 writel(cpu_to_be32(temp_mac[1]), fep->hwp + FEC_ADDR_HIGH);
1245 }
1246
1165 /* Clear any outstanding interrupt. */ 1247 /* Clear any outstanding interrupt. */
1166 writel(0xffc00000, fep->hwp + FEC_IEVENT); 1248 writel(0xffc00000, fep->hwp + FEC_IEVENT);
1167 1249
@@ -1208,20 +1290,45 @@ fec_restart(struct net_device *dev, int duplex)
1208 /* Set MII speed */ 1290 /* Set MII speed */
1209 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED); 1291 writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
1210 1292
1211#ifdef FEC_MIIGSK_ENR 1293 /*
1212 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) { 1294 * The phy interface and speed need to get configured
1213 /* disable the gasket and wait */ 1295 * differently on enet-mac.
1214 writel(0, fep->hwp + FEC_MIIGSK_ENR); 1296 */
1215 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4) 1297 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) {
1216 udelay(1); 1298 val = readl(fep->hwp + FEC_R_CNTRL);
1217 1299
1218 /* configure the gasket: RMII, 50 MHz, no loopback, no echo */ 1300 /* MII or RMII */
1219 writel(1, fep->hwp + FEC_MIIGSK_CFGR); 1301 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
1302 val |= (1 << 8);
1303 else
1304 val &= ~(1 << 8);
1220 1305
1221 /* re-enable the gasket */ 1306 /* 10M or 100M */
1222 writel(2, fep->hwp + FEC_MIIGSK_ENR); 1307 if (fep->phy_dev && fep->phy_dev->speed == SPEED_100)
1223 } 1308 val &= ~(1 << 9);
1309 else
1310 val |= (1 << 9);
1311
1312 writel(val, fep->hwp + FEC_R_CNTRL);
1313 } else {
1314#ifdef FEC_MIIGSK_ENR
1315 if (fep->phy_interface == PHY_INTERFACE_MODE_RMII) {
1316 /* disable the gasket and wait */
1317 writel(0, fep->hwp + FEC_MIIGSK_ENR);
1318 while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
1319 udelay(1);
1320
1321 /*
1322 * configure the gasket:
1323 * RMII, 50 MHz, no loopback, no echo
1324 */
1325 writel(1, fep->hwp + FEC_MIIGSK_CFGR);
1326
1327 /* re-enable the gasket */
1328 writel(2, fep->hwp + FEC_MIIGSK_ENR);
1329 }
1224#endif 1330#endif
1331 }
1225 1332
1226 /* And last, enable the transmit and receive processing */ 1333 /* And last, enable the transmit and receive processing */
1227 writel(2, fep->hwp + FEC_ECNTRL); 1334 writel(2, fep->hwp + FEC_ECNTRL);
@@ -1316,7 +1423,7 @@ fec_probe(struct platform_device *pdev)
1316 } 1423 }
1317 clk_enable(fep->clk); 1424 clk_enable(fep->clk);
1318 1425
1319 ret = fec_enet_init(ndev, 0); 1426 ret = fec_enet_init(ndev);
1320 if (ret) 1427 if (ret)
1321 goto failed_init; 1428 goto failed_init;
1322 1429
@@ -1380,8 +1487,10 @@ fec_suspend(struct device *dev)
1380 1487
1381 if (ndev) { 1488 if (ndev) {
1382 fep = netdev_priv(ndev); 1489 fep = netdev_priv(ndev);
1383 if (netif_running(ndev)) 1490 if (netif_running(ndev)) {
1384 fec_enet_close(ndev); 1491 fec_stop(ndev);
1492 netif_device_detach(ndev);
1493 }
1385 clk_disable(fep->clk); 1494 clk_disable(fep->clk);
1386 } 1495 }
1387 return 0; 1496 return 0;
@@ -1396,8 +1505,10 @@ fec_resume(struct device *dev)
1396 if (ndev) { 1505 if (ndev) {
1397 fep = netdev_priv(ndev); 1506 fep = netdev_priv(ndev);
1398 clk_enable(fep->clk); 1507 clk_enable(fep->clk);
1399 if (netif_running(ndev)) 1508 if (netif_running(ndev)) {
1400 fec_enet_open(ndev); 1509 fec_restart(ndev, fep->full_duplex);
1510 netif_device_attach(ndev);
1511 }
1401 } 1512 }
1402 return 0; 1513 return 0;
1403} 1514}
@@ -1414,12 +1525,13 @@ static const struct dev_pm_ops fec_pm_ops = {
1414 1525
1415static struct platform_driver fec_driver = { 1526static struct platform_driver fec_driver = {
1416 .driver = { 1527 .driver = {
1417 .name = "fec", 1528 .name = DRIVER_NAME,
1418 .owner = THIS_MODULE, 1529 .owner = THIS_MODULE,
1419#ifdef CONFIG_PM 1530#ifdef CONFIG_PM
1420 .pm = &fec_pm_ops, 1531 .pm = &fec_pm_ops,
1421#endif 1532#endif
1422 }, 1533 },
1534 .id_table = fec_devtype,
1423 .probe = fec_probe, 1535 .probe = fec_probe,
1424 .remove = __devexit_p(fec_drv_remove), 1536 .remove = __devexit_p(fec_drv_remove),
1425}; 1537};
diff --git a/drivers/net/fec.h b/drivers/net/fec.h
index 2c48b25668d5..ace318df4c8d 100644
--- a/drivers/net/fec.h
+++ b/drivers/net/fec.h
@@ -14,7 +14,8 @@
14/****************************************************************************/ 14/****************************************************************************/
15 15
16#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \ 16#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
17 defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARCH_MXC) 17 defined(CONFIG_M520x) || defined(CONFIG_M532x) || \
18 defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
18/* 19/*
19 * Just figures, Motorola would have to change the offsets for 20 * Just figures, Motorola would have to change the offsets for
20 * registers in the same peripheral device on different models 21 * registers in the same peripheral device on different models
@@ -78,7 +79,7 @@
78/* 79/*
79 * Define the buffer descriptor structure. 80 * Define the buffer descriptor structure.
80 */ 81 */
81#ifdef CONFIG_ARCH_MXC 82#if defined(CONFIG_ARCH_MXC) || defined(CONFIG_SOC_IMX28)
82struct bufdesc { 83struct bufdesc {
83 unsigned short cbd_datlen; /* Data length */ 84 unsigned short cbd_datlen; /* Data length */
84 unsigned short cbd_sc; /* Control and status info */ 85 unsigned short cbd_sc; /* Control and status info */
diff --git a/drivers/net/fec_mpc52xx.c b/drivers/net/fec_mpc52xx.c
index e9f5d030bc26..50c1213f61fe 100644
--- a/drivers/net/fec_mpc52xx.c
+++ b/drivers/net/fec_mpc52xx.c
@@ -366,9 +366,8 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
366{ 366{
367 struct net_device *dev = dev_id; 367 struct net_device *dev = dev_id;
368 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 368 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
369 unsigned long flags;
370 369
371 spin_lock_irqsave(&priv->lock, flags); 370 spin_lock(&priv->lock);
372 while (bcom_buffer_done(priv->tx_dmatsk)) { 371 while (bcom_buffer_done(priv->tx_dmatsk)) {
373 struct sk_buff *skb; 372 struct sk_buff *skb;
374 struct bcom_fec_bd *bd; 373 struct bcom_fec_bd *bd;
@@ -379,7 +378,7 @@ static irqreturn_t mpc52xx_fec_tx_interrupt(int irq, void *dev_id)
379 378
380 dev_kfree_skb_irq(skb); 379 dev_kfree_skb_irq(skb);
381 } 380 }
382 spin_unlock_irqrestore(&priv->lock, flags); 381 spin_unlock(&priv->lock);
383 382
384 netif_wake_queue(dev); 383 netif_wake_queue(dev);
385 384
@@ -395,9 +394,8 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
395 struct bcom_fec_bd *bd; 394 struct bcom_fec_bd *bd;
396 u32 status, physaddr; 395 u32 status, physaddr;
397 int length; 396 int length;
398 unsigned long flags;
399 397
400 spin_lock_irqsave(&priv->lock, flags); 398 spin_lock(&priv->lock);
401 399
402 while (bcom_buffer_done(priv->rx_dmatsk)) { 400 while (bcom_buffer_done(priv->rx_dmatsk)) {
403 401
@@ -429,7 +427,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
429 427
430 /* Process the received skb - Drop the spin lock while 428 /* Process the received skb - Drop the spin lock while
431 * calling into the network stack */ 429 * calling into the network stack */
432 spin_unlock_irqrestore(&priv->lock, flags); 430 spin_unlock(&priv->lock);
433 431
434 dma_unmap_single(dev->dev.parent, physaddr, rskb->len, 432 dma_unmap_single(dev->dev.parent, physaddr, rskb->len,
435 DMA_FROM_DEVICE); 433 DMA_FROM_DEVICE);
@@ -438,10 +436,10 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
438 rskb->protocol = eth_type_trans(rskb, dev); 436 rskb->protocol = eth_type_trans(rskb, dev);
439 netif_rx(rskb); 437 netif_rx(rskb);
440 438
441 spin_lock_irqsave(&priv->lock, flags); 439 spin_lock(&priv->lock);
442 } 440 }
443 441
444 spin_unlock_irqrestore(&priv->lock, flags); 442 spin_unlock(&priv->lock);
445 443
446 return IRQ_HANDLED; 444 return IRQ_HANDLED;
447} 445}
@@ -452,7 +450,6 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
452 struct mpc52xx_fec_priv *priv = netdev_priv(dev); 450 struct mpc52xx_fec_priv *priv = netdev_priv(dev);
453 struct mpc52xx_fec __iomem *fec = priv->fec; 451 struct mpc52xx_fec __iomem *fec = priv->fec;
454 u32 ievent; 452 u32 ievent;
455 unsigned long flags;
456 453
457 ievent = in_be32(&fec->ievent); 454 ievent = in_be32(&fec->ievent);
458 455
@@ -470,9 +467,9 @@ static irqreturn_t mpc52xx_fec_interrupt(int irq, void *dev_id)
470 if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR)) 467 if (net_ratelimit() && (ievent & FEC_IEVENT_XFIFO_ERROR))
471 dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n"); 468 dev_warn(&dev->dev, "FEC_IEVENT_XFIFO_ERROR\n");
472 469
473 spin_lock_irqsave(&priv->lock, flags); 470 spin_lock(&priv->lock);
474 mpc52xx_fec_reset(dev); 471 mpc52xx_fec_reset(dev);
475 spin_unlock_irqrestore(&priv->lock, flags); 472 spin_unlock(&priv->lock);
476 473
477 return IRQ_HANDLED; 474 return IRQ_HANDLED;
478 } 475 }
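
The three fec_mpc52xx hunks drop spin_lock_irqsave() from the interrupt handlers: they run in hardirq context, where local interrupts are already disabled, so saving and restoring the flags there is pure overhead. Process-context paths that take the same lock still need the irqsave variant. A minimal sketch of the resulting handler shape, with a hypothetical private struct rather than the driver's own:

#include <linux/interrupt.h>
#include <linux/spinlock.h>

struct example_priv {           /* hypothetical private data */
        spinlock_t lock;
};

static irqreturn_t example_irq(int irq, void *dev_id)
{
        struct example_priv *priv = dev_id;

        spin_lock(&priv->lock);         /* hardirq context: IRQs already off */
        /* ... update state shared with process-context paths, which must
         * themselves disable interrupts when taking this lock ... */
        spin_unlock(&priv->lock);

        return IRQ_HANDLED;
}
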
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index 0fa1776563a3..af09296ef0dd 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -39,6 +39,9 @@
39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few 39 * DEV_NEED_TIMERIRQ will not harm you on sane hardware, only generating a few
40 * superfluous timer interrupts from the nic. 40 * superfluous timer interrupts from the nic.
41 */ 41 */
42
43#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
44
42#define FORCEDETH_VERSION "0.64" 45#define FORCEDETH_VERSION "0.64"
43#define DRV_NAME "forcedeth" 46#define DRV_NAME "forcedeth"
44 47
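
Defining pr_fmt() near the top of the file, before any header that ends up pulling in printk.h, makes every subsequent pr_*() call carry the module name automatically. A minimal sketch of the convention, assuming the usual kernel logging macros; KBUILD_MODNAME is supplied by kbuild per object:

        /* Must precede any include that drags in <linux/printk.h>. */
        #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

        #include <linux/printk.h>

        static void example_probe_msg(void)
        {
                pr_info("probing\n");   /* printed as "forcedeth: probing" */
        }
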
@@ -60,18 +63,12 @@
60#include <linux/if_vlan.h> 63#include <linux/if_vlan.h>
61#include <linux/dma-mapping.h> 64#include <linux/dma-mapping.h>
62#include <linux/slab.h> 65#include <linux/slab.h>
66#include <linux/uaccess.h>
67#include <linux/io.h>
63 68
64#include <asm/irq.h> 69#include <asm/irq.h>
65#include <asm/io.h>
66#include <asm/uaccess.h>
67#include <asm/system.h> 70#include <asm/system.h>
68 71
69#if 0
70#define dprintk printk
71#else
72#define dprintk(x...) do { } while (0)
73#endif
74
75#define TX_WORK_PER_LOOP 64 72#define TX_WORK_PER_LOOP 64
76#define RX_WORK_PER_LOOP 64 73#define RX_WORK_PER_LOOP 64
77 74
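
Besides switching asm/io.h and asm/uaccess.h to their linux/ counterparts, this hunk deletes the file-local dprintk() wrapper; the later hunks remove its call sites outright rather than converting them. As a hedged aside that is not part of the patch: if any of those messages were still wanted, netdev_dbg(), which is compiled out unless DEBUG or dynamic debug is enabled, would be the usual form:

        #include <linux/netdevice.h>

        /* Sketch only; reg/val are placeholders for whatever is traced. */
        static void example_mii_trace(struct net_device *dev, int reg, int val)
        {
                netdev_dbg(dev, "mii_rw wrote 0x%x to reg %d\n", val, reg);
        }
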
@@ -186,9 +183,9 @@ enum {
186 NvRegSlotTime = 0x9c, 183 NvRegSlotTime = 0x9c,
187#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000 184#define NVREG_SLOTTIME_LEGBF_ENABLED 0x80000000
188#define NVREG_SLOTTIME_10_100_FULL 0x00007f00 185#define NVREG_SLOTTIME_10_100_FULL 0x00007f00
189#define NVREG_SLOTTIME_1000_FULL 0x0003ff00 186#define NVREG_SLOTTIME_1000_FULL 0x0003ff00
190#define NVREG_SLOTTIME_HALF 0x0000ff00 187#define NVREG_SLOTTIME_HALF 0x0000ff00
191#define NVREG_SLOTTIME_DEFAULT 0x00007f00 188#define NVREG_SLOTTIME_DEFAULT 0x00007f00
192#define NVREG_SLOTTIME_MASK 0x000000ff 189#define NVREG_SLOTTIME_MASK 0x000000ff
193 190
194 NvRegTxDeferral = 0xA0, 191 NvRegTxDeferral = 0xA0,
@@ -297,7 +294,7 @@ enum {
297#define NVREG_WAKEUPFLAGS_ENABLE 0x1111 294#define NVREG_WAKEUPFLAGS_ENABLE 0x1111
298 295
299 NvRegMgmtUnitGetVersion = 0x204, 296 NvRegMgmtUnitGetVersion = 0x204,
300#define NVREG_MGMTUNITGETVERSION 0x01 297#define NVREG_MGMTUNITGETVERSION 0x01
301 NvRegMgmtUnitVersion = 0x208, 298 NvRegMgmtUnitVersion = 0x208,
302#define NVREG_MGMTUNITVERSION 0x08 299#define NVREG_MGMTUNITVERSION 0x08
303 NvRegPowerCap = 0x268, 300 NvRegPowerCap = 0x268,
@@ -368,8 +365,8 @@ struct ring_desc_ex {
368}; 365};
369 366
370union ring_type { 367union ring_type {
371 struct ring_desc* orig; 368 struct ring_desc *orig;
372 struct ring_desc_ex* ex; 369 struct ring_desc_ex *ex;
373}; 370};
374 371
375#define FLAG_MASK_V1 0xffff0000 372#define FLAG_MASK_V1 0xffff0000
@@ -444,10 +441,10 @@ union ring_type {
444#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF) 441#define NV_RX3_VLAN_TAG_MASK (0x0000FFFF)
445 442
446/* Miscelaneous hardware related defines: */ 443/* Miscelaneous hardware related defines: */
447#define NV_PCI_REGSZ_VER1 0x270 444#define NV_PCI_REGSZ_VER1 0x270
448#define NV_PCI_REGSZ_VER2 0x2d4 445#define NV_PCI_REGSZ_VER2 0x2d4
449#define NV_PCI_REGSZ_VER3 0x604 446#define NV_PCI_REGSZ_VER3 0x604
450#define NV_PCI_REGSZ_MAX 0x604 447#define NV_PCI_REGSZ_MAX 0x604
451 448
452/* various timeout delays: all in usec */ 449/* various timeout delays: all in usec */
453#define NV_TXRX_RESET_DELAY 4 450#define NV_TXRX_RESET_DELAY 4
@@ -717,7 +714,7 @@ static const struct register_test nv_registers_test[] = {
717 { NvRegMulticastAddrA, 0xffffffff }, 714 { NvRegMulticastAddrA, 0xffffffff },
718 { NvRegTxWatermark, 0x0ff }, 715 { NvRegTxWatermark, 0x0ff },
719 { NvRegWakeUpFlags, 0x07777 }, 716 { NvRegWakeUpFlags, 0x07777 },
720 { 0,0 } 717 { 0, 0 }
721}; 718};
722 719
723struct nv_skb_map { 720struct nv_skb_map {
@@ -911,7 +908,7 @@ static int phy_cross = NV_CROSSOVER_DETECTION_DISABLED;
911 * Power down phy when interface is down (persists through reboot; 908 * Power down phy when interface is down (persists through reboot;
912 * older Linux and other OSes may not power it up again) 909 * older Linux and other OSes may not power it up again)
913 */ 910 */
914static int phy_power_down = 0; 911static int phy_power_down;
915 912
916static inline struct fe_priv *get_nvpriv(struct net_device *dev) 913static inline struct fe_priv *get_nvpriv(struct net_device *dev)
917{ 914{
@@ -948,7 +945,7 @@ static bool nv_optimized(struct fe_priv *np)
948} 945}
949 946
950static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target, 947static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
951 int delay, int delaymax, const char *msg) 948 int delay, int delaymax)
952{ 949{
953 u8 __iomem *base = get_hwbase(dev); 950 u8 __iomem *base = get_hwbase(dev);
954 951
@@ -956,11 +953,8 @@ static int reg_delay(struct net_device *dev, int offset, u32 mask, u32 target,
956 do { 953 do {
957 udelay(delay); 954 udelay(delay);
958 delaymax -= delay; 955 delaymax -= delay;
959 if (delaymax < 0) { 956 if (delaymax < 0)
960 if (msg)
961 printk("%s", msg);
962 return 1; 957 return 1;
963 }
964 } while ((readl(base + offset) & mask) != target); 958 } while ((readl(base + offset) & mask) != target);
965 return 0; 959 return 0;
966} 960}
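
reg_delay() no longer takes a message string to print on timeout; it simply reports success or failure and leaves logging to each caller (see the nv_stop_rx()/nv_stop_tx() hunks further down). A self-contained sketch of the slimmed-down polling contract, with hypothetical names standing in for the driver's:

        #include <linux/delay.h>
        #include <linux/io.h>
        #include <linux/types.h>

        /* Poll until (reg & mask) == target or the time budget runs out. */
        static int example_reg_poll(void __iomem *reg, u32 mask, u32 target,
                                    int delay_us, int budget_us)
        {
                do {
                        udelay(delay_us);
                        budget_us -= delay_us;
                        if (budget_us < 0)
                                return 1;       /* timed out */
                } while ((readl(reg) & mask) != target);

                return 0;
        }
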
@@ -984,12 +978,10 @@ static void setup_hw_rings(struct net_device *dev, int rxtx_flags)
984 u8 __iomem *base = get_hwbase(dev); 978 u8 __iomem *base = get_hwbase(dev);
985 979
986 if (!nv_optimized(np)) { 980 if (!nv_optimized(np)) {
987 if (rxtx_flags & NV_SETUP_RX_RING) { 981 if (rxtx_flags & NV_SETUP_RX_RING)
988 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 982 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
989 } 983 if (rxtx_flags & NV_SETUP_TX_RING)
990 if (rxtx_flags & NV_SETUP_TX_RING) {
991 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr); 984 writel(dma_low(np->ring_addr + np->rx_ring_size*sizeof(struct ring_desc)), base + NvRegTxRingPhysAddr);
992 }
993 } else { 985 } else {
994 if (rxtx_flags & NV_SETUP_RX_RING) { 986 if (rxtx_flags & NV_SETUP_RX_RING) {
995 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr); 987 writel(dma_low(np->ring_addr), base + NvRegRxRingPhysAddr);
@@ -1015,10 +1007,8 @@ static void free_rings(struct net_device *dev)
1015 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size), 1007 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (np->rx_ring_size + np->tx_ring_size),
1016 np->rx_ring.ex, np->ring_addr); 1008 np->rx_ring.ex, np->ring_addr);
1017 } 1009 }
1018 if (np->rx_skb) 1010 kfree(np->rx_skb);
1019 kfree(np->rx_skb); 1011 kfree(np->tx_skb);
1020 if (np->tx_skb)
1021 kfree(np->tx_skb);
1022} 1012}
1023 1013
1024static int using_multi_irqs(struct net_device *dev) 1014static int using_multi_irqs(struct net_device *dev)
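
free_rings() loses its NULL checks because kfree() accepts a NULL pointer and simply returns. A trivial sketch with an illustrative structure, not the driver's:

        #include <linux/slab.h>

        struct example_rings {
                void *rx_skb;
                void *tx_skb;
        };

        static void example_free(struct example_rings *r)
        {
                /* kfree(NULL) is a no-op, so the former guards add nothing. */
                kfree(r->rx_skb);
                kfree(r->tx_skb);
        }
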
@@ -1145,23 +1135,15 @@ static int mii_rw(struct net_device *dev, int addr, int miireg, int value)
1145 writel(reg, base + NvRegMIIControl); 1135 writel(reg, base + NvRegMIIControl);
1146 1136
1147 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0, 1137 if (reg_delay(dev, NvRegMIIControl, NVREG_MIICTL_INUSE, 0,
1148 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX, NULL)) { 1138 NV_MIIPHY_DELAY, NV_MIIPHY_DELAYMAX)) {
1149 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d timed out.\n",
1150 dev->name, miireg, addr);
1151 retval = -1; 1139 retval = -1;
1152 } else if (value != MII_READ) { 1140 } else if (value != MII_READ) {
1153 /* it was a write operation - fewer failures are detectable */ 1141 /* it was a write operation - fewer failures are detectable */
1154 dprintk(KERN_DEBUG "%s: mii_rw wrote 0x%x to reg %d at PHY %d\n",
1155 dev->name, value, miireg, addr);
1156 retval = 0; 1142 retval = 0;
1157 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) { 1143 } else if (readl(base + NvRegMIIStatus) & NVREG_MIISTAT_ERROR) {
1158 dprintk(KERN_DEBUG "%s: mii_rw of reg %d at PHY %d failed.\n",
1159 dev->name, miireg, addr);
1160 retval = -1; 1144 retval = -1;
1161 } else { 1145 } else {
1162 retval = readl(base + NvRegMIIData); 1146 retval = readl(base + NvRegMIIData);
1163 dprintk(KERN_DEBUG "%s: mii_rw read from reg %d at PHY %d: 0x%x.\n",
1164 dev->name, miireg, addr, retval);
1165 } 1147 }
1166 1148
1167 return retval; 1149 return retval;
@@ -1174,16 +1156,15 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1174 unsigned int tries = 0; 1156 unsigned int tries = 0;
1175 1157
1176 miicontrol = BMCR_RESET | bmcr_setup; 1158 miicontrol = BMCR_RESET | bmcr_setup;
1177 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol)) { 1159 if (mii_rw(dev, np->phyaddr, MII_BMCR, miicontrol))
1178 return -1; 1160 return -1;
1179 }
1180 1161
1181 /* wait for 500ms */ 1162 /* wait for 500ms */
1182 msleep(500); 1163 msleep(500);
1183 1164
1184 /* must wait till reset is deasserted */ 1165 /* must wait till reset is deasserted */
1185 while (miicontrol & BMCR_RESET) { 1166 while (miicontrol & BMCR_RESET) {
1186 msleep(10); 1167 usleep_range(10000, 20000);
1187 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1168 miicontrol = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1188 /* FIXME: 100 tries seem excessive */ 1169 /* FIXME: 100 tries seem excessive */
1189 if (tries++ > 100) 1170 if (tries++ > 100)
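
The phy_reset() hunk above swaps msleep(10) for usleep_range(10000, 20000) in the loop that waits for BMCR_RESET to clear. Per Documentation/timers/timers-howto.txt, msleep() of only a few milliseconds can sleep far longer than requested on low-HZ kernels, whereas usleep_range() hands the timer core an explicit window to coalesce wakeups into. A one-line sketch of the idiom:

        #include <linux/delay.h>

        static void example_phy_settle(void)
        {
                usleep_range(10000, 20000);     /* 10-20 ms, as in the new loop */
        }
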
@@ -1192,106 +1173,239 @@ static int phy_reset(struct net_device *dev, u32 bmcr_setup)
1192 return 0; 1173 return 0;
1193} 1174}
1194 1175
1176static int init_realtek_8211b(struct net_device *dev, struct fe_priv *np)
1177{
1178 static const struct {
1179 int reg;
1180 int init;
1181 } ri[] = {
1182 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1183 { PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2 },
1184 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3 },
1185 { PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4 },
1186 { PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5 },
1187 { PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6 },
1188 { PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1 },
1189 };
1190 int i;
1191
1192 for (i = 0; i < ARRAY_SIZE(ri); i++) {
1193 if (mii_rw(dev, np->phyaddr, ri[i].reg, ri[i].init))
1194 return PHY_ERROR;
1195 }
1196
1197 return 0;
1198}
1199
1200static int init_realtek_8211c(struct net_device *dev, struct fe_priv *np)
1201{
1202 u32 reg;
1203 u8 __iomem *base = get_hwbase(dev);
1204 u32 powerstate = readl(base + NvRegPowerState2);
1205
1206 /* need to perform hw phy reset */
1207 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1208 writel(powerstate, base + NvRegPowerState2);
1209 msleep(25);
1210
1211 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1212 writel(powerstate, base + NvRegPowerState2);
1213 msleep(25);
1214
1215 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1216 reg |= PHY_REALTEK_INIT9;
1217 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg))
1218 return PHY_ERROR;
1219 if (mii_rw(dev, np->phyaddr,
1220 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10))
1221 return PHY_ERROR;
1222 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1223 if (!(reg & PHY_REALTEK_INIT11)) {
1224 reg |= PHY_REALTEK_INIT11;
1225 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg))
1226 return PHY_ERROR;
1227 }
1228 if (mii_rw(dev, np->phyaddr,
1229 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1230 return PHY_ERROR;
1231
1232 return 0;
1233}
1234
1235static int init_realtek_8201(struct net_device *dev, struct fe_priv *np)
1236{
1237 u32 phy_reserved;
1238
1239 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1240 phy_reserved = mii_rw(dev, np->phyaddr,
1241 PHY_REALTEK_INIT_REG6, MII_READ);
1242 phy_reserved |= PHY_REALTEK_INIT7;
1243 if (mii_rw(dev, np->phyaddr,
1244 PHY_REALTEK_INIT_REG6, phy_reserved))
1245 return PHY_ERROR;
1246 }
1247
1248 return 0;
1249}
1250
1251static int init_realtek_8201_cross(struct net_device *dev, struct fe_priv *np)
1252{
1253 u32 phy_reserved;
1254
1255 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1256 if (mii_rw(dev, np->phyaddr,
1257 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3))
1258 return PHY_ERROR;
1259 phy_reserved = mii_rw(dev, np->phyaddr,
1260 PHY_REALTEK_INIT_REG2, MII_READ);
1261 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1262 phy_reserved |= PHY_REALTEK_INIT3;
1263 if (mii_rw(dev, np->phyaddr,
1264 PHY_REALTEK_INIT_REG2, phy_reserved))
1265 return PHY_ERROR;
1266 if (mii_rw(dev, np->phyaddr,
1267 PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1))
1268 return PHY_ERROR;
1269 }
1270
1271 return 0;
1272}
1273
1274static int init_cicada(struct net_device *dev, struct fe_priv *np,
1275 u32 phyinterface)
1276{
1277 u32 phy_reserved;
1278
1279 if (phyinterface & PHY_RGMII) {
1280 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ);
1281 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2);
1282 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4);
1283 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved))
1284 return PHY_ERROR;
1285 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1286 phy_reserved |= PHY_CICADA_INIT5;
1287 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved))
1288 return PHY_ERROR;
1289 }
1290 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1291 phy_reserved |= PHY_CICADA_INIT6;
1292 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved))
1293 return PHY_ERROR;
1294
1295 return 0;
1296}
1297
1298static int init_vitesse(struct net_device *dev, struct fe_priv *np)
1299{
1300 u32 phy_reserved;
1301
1302 if (mii_rw(dev, np->phyaddr,
1303 PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1))
1304 return PHY_ERROR;
1305 if (mii_rw(dev, np->phyaddr,
1306 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2))
1307 return PHY_ERROR;
1308 phy_reserved = mii_rw(dev, np->phyaddr,
1309 PHY_VITESSE_INIT_REG4, MII_READ);
1310 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1311 return PHY_ERROR;
1312 phy_reserved = mii_rw(dev, np->phyaddr,
1313 PHY_VITESSE_INIT_REG3, MII_READ);
1314 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1315 phy_reserved |= PHY_VITESSE_INIT3;
1316 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1317 return PHY_ERROR;
1318 if (mii_rw(dev, np->phyaddr,
1319 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4))
1320 return PHY_ERROR;
1321 if (mii_rw(dev, np->phyaddr,
1322 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5))
1323 return PHY_ERROR;
1324 phy_reserved = mii_rw(dev, np->phyaddr,
1325 PHY_VITESSE_INIT_REG4, MII_READ);
1326 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1327 phy_reserved |= PHY_VITESSE_INIT3;
1328 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1329 return PHY_ERROR;
1330 phy_reserved = mii_rw(dev, np->phyaddr,
1331 PHY_VITESSE_INIT_REG3, MII_READ);
1332 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1333 return PHY_ERROR;
1334 if (mii_rw(dev, np->phyaddr,
1335 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6))
1336 return PHY_ERROR;
1337 if (mii_rw(dev, np->phyaddr,
1338 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7))
1339 return PHY_ERROR;
1340 phy_reserved = mii_rw(dev, np->phyaddr,
1341 PHY_VITESSE_INIT_REG4, MII_READ);
1342 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved))
1343 return PHY_ERROR;
1344 phy_reserved = mii_rw(dev, np->phyaddr,
1345 PHY_VITESSE_INIT_REG3, MII_READ);
1346 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1347 phy_reserved |= PHY_VITESSE_INIT8;
1348 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved))
1349 return PHY_ERROR;
1350 if (mii_rw(dev, np->phyaddr,
1351 PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9))
1352 return PHY_ERROR;
1353 if (mii_rw(dev, np->phyaddr,
1354 PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10))
1355 return PHY_ERROR;
1356
1357 return 0;
1358}
1359
1195static int phy_init(struct net_device *dev) 1360static int phy_init(struct net_device *dev)
1196{ 1361{
1197 struct fe_priv *np = get_nvpriv(dev); 1362 struct fe_priv *np = get_nvpriv(dev);
1198 u8 __iomem *base = get_hwbase(dev); 1363 u8 __iomem *base = get_hwbase(dev);
1199 u32 phyinterface, phy_reserved, mii_status, mii_control, mii_control_1000,reg; 1364 u32 phyinterface;
1365 u32 mii_status, mii_control, mii_control_1000, reg;
1200 1366
1201 /* phy errata for E3016 phy */ 1367 /* phy errata for E3016 phy */
1202 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 1368 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
1203 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1369 reg = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ);
1204 reg &= ~PHY_MARVELL_E3016_INITMASK; 1370 reg &= ~PHY_MARVELL_E3016_INITMASK;
1205 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) { 1371 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, reg)) {
1206 printk(KERN_INFO "%s: phy write to errata reg failed.\n", pci_name(np->pci_dev)); 1372 netdev_info(dev, "%s: phy write to errata reg failed\n",
1373 pci_name(np->pci_dev));
1207 return PHY_ERROR; 1374 return PHY_ERROR;
1208 } 1375 }
1209 } 1376 }
1210 if (np->phy_oui == PHY_OUI_REALTEK) { 1377 if (np->phy_oui == PHY_OUI_REALTEK) {
1211 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1378 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1212 np->phy_rev == PHY_REV_REALTEK_8211B) { 1379 np->phy_rev == PHY_REV_REALTEK_8211B) {
1213 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1380 if (init_realtek_8211b(dev, np)) {
1214 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1381 netdev_info(dev, "%s: phy init failed\n",
1215 return PHY_ERROR; 1382 pci_name(np->pci_dev));
1216 }
1217 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1218 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1219 return PHY_ERROR;
1220 }
1221 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1222 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1223 return PHY_ERROR;
1224 }
1225 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1226 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1227 return PHY_ERROR; 1383 return PHY_ERROR;
1228 } 1384 }
1229 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1385 } else if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1230 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1386 np->phy_rev == PHY_REV_REALTEK_8211C) {
1387 if (init_realtek_8211c(dev, np)) {
1388 netdev_info(dev, "%s: phy init failed\n",
1389 pci_name(np->pci_dev));
1231 return PHY_ERROR; 1390 return PHY_ERROR;
1232 } 1391 }
1233 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) { 1392 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1234 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1393 if (init_realtek_8201(dev, np)) {
1235 return PHY_ERROR; 1394 netdev_info(dev, "%s: phy init failed\n",
1236 } 1395 pci_name(np->pci_dev));
1237 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1238 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1239 return PHY_ERROR;
1240 }
1241 }
1242 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1243 np->phy_rev == PHY_REV_REALTEK_8211C) {
1244 u32 powerstate = readl(base + NvRegPowerState2);
1245
1246 /* need to perform hw phy reset */
1247 powerstate |= NVREG_POWERSTATE2_PHY_RESET;
1248 writel(powerstate, base + NvRegPowerState2);
1249 msleep(25);
1250
1251 powerstate &= ~NVREG_POWERSTATE2_PHY_RESET;
1252 writel(powerstate, base + NvRegPowerState2);
1253 msleep(25);
1254
1255 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1256 reg |= PHY_REALTEK_INIT9;
1257 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, reg)) {
1258 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1259 return PHY_ERROR;
1260 }
1261 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT10)) {
1262 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1263 return PHY_ERROR;
1264 }
1265 reg = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, MII_READ);
1266 if (!(reg & PHY_REALTEK_INIT11)) {
1267 reg |= PHY_REALTEK_INIT11;
1268 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG7, reg)) {
1269 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1270 return PHY_ERROR;
1271 }
1272 }
1273 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1274 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1275 return PHY_ERROR; 1396 return PHY_ERROR;
1276 } 1397 }
1277 } 1398 }
1278 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1279 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1280 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1281 phy_reserved |= PHY_REALTEK_INIT7;
1282 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1283 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1284 return PHY_ERROR;
1285 }
1286 }
1287 }
1288 } 1399 }
1289 1400
1290 /* set advertise register */ 1401 /* set advertise register */
1291 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 1402 reg = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
1292 reg |= (ADVERTISE_10HALF|ADVERTISE_10FULL|ADVERTISE_100HALF|ADVERTISE_100FULL|ADVERTISE_PAUSE_ASYM|ADVERTISE_PAUSE_CAP); 1403 reg |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
1404 ADVERTISE_100HALF | ADVERTISE_100FULL |
1405 ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP);
1293 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) { 1406 if (mii_rw(dev, np->phyaddr, MII_ADVERTISE, reg)) {
1294 printk(KERN_INFO "%s: phy write to advertise failed.\n", pci_name(np->pci_dev)); 1407 netdev_info(dev, "%s: phy write to advertise failed\n",
1408 pci_name(np->pci_dev));
1295 return PHY_ERROR; 1409 return PHY_ERROR;
1296 } 1410 }
1297 1411
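
This hunk pulls the vendor- and revision-specific PHY bring-up sequences out of phy_init() into per-chip helpers, and init_realtek_8211b() goes one step further: seven near-identical write-and-check blocks collapse into a static const table walked in a loop, so every failure path becomes a single return. A sketch of that table-driven pattern, with a hypothetical write callback standing in for the driver's mii_rw():

        struct reg_init {
                int reg;
                int val;
        };

        /* tbl/n and write_reg() are illustrative placeholders. */
        static int apply_phy_init(const struct reg_init *tbl, int n,
                                  int (*write_reg)(int reg, int val))
        {
                int i;

                for (i = 0; i < n; i++)
                        if (write_reg(tbl[i].reg, tbl[i].val))
                                return -1;      /* first failure wins */

                return 0;
        }
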
@@ -1302,7 +1416,8 @@ static int phy_init(struct net_device *dev)
1302 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 1416 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
1303 if (mii_status & PHY_GIGABIT) { 1417 if (mii_status & PHY_GIGABIT) {
1304 np->gigabit = PHY_GIGABIT; 1418 np->gigabit = PHY_GIGABIT;
1305 mii_control_1000 = mii_rw(dev, np->phyaddr, MII_CTRL1000, MII_READ); 1419 mii_control_1000 = mii_rw(dev, np->phyaddr,
1420 MII_CTRL1000, MII_READ);
1306 mii_control_1000 &= ~ADVERTISE_1000HALF; 1421 mii_control_1000 &= ~ADVERTISE_1000HALF;
1307 if (phyinterface & PHY_RGMII) 1422 if (phyinterface & PHY_RGMII)
1308 mii_control_1000 |= ADVERTISE_1000FULL; 1423 mii_control_1000 |= ADVERTISE_1000FULL;
@@ -1310,11 +1425,11 @@ static int phy_init(struct net_device *dev)
1310 mii_control_1000 &= ~ADVERTISE_1000FULL; 1425 mii_control_1000 &= ~ADVERTISE_1000FULL;
1311 1426
1312 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) { 1427 if (mii_rw(dev, np->phyaddr, MII_CTRL1000, mii_control_1000)) {
1313 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1428 netdev_info(dev, "%s: phy init failed\n",
1429 pci_name(np->pci_dev));
1314 return PHY_ERROR; 1430 return PHY_ERROR;
1315 } 1431 }
1316 } 1432 } else
1317 else
1318 np->gigabit = 0; 1433 np->gigabit = 0;
1319 1434
1320 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1435 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -1326,7 +1441,8 @@ static int phy_init(struct net_device *dev)
1326 /* start autoneg since we already performed hw reset above */ 1441 /* start autoneg since we already performed hw reset above */
1327 mii_control |= BMCR_ANRESTART; 1442 mii_control |= BMCR_ANRESTART;
1328 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) { 1443 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1329 printk(KERN_INFO "%s: phy init failed\n", pci_name(np->pci_dev)); 1444 netdev_info(dev, "%s: phy init failed\n",
1445 pci_name(np->pci_dev));
1330 return PHY_ERROR; 1446 return PHY_ERROR;
1331 } 1447 }
1332 } else { 1448 } else {
@@ -1334,164 +1450,41 @@ static int phy_init(struct net_device *dev)
1334 * (certain phys need bmcr to be setup with reset) 1450 * (certain phys need bmcr to be setup with reset)
1335 */ 1451 */
1336 if (phy_reset(dev, mii_control)) { 1452 if (phy_reset(dev, mii_control)) {
1337 printk(KERN_INFO "%s: phy reset failed\n", pci_name(np->pci_dev)); 1453 netdev_info(dev, "%s: phy reset failed\n",
1454 pci_name(np->pci_dev));
1338 return PHY_ERROR; 1455 return PHY_ERROR;
1339 } 1456 }
1340 } 1457 }
1341 1458
1342 /* phy vendor specific configuration */ 1459 /* phy vendor specific configuration */
1343 if ((np->phy_oui == PHY_OUI_CICADA) && (phyinterface & PHY_RGMII) ) { 1460 if ((np->phy_oui == PHY_OUI_CICADA)) {
1344 phy_reserved = mii_rw(dev, np->phyaddr, MII_RESV1, MII_READ); 1461 if (init_cicada(dev, np, phyinterface)) {
1345 phy_reserved &= ~(PHY_CICADA_INIT1 | PHY_CICADA_INIT2); 1462 netdev_info(dev, "%s: phy init failed\n",
1346 phy_reserved |= (PHY_CICADA_INIT3 | PHY_CICADA_INIT4); 1463 pci_name(np->pci_dev));
1347 if (mii_rw(dev, np->phyaddr, MII_RESV1, phy_reserved)) {
1348 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1349 return PHY_ERROR; 1464 return PHY_ERROR;
1350 } 1465 }
1351 phy_reserved = mii_rw(dev, np->phyaddr, MII_NCONFIG, MII_READ); 1466 } else if (np->phy_oui == PHY_OUI_VITESSE) {
1352 phy_reserved |= PHY_CICADA_INIT5; 1467 if (init_vitesse(dev, np)) {
1353 if (mii_rw(dev, np->phyaddr, MII_NCONFIG, phy_reserved)) { 1468 netdev_info(dev, "%s: phy init failed\n",
1354 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1469 pci_name(np->pci_dev));
1355 return PHY_ERROR; 1470 return PHY_ERROR;
1356 } 1471 }
1357 } 1472 } else if (np->phy_oui == PHY_OUI_REALTEK) {
1358 if (np->phy_oui == PHY_OUI_CICADA) {
1359 phy_reserved = mii_rw(dev, np->phyaddr, MII_SREVISION, MII_READ);
1360 phy_reserved |= PHY_CICADA_INIT6;
1361 if (mii_rw(dev, np->phyaddr, MII_SREVISION, phy_reserved)) {
1362 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1363 return PHY_ERROR;
1364 }
1365 }
1366 if (np->phy_oui == PHY_OUI_VITESSE) {
1367 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT1)) {
1368 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1369 return PHY_ERROR;
1370 }
1371 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT2)) {
1372 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1373 return PHY_ERROR;
1374 }
1375 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1376 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1377 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1378 return PHY_ERROR;
1379 }
1380 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1381 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1382 phy_reserved |= PHY_VITESSE_INIT3;
1383 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1384 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1385 return PHY_ERROR;
1386 }
1387 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT4)) {
1388 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1389 return PHY_ERROR;
1390 }
1391 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT5)) {
1392 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1393 return PHY_ERROR;
1394 }
1395 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1396 phy_reserved &= ~PHY_VITESSE_INIT_MSK1;
1397 phy_reserved |= PHY_VITESSE_INIT3;
1398 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1399 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1400 return PHY_ERROR;
1401 }
1402 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1403 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1404 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1405 return PHY_ERROR;
1406 }
1407 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT6)) {
1408 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1409 return PHY_ERROR;
1410 }
1411 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT7)) {
1412 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1413 return PHY_ERROR;
1414 }
1415 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, MII_READ);
1416 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG4, phy_reserved)) {
1417 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1418 return PHY_ERROR;
1419 }
1420 phy_reserved = mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, MII_READ);
1421 phy_reserved &= ~PHY_VITESSE_INIT_MSK2;
1422 phy_reserved |= PHY_VITESSE_INIT8;
1423 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG3, phy_reserved)) {
1424 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1425 return PHY_ERROR;
1426 }
1427 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG2, PHY_VITESSE_INIT9)) {
1428 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1429 return PHY_ERROR;
1430 }
1431 if (mii_rw(dev, np->phyaddr, PHY_VITESSE_INIT_REG1, PHY_VITESSE_INIT10)) {
1432 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1433 return PHY_ERROR;
1434 }
1435 }
1436 if (np->phy_oui == PHY_OUI_REALTEK) {
1437 if (np->phy_model == PHY_MODEL_REALTEK_8211 && 1473 if (np->phy_model == PHY_MODEL_REALTEK_8211 &&
1438 np->phy_rev == PHY_REV_REALTEK_8211B) { 1474 np->phy_rev == PHY_REV_REALTEK_8211B) {
1439 /* reset could have cleared these out, set them back */ 1475 /* reset could have cleared these out, set them back */
1440 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) { 1476 if (init_realtek_8211b(dev, np)) {
1441 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1477 netdev_info(dev, "%s: phy init failed\n",
1442 return PHY_ERROR; 1478 pci_name(np->pci_dev));
1443 }
1444 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, PHY_REALTEK_INIT2)) {
1445 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1446 return PHY_ERROR;
1447 }
1448 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1449 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1450 return PHY_ERROR;
1451 }
1452 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG3, PHY_REALTEK_INIT4)) {
1453 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1454 return PHY_ERROR; 1479 return PHY_ERROR;
1455 } 1480 }
1456 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG4, PHY_REALTEK_INIT5)) { 1481 } else if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1457 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev)); 1482 if (init_realtek_8201(dev, np) ||
1483 init_realtek_8201_cross(dev, np)) {
1484 netdev_info(dev, "%s: phy init failed\n",
1485 pci_name(np->pci_dev));
1458 return PHY_ERROR; 1486 return PHY_ERROR;
1459 } 1487 }
1460 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG5, PHY_REALTEK_INIT6)) {
1461 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1462 return PHY_ERROR;
1463 }
1464 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1465 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1466 return PHY_ERROR;
1467 }
1468 }
1469 if (np->phy_model == PHY_MODEL_REALTEK_8201) {
1470 if (np->driver_data & DEV_NEED_PHY_INIT_FIX) {
1471 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, MII_READ);
1472 phy_reserved |= PHY_REALTEK_INIT7;
1473 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG6, phy_reserved)) {
1474 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1475 return PHY_ERROR;
1476 }
1477 }
1478 if (phy_cross == NV_CROSSOVER_DETECTION_DISABLED) {
1479 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT3)) {
1480 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1481 return PHY_ERROR;
1482 }
1483 phy_reserved = mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, MII_READ);
1484 phy_reserved &= ~PHY_REALTEK_INIT_MSK1;
1485 phy_reserved |= PHY_REALTEK_INIT3;
1486 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG2, phy_reserved)) {
1487 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1488 return PHY_ERROR;
1489 }
1490 if (mii_rw(dev, np->phyaddr, PHY_REALTEK_INIT_REG1, PHY_REALTEK_INIT1)) {
1491 printk(KERN_INFO "%s: phy init failed.\n", pci_name(np->pci_dev));
1492 return PHY_ERROR;
1493 }
1494 }
1495 } 1488 }
1496 } 1489 }
1497 1490
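
With the helpers silent on failure, phy_init() now logs exactly once per failing branch via netdev_info(), and the Cicada RGMII check moves inside init_cicada() rather than living in the caller's condition. A sketch of the resulting "helper returns status, caller reports" shape; vendor_setup() is a hypothetical stand-in for the init_cicada()/init_vitesse()/init_realtek_*() calls and is not in the patch:

        #include <linux/netdevice.h>

        static int example_vendor_init(struct net_device *dev,
                                       int (*vendor_setup)(struct net_device *dev))
        {
                int err = vendor_setup(dev);

                if (err)
                        netdev_info(dev, "phy init failed\n");

                return err;
        }
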
@@ -1501,12 +1494,10 @@ static int phy_init(struct net_device *dev)
1501 /* restart auto negotiation, power down phy */ 1494 /* restart auto negotiation, power down phy */
1502 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 1495 mii_control = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
1503 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE); 1496 mii_control |= (BMCR_ANRESTART | BMCR_ANENABLE);
1504 if (phy_power_down) { 1497 if (phy_power_down)
1505 mii_control |= BMCR_PDOWN; 1498 mii_control |= BMCR_PDOWN;
1506 } 1499 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control))
1507 if (mii_rw(dev, np->phyaddr, MII_BMCR, mii_control)) {
1508 return PHY_ERROR; 1500 return PHY_ERROR;
1509 }
1510 1501
1511 return 0; 1502 return 0;
1512} 1503}
@@ -1517,7 +1508,6 @@ static void nv_start_rx(struct net_device *dev)
1517 u8 __iomem *base = get_hwbase(dev); 1508 u8 __iomem *base = get_hwbase(dev);
1518 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1509 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1519 1510
1520 dprintk(KERN_DEBUG "%s: nv_start_rx\n", dev->name);
1521 /* Already running? Stop it. */ 1511 /* Already running? Stop it. */
1522 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) { 1512 if ((readl(base + NvRegReceiverControl) & NVREG_RCVCTL_START) && !np->mac_in_use) {
1523 rx_ctrl &= ~NVREG_RCVCTL_START; 1513 rx_ctrl &= ~NVREG_RCVCTL_START;
@@ -1526,12 +1516,10 @@ static void nv_start_rx(struct net_device *dev)
1526 } 1516 }
1527 writel(np->linkspeed, base + NvRegLinkSpeed); 1517 writel(np->linkspeed, base + NvRegLinkSpeed);
1528 pci_push(base); 1518 pci_push(base);
1529 rx_ctrl |= NVREG_RCVCTL_START; 1519 rx_ctrl |= NVREG_RCVCTL_START;
1530 if (np->mac_in_use) 1520 if (np->mac_in_use)
1531 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN; 1521 rx_ctrl &= ~NVREG_RCVCTL_RX_PATH_EN;
1532 writel(rx_ctrl, base + NvRegReceiverControl); 1522 writel(rx_ctrl, base + NvRegReceiverControl);
1533 dprintk(KERN_DEBUG "%s: nv_start_rx to duplex %d, speed 0x%08x.\n",
1534 dev->name, np->duplex, np->linkspeed);
1535 pci_push(base); 1523 pci_push(base);
1536} 1524}
1537 1525
@@ -1541,15 +1529,15 @@ static void nv_stop_rx(struct net_device *dev)
1541 u8 __iomem *base = get_hwbase(dev); 1529 u8 __iomem *base = get_hwbase(dev);
1542 u32 rx_ctrl = readl(base + NvRegReceiverControl); 1530 u32 rx_ctrl = readl(base + NvRegReceiverControl);
1543 1531
1544 dprintk(KERN_DEBUG "%s: nv_stop_rx\n", dev->name);
1545 if (!np->mac_in_use) 1532 if (!np->mac_in_use)
1546 rx_ctrl &= ~NVREG_RCVCTL_START; 1533 rx_ctrl &= ~NVREG_RCVCTL_START;
1547 else 1534 else
1548 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN; 1535 rx_ctrl |= NVREG_RCVCTL_RX_PATH_EN;
1549 writel(rx_ctrl, base + NvRegReceiverControl); 1536 writel(rx_ctrl, base + NvRegReceiverControl);
1550 reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0, 1537 if (reg_delay(dev, NvRegReceiverStatus, NVREG_RCVSTAT_BUSY, 0,
1551 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX, 1538 NV_RXSTOP_DELAY1, NV_RXSTOP_DELAY1MAX))
1552 KERN_INFO "nv_stop_rx: ReceiverStatus remained busy"); 1539 netdev_info(dev, "%s: ReceiverStatus remained busy\n",
1540 __func__);
1553 1541
1554 udelay(NV_RXSTOP_DELAY2); 1542 udelay(NV_RXSTOP_DELAY2);
1555 if (!np->mac_in_use) 1543 if (!np->mac_in_use)
@@ -1562,7 +1550,6 @@ static void nv_start_tx(struct net_device *dev)
1562 u8 __iomem *base = get_hwbase(dev); 1550 u8 __iomem *base = get_hwbase(dev);
1563 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1551 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1564 1552
1565 dprintk(KERN_DEBUG "%s: nv_start_tx\n", dev->name);
1566 tx_ctrl |= NVREG_XMITCTL_START; 1553 tx_ctrl |= NVREG_XMITCTL_START;
1567 if (np->mac_in_use) 1554 if (np->mac_in_use)
1568 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN; 1555 tx_ctrl &= ~NVREG_XMITCTL_TX_PATH_EN;
@@ -1576,15 +1563,15 @@ static void nv_stop_tx(struct net_device *dev)
1576 u8 __iomem *base = get_hwbase(dev); 1563 u8 __iomem *base = get_hwbase(dev);
1577 u32 tx_ctrl = readl(base + NvRegTransmitterControl); 1564 u32 tx_ctrl = readl(base + NvRegTransmitterControl);
1578 1565
1579 dprintk(KERN_DEBUG "%s: nv_stop_tx\n", dev->name);
1580 if (!np->mac_in_use) 1566 if (!np->mac_in_use)
1581 tx_ctrl &= ~NVREG_XMITCTL_START; 1567 tx_ctrl &= ~NVREG_XMITCTL_START;
1582 else 1568 else
1583 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN; 1569 tx_ctrl |= NVREG_XMITCTL_TX_PATH_EN;
1584 writel(tx_ctrl, base + NvRegTransmitterControl); 1570 writel(tx_ctrl, base + NvRegTransmitterControl);
1585 reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0, 1571 if (reg_delay(dev, NvRegTransmitterStatus, NVREG_XMITSTAT_BUSY, 0,
1586 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX, 1572 NV_TXSTOP_DELAY1, NV_TXSTOP_DELAY1MAX))
1587 KERN_INFO "nv_stop_tx: TransmitterStatus remained busy"); 1573 netdev_info(dev, "%s: TransmitterStatus remained busy\n",
1574 __func__);
1588 1575
1589 udelay(NV_TXSTOP_DELAY2); 1576 udelay(NV_TXSTOP_DELAY2);
1590 if (!np->mac_in_use) 1577 if (!np->mac_in_use)
@@ -1609,7 +1596,6 @@ static void nv_txrx_reset(struct net_device *dev)
1609 struct fe_priv *np = netdev_priv(dev); 1596 struct fe_priv *np = netdev_priv(dev);
1610 u8 __iomem *base = get_hwbase(dev); 1597 u8 __iomem *base = get_hwbase(dev);
1611 1598
1612 dprintk(KERN_DEBUG "%s: nv_txrx_reset\n", dev->name);
1613 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1599 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1614 pci_push(base); 1600 pci_push(base);
1615 udelay(NV_TXRX_RESET_DELAY); 1601 udelay(NV_TXRX_RESET_DELAY);
@@ -1623,8 +1609,6 @@ static void nv_mac_reset(struct net_device *dev)
1623 u8 __iomem *base = get_hwbase(dev); 1609 u8 __iomem *base = get_hwbase(dev);
1624 u32 temp1, temp2, temp3; 1610 u32 temp1, temp2, temp3;
1625 1611
1626 dprintk(KERN_DEBUG "%s: nv_mac_reset\n", dev->name);
1627
1628 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl); 1612 writel(NVREG_TXRXCTL_BIT2 | NVREG_TXRXCTL_RESET | np->txrxctl_bits, base + NvRegTxRxControl);
1629 pci_push(base); 1613 pci_push(base);
1630 1614
@@ -1745,7 +1729,7 @@ static struct net_device_stats *nv_get_stats(struct net_device *dev)
1745static int nv_alloc_rx(struct net_device *dev) 1729static int nv_alloc_rx(struct net_device *dev)
1746{ 1730{
1747 struct fe_priv *np = netdev_priv(dev); 1731 struct fe_priv *np = netdev_priv(dev);
1748 struct ring_desc* less_rx; 1732 struct ring_desc *less_rx;
1749 1733
1750 less_rx = np->get_rx.orig; 1734 less_rx = np->get_rx.orig;
1751 if (less_rx-- == np->first_rx.orig) 1735 if (less_rx-- == np->first_rx.orig)
@@ -1767,9 +1751,8 @@ static int nv_alloc_rx(struct net_device *dev)
1767 np->put_rx.orig = np->first_rx.orig; 1751 np->put_rx.orig = np->first_rx.orig;
1768 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1752 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1769 np->put_rx_ctx = np->first_rx_ctx; 1753 np->put_rx_ctx = np->first_rx_ctx;
1770 } else { 1754 } else
1771 return 1; 1755 return 1;
1772 }
1773 } 1756 }
1774 return 0; 1757 return 0;
1775} 1758}
@@ -1777,7 +1760,7 @@ static int nv_alloc_rx(struct net_device *dev)
1777static int nv_alloc_rx_optimized(struct net_device *dev) 1760static int nv_alloc_rx_optimized(struct net_device *dev)
1778{ 1761{
1779 struct fe_priv *np = netdev_priv(dev); 1762 struct fe_priv *np = netdev_priv(dev);
1780 struct ring_desc_ex* less_rx; 1763 struct ring_desc_ex *less_rx;
1781 1764
1782 less_rx = np->get_rx.ex; 1765 less_rx = np->get_rx.ex;
1783 if (less_rx-- == np->first_rx.ex) 1766 if (less_rx-- == np->first_rx.ex)
@@ -1800,9 +1783,8 @@ static int nv_alloc_rx_optimized(struct net_device *dev)
1800 np->put_rx.ex = np->first_rx.ex; 1783 np->put_rx.ex = np->first_rx.ex;
1801 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx)) 1784 if (unlikely(np->put_rx_ctx++ == np->last_rx_ctx))
1802 np->put_rx_ctx = np->first_rx_ctx; 1785 np->put_rx_ctx = np->first_rx_ctx;
1803 } else { 1786 } else
1804 return 1; 1787 return 1;
1805 }
1806 } 1788 }
1807 return 0; 1789 return 0;
1808} 1790}
@@ -2018,24 +2000,24 @@ static void nv_legacybackoff_reseed(struct net_device *dev)
2018 2000
2019/* Known Good seed sets */ 2001/* Known Good seed sets */
2020static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2002static const u32 main_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2021 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2003 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2022 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974}, 2004 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 385, 761, 790, 974},
2023 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874}, 2005 {145, 155, 165, 175, 185, 196, 235, 245, 255, 265, 275, 285, 660, 690, 874},
2024 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974}, 2006 {245, 255, 265, 575, 385, 298, 335, 345, 355, 366, 375, 386, 761, 790, 974},
2025 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984}, 2007 {266, 265, 276, 585, 397, 208, 345, 355, 365, 376, 385, 396, 771, 700, 984},
2026 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984}, 2008 {266, 265, 276, 586, 397, 208, 346, 355, 365, 376, 285, 396, 771, 700, 984},
2027 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84}, 2009 {366, 365, 376, 686, 497, 308, 447, 455, 466, 476, 485, 496, 871, 800, 84},
2028 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184}}; 2010 {466, 465, 476, 786, 597, 408, 547, 555, 566, 576, 585, 597, 971, 900, 184} };
2029 2011
2030static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = { 2012static const u32 gear_seedset[BACKOFF_SEEDSET_ROWS][BACKOFF_SEEDSET_LFSRS] = {
2031 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2013 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2032 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2014 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2033 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397}, 2015 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 397},
2034 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2016 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2035 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295}, 2017 {251, 262, 273, 324, 319, 508, 375, 364, 341, 371, 398, 193, 375, 30, 295},
2036 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2018 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2037 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}, 2019 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395},
2038 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395}}; 2020 {351, 375, 373, 469, 551, 639, 477, 464, 441, 472, 498, 293, 476, 130, 395} };
2039 2021
2040static void nv_gear_backoff_reseed(struct net_device *dev) 2022static void nv_gear_backoff_reseed(struct net_device *dev)
2041{ 2023{
@@ -2083,13 +2065,12 @@ static void nv_gear_backoff_reseed(struct net_device *dev)
2083 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT); 2065 temp = NVREG_BKOFFCTRL_DEFAULT | (0 << NVREG_BKOFFCTRL_SELECT);
2084 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK; 2066 temp |= combinedSeed & NVREG_BKOFFCTRL_SEED_MASK;
2085 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR; 2067 temp |= combinedSeed >> NVREG_BKOFFCTRL_GEAR;
2086 writel(temp,base + NvRegBackOffControl); 2068 writel(temp, base + NvRegBackOffControl);
2087 2069
2088 /* Setup seeds for all gear LFSRs. */ 2070 /* Setup seeds for all gear LFSRs. */
2089 get_random_bytes(&seedset, sizeof(seedset)); 2071 get_random_bytes(&seedset, sizeof(seedset));
2090 seedset = seedset % BACKOFF_SEEDSET_ROWS; 2072 seedset = seedset % BACKOFF_SEEDSET_ROWS;
2091 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) 2073 for (i = 1; i <= BACKOFF_SEEDSET_LFSRS; i++) {
2092 {
2093 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT); 2074 temp = NVREG_BKOFFCTRL_DEFAULT | (i << NVREG_BKOFFCTRL_SELECT);
2094 temp |= main_seedset[seedset][i-1] & 0x3ff; 2075 temp |= main_seedset[seedset][i-1] & 0x3ff;
2095 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR); 2076 temp |= ((gear_seedset[seedset][i-1] & 0x3ff) << NVREG_BKOFFCTRL_GEAR);
@@ -2113,10 +2094,10 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2113 u32 size = skb_headlen(skb); 2094 u32 size = skb_headlen(skb);
2114 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2095 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2115 u32 empty_slots; 2096 u32 empty_slots;
2116 struct ring_desc* put_tx; 2097 struct ring_desc *put_tx;
2117 struct ring_desc* start_tx; 2098 struct ring_desc *start_tx;
2118 struct ring_desc* prev_tx; 2099 struct ring_desc *prev_tx;
2119 struct nv_skb_map* prev_tx_ctx; 2100 struct nv_skb_map *prev_tx_ctx;
2120 unsigned long flags; 2101 unsigned long flags;
2121 2102
2122 /* add fragments to entries count */ 2103 /* add fragments to entries count */
@@ -2204,18 +2185,6 @@ static netdev_tx_t nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
2204 2185
2205 spin_unlock_irqrestore(&np->lock, flags); 2186 spin_unlock_irqrestore(&np->lock, flags);
2206 2187
2207 dprintk(KERN_DEBUG "%s: nv_start_xmit: entries %d queued for transmission. tx_flags_extra: %x\n",
2208 dev->name, entries, tx_flags_extra);
2209 {
2210 int j;
2211 for (j=0; j<64; j++) {
2212 if ((j%16) == 0)
2213 dprintk("\n%03x:", j);
2214 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2215 }
2216 dprintk("\n");
2217 }
2218
2219 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2188 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2220 return NETDEV_TX_OK; 2189 return NETDEV_TX_OK;
2221} 2190}
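
Both transmit paths lose their hand-rolled 64-byte hex dump of skb->data (this hunk and the matching one in nv_start_xmit_optimized() below). As an aside that is not part of the patch: should such a dump ever be wanted again, the kernel's stock helper covers it without the manual loop; a hedged sketch:

        #include <linux/kernel.h>
        #include <linux/printk.h>
        #include <linux/skbuff.h>

        static void example_dump_head(const struct sk_buff *skb)
        {
                print_hex_dump(KERN_DEBUG, "tx hdr: ", DUMP_PREFIX_OFFSET, 16, 1,
                               skb->data, min_t(unsigned int, skb->len, 64), true);
        }
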
@@ -2233,11 +2202,11 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2233 u32 size = skb_headlen(skb); 2202 u32 size = skb_headlen(skb);
2234 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0); 2203 u32 entries = (size >> NV_TX2_TSO_MAX_SHIFT) + ((size & (NV_TX2_TSO_MAX_SIZE-1)) ? 1 : 0);
2235 u32 empty_slots; 2204 u32 empty_slots;
2236 struct ring_desc_ex* put_tx; 2205 struct ring_desc_ex *put_tx;
2237 struct ring_desc_ex* start_tx; 2206 struct ring_desc_ex *start_tx;
2238 struct ring_desc_ex* prev_tx; 2207 struct ring_desc_ex *prev_tx;
2239 struct nv_skb_map* prev_tx_ctx; 2208 struct nv_skb_map *prev_tx_ctx;
2240 struct nv_skb_map* start_tx_ctx; 2209 struct nv_skb_map *start_tx_ctx;
2241 unsigned long flags; 2210 unsigned long flags;
2242 2211
2243 /* add fragments to entries count */ 2212 /* add fragments to entries count */
@@ -2355,18 +2324,6 @@ static netdev_tx_t nv_start_xmit_optimized(struct sk_buff *skb,
2355 2324
2356 spin_unlock_irqrestore(&np->lock, flags); 2325 spin_unlock_irqrestore(&np->lock, flags);
2357 2326
2358 dprintk(KERN_DEBUG "%s: nv_start_xmit_optimized: entries %d queued for transmission. tx_flags_extra: %x\n",
2359 dev->name, entries, tx_flags_extra);
2360 {
2361 int j;
2362 for (j=0; j<64; j++) {
2363 if ((j%16) == 0)
2364 dprintk("\n%03x:", j);
2365 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2366 }
2367 dprintk("\n");
2368 }
2369
2370 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2327 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
2371 return NETDEV_TX_OK; 2328 return NETDEV_TX_OK;
2372} 2329}
@@ -2399,15 +2356,12 @@ static int nv_tx_done(struct net_device *dev, int limit)
2399 struct fe_priv *np = netdev_priv(dev); 2356 struct fe_priv *np = netdev_priv(dev);
2400 u32 flags; 2357 u32 flags;
2401 int tx_work = 0; 2358 int tx_work = 0;
2402 struct ring_desc* orig_get_tx = np->get_tx.orig; 2359 struct ring_desc *orig_get_tx = np->get_tx.orig;
2403 2360
2404 while ((np->get_tx.orig != np->put_tx.orig) && 2361 while ((np->get_tx.orig != np->put_tx.orig) &&
2405 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) && 2362 !((flags = le32_to_cpu(np->get_tx.orig->flaglen)) & NV_TX_VALID) &&
2406 (tx_work < limit)) { 2363 (tx_work < limit)) {
2407 2364
2408 dprintk(KERN_DEBUG "%s: nv_tx_done: flags 0x%x.\n",
2409 dev->name, flags);
2410
2411 nv_unmap_txskb(np, np->get_tx_ctx); 2365 nv_unmap_txskb(np, np->get_tx_ctx);
2412 2366
2413 if (np->desc_ver == DESC_VER_1) { 2367 if (np->desc_ver == DESC_VER_1) {
@@ -2464,15 +2418,12 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2464 struct fe_priv *np = netdev_priv(dev); 2418 struct fe_priv *np = netdev_priv(dev);
2465 u32 flags; 2419 u32 flags;
2466 int tx_work = 0; 2420 int tx_work = 0;
2467 struct ring_desc_ex* orig_get_tx = np->get_tx.ex; 2421 struct ring_desc_ex *orig_get_tx = np->get_tx.ex;
2468 2422
2469 while ((np->get_tx.ex != np->put_tx.ex) && 2423 while ((np->get_tx.ex != np->put_tx.ex) &&
2470 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) && 2424 !((flags = le32_to_cpu(np->get_tx.ex->flaglen)) & NV_TX2_VALID) &&
2471 (tx_work < limit)) { 2425 (tx_work < limit)) {
2472 2426
2473 dprintk(KERN_DEBUG "%s: nv_tx_done_optimized: flags 0x%x.\n",
2474 dev->name, flags);
2475
2476 nv_unmap_txskb(np, np->get_tx_ctx); 2427 nv_unmap_txskb(np, np->get_tx_ctx);
2477 2428
2478 if (flags & NV_TX2_LASTPACKET) { 2429 if (flags & NV_TX2_LASTPACKET) {
@@ -2491,9 +2442,8 @@ static int nv_tx_done_optimized(struct net_device *dev, int limit)
2491 np->get_tx_ctx->skb = NULL; 2442 np->get_tx_ctx->skb = NULL;
2492 tx_work++; 2443 tx_work++;
2493 2444
2494 if (np->tx_limit) { 2445 if (np->tx_limit)
2495 nv_tx_flip_ownership(dev); 2446 nv_tx_flip_ownership(dev);
2496 }
2497 } 2447 }
2498 if (unlikely(np->get_tx.ex++ == np->last_tx.ex)) 2448 if (unlikely(np->get_tx.ex++ == np->last_tx.ex))
2499 np->get_tx.ex = np->first_tx.ex; 2449 np->get_tx.ex = np->first_tx.ex;
@@ -2518,57 +2468,56 @@ static void nv_tx_timeout(struct net_device *dev)
2518 u32 status; 2468 u32 status;
2519 union ring_type put_tx; 2469 union ring_type put_tx;
2520 int saved_tx_limit; 2470 int saved_tx_limit;
2471 int i;
2521 2472
2522 if (np->msi_flags & NV_MSI_X_ENABLED) 2473 if (np->msi_flags & NV_MSI_X_ENABLED)
2523 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK; 2474 status = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQSTAT_MASK;
2524 else 2475 else
2525 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 2476 status = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
2526 2477
2527 printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n", dev->name, status); 2478 netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
2528 2479
2529 { 2480 netdev_info(dev, "Ring at %lx\n", (unsigned long)np->ring_addr);
2530 int i; 2481 netdev_info(dev, "Dumping tx registers\n");
2531 2482 for (i = 0; i <= np->register_size; i += 32) {
2532 printk(KERN_INFO "%s: Ring at %lx\n", 2483 netdev_info(dev,
2533 dev->name, (unsigned long)np->ring_addr); 2484 "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n",
2534 printk(KERN_INFO "%s: Dumping tx registers\n", dev->name); 2485 i,
2535 for (i=0;i<=np->register_size;i+= 32) { 2486 readl(base + i + 0), readl(base + i + 4),
2536 printk(KERN_INFO "%3x: %08x %08x %08x %08x %08x %08x %08x %08x\n", 2487 readl(base + i + 8), readl(base + i + 12),
2537 i, 2488 readl(base + i + 16), readl(base + i + 20),
2538 readl(base + i + 0), readl(base + i + 4), 2489 readl(base + i + 24), readl(base + i + 28));
2539 readl(base + i + 8), readl(base + i + 12), 2490 }
2540 readl(base + i + 16), readl(base + i + 20), 2491 netdev_info(dev, "Dumping tx ring\n");
2541 readl(base + i + 24), readl(base + i + 28)); 2492 for (i = 0; i < np->tx_ring_size; i += 4) {
2542 } 2493 if (!nv_optimized(np)) {
2543 printk(KERN_INFO "%s: Dumping tx ring\n", dev->name); 2494 netdev_info(dev,
2544 for (i=0;i<np->tx_ring_size;i+= 4) { 2495 "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n",
2545 if (!nv_optimized(np)) { 2496 i,
2546 printk(KERN_INFO "%03x: %08x %08x // %08x %08x // %08x %08x // %08x %08x\n", 2497 le32_to_cpu(np->tx_ring.orig[i].buf),
2547 i, 2498 le32_to_cpu(np->tx_ring.orig[i].flaglen),
2548 le32_to_cpu(np->tx_ring.orig[i].buf), 2499 le32_to_cpu(np->tx_ring.orig[i+1].buf),
2549 le32_to_cpu(np->tx_ring.orig[i].flaglen), 2500 le32_to_cpu(np->tx_ring.orig[i+1].flaglen),
2550 le32_to_cpu(np->tx_ring.orig[i+1].buf), 2501 le32_to_cpu(np->tx_ring.orig[i+2].buf),
2551 le32_to_cpu(np->tx_ring.orig[i+1].flaglen), 2502 le32_to_cpu(np->tx_ring.orig[i+2].flaglen),
2552 le32_to_cpu(np->tx_ring.orig[i+2].buf), 2503 le32_to_cpu(np->tx_ring.orig[i+3].buf),
2553 le32_to_cpu(np->tx_ring.orig[i+2].flaglen), 2504 le32_to_cpu(np->tx_ring.orig[i+3].flaglen));
2554 le32_to_cpu(np->tx_ring.orig[i+3].buf), 2505 } else {
2555 le32_to_cpu(np->tx_ring.orig[i+3].flaglen)); 2506 netdev_info(dev,
2556 } else { 2507 "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n",
2557 printk(KERN_INFO "%03x: %08x %08x %08x // %08x %08x %08x // %08x %08x %08x // %08x %08x %08x\n", 2508 i,
2558 i, 2509 le32_to_cpu(np->tx_ring.ex[i].bufhigh),
2559 le32_to_cpu(np->tx_ring.ex[i].bufhigh), 2510 le32_to_cpu(np->tx_ring.ex[i].buflow),
2560 le32_to_cpu(np->tx_ring.ex[i].buflow), 2511 le32_to_cpu(np->tx_ring.ex[i].flaglen),
2561 le32_to_cpu(np->tx_ring.ex[i].flaglen), 2512 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh),
2562 le32_to_cpu(np->tx_ring.ex[i+1].bufhigh), 2513 le32_to_cpu(np->tx_ring.ex[i+1].buflow),
2563 le32_to_cpu(np->tx_ring.ex[i+1].buflow), 2514 le32_to_cpu(np->tx_ring.ex[i+1].flaglen),
2564 le32_to_cpu(np->tx_ring.ex[i+1].flaglen), 2515 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh),
2565 le32_to_cpu(np->tx_ring.ex[i+2].bufhigh), 2516 le32_to_cpu(np->tx_ring.ex[i+2].buflow),
2566 le32_to_cpu(np->tx_ring.ex[i+2].buflow), 2517 le32_to_cpu(np->tx_ring.ex[i+2].flaglen),
2567 le32_to_cpu(np->tx_ring.ex[i+2].flaglen), 2518 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh),
2568 le32_to_cpu(np->tx_ring.ex[i+3].bufhigh), 2519 le32_to_cpu(np->tx_ring.ex[i+3].buflow),
2569 le32_to_cpu(np->tx_ring.ex[i+3].buflow), 2520 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2570 le32_to_cpu(np->tx_ring.ex[i+3].flaglen));
2571 }
2572 } 2521 }
2573 } 2522 }
2574 2523
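
nv_tx_timeout()'s register and ring dump is converted from printk(KERN_INFO "%s: ...", dev->name, ...) to netdev_info(), which already prefixes the driver and interface name; the same conversion runs through the rest of the file. A sketch of the before/after for a single message, with names chosen for illustration:

        #include <linux/netdevice.h>
        #include <linux/types.h>

        static void example_report_timeout(struct net_device *dev, u32 status)
        {
                /* was: printk(KERN_INFO "%s: Got tx_timeout. irq: %08x\n",
                 *              dev->name, status); */
                netdev_info(dev, "Got tx_timeout. irq: %08x\n", status);
        }
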
@@ -2616,15 +2565,13 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2616 int protolen; /* length as stored in the proto field */ 2565 int protolen; /* length as stored in the proto field */
2617 2566
2618 /* 1) calculate len according to header */ 2567 /* 1) calculate len according to header */
2619 if ( ((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) { 2568 if (((struct vlan_ethhdr *)packet)->h_vlan_proto == htons(ETH_P_8021Q)) {
2620 protolen = ntohs( ((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto ); 2569 protolen = ntohs(((struct vlan_ethhdr *)packet)->h_vlan_encapsulated_proto);
2621 hdrlen = VLAN_HLEN; 2570 hdrlen = VLAN_HLEN;
2622 } else { 2571 } else {
2623 protolen = ntohs( ((struct ethhdr *)packet)->h_proto); 2572 protolen = ntohs(((struct ethhdr *)packet)->h_proto);
2624 hdrlen = ETH_HLEN; 2573 hdrlen = ETH_HLEN;
2625 } 2574 }
2626 dprintk(KERN_DEBUG "%s: nv_getlen: datalen %d, protolen %d, hdrlen %d\n",
2627 dev->name, datalen, protolen, hdrlen);
2628 if (protolen > ETH_DATA_LEN) 2575 if (protolen > ETH_DATA_LEN)
2629 return datalen; /* Value in proto field not a len, no checks possible */ 2576 return datalen; /* Value in proto field not a len, no checks possible */
2630 2577
@@ -2635,26 +2582,18 @@ static int nv_getlen(struct net_device *dev, void *packet, int datalen)
2635 /* more data on wire than in 802 header, trim of 2582 /* more data on wire than in 802 header, trim of
2636 * additional data. 2583 * additional data.
2637 */ 2584 */
2638 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2639 dev->name, protolen);
2640 return protolen; 2585 return protolen;
2641 } else { 2586 } else {
2642 /* less data on wire than mentioned in header. 2587 /* less data on wire than mentioned in header.
2643 * Discard the packet. 2588 * Discard the packet.
2644 */ 2589 */
2645 dprintk(KERN_DEBUG "%s: nv_getlen: discarding long packet.\n",
2646 dev->name);
2647 return -1; 2590 return -1;
2648 } 2591 }
2649 } else { 2592 } else {
2650 /* short packet. Accept only if 802 values are also short */ 2593 /* short packet. Accept only if 802 values are also short */
2651 if (protolen > ETH_ZLEN) { 2594 if (protolen > ETH_ZLEN) {
2652 dprintk(KERN_DEBUG "%s: nv_getlen: discarding short packet.\n",
2653 dev->name);
2654 return -1; 2595 return -1;
2655 } 2596 }
2656 dprintk(KERN_DEBUG "%s: nv_getlen: accepting %d bytes.\n",
2657 dev->name, datalen);
2658 return datalen; 2597 return datalen;
2659 } 2598 }
2660} 2599}
@@ -2667,13 +2606,10 @@ static int nv_rx_process(struct net_device *dev, int limit)
2667 struct sk_buff *skb; 2606 struct sk_buff *skb;
2668 int len; 2607 int len;
2669 2608
2670 while((np->get_rx.orig != np->put_rx.orig) && 2609 while ((np->get_rx.orig != np->put_rx.orig) &&
2671 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) && 2610 !((flags = le32_to_cpu(np->get_rx.orig->flaglen)) & NV_RX_AVAIL) &&
2672 (rx_work < limit)) { 2611 (rx_work < limit)) {
2673 2612
2674 dprintk(KERN_DEBUG "%s: nv_rx_process: flags 0x%x.\n",
2675 dev->name, flags);
2676
2677 /* 2613 /*
2678 * the packet is for us - immediately tear down the pci mapping. 2614 * the packet is for us - immediately tear down the pci mapping.
2679 * TODO: check if a prefetch of the first cacheline improves 2615 * TODO: check if a prefetch of the first cacheline improves
@@ -2685,16 +2621,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2685 skb = np->get_rx_ctx->skb; 2621 skb = np->get_rx_ctx->skb;
2686 np->get_rx_ctx->skb = NULL; 2622 np->get_rx_ctx->skb = NULL;
2687 2623
2688 {
2689 int j;
2690 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2691 for (j=0; j<64; j++) {
2692 if ((j%16) == 0)
2693 dprintk("\n%03x:", j);
2694 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2695 }
2696 dprintk("\n");
2697 }
2698 /* look at what we actually got: */ 2624 /* look at what we actually got: */
2699 if (np->desc_ver == DESC_VER_1) { 2625 if (np->desc_ver == DESC_VER_1) {
2700 if (likely(flags & NV_RX_DESCRIPTORVALID)) { 2626 if (likely(flags & NV_RX_DESCRIPTORVALID)) {
@@ -2710,9 +2636,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2710 } 2636 }
2711 /* framing errors are soft errors */ 2637 /* framing errors are soft errors */
2712 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) { 2638 else if ((flags & NV_RX_ERROR_MASK) == NV_RX_FRAMINGERR) {
2713 if (flags & NV_RX_SUBSTRACT1) { 2639 if (flags & NV_RX_SUBSTRACT1)
2714 len--; 2640 len--;
2715 }
2716 } 2641 }
2717 /* the rest are hard errors */ 2642 /* the rest are hard errors */
2718 else { 2643 else {
@@ -2745,9 +2670,8 @@ static int nv_rx_process(struct net_device *dev, int limit)
2745 } 2670 }
2746 /* framing errors are soft errors */ 2671 /* framing errors are soft errors */
2747 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2672 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2748 if (flags & NV_RX2_SUBSTRACT1) { 2673 if (flags & NV_RX2_SUBSTRACT1)
2749 len--; 2674 len--;
2750 }
2751 } 2675 }
2752 /* the rest are hard errors */ 2676 /* the rest are hard errors */
2753 else { 2677 else {
@@ -2771,8 +2695,6 @@ static int nv_rx_process(struct net_device *dev, int limit)
2771 /* got a valid packet - forward it to the network core */ 2695 /* got a valid packet - forward it to the network core */
2772 skb_put(skb, len); 2696 skb_put(skb, len);
2773 skb->protocol = eth_type_trans(skb, dev); 2697 skb->protocol = eth_type_trans(skb, dev);
2774 dprintk(KERN_DEBUG "%s: nv_rx_process: %d bytes, proto %d accepted.\n",
2775 dev->name, len, skb->protocol);
2776 napi_gro_receive(&np->napi, skb); 2698 napi_gro_receive(&np->napi, skb);
2777 dev->stats.rx_packets++; 2699 dev->stats.rx_packets++;
2778 dev->stats.rx_bytes += len; 2700 dev->stats.rx_bytes += len;
@@ -2797,13 +2719,10 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2797 struct sk_buff *skb; 2719 struct sk_buff *skb;
2798 int len; 2720 int len;
2799 2721
2800 while((np->get_rx.ex != np->put_rx.ex) && 2722 while ((np->get_rx.ex != np->put_rx.ex) &&
2801 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) && 2723 !((flags = le32_to_cpu(np->get_rx.ex->flaglen)) & NV_RX2_AVAIL) &&
2802 (rx_work < limit)) { 2724 (rx_work < limit)) {
2803 2725
2804 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: flags 0x%x.\n",
2805 dev->name, flags);
2806
2807 /* 2726 /*
2808 * the packet is for us - immediately tear down the pci mapping. 2727 * the packet is for us - immediately tear down the pci mapping.
2809 * TODO: check if a prefetch of the first cacheline improves 2728 * TODO: check if a prefetch of the first cacheline improves
@@ -2815,16 +2734,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2815 skb = np->get_rx_ctx->skb; 2734 skb = np->get_rx_ctx->skb;
2816 np->get_rx_ctx->skb = NULL; 2735 np->get_rx_ctx->skb = NULL;
2817 2736
2818 {
2819 int j;
2820 dprintk(KERN_DEBUG "Dumping packet (flags 0x%x).",flags);
2821 for (j=0; j<64; j++) {
2822 if ((j%16) == 0)
2823 dprintk("\n%03x:", j);
2824 dprintk(" %02x", ((unsigned char*)skb->data)[j]);
2825 }
2826 dprintk("\n");
2827 }
2828 /* look at what we actually got: */ 2737 /* look at what we actually got: */
2829 if (likely(flags & NV_RX2_DESCRIPTORVALID)) { 2738 if (likely(flags & NV_RX2_DESCRIPTORVALID)) {
2830 len = flags & LEN_MASK_V2; 2739 len = flags & LEN_MASK_V2;
@@ -2838,9 +2747,8 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2838 } 2747 }
2839 /* framing errors are soft errors */ 2748 /* framing errors are soft errors */
2840 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) { 2749 else if ((flags & NV_RX2_ERROR_MASK) == NV_RX2_FRAMINGERR) {
2841 if (flags & NV_RX2_SUBSTRACT1) { 2750 if (flags & NV_RX2_SUBSTRACT1)
2842 len--; 2751 len--;
2843 }
2844 } 2752 }
2845 /* the rest are hard errors */ 2753 /* the rest are hard errors */
2846 else { 2754 else {
@@ -2858,9 +2766,6 @@ static int nv_rx_process_optimized(struct net_device *dev, int limit)
2858 skb->protocol = eth_type_trans(skb, dev); 2766 skb->protocol = eth_type_trans(skb, dev);
2859 prefetch(skb->data); 2767 prefetch(skb->data);
2860 2768
2861 dprintk(KERN_DEBUG "%s: nv_rx_process_optimized: %d bytes, proto %d accepted.\n",
2862 dev->name, len, skb->protocol);
2863
2864 if (likely(!np->vlangrp)) { 2769 if (likely(!np->vlangrp)) {
2865 napi_gro_receive(&np->napi, skb); 2770 napi_gro_receive(&np->napi, skb);
2866 } else { 2771 } else {
@@ -2949,7 +2854,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
2949 /* reinit nic view of the rx queue */ 2854 /* reinit nic view of the rx queue */
2950 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 2855 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
2951 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 2856 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
2952 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 2857 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
2953 base + NvRegRingSizes); 2858 base + NvRegRingSizes);
2954 pci_push(base); 2859 pci_push(base);
2955 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 2860 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -2986,7 +2891,7 @@ static void nv_copy_mac_to_hw(struct net_device *dev)
2986static int nv_set_mac_address(struct net_device *dev, void *addr) 2891static int nv_set_mac_address(struct net_device *dev, void *addr)
2987{ 2892{
2988 struct fe_priv *np = netdev_priv(dev); 2893 struct fe_priv *np = netdev_priv(dev);
2989 struct sockaddr *macaddr = (struct sockaddr*)addr; 2894 struct sockaddr *macaddr = (struct sockaddr *)addr;
2990 2895
2991 if (!is_valid_ether_addr(macaddr->sa_data)) 2896 if (!is_valid_ether_addr(macaddr->sa_data))
2992 return -EADDRNOTAVAIL; 2897 return -EADDRNOTAVAIL;
@@ -3076,8 +2981,6 @@ static void nv_set_multicast(struct net_device *dev)
3076 writel(mask[0], base + NvRegMulticastMaskA); 2981 writel(mask[0], base + NvRegMulticastMaskA);
3077 writel(mask[1], base + NvRegMulticastMaskB); 2982 writel(mask[1], base + NvRegMulticastMaskB);
3078 writel(pff, base + NvRegPacketFilterFlags); 2983 writel(pff, base + NvRegPacketFilterFlags);
3079 dprintk(KERN_INFO "%s: reconfiguration for multicast lists.\n",
3080 dev->name);
3081 nv_start_rx(dev); 2984 nv_start_rx(dev);
3082 spin_unlock_irq(&np->lock); 2985 spin_unlock_irq(&np->lock);
3083} 2986}
@@ -3152,8 +3055,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3152 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 3055 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
3153 3056
3154 if (!(mii_status & BMSR_LSTATUS)) { 3057 if (!(mii_status & BMSR_LSTATUS)) {
3155 dprintk(KERN_DEBUG "%s: no link detected by phy - falling back to 10HD.\n",
3156 dev->name);
3157 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3058 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3158 newdup = 0; 3059 newdup = 0;
3159 retval = 0; 3060 retval = 0;
@@ -3161,8 +3062,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3161 } 3062 }
3162 3063
3163 if (np->autoneg == 0) { 3064 if (np->autoneg == 0) {
3164 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: autoneg off, PHY set to 0x%04x.\n",
3165 dev->name, np->fixed_mode);
3166 if (np->fixed_mode & LPA_100FULL) { 3065 if (np->fixed_mode & LPA_100FULL) {
3167 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100; 3066 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_100;
3168 newdup = 1; 3067 newdup = 1;
@@ -3185,14 +3084,11 @@ static int nv_update_linkspeed(struct net_device *dev)
3185 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3084 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3186 newdup = 0; 3085 newdup = 0;
3187 retval = 0; 3086 retval = 0;
3188 dprintk(KERN_DEBUG "%s: autoneg not completed - falling back to 10HD.\n", dev->name);
3189 goto set_speed; 3087 goto set_speed;
3190 } 3088 }
3191 3089
3192 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ); 3090 adv = mii_rw(dev, np->phyaddr, MII_ADVERTISE, MII_READ);
3193 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ); 3091 lpa = mii_rw(dev, np->phyaddr, MII_LPA, MII_READ);
3194 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: PHY advertises 0x%04x, lpa 0x%04x.\n",
3195 dev->name, adv, lpa);
3196 3092
3197 retval = 1; 3093 retval = 1;
3198 if (np->gigabit == PHY_GIGABIT) { 3094 if (np->gigabit == PHY_GIGABIT) {
@@ -3201,8 +3097,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3201 3097
3202 if ((control_1000 & ADVERTISE_1000FULL) && 3098 if ((control_1000 & ADVERTISE_1000FULL) &&
3203 (status_1000 & LPA_1000FULL)) { 3099 (status_1000 & LPA_1000FULL)) {
3204 dprintk(KERN_DEBUG "%s: nv_update_linkspeed: GBit ethernet detected.\n",
3205 dev->name);
3206 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000; 3100 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_1000;
3207 newdup = 1; 3101 newdup = 1;
3208 goto set_speed; 3102 goto set_speed;
@@ -3224,7 +3118,6 @@ static int nv_update_linkspeed(struct net_device *dev)
3224 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3118 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3225 newdup = 0; 3119 newdup = 0;
3226 } else { 3120 } else {
3227 dprintk(KERN_DEBUG "%s: bad ability %04x - falling back to 10HD.\n", dev->name, adv_lpa);
3228 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10; 3121 newls = NVREG_LINKSPEED_FORCE|NVREG_LINKSPEED_10;
3229 newdup = 0; 3122 newdup = 0;
3230 } 3123 }
@@ -3233,9 +3126,6 @@ set_speed:
3233 if (np->duplex == newdup && np->linkspeed == newls) 3126 if (np->duplex == newdup && np->linkspeed == newls)
3234 return retval; 3127 return retval;
3235 3128
3236 dprintk(KERN_INFO "%s: changing link setting from %d/%d to %d/%d.\n",
3237 dev->name, np->linkspeed, np->duplex, newls, newdup);
3238
3239 np->duplex = newdup; 3129 np->duplex = newdup;
3240 np->linkspeed = newls; 3130 np->linkspeed = newls;
3241 3131
@@ -3302,7 +3192,7 @@ set_speed:
3302 } 3192 }
3303 writel(txreg, base + NvRegTxWatermark); 3193 writel(txreg, base + NvRegTxWatermark);
3304 3194
3305 writel(NVREG_MISC1_FORCE | ( np->duplex ? 0 : NVREG_MISC1_HD), 3195 writel(NVREG_MISC1_FORCE | (np->duplex ? 0 : NVREG_MISC1_HD),
3306 base + NvRegMisc1); 3196 base + NvRegMisc1);
3307 pci_push(base); 3197 pci_push(base);
3308 writel(np->linkspeed, base + NvRegLinkSpeed); 3198 writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -3312,8 +3202,8 @@ set_speed:
3312 /* setup pause frame */ 3202 /* setup pause frame */
3313 if (np->duplex != 0) { 3203 if (np->duplex != 0) {
3314 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) { 3204 if (np->autoneg && np->pause_flags & NV_PAUSEFRAME_AUTONEG) {
3315 adv_pause = adv & (ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM); 3205 adv_pause = adv & (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
3316 lpa_pause = lpa & (LPA_PAUSE_CAP| LPA_PAUSE_ASYM); 3206 lpa_pause = lpa & (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
3317 3207
3318 switch (adv_pause) { 3208 switch (adv_pause) {
3319 case ADVERTISE_PAUSE_CAP: 3209 case ADVERTISE_PAUSE_CAP:
@@ -3324,22 +3214,17 @@ set_speed:
3324 } 3214 }
3325 break; 3215 break;
3326 case ADVERTISE_PAUSE_ASYM: 3216 case ADVERTISE_PAUSE_ASYM:
3327 if (lpa_pause == (LPA_PAUSE_CAP| LPA_PAUSE_ASYM)) 3217 if (lpa_pause == (LPA_PAUSE_CAP | LPA_PAUSE_ASYM))
3328 {
3329 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3218 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3330 }
3331 break; 3219 break;
3332 case ADVERTISE_PAUSE_CAP| ADVERTISE_PAUSE_ASYM: 3220 case ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM:
3333 if (lpa_pause & LPA_PAUSE_CAP) 3221 if (lpa_pause & LPA_PAUSE_CAP) {
3334 {
3335 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3222 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3336 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ) 3223 if (np->pause_flags & NV_PAUSEFRAME_TX_REQ)
3337 pause_flags |= NV_PAUSEFRAME_TX_ENABLE; 3224 pause_flags |= NV_PAUSEFRAME_TX_ENABLE;
3338 } 3225 }
3339 if (lpa_pause == LPA_PAUSE_ASYM) 3226 if (lpa_pause == LPA_PAUSE_ASYM)
3340 {
3341 pause_flags |= NV_PAUSEFRAME_RX_ENABLE; 3227 pause_flags |= NV_PAUSEFRAME_RX_ENABLE;
3342 }
3343 break; 3228 break;
3344 } 3229 }
3345 } else { 3230 } else {
@@ -3361,14 +3246,14 @@ static void nv_linkchange(struct net_device *dev)
3361 if (nv_update_linkspeed(dev)) { 3246 if (nv_update_linkspeed(dev)) {
3362 if (!netif_carrier_ok(dev)) { 3247 if (!netif_carrier_ok(dev)) {
3363 netif_carrier_on(dev); 3248 netif_carrier_on(dev);
3364 printk(KERN_INFO "%s: link up.\n", dev->name); 3249 netdev_info(dev, "link up\n");
3365 nv_txrx_gate(dev, false); 3250 nv_txrx_gate(dev, false);
3366 nv_start_rx(dev); 3251 nv_start_rx(dev);
3367 } 3252 }
3368 } else { 3253 } else {
3369 if (netif_carrier_ok(dev)) { 3254 if (netif_carrier_ok(dev)) {
3370 netif_carrier_off(dev); 3255 netif_carrier_off(dev);
3371 printk(KERN_INFO "%s: link down.\n", dev->name); 3256 netdev_info(dev, "link down\n");
3372 nv_txrx_gate(dev, true); 3257 nv_txrx_gate(dev, true);
3373 nv_stop_rx(dev); 3258 nv_stop_rx(dev);
3374 } 3259 }
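
For reference, the conversions in this hunk swap printk(KERN_INFO "%s: ...", dev->name, ...) for the netdev_* message helpers from <linux/netdevice.h>, which add the driver and interface prefix themselves. A minimal sketch of the pattern, illustrative only (example_report_link() is a made-up function, not part of forcedeth.c):

	#include <linux/kernel.h>
	#include <linux/netdevice.h>

	/* Illustrative sketch only; not from forcedeth.c. */
	static void example_report_link(struct net_device *dev, bool up)
	{
		/* old style: the caller formats the interface name by hand */
		printk(KERN_INFO "%s: link %s.\n", dev->name, up ? "up" : "down");

		/* new style: netdev_info() adds the driver/interface prefix itself */
		netdev_info(dev, "link %s\n", up ? "up" : "down");

		/* debug output goes through netdev_dbg(), which is compiled out
		 * unless DEBUG or dynamic debug is enabled for this file */
		netdev_dbg(dev, "carrier change handled\n");
	}
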
@@ -3382,11 +3267,9 @@ static void nv_link_irq(struct net_device *dev)
3382 3267
3383 miistat = readl(base + NvRegMIIStatus); 3268 miistat = readl(base + NvRegMIIStatus);
3384 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus); 3269 writel(NVREG_MIISTAT_LINKCHANGE, base + NvRegMIIStatus);
3385 dprintk(KERN_INFO "%s: link change irq, status 0x%x.\n", dev->name, miistat);
3386 3270
3387 if (miistat & (NVREG_MIISTAT_LINKCHANGE)) 3271 if (miistat & (NVREG_MIISTAT_LINKCHANGE))
3388 nv_linkchange(dev); 3272 nv_linkchange(dev);
3389 dprintk(KERN_DEBUG "%s: link change notification done.\n", dev->name);
3390} 3273}
3391 3274
3392static void nv_msi_workaround(struct fe_priv *np) 3275static void nv_msi_workaround(struct fe_priv *np)
@@ -3437,8 +3320,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3437 struct fe_priv *np = netdev_priv(dev); 3320 struct fe_priv *np = netdev_priv(dev);
3438 u8 __iomem *base = get_hwbase(dev); 3321 u8 __iomem *base = get_hwbase(dev);
3439 3322
3440 dprintk(KERN_DEBUG "%s: nv_nic_irq\n", dev->name);
3441
3442 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3323 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3443 np->events = readl(base + NvRegIrqStatus); 3324 np->events = readl(base + NvRegIrqStatus);
3444 writel(np->events, base + NvRegIrqStatus); 3325 writel(np->events, base + NvRegIrqStatus);
@@ -3446,7 +3327,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3446 np->events = readl(base + NvRegMSIXIrqStatus); 3327 np->events = readl(base + NvRegMSIXIrqStatus);
3447 writel(np->events, base + NvRegMSIXIrqStatus); 3328 writel(np->events, base + NvRegMSIXIrqStatus);
3448 } 3329 }
3449 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3450 if (!(np->events & np->irqmask)) 3330 if (!(np->events & np->irqmask))
3451 return IRQ_NONE; 3331 return IRQ_NONE;
3452 3332
@@ -3460,8 +3340,6 @@ static irqreturn_t nv_nic_irq(int foo, void *data)
3460 __napi_schedule(&np->napi); 3340 __napi_schedule(&np->napi);
3461 } 3341 }
3462 3342
3463 dprintk(KERN_DEBUG "%s: nv_nic_irq completed\n", dev->name);
3464
3465 return IRQ_HANDLED; 3343 return IRQ_HANDLED;
3466} 3344}
3467 3345
@@ -3476,8 +3354,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3476 struct fe_priv *np = netdev_priv(dev); 3354 struct fe_priv *np = netdev_priv(dev);
3477 u8 __iomem *base = get_hwbase(dev); 3355 u8 __iomem *base = get_hwbase(dev);
3478 3356
3479 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized\n", dev->name);
3480
3481 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3357 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3482 np->events = readl(base + NvRegIrqStatus); 3358 np->events = readl(base + NvRegIrqStatus);
3483 writel(np->events, base + NvRegIrqStatus); 3359 writel(np->events, base + NvRegIrqStatus);
@@ -3485,7 +3361,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3485 np->events = readl(base + NvRegMSIXIrqStatus); 3361 np->events = readl(base + NvRegMSIXIrqStatus);
3486 writel(np->events, base + NvRegMSIXIrqStatus); 3362 writel(np->events, base + NvRegMSIXIrqStatus);
3487 } 3363 }
3488 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, np->events);
3489 if (!(np->events & np->irqmask)) 3364 if (!(np->events & np->irqmask))
3490 return IRQ_NONE; 3365 return IRQ_NONE;
3491 3366
@@ -3498,7 +3373,6 @@ static irqreturn_t nv_nic_irq_optimized(int foo, void *data)
3498 writel(0, base + NvRegIrqMask); 3373 writel(0, base + NvRegIrqMask);
3499 __napi_schedule(&np->napi); 3374 __napi_schedule(&np->napi);
3500 } 3375 }
3501 dprintk(KERN_DEBUG "%s: nv_nic_irq_optimized completed\n", dev->name);
3502 3376
3503 return IRQ_HANDLED; 3377 return IRQ_HANDLED;
3504} 3378}
@@ -3512,12 +3386,9 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3512 int i; 3386 int i;
3513 unsigned long flags; 3387 unsigned long flags;
3514 3388
3515 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx\n", dev->name);
3516
3517 for (i=0; ; i++) { 3389 for (i = 0;; i++) {
3518 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL; 3390 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_TX_ALL;
3519 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus); 3391 writel(NVREG_IRQ_TX_ALL, base + NvRegMSIXIrqStatus);
3520 dprintk(KERN_DEBUG "%s: tx irq: %08x\n", dev->name, events);
3521 if (!(events & np->irqmask)) 3392 if (!(events & np->irqmask))
3522 break; 3393 break;
3523 3394
@@ -3536,12 +3407,12 @@ static irqreturn_t nv_nic_irq_tx(int foo, void *data)
3536 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3407 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3537 } 3408 }
3538 spin_unlock_irqrestore(&np->lock, flags); 3409 spin_unlock_irqrestore(&np->lock, flags);
3539 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_tx.\n", dev->name, i); 3410 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3411 __func__, i);
3540 break; 3412 break;
3541 } 3413 }
3542 3414
3543 } 3415 }
3544 dprintk(KERN_DEBUG "%s: nv_nic_irq_tx completed\n", dev->name);
3545 3416
3546 return IRQ_RETVAL(i); 3417 return IRQ_RETVAL(i);
3547} 3418}
@@ -3553,7 +3424,7 @@ static int nv_napi_poll(struct napi_struct *napi, int budget)
3553 u8 __iomem *base = get_hwbase(dev); 3424 u8 __iomem *base = get_hwbase(dev);
3554 unsigned long flags; 3425 unsigned long flags;
3555 int retcode; 3426 int retcode;
3556 int rx_count, tx_work=0, rx_work=0; 3427 int rx_count, tx_work = 0, rx_work = 0;
3557 3428
3558 do { 3429 do {
3559 if (!nv_optimized(np)) { 3430 if (!nv_optimized(np)) {
@@ -3626,12 +3497,9 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3626 int i; 3497 int i;
3627 unsigned long flags; 3498 unsigned long flags;
3628 3499
3629 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx\n", dev->name);
3630
3631 for (i=0; ; i++) { 3500 for (i = 0;; i++) {
3632 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL; 3501 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_RX_ALL;
3633 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus); 3502 writel(NVREG_IRQ_RX_ALL, base + NvRegMSIXIrqStatus);
3634 dprintk(KERN_DEBUG "%s: rx irq: %08x\n", dev->name, events);
3635 if (!(events & np->irqmask)) 3503 if (!(events & np->irqmask))
3636 break; 3504 break;
3637 3505
@@ -3655,11 +3523,11 @@ static irqreturn_t nv_nic_irq_rx(int foo, void *data)
3655 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3523 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3656 } 3524 }
3657 spin_unlock_irqrestore(&np->lock, flags); 3525 spin_unlock_irqrestore(&np->lock, flags);
3658 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_rx.\n", dev->name, i); 3526 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3527 __func__, i);
3659 break; 3528 break;
3660 } 3529 }
3661 } 3530 }
3662 dprintk(KERN_DEBUG "%s: nv_nic_irq_rx completed\n", dev->name);
3663 3531
3664 return IRQ_RETVAL(i); 3532 return IRQ_RETVAL(i);
3665} 3533}
@@ -3673,12 +3541,9 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3673 int i; 3541 int i;
3674 unsigned long flags; 3542 unsigned long flags;
3675 3543
3676 dprintk(KERN_DEBUG "%s: nv_nic_irq_other\n", dev->name);
3677
3678 for (i=0; ; i++) { 3544 for (i = 0;; i++) {
3679 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER; 3545 events = readl(base + NvRegMSIXIrqStatus) & NVREG_IRQ_OTHER;
3680 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus); 3546 writel(NVREG_IRQ_OTHER, base + NvRegMSIXIrqStatus);
3681 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3682 if (!(events & np->irqmask)) 3547 if (!(events & np->irqmask))
3683 break; 3548 break;
3684 3549
@@ -3723,12 +3588,12 @@ static irqreturn_t nv_nic_irq_other(int foo, void *data)
3723 mod_timer(&np->nic_poll, jiffies + POLL_WAIT); 3588 mod_timer(&np->nic_poll, jiffies + POLL_WAIT);
3724 } 3589 }
3725 spin_unlock_irqrestore(&np->lock, flags); 3590 spin_unlock_irqrestore(&np->lock, flags);
3726 printk(KERN_DEBUG "%s: too many iterations (%d) in nv_nic_irq_other.\n", dev->name, i); 3591 netdev_dbg(dev, "%s: too many iterations (%d)\n",
3592 __func__, i);
3727 break; 3593 break;
3728 } 3594 }
3729 3595
3730 } 3596 }
3731 dprintk(KERN_DEBUG "%s: nv_nic_irq_other completed\n", dev->name);
3732 3597
3733 return IRQ_RETVAL(i); 3598 return IRQ_RETVAL(i);
3734} 3599}
@@ -3740,8 +3605,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3740 u8 __iomem *base = get_hwbase(dev); 3605 u8 __iomem *base = get_hwbase(dev);
3741 u32 events; 3606 u32 events;
3742 3607
3743 dprintk(KERN_DEBUG "%s: nv_nic_irq_test\n", dev->name);
3744
3745 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 3608 if (!(np->msi_flags & NV_MSI_X_ENABLED)) {
3746 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK; 3609 events = readl(base + NvRegIrqStatus) & NVREG_IRQSTAT_MASK;
3747 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus); 3610 writel(NVREG_IRQ_TIMER, base + NvRegIrqStatus);
@@ -3750,7 +3613,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3750 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus); 3613 writel(NVREG_IRQ_TIMER, base + NvRegMSIXIrqStatus);
3751 } 3614 }
3752 pci_push(base); 3615 pci_push(base);
3753 dprintk(KERN_DEBUG "%s: irq: %08x\n", dev->name, events);
3754 if (!(events & NVREG_IRQ_TIMER)) 3616 if (!(events & NVREG_IRQ_TIMER))
3755 return IRQ_RETVAL(0); 3617 return IRQ_RETVAL(0);
3756 3618
@@ -3760,8 +3622,6 @@ static irqreturn_t nv_nic_irq_test(int foo, void *data)
3760 np->intr_test = 1; 3622 np->intr_test = 1;
3761 spin_unlock(&np->lock); 3623 spin_unlock(&np->lock);
3762 3624
3763 dprintk(KERN_DEBUG "%s: nv_nic_irq_test completed\n", dev->name);
3764
3765 return IRQ_RETVAL(1); 3625 return IRQ_RETVAL(1);
3766} 3626}
3767 3627
@@ -3776,17 +3636,15 @@ static void set_msix_vector_map(struct net_device *dev, u32 vector, u32 irqmask)
3776 * the remaining 8 interrupts. 3636 * the remaining 8 interrupts.
3777 */ 3637 */
3778 for (i = 0; i < 8; i++) { 3638 for (i = 0; i < 8; i++) {
3779 if ((irqmask >> i) & 0x1) { 3639 if ((irqmask >> i) & 0x1)
3780 msixmap |= vector << (i << 2); 3640 msixmap |= vector << (i << 2);
3781 }
3782 } 3641 }
3783 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0); 3642 writel(readl(base + NvRegMSIXMap0) | msixmap, base + NvRegMSIXMap0);
3784 3643
3785 msixmap = 0; 3644 msixmap = 0;
3786 for (i = 0; i < 8; i++) { 3645 for (i = 0; i < 8; i++) {
3787 if ((irqmask >> (i + 8)) & 0x1) { 3646 if ((irqmask >> (i + 8)) & 0x1)
3788 msixmap |= vector << (i << 2); 3647 msixmap |= vector << (i << 2);
3789 }
3790 } 3648 }
3791 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1); 3649 writel(readl(base + NvRegMSIXMap1) | msixmap, base + NvRegMSIXMap1);
3792} 3650}
@@ -3809,17 +3667,19 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3809 } 3667 }
3810 3668
3811 if (np->msi_flags & NV_MSI_X_CAPABLE) { 3669 if (np->msi_flags & NV_MSI_X_CAPABLE) {
3812 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3670 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3813 np->msi_x_entry[i].entry = i; 3671 np->msi_x_entry[i].entry = i;
3814 } 3672 ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK));
3815 if ((ret = pci_enable_msix(np->pci_dev, np->msi_x_entry, (np->msi_flags & NV_MSI_X_VECTORS_MASK))) == 0) { 3673 if (ret == 0) {
3816 np->msi_flags |= NV_MSI_X_ENABLED; 3674 np->msi_flags |= NV_MSI_X_ENABLED;
3817 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) { 3675 if (optimization_mode == NV_OPTIMIZATION_MODE_THROUGHPUT && !intr_test) {
3818 /* Request irq for rx handling */ 3676 /* Request irq for rx handling */
3819 sprintf(np->name_rx, "%s-rx", dev->name); 3677 sprintf(np->name_rx, "%s-rx", dev->name);
3820 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector, 3678 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_RX].vector,
3821 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) { 3679 nv_nic_irq_rx, IRQF_SHARED, np->name_rx, dev) != 0) {
3822 printk(KERN_INFO "forcedeth: request_irq failed for rx %d\n", ret); 3680 netdev_info(dev,
3681 "request_irq failed for rx %d\n",
3682 ret);
3823 pci_disable_msix(np->pci_dev); 3683 pci_disable_msix(np->pci_dev);
3824 np->msi_flags &= ~NV_MSI_X_ENABLED; 3684 np->msi_flags &= ~NV_MSI_X_ENABLED;
3825 goto out_err; 3685 goto out_err;
@@ -3828,7 +3688,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3828 sprintf(np->name_tx, "%s-tx", dev->name); 3688 sprintf(np->name_tx, "%s-tx", dev->name);
3829 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector, 3689 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_TX].vector,
3830 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) { 3690 nv_nic_irq_tx, IRQF_SHARED, np->name_tx, dev) != 0) {
3831 printk(KERN_INFO "forcedeth: request_irq failed for tx %d\n", ret); 3691 netdev_info(dev,
3692 "request_irq failed for tx %d\n",
3693 ret);
3832 pci_disable_msix(np->pci_dev); 3694 pci_disable_msix(np->pci_dev);
3833 np->msi_flags &= ~NV_MSI_X_ENABLED; 3695 np->msi_flags &= ~NV_MSI_X_ENABLED;
3834 goto out_free_rx; 3696 goto out_free_rx;
@@ -3837,7 +3699,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3837 sprintf(np->name_other, "%s-other", dev->name); 3699 sprintf(np->name_other, "%s-other", dev->name);
3838 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector, 3700 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_OTHER].vector,
3839 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) { 3701 nv_nic_irq_other, IRQF_SHARED, np->name_other, dev) != 0) {
3840 printk(KERN_INFO "forcedeth: request_irq failed for link %d\n", ret); 3702 netdev_info(dev,
3703 "request_irq failed for link %d\n",
3704 ret);
3841 pci_disable_msix(np->pci_dev); 3705 pci_disable_msix(np->pci_dev);
3842 np->msi_flags &= ~NV_MSI_X_ENABLED; 3706 np->msi_flags &= ~NV_MSI_X_ENABLED;
3843 goto out_free_tx; 3707 goto out_free_tx;
@@ -3851,7 +3715,9 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3851 } else { 3715 } else {
3852 /* Request irq for all interrupts */ 3716 /* Request irq for all interrupts */
3853 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) { 3717 if (request_irq(np->msi_x_entry[NV_MSI_X_VECTOR_ALL].vector, handler, IRQF_SHARED, dev->name, dev) != 0) {
3854 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3718 netdev_info(dev,
3719 "request_irq failed %d\n",
3720 ret);
3855 pci_disable_msix(np->pci_dev); 3721 pci_disable_msix(np->pci_dev);
3856 np->msi_flags &= ~NV_MSI_X_ENABLED; 3722 np->msi_flags &= ~NV_MSI_X_ENABLED;
3857 goto out_err; 3723 goto out_err;
@@ -3864,11 +3730,13 @@ static int nv_request_irq(struct net_device *dev, int intr_test)
3864 } 3730 }
3865 } 3731 }
3866 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) { 3732 if (ret != 0 && np->msi_flags & NV_MSI_CAPABLE) {
3867 if ((ret = pci_enable_msi(np->pci_dev)) == 0) { 3733 ret = pci_enable_msi(np->pci_dev);
3734 if (ret == 0) {
3868 np->msi_flags |= NV_MSI_ENABLED; 3735 np->msi_flags |= NV_MSI_ENABLED;
3869 dev->irq = np->pci_dev->irq; 3736 dev->irq = np->pci_dev->irq;
3870 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) { 3737 if (request_irq(np->pci_dev->irq, handler, IRQF_SHARED, dev->name, dev) != 0) {
3871 printk(KERN_INFO "forcedeth: request_irq failed %d\n", ret); 3738 netdev_info(dev, "request_irq failed %d\n",
3739 ret);
3872 pci_disable_msi(np->pci_dev); 3740 pci_disable_msi(np->pci_dev);
3873 np->msi_flags &= ~NV_MSI_ENABLED; 3741 np->msi_flags &= ~NV_MSI_ENABLED;
3874 dev->irq = np->pci_dev->irq; 3742 dev->irq = np->pci_dev->irq;
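
The pci_enable_msix()/pci_enable_msi() changes above also show a recurring cleanup in this patch: assignments are hoisted out of if-conditions, so the return code is assigned in its own statement and then tested. A hedged sketch of the same idiom (enable_example_msi() is a made-up helper, not a forcedeth function; pdev is assumed to be a valid, enabled PCI device):

	#include <linux/pci.h>

	/* Illustrative sketch only; not from forcedeth.c. */
	static int enable_example_msi(struct pci_dev *pdev)
	{
		int err;

		/* old form, flagged by checkpatch:
		 *	if ((err = pci_enable_msi(pdev)) == 0) { ... }
		 * new form keeps the assignment as its own statement: */
		err = pci_enable_msi(pdev);
		if (err == 0)
			return 0;	/* MSI enabled */

		return err;		/* caller falls back to legacy INTx */
	}
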
@@ -3903,9 +3771,8 @@ static void nv_free_irq(struct net_device *dev)
3903 int i; 3771 int i;
3904 3772
3905 if (np->msi_flags & NV_MSI_X_ENABLED) { 3773 if (np->msi_flags & NV_MSI_X_ENABLED) {
3906 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++) { 3774 for (i = 0; i < (np->msi_flags & NV_MSI_X_VECTORS_MASK); i++)
3907 free_irq(np->msi_x_entry[i].vector, dev); 3775 free_irq(np->msi_x_entry[i].vector, dev);
3908 }
3909 pci_disable_msix(np->pci_dev); 3776 pci_disable_msix(np->pci_dev);
3910 np->msi_flags &= ~NV_MSI_X_ENABLED; 3777 np->msi_flags &= ~NV_MSI_X_ENABLED;
3911 } else { 3778 } else {
@@ -3954,7 +3821,7 @@ static void nv_do_nic_poll(unsigned long data)
3954 3821
3955 if (np->recover_error) { 3822 if (np->recover_error) {
3956 np->recover_error = 0; 3823 np->recover_error = 0;
3957 printk(KERN_INFO "%s: MAC in recoverable error state\n", dev->name); 3824 netdev_info(dev, "MAC in recoverable error state\n");
3958 if (netif_running(dev)) { 3825 if (netif_running(dev)) {
3959 netif_tx_lock_bh(dev); 3826 netif_tx_lock_bh(dev);
3960 netif_addr_lock(dev); 3827 netif_addr_lock(dev);
@@ -3975,7 +3842,7 @@ static void nv_do_nic_poll(unsigned long data)
3975 /* reinit nic view of the rx queue */ 3842 /* reinit nic view of the rx queue */
3976 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 3843 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
3977 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 3844 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
3978 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 3845 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
3979 base + NvRegRingSizes); 3846 base + NvRegRingSizes);
3980 pci_push(base); 3847 pci_push(base);
3981 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 3848 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4082,6 +3949,7 @@ static int nv_set_wol(struct net_device *dev, struct ethtool_wolinfo *wolinfo)
4082 writel(flags, base + NvRegWakeUpFlags); 3949 writel(flags, base + NvRegWakeUpFlags);
4083 spin_unlock_irq(&np->lock); 3950 spin_unlock_irq(&np->lock);
4084 } 3951 }
3952 device_set_wakeup_enable(&np->pci_dev->dev, np->wolenabled);
4085 return 0; 3953 return 0;
4086} 3954}
4087 3955
@@ -4105,7 +3973,7 @@ static int nv_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4105 } 3973 }
4106 3974
4107 if (netif_carrier_ok(dev)) { 3975 if (netif_carrier_ok(dev)) {
4108 switch(np->linkspeed & (NVREG_LINKSPEED_MASK)) { 3976 switch (np->linkspeed & (NVREG_LINKSPEED_MASK)) {
4109 case NVREG_LINKSPEED_10: 3977 case NVREG_LINKSPEED_10:
4110 ecmd->speed = SPEED_10; 3978 ecmd->speed = SPEED_10;
4111 break; 3979 break;
@@ -4250,14 +4118,14 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4250 } 4118 }
4251 4119
4252 if (netif_running(dev)) 4120 if (netif_running(dev))
4253 printk(KERN_INFO "%s: link down.\n", dev->name); 4121 netdev_info(dev, "link down\n");
4254 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4122 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4255 if (np->phy_model == PHY_MODEL_MARVELL_E3016) { 4123 if (np->phy_model == PHY_MODEL_MARVELL_E3016) {
4256 bmcr |= BMCR_ANENABLE; 4124 bmcr |= BMCR_ANENABLE;
4257 /* reset the phy in order for settings to stick, 4125 /* reset the phy in order for settings to stick,
4258 * and cause autoneg to start */ 4126 * and cause autoneg to start */
4259 if (phy_reset(dev, bmcr)) { 4127 if (phy_reset(dev, bmcr)) {
4260 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4128 netdev_info(dev, "phy reset failed\n");
4261 return -EINVAL; 4129 return -EINVAL;
4262 } 4130 }
4263 } else { 4131 } else {
@@ -4306,7 +4174,7 @@ static int nv_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
4306 if (np->phy_oui == PHY_OUI_MARVELL) { 4174 if (np->phy_oui == PHY_OUI_MARVELL) {
4307 /* reset the phy in order for forced mode settings to stick */ 4175 /* reset the phy in order for forced mode settings to stick */
4308 if (phy_reset(dev, bmcr)) { 4176 if (phy_reset(dev, bmcr)) {
4309 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4177 netdev_info(dev, "phy reset failed\n");
4310 return -EINVAL; 4178 return -EINVAL;
4311 } 4179 }
4312 } else { 4180 } else {
@@ -4344,7 +4212,7 @@ static void nv_get_regs(struct net_device *dev, struct ethtool_regs *regs, void
4344 4212
4345 regs->version = FORCEDETH_REGS_VER; 4213 regs->version = FORCEDETH_REGS_VER;
4346 spin_lock_irq(&np->lock); 4214 spin_lock_irq(&np->lock);
4347 for (i = 0;i <= np->register_size/sizeof(u32); i++) 4215 for (i = 0; i <= np->register_size/sizeof(u32); i++)
4348 rbuf[i] = readl(base + i*sizeof(u32)); 4216 rbuf[i] = readl(base + i*sizeof(u32));
4349 spin_unlock_irq(&np->lock); 4217 spin_unlock_irq(&np->lock);
4350} 4218}
@@ -4368,7 +4236,7 @@ static int nv_nway_reset(struct net_device *dev)
4368 spin_unlock(&np->lock); 4236 spin_unlock(&np->lock);
4369 netif_addr_unlock(dev); 4237 netif_addr_unlock(dev);
4370 netif_tx_unlock_bh(dev); 4238 netif_tx_unlock_bh(dev);
4371 printk(KERN_INFO "%s: link down.\n", dev->name); 4239 netdev_info(dev, "link down\n");
4372 } 4240 }
4373 4241
4374 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4242 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
@@ -4376,7 +4244,7 @@ static int nv_nway_reset(struct net_device *dev)
4376 bmcr |= BMCR_ANENABLE; 4244 bmcr |= BMCR_ANENABLE;
4377 /* reset the phy in order for settings to stick*/ 4245 /* reset the phy in order for settings to stick*/
4378 if (phy_reset(dev, bmcr)) { 4246 if (phy_reset(dev, bmcr)) {
4379 printk(KERN_INFO "%s: phy reset failed\n", dev->name); 4247 netdev_info(dev, "phy reset failed\n");
4380 return -EINVAL; 4248 return -EINVAL;
4381 } 4249 }
4382 } else { 4250 } else {
@@ -4464,10 +4332,9 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4464 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending), 4332 pci_free_consistent(np->pci_dev, sizeof(struct ring_desc_ex) * (ring->rx_pending + ring->tx_pending),
4465 rxtx_ring, ring_addr); 4333 rxtx_ring, ring_addr);
4466 } 4334 }
4467 if (rx_skbuff) 4335
4468 kfree(rx_skbuff); 4336 kfree(rx_skbuff);
4469 if (tx_skbuff) 4337 kfree(tx_skbuff);
4470 kfree(tx_skbuff);
4471 goto exit; 4338 goto exit;
4472 } 4339 }
4473 4340
@@ -4491,14 +4358,14 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4491 np->tx_ring_size = ring->tx_pending; 4358 np->tx_ring_size = ring->tx_pending;
4492 4359
4493 if (!nv_optimized(np)) { 4360 if (!nv_optimized(np)) {
4494 np->rx_ring.orig = (struct ring_desc*)rxtx_ring; 4361 np->rx_ring.orig = (struct ring_desc *)rxtx_ring;
4495 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size]; 4362 np->tx_ring.orig = &np->rx_ring.orig[np->rx_ring_size];
4496 } else { 4363 } else {
4497 np->rx_ring.ex = (struct ring_desc_ex*)rxtx_ring; 4364 np->rx_ring.ex = (struct ring_desc_ex *)rxtx_ring;
4498 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size]; 4365 np->tx_ring.ex = &np->rx_ring.ex[np->rx_ring_size];
4499 } 4366 }
4500 np->rx_skb = (struct nv_skb_map*)rx_skbuff; 4367 np->rx_skb = (struct nv_skb_map *)rx_skbuff;
4501 np->tx_skb = (struct nv_skb_map*)tx_skbuff; 4368 np->tx_skb = (struct nv_skb_map *)tx_skbuff;
4502 np->ring_addr = ring_addr; 4369 np->ring_addr = ring_addr;
4503 4370
4504 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size); 4371 memset(np->rx_skb, 0, sizeof(struct nv_skb_map) * np->rx_ring_size);
@@ -4515,7 +4382,7 @@ static int nv_set_ringparam(struct net_device *dev, struct ethtool_ringparam* ri
4515 /* reinit nic view of the queues */ 4382 /* reinit nic view of the queues */
4516 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4383 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4517 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4384 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4518 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4385 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4519 base + NvRegRingSizes); 4386 base + NvRegRingSizes);
4520 pci_push(base); 4387 pci_push(base);
4521 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4388 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -4550,12 +4417,11 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4550 4417
4551 if ((!np->autoneg && np->duplex == 0) || 4418 if ((!np->autoneg && np->duplex == 0) ||
4552 (np->autoneg && !pause->autoneg && np->duplex == 0)) { 4419 (np->autoneg && !pause->autoneg && np->duplex == 0)) {
4553 printk(KERN_INFO "%s: can not set pause settings when forced link is in half duplex.\n", 4420 netdev_info(dev, "can not set pause settings when forced link is in half duplex\n");
4554 dev->name);
4555 return -EINVAL; 4421 return -EINVAL;
4556 } 4422 }
4557 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) { 4423 if (pause->tx_pause && !(np->pause_flags & NV_PAUSEFRAME_TX_CAPABLE)) {
4558 printk(KERN_INFO "%s: hardware does not support tx pause frames.\n", dev->name); 4424 netdev_info(dev, "hardware does not support tx pause frames\n");
4559 return -EINVAL; 4425 return -EINVAL;
4560 } 4426 }
4561 4427
@@ -4590,7 +4456,7 @@ static int nv_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam*
4590 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv); 4456 mii_rw(dev, np->phyaddr, MII_ADVERTISE, adv);
4591 4457
4592 if (netif_running(dev)) 4458 if (netif_running(dev))
4593 printk(KERN_INFO "%s: link down.\n", dev->name); 4459 netdev_info(dev, "link down\n");
4594 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ); 4460 bmcr = mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ);
4595 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); 4461 bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART);
4596 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr); 4462 mii_rw(dev, np->phyaddr, MII_BMCR, bmcr);
@@ -4841,7 +4707,7 @@ static int nv_loopback_test(struct net_device *dev)
4841 /* reinit nic view of the rx queue */ 4707 /* reinit nic view of the rx queue */
4842 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4708 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
4843 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4709 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
4844 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4710 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
4845 base + NvRegRingSizes); 4711 base + NvRegRingSizes);
4846 pci_push(base); 4712 pci_push(base);
4847 4713
@@ -4852,8 +4718,7 @@ static int nv_loopback_test(struct net_device *dev)
4852 pkt_len = ETH_DATA_LEN; 4718 pkt_len = ETH_DATA_LEN;
4853 tx_skb = dev_alloc_skb(pkt_len); 4719 tx_skb = dev_alloc_skb(pkt_len);
4854 if (!tx_skb) { 4720 if (!tx_skb) {
4855 printk(KERN_ERR "dev_alloc_skb() failed during loopback test" 4721 netdev_err(dev, "dev_alloc_skb() failed during loopback test\n");
4856 " of %s\n", dev->name);
4857 ret = 0; 4722 ret = 0;
4858 goto out; 4723 goto out;
4859 } 4724 }
@@ -4893,29 +4758,22 @@ static int nv_loopback_test(struct net_device *dev)
4893 if (flags & NV_RX_ERROR) 4758 if (flags & NV_RX_ERROR)
4894 ret = 0; 4759 ret = 0;
4895 } else { 4760 } else {
4896 if (flags & NV_RX2_ERROR) { 4761 if (flags & NV_RX2_ERROR)
4897 ret = 0; 4762 ret = 0;
4898 }
4899 } 4763 }
4900 4764
4901 if (ret) { 4765 if (ret) {
4902 if (len != pkt_len) { 4766 if (len != pkt_len) {
4903 ret = 0; 4767 ret = 0;
4904 dprintk(KERN_DEBUG "%s: loopback len mismatch %d vs %d\n",
4905 dev->name, len, pkt_len);
4906 } else { 4768 } else {
4907 rx_skb = np->rx_skb[0].skb; 4769 rx_skb = np->rx_skb[0].skb;
4908 for (i = 0; i < pkt_len; i++) { 4770 for (i = 0; i < pkt_len; i++) {
4909 if (rx_skb->data[i] != (u8)(i & 0xff)) { 4771 if (rx_skb->data[i] != (u8)(i & 0xff)) {
4910 ret = 0; 4772 ret = 0;
4911 dprintk(KERN_DEBUG "%s: loopback pattern check failed on byte %d\n",
4912 dev->name, i);
4913 break; 4773 break;
4914 } 4774 }
4915 } 4775 }
4916 } 4776 }
4917 } else {
4918 dprintk(KERN_DEBUG "%s: loopback - did not receive test packet\n", dev->name);
4919 } 4777 }
4920 4778
4921 pci_unmap_single(np->pci_dev, test_dma_addr, 4779 pci_unmap_single(np->pci_dev, test_dma_addr,
@@ -4958,11 +4816,10 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
4958 netif_addr_lock(dev); 4816 netif_addr_lock(dev);
4959 spin_lock_irq(&np->lock); 4817 spin_lock_irq(&np->lock);
4960 nv_disable_hw_interrupts(dev, np->irqmask); 4818 nv_disable_hw_interrupts(dev, np->irqmask);
4961 if (!(np->msi_flags & NV_MSI_X_ENABLED)) { 4819 if (!(np->msi_flags & NV_MSI_X_ENABLED))
4962 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 4820 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
4963 } else { 4821 else
4964 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus); 4822 writel(NVREG_IRQSTAT_MASK, base + NvRegMSIXIrqStatus);
4965 }
4966 /* stop engines */ 4823 /* stop engines */
4967 nv_stop_rxtx(dev); 4824 nv_stop_rxtx(dev);
4968 nv_txrx_reset(dev); 4825 nv_txrx_reset(dev);
@@ -5003,7 +4860,7 @@ static void nv_self_test(struct net_device *dev, struct ethtool_test *test, u64
5003 /* reinit nic view of the rx queue */ 4860 /* reinit nic view of the rx queue */
5004 writel(np->rx_buf_sz, base + NvRegOffloadConfig); 4861 writel(np->rx_buf_sz, base + NvRegOffloadConfig);
5005 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 4862 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5006 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 4863 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5007 base + NvRegRingSizes); 4864 base + NvRegRingSizes);
5008 pci_push(base); 4865 pci_push(base);
5009 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl); 4866 writel(NVREG_TXRXCTL_KICK|np->txrxctl_bits, get_hwbase(dev) + NvRegTxRxControl);
@@ -5106,8 +4963,7 @@ static int nv_mgmt_acquire_sema(struct net_device *dev)
5106 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) { 4963 ((tx_ctrl & NVREG_XMITCTL_MGMT_SEMA_MASK) == NVREG_XMITCTL_MGMT_SEMA_FREE)) {
5107 np->mgmt_sema = 1; 4964 np->mgmt_sema = 1;
5108 return 1; 4965 return 1;
5109 } 4966 } else
5110 else
5111 udelay(50); 4967 udelay(50);
5112 } 4968 }
5113 4969
@@ -5167,8 +5023,6 @@ static int nv_open(struct net_device *dev)
5167 int oom, i; 5023 int oom, i;
5168 u32 low; 5024 u32 low;
5169 5025
5170 dprintk(KERN_DEBUG "nv_open: begin\n");
5171
5172 /* power up phy */ 5026 /* power up phy */
5173 mii_rw(dev, np->phyaddr, MII_BMCR, 5027 mii_rw(dev, np->phyaddr, MII_BMCR,
5174 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN); 5028 mii_rw(dev, np->phyaddr, MII_BMCR, MII_READ) & ~BMCR_PDOWN);
@@ -5204,7 +5058,7 @@ static int nv_open(struct net_device *dev)
5204 5058
5205 /* give hw rings */ 5059 /* give hw rings */
5206 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING); 5060 setup_hw_rings(dev, NV_SETUP_RX_RING | NV_SETUP_TX_RING);
5207 writel( ((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT), 5061 writel(((np->rx_ring_size-1) << NVREG_RINGSZ_RXSHIFT) + ((np->tx_ring_size-1) << NVREG_RINGSZ_TXSHIFT),
5208 base + NvRegRingSizes); 5062 base + NvRegRingSizes);
5209 5063
5210 writel(np->linkspeed, base + NvRegLinkSpeed); 5064 writel(np->linkspeed, base + NvRegLinkSpeed);
@@ -5216,9 +5070,11 @@ static int nv_open(struct net_device *dev)
5216 writel(np->vlanctl_bits, base + NvRegVlanControl); 5070 writel(np->vlanctl_bits, base + NvRegVlanControl);
5217 pci_push(base); 5071 pci_push(base);
5218 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl); 5072 writel(NVREG_TXRXCTL_BIT1|np->txrxctl_bits, base + NvRegTxRxControl);
5219 reg_delay(dev, NvRegUnknownSetupReg5, NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31, 5073 if (reg_delay(dev, NvRegUnknownSetupReg5,
5220 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX, 5074 NVREG_UNKSETUP5_BIT31, NVREG_UNKSETUP5_BIT31,
5221 KERN_INFO "open: SetupReg5, Bit 31 remained off\n"); 5075 NV_SETUP5_DELAY, NV_SETUP5_DELAYMAX))
5076 netdev_info(dev,
5077 "%s: SetupReg5, Bit 31 remained off\n", __func__);
5222 5078
5223 writel(0, base + NvRegMIIMask); 5079 writel(0, base + NvRegMIIMask);
5224 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5080 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
@@ -5251,8 +5107,7 @@ static int nv_open(struct net_device *dev)
5251 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval); 5107 writel(NVREG_POLL_DEFAULT_THROUGHPUT, base + NvRegPollingInterval);
5252 else 5108 else
5253 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval); 5109 writel(NVREG_POLL_DEFAULT_CPU, base + NvRegPollingInterval);
5254 } 5110 } else
5255 else
5256 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval); 5111 writel(poll_interval & 0xFFFF, base + NvRegPollingInterval);
5257 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6); 5112 writel(NVREG_UNKSETUP6_VAL, base + NvRegUnknownSetupReg6);
5258 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING, 5113 writel((np->phyaddr << NVREG_ADAPTCTL_PHYSHIFT)|NVREG_ADAPTCTL_PHYVALID|NVREG_ADAPTCTL_RUNNING,
@@ -5263,7 +5118,7 @@ static int nv_open(struct net_device *dev)
5263 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags); 5118 writel(NVREG_WAKEUPFLAGS_ENABLE , base + NvRegWakeUpFlags);
5264 5119
5265 i = readl(base + NvRegPowerState); 5120 i = readl(base + NvRegPowerState);
5266 if ( (i & NVREG_POWERSTATE_POWEREDUP) == 0) 5121 if ((i & NVREG_POWERSTATE_POWEREDUP) == 0)
5267 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState); 5122 writel(NVREG_POWERSTATE_POWEREDUP|i, base + NvRegPowerState);
5268 5123
5269 pci_push(base); 5124 pci_push(base);
@@ -5276,9 +5131,8 @@ static int nv_open(struct net_device *dev)
5276 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus); 5131 writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
5277 pci_push(base); 5132 pci_push(base);
5278 5133
5279 if (nv_request_irq(dev, 0)) { 5134 if (nv_request_irq(dev, 0))
5280 goto out_drain; 5135 goto out_drain;
5281 }
5282 5136
5283 /* ask for interrupts */ 5137 /* ask for interrupts */
5284 nv_enable_hw_interrupts(dev, np->irqmask); 5138 nv_enable_hw_interrupts(dev, np->irqmask);
@@ -5296,7 +5150,6 @@ static int nv_open(struct net_device *dev)
5296 u32 miistat; 5150 u32 miistat;
5297 miistat = readl(base + NvRegMIIStatus); 5151 miistat = readl(base + NvRegMIIStatus);
5298 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus); 5152 writel(NVREG_MIISTAT_MASK_ALL, base + NvRegMIIStatus);
5299 dprintk(KERN_INFO "startup: got 0x%08x.\n", miistat);
5300 } 5153 }
5301 /* set linkspeed to invalid value, thus force nv_update_linkspeed 5154 /* set linkspeed to invalid value, thus force nv_update_linkspeed
5302 * to init hw */ 5155 * to init hw */
@@ -5309,7 +5162,7 @@ static int nv_open(struct net_device *dev)
5309 if (ret) { 5162 if (ret) {
5310 netif_carrier_on(dev); 5163 netif_carrier_on(dev);
5311 } else { 5164 } else {
5312 printk(KERN_INFO "%s: no link during initialization.\n", dev->name); 5165 netdev_info(dev, "no link during initialization\n");
5313 netif_carrier_off(dev); 5166 netif_carrier_off(dev);
5314 } 5167 }
5315 if (oom) 5168 if (oom)
@@ -5352,7 +5205,6 @@ static int nv_close(struct net_device *dev)
5352 base = get_hwbase(dev); 5205 base = get_hwbase(dev);
5353 nv_disable_hw_interrupts(dev, np->irqmask); 5206 nv_disable_hw_interrupts(dev, np->irqmask);
5354 pci_push(base); 5207 pci_push(base);
5355 dprintk(KERN_INFO "%s: Irqmask is zero again\n", dev->name);
5356 5208
5357 spin_unlock_irq(&np->lock); 5209 spin_unlock_irq(&np->lock);
5358 5210
@@ -5421,8 +5273,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5421 static int printed_version; 5273 static int printed_version;
5422 5274
5423 if (!printed_version++) 5275 if (!printed_version++)
5424 printk(KERN_INFO "%s: Reverse Engineered nForce ethernet" 5276 pr_info("Reverse Engineered nForce ethernet driver. Version %s.\n",
5425 " driver. Version %s.\n", DRV_NAME, FORCEDETH_VERSION); 5277 FORCEDETH_VERSION);
5426 5278
5427 dev = alloc_etherdev(sizeof(struct fe_priv)); 5279 dev = alloc_etherdev(sizeof(struct fe_priv));
5428 err = -ENOMEM; 5280 err = -ENOMEM;
@@ -5465,10 +5317,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5465 err = -EINVAL; 5317 err = -EINVAL;
5466 addr = 0; 5318 addr = 0;
5467 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { 5319 for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
5468 dprintk(KERN_DEBUG "%s: resource %d start %p len %ld flags 0x%08lx.\n",
5469 pci_name(pci_dev), i, (void*)pci_resource_start(pci_dev, i),
5470 pci_resource_len(pci_dev, i),
5471 pci_resource_flags(pci_dev, i));
5472 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM && 5320 if (pci_resource_flags(pci_dev, i) & IORESOURCE_MEM &&
5473 pci_resource_len(pci_dev, i) >= np->register_size) { 5321 pci_resource_len(pci_dev, i) >= np->register_size) {
5474 addr = pci_resource_start(pci_dev, i); 5322 addr = pci_resource_start(pci_dev, i);
@@ -5476,8 +5324,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5476 } 5324 }
5477 } 5325 }
5478 if (i == DEVICE_COUNT_RESOURCE) { 5326 if (i == DEVICE_COUNT_RESOURCE) {
5479 dev_printk(KERN_INFO, &pci_dev->dev, 5327 dev_info(&pci_dev->dev, "Couldn't find register window\n");
5480 "Couldn't find register window\n");
5481 goto out_relreg; 5328 goto out_relreg;
5482 } 5329 }
5483 5330
@@ -5493,13 +5340,13 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5493 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3; 5340 np->txrxctl_bits = NVREG_TXRXCTL_DESC_3;
5494 if (dma_64bit) { 5341 if (dma_64bit) {
5495 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39))) 5342 if (pci_set_dma_mask(pci_dev, DMA_BIT_MASK(39)))
5496 dev_printk(KERN_INFO, &pci_dev->dev, 5343 dev_info(&pci_dev->dev,
5497 "64-bit DMA failed, using 32-bit addressing\n"); 5344 "64-bit DMA failed, using 32-bit addressing\n");
5498 else 5345 else
5499 dev->features |= NETIF_F_HIGHDMA; 5346 dev->features |= NETIF_F_HIGHDMA;
5500 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) { 5347 if (pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(39))) {
5501 dev_printk(KERN_INFO, &pci_dev->dev, 5348 dev_info(&pci_dev->dev,
5502 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n"); 5349 "64-bit DMA (consistent) failed, using 32-bit ring buffers\n");
5503 } 5350 }
5504 } 5351 }
5505 } else if (id->driver_data & DEV_HAS_LARGEDESC) { 5352 } else if (id->driver_data & DEV_HAS_LARGEDESC) {
@@ -5620,7 +5467,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5620 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff; 5467 dev->dev_addr[4] = (np->orig_mac[0] >> 8) & 0xff;
5621 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff; 5468 dev->dev_addr[5] = (np->orig_mac[0] >> 0) & 0xff;
5622 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll); 5469 writel(txreg|NVREG_TRANSMITPOLL_MAC_ADDR_REV, base + NvRegTransmitPoll);
5623 printk(KERN_DEBUG "nv_probe: set workaround bit for reversed mac addr\n"); 5470 dev_dbg(&pci_dev->dev,
5471 "%s: set workaround bit for reversed mac addr\n",
5472 __func__);
5624 } 5473 }
5625 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 5474 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
5626 5475
@@ -5629,28 +5478,21 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5629 * Bad mac address. At least one bios sets the mac address 5478 * Bad mac address. At least one bios sets the mac address
5630 * to 01:23:45:67:89:ab 5479 * to 01:23:45:67:89:ab
5631 */ 5480 */
5632 dev_printk(KERN_ERR, &pci_dev->dev, 5481 dev_err(&pci_dev->dev,
5633 "Invalid Mac address detected: %pM\n", 5482 "Invalid MAC address detected: %pM - Please complain to your hardware vendor.\n",
5634 dev->dev_addr); 5483 dev->dev_addr);
5635 dev_printk(KERN_ERR, &pci_dev->dev,
5636 "Please complain to your hardware vendor. Switching to a random MAC.\n");
5637 random_ether_addr(dev->dev_addr); 5484 random_ether_addr(dev->dev_addr);
5485 dev_err(&pci_dev->dev,
5486 "Using random MAC address: %pM\n", dev->dev_addr);
5638 } 5487 }
5639 5488
5640 dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
5641 pci_name(pci_dev), dev->dev_addr);
5642
5643 /* set mac address */ 5489 /* set mac address */
5644 nv_copy_mac_to_hw(dev); 5490 nv_copy_mac_to_hw(dev);
5645 5491
5646 /* Workaround current PCI init glitch: wakeup bits aren't
5647 * being set from PCI PM capability.
5648 */
5649 device_init_wakeup(&pci_dev->dev, 1);
5650
5651 /* disable WOL */ 5492 /* disable WOL */
5652 writel(0, base + NvRegWakeUpFlags); 5493 writel(0, base + NvRegWakeUpFlags);
5653 np->wolenabled = 0; 5494 np->wolenabled = 0;
5495 device_set_wakeup_enable(&pci_dev->dev, false);
5654 5496
5655 if (id->driver_data & DEV_HAS_POWER_CNTRL) { 5497 if (id->driver_data & DEV_HAS_POWER_CNTRL) {
5656 5498
@@ -5663,16 +5505,15 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5663 writel(powerstate, base + NvRegPowerState2); 5505 writel(powerstate, base + NvRegPowerState2);
5664 } 5506 }
5665 5507
5666 if (np->desc_ver == DESC_VER_1) { 5508 if (np->desc_ver == DESC_VER_1)
5667 np->tx_flags = NV_TX_VALID; 5509 np->tx_flags = NV_TX_VALID;
5668 } else { 5510 else
5669 np->tx_flags = NV_TX2_VALID; 5511 np->tx_flags = NV_TX2_VALID;
5670 }
5671 5512
5672 np->msi_flags = 0; 5513 np->msi_flags = 0;
5673 if ((id->driver_data & DEV_HAS_MSI) && msi) { 5514 if ((id->driver_data & DEV_HAS_MSI) && msi)
5674 np->msi_flags |= NV_MSI_CAPABLE; 5515 np->msi_flags |= NV_MSI_CAPABLE;
5675 } 5516
5676 if ((id->driver_data & DEV_HAS_MSI_X) && msix) { 5517 if ((id->driver_data & DEV_HAS_MSI_X) && msix) {
5677 /* msix has had reported issues when modifying irqmask 5518 /* msix has had reported issues when modifying irqmask
5678 as in the case of napi, therefore, disable for now 5519 as in the case of napi, therefore, disable for now
@@ -5702,11 +5543,9 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5702 if (id->driver_data & DEV_NEED_TIMERIRQ) 5543 if (id->driver_data & DEV_NEED_TIMERIRQ)
5703 np->irqmask |= NVREG_IRQ_TIMER; 5544 np->irqmask |= NVREG_IRQ_TIMER;
5704 if (id->driver_data & DEV_NEED_LINKTIMER) { 5545 if (id->driver_data & DEV_NEED_LINKTIMER) {
5705 dprintk(KERN_INFO "%s: link timer on.\n", pci_name(pci_dev));
5706 np->need_linktimer = 1; 5546 np->need_linktimer = 1;
5707 np->link_timeout = jiffies + LINK_TIMEOUT; 5547 np->link_timeout = jiffies + LINK_TIMEOUT;
5708 } else { 5548 } else {
5709 dprintk(KERN_INFO "%s: link timer off.\n", pci_name(pci_dev));
5710 np->need_linktimer = 0; 5549 np->need_linktimer = 0;
5711 } 5550 }
5712 5551
@@ -5735,19 +5574,14 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5735 nv_mgmt_acquire_sema(dev) && 5574 nv_mgmt_acquire_sema(dev) &&
5736 nv_mgmt_get_version(dev)) { 5575 nv_mgmt_get_version(dev)) {
5737 np->mac_in_use = 1; 5576 np->mac_in_use = 1;
5738 if (np->mgmt_version > 0) { 5577 if (np->mgmt_version > 0)
5739 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE; 5578 np->mac_in_use = readl(base + NvRegMgmtUnitControl) & NVREG_MGMTUNITCONTROL_INUSE;
5740 }
5741 dprintk(KERN_INFO "%s: mgmt unit is running. mac in use %x.\n",
5742 pci_name(pci_dev), np->mac_in_use);
5743 /* management unit setup the phy already? */ 5579 /* management unit setup the phy already? */
5744 if (np->mac_in_use && 5580 if (np->mac_in_use &&
5745 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) == 5581 ((readl(base + NvRegTransmitterControl) & NVREG_XMITCTL_SYNC_MASK) ==
5746 NVREG_XMITCTL_SYNC_PHY_INIT)) { 5582 NVREG_XMITCTL_SYNC_PHY_INIT)) {
5747 /* phy is inited by mgmt unit */ 5583 /* phy is inited by mgmt unit */
5748 phyinitialized = 1; 5584 phyinitialized = 1;
5749 dprintk(KERN_INFO "%s: Phy already initialized by mgmt unit.\n",
5750 pci_name(pci_dev));
5751 } else { 5585 } else {
5752 /* we need to init the phy */ 5586 /* we need to init the phy */
5753 } 5587 }
@@ -5773,8 +5607,6 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5773 np->phy_model = id2 & PHYID2_MODEL_MASK; 5607 np->phy_model = id2 & PHYID2_MODEL_MASK;
5774 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT; 5608 id1 = (id1 & PHYID1_OUI_MASK) << PHYID1_OUI_SHFT;
5775 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT; 5609 id2 = (id2 & PHYID2_OUI_MASK) >> PHYID2_OUI_SHFT;
5776 dprintk(KERN_DEBUG "%s: open: Found PHY %04x:%04x at address %d.\n",
5777 pci_name(pci_dev), id1, id2, phyaddr);
5778 np->phyaddr = phyaddr; 5610 np->phyaddr = phyaddr;
5779 np->phy_oui = id1 | id2; 5611 np->phy_oui = id1 | id2;
5780 5612
@@ -5788,8 +5620,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5788 break; 5620 break;
5789 } 5621 }
5790 if (i == 33) { 5622 if (i == 33) {
5791 dev_printk(KERN_INFO, &pci_dev->dev, 5623 dev_info(&pci_dev->dev, "open: Could not find a valid PHY\n");
5792 "open: Could not find a valid PHY.\n");
5793 goto out_error; 5624 goto out_error;
5794 } 5625 }
5795 5626
@@ -5799,9 +5630,8 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5799 } else { 5630 } else {
5800 /* see if it is a gigabit phy */ 5631 /* see if it is a gigabit phy */
5801 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ); 5632 u32 mii_status = mii_rw(dev, np->phyaddr, MII_BMSR, MII_READ);
5802 if (mii_status & PHY_GIGABIT) { 5633 if (mii_status & PHY_GIGABIT)
5803 np->gigabit = PHY_GIGABIT; 5634 np->gigabit = PHY_GIGABIT;
5804 }
5805 } 5635 }
5806 5636
5807 /* set default link speed settings */ 5637 /* set default link speed settings */
@@ -5811,37 +5641,27 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
5811 5641
5812 err = register_netdev(dev); 5642 err = register_netdev(dev);
5813 if (err) { 5643 if (err) {
5814 dev_printk(KERN_INFO, &pci_dev->dev, 5644 dev_info(&pci_dev->dev, "unable to register netdev: %d\n", err);
5815 "unable to register netdev: %d\n", err);
5816 goto out_error; 5645 goto out_error;
5817 } 5646 }
5818 5647
5819 dev_printk(KERN_INFO, &pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, " 5648 dev_info(&pci_dev->dev, "ifname %s, PHY OUI 0x%x @ %d, addr %pM\n",
5820 "addr %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x\n", 5649 dev->name, np->phy_oui, np->phyaddr, dev->dev_addr);
5821 dev->name, 5650
5822 np->phy_oui, 5651 dev_info(&pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n",
5823 np->phyaddr, 5652 dev->features & NETIF_F_HIGHDMA ? "highdma " : "",
5824 dev->dev_addr[0], 5653 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ?
5825 dev->dev_addr[1], 5654 "csum " : "",
5826 dev->dev_addr[2], 5655 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ?
5827 dev->dev_addr[3], 5656 "vlan " : "",
5828 dev->dev_addr[4], 5657 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5829 dev->dev_addr[5]); 5658 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5830 5659 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5831 dev_printk(KERN_INFO, &pci_dev->dev, "%s%s%s%s%s%s%s%s%s%sdesc-v%u\n", 5660 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5832 dev->features & NETIF_F_HIGHDMA ? "highdma " : "", 5661 np->need_linktimer ? "lnktim " : "",
5833 dev->features & (NETIF_F_IP_CSUM | NETIF_F_SG) ? 5662 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5834 "csum " : "", 5663 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5835 dev->features & (NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_TX) ? 5664 np->desc_ver);
5836 "vlan " : "",
5837 id->driver_data & DEV_HAS_POWER_CNTRL ? "pwrctl " : "",
5838 id->driver_data & DEV_HAS_MGMT_UNIT ? "mgmt " : "",
5839 id->driver_data & DEV_NEED_TIMERIRQ ? "timirq " : "",
5840 np->gigabit == PHY_GIGABIT ? "gbit " : "",
5841 np->need_linktimer ? "lnktim " : "",
5842 np->msi_flags & NV_MSI_CAPABLE ? "msi " : "",
5843 np->msi_flags & NV_MSI_X_CAPABLE ? "msi-x " : "",
5844 np->desc_ver);
5845 5665
5846 return 0; 5666 return 0;
5847 5667
@@ -5923,44 +5743,37 @@ static void __devexit nv_remove(struct pci_dev *pci_dev)
5923} 5743}
5924 5744
5925#ifdef CONFIG_PM 5745#ifdef CONFIG_PM
5926static int nv_suspend(struct pci_dev *pdev, pm_message_t state) 5746static int nv_suspend(struct device *device)
5927{ 5747{
5748 struct pci_dev *pdev = to_pci_dev(device);
5928 struct net_device *dev = pci_get_drvdata(pdev); 5749 struct net_device *dev = pci_get_drvdata(pdev);
5929 struct fe_priv *np = netdev_priv(dev); 5750 struct fe_priv *np = netdev_priv(dev);
5930 u8 __iomem *base = get_hwbase(dev); 5751 u8 __iomem *base = get_hwbase(dev);
5931 int i; 5752 int i;
5932 5753
5933 if (netif_running(dev)) { 5754 if (netif_running(dev)) {
5934 // Gross. 5755 /* Gross. */
5935 nv_close(dev); 5756 nv_close(dev);
5936 } 5757 }
5937 netif_device_detach(dev); 5758 netif_device_detach(dev);
5938 5759
5939 /* save non-pci configuration space */ 5760 /* save non-pci configuration space */
5940 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5761 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5941 np->saved_config_space[i] = readl(base + i*sizeof(u32)); 5762 np->saved_config_space[i] = readl(base + i*sizeof(u32));
5942 5763
5943 pci_save_state(pdev);
5944 pci_enable_wake(pdev, pci_choose_state(pdev, state), np->wolenabled);
5945 pci_disable_device(pdev);
5946 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5947 return 0; 5764 return 0;
5948} 5765}
5949 5766
5950static int nv_resume(struct pci_dev *pdev) 5767static int nv_resume(struct device *device)
5951{ 5768{
5769 struct pci_dev *pdev = to_pci_dev(device);
5952 struct net_device *dev = pci_get_drvdata(pdev); 5770 struct net_device *dev = pci_get_drvdata(pdev);
5953 struct fe_priv *np = netdev_priv(dev); 5771 struct fe_priv *np = netdev_priv(dev);
5954 u8 __iomem *base = get_hwbase(dev); 5772 u8 __iomem *base = get_hwbase(dev);
5955 int i, rc = 0; 5773 int i, rc = 0;
5956 5774
5957 pci_set_power_state(pdev, PCI_D0);
5958 pci_restore_state(pdev);
5959 /* ack any pending wake events, disable PME */
5960 pci_enable_wake(pdev, PCI_D0, 0);
5961
5962 /* restore non-pci configuration space */ 5775 /* restore non-pci configuration space */
5963 for (i = 0;i <= np->register_size/sizeof(u32); i++) 5776 for (i = 0; i <= np->register_size/sizeof(u32); i++)
5964 writel(np->saved_config_space[i], base+i*sizeof(u32)); 5777 writel(np->saved_config_space[i], base+i*sizeof(u32));
5965 5778
5966 if (np->driver_data & DEV_NEED_MSI_FIX) 5779 if (np->driver_data & DEV_NEED_MSI_FIX)
@@ -5977,6 +5790,9 @@ static int nv_resume(struct pci_dev *pdev)
5977 return rc; 5790 return rc;
5978} 5791}
5979 5792
5793static SIMPLE_DEV_PM_OPS(nv_pm_ops, nv_suspend, nv_resume);
5794#define NV_PM_OPS (&nv_pm_ops)
5795
5980static void nv_shutdown(struct pci_dev *pdev) 5796static void nv_shutdown(struct pci_dev *pdev)
5981{ 5797{
5982 struct net_device *dev = pci_get_drvdata(pdev); 5798 struct net_device *dev = pci_get_drvdata(pdev);
@@ -5990,9 +5806,8 @@ static void nv_shutdown(struct pci_dev *pdev)
5990 * If we really go for poweroff, we must not restore the MAC, 5806 * If we really go for poweroff, we must not restore the MAC,
5991 * otherwise the MAC for WOL will be reversed at least on some boards. 5807 * otherwise the MAC for WOL will be reversed at least on some boards.
5992 */ 5808 */
5993 if (system_state != SYSTEM_POWER_OFF) { 5809 if (system_state != SYSTEM_POWER_OFF)
5994 nv_restore_mac_addr(pdev); 5810 nv_restore_mac_addr(pdev);
5995 }
5996 5811
5997 pci_disable_device(pdev); 5812 pci_disable_device(pdev);
5998 /* 5813 /*
@@ -6000,15 +5815,13 @@ static void nv_shutdown(struct pci_dev *pdev)
6000 * only put the device into D3 if we really go for poweroff. 5815 * only put the device into D3 if we really go for poweroff.
6001 */ 5816 */
6002 if (system_state == SYSTEM_POWER_OFF) { 5817 if (system_state == SYSTEM_POWER_OFF) {
6003 if (pci_enable_wake(pdev, PCI_D3cold, np->wolenabled)) 5818 pci_wake_from_d3(pdev, np->wolenabled);
6004 pci_enable_wake(pdev, PCI_D3hot, np->wolenabled);
6005 pci_set_power_state(pdev, PCI_D3hot); 5819 pci_set_power_state(pdev, PCI_D3hot);
6006 } 5820 }
6007} 5821}
6008#else 5822#else
6009#define nv_suspend NULL 5823#define NV_PM_OPS NULL
6010#define nv_shutdown NULL 5824#define nv_shutdown NULL
6011#define nv_resume NULL
6012#endif /* CONFIG_PM */ 5825#endif /* CONFIG_PM */
6013 5826
6014static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = { 5827static DEFINE_PCI_DEVICE_TABLE(pci_tbl) = {
@@ -6180,9 +5993,8 @@ static struct pci_driver driver = {
6180 .id_table = pci_tbl, 5993 .id_table = pci_tbl,
6181 .probe = nv_probe, 5994 .probe = nv_probe,
6182 .remove = __devexit_p(nv_remove), 5995 .remove = __devexit_p(nv_remove),
6183 .suspend = nv_suspend,
6184 .resume = nv_resume,
6185 .shutdown = nv_shutdown, 5996 .shutdown = nv_shutdown,
5997 .driver.pm = NV_PM_OPS,
6186}; 5998};
6187 5999
6188static int __init init_nic(void) 6000static int __init init_nic(void)
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index d684f187de57..7a1f3d0ffa78 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -40,6 +40,7 @@
40#include <linux/of_mdio.h> 40#include <linux/of_mdio.h>
41#include <linux/of_platform.h> 41#include <linux/of_platform.h>
42#include <linux/of_gpio.h> 42#include <linux/of_gpio.h>
43#include <linux/of_net.h>
43 44
44#include <linux/vmalloc.h> 45#include <linux/vmalloc.h>
45#include <asm/pgtable.h> 46#include <asm/pgtable.h>
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index d1bec6269173..6de4675016b5 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -95,6 +95,7 @@
95#include <linux/phy.h> 95#include <linux/phy.h>
96#include <linux/phy_fixed.h> 96#include <linux/phy_fixed.h>
97#include <linux/of.h> 97#include <linux/of.h>
98#include <linux/of_net.h>
98 99
99#include "gianfar.h" 100#include "gianfar.h"
100#include "fsl_pq_mdio.h" 101#include "fsl_pq_mdio.h"
@@ -143,7 +144,8 @@ void gfar_halt(struct net_device *dev);
143static void gfar_halt_nodisable(struct net_device *dev); 144static void gfar_halt_nodisable(struct net_device *dev);
144void gfar_start(struct net_device *dev); 145void gfar_start(struct net_device *dev);
145static void gfar_clear_exact_match(struct net_device *dev); 146static void gfar_clear_exact_match(struct net_device *dev);
146static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 147static void gfar_set_mac_for_addr(struct net_device *dev, int num,
148 const u8 *addr);
147static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 149static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
148 150
149MODULE_AUTHOR("Freescale Semiconductor, Inc"); 151MODULE_AUTHOR("Freescale Semiconductor, Inc");
@@ -3094,10 +3096,10 @@ static void gfar_set_multi(struct net_device *dev)
3094static void gfar_clear_exact_match(struct net_device *dev) 3096static void gfar_clear_exact_match(struct net_device *dev)
3095{ 3097{
3096 int idx; 3098 int idx;
3097 u8 zero_arr[MAC_ADDR_LEN] = {0,0,0,0,0,0}; 3099 static const u8 zero_arr[MAC_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
3098 3100
3099 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++) 3101 for(idx = 1;idx < GFAR_EM_NUM + 1;idx++)
3100 gfar_set_mac_for_addr(dev, idx, (u8 *)zero_arr); 3102 gfar_set_mac_for_addr(dev, idx, zero_arr);
3101} 3103}
3102 3104
3103/* Set the appropriate hash bit for the given addr */ 3105/* Set the appropriate hash bit for the given addr */
@@ -3132,7 +3134,8 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
3132/* There are multiple MAC Address register pairs on some controllers 3134/* There are multiple MAC Address register pairs on some controllers
3133 * This function sets the numth pair to a given address 3135 * This function sets the numth pair to a given address
3134 */ 3136 */
3135static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 3137static void gfar_set_mac_for_addr(struct net_device *dev, int num,
3138 const u8 *addr)
3136{ 3139{
3137 struct gfar_private *priv = netdev_priv(dev); 3140 struct gfar_private *priv = netdev_priv(dev);
3138 struct gfar __iomem *regs = priv->gfargrp[0].regs; 3141 struct gfar __iomem *regs = priv->gfargrp[0].regs;
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 9a6485892b3d..80d25ed53344 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -1202,7 +1202,7 @@ static void hamachi_init_ring(struct net_device *dev)
1202 } 1202 }
1203 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1203 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1204 for (i = 0; i < RX_RING_SIZE; i++) { 1204 for (i = 0; i < RX_RING_SIZE; i++) {
1205 struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz); 1205 struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
1206 hmp->rx_skbuff[i] = skb; 1206 hmp->rx_skbuff[i] = skb;
1207 if (skb == NULL) 1207 if (skb == NULL)
1208 break; 1208 break;
@@ -1669,7 +1669,7 @@ static int hamachi_rx(struct net_device *dev)
1669 entry = hmp->dirty_rx % RX_RING_SIZE; 1669 entry = hmp->dirty_rx % RX_RING_SIZE;
1670 desc = &(hmp->rx_ring[entry]); 1670 desc = &(hmp->rx_ring[entry]);
1671 if (hmp->rx_skbuff[entry] == NULL) { 1671 if (hmp->rx_skbuff[entry] == NULL) {
1672 struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz); 1672 struct sk_buff *skb = dev_alloc_skb(hmp->rx_buf_sz + 2);
1673 1673
1674 hmp->rx_skbuff[entry] = skb; 1674 hmp->rx_skbuff[entry] = skb;
1675 if (skb == NULL) 1675 if (skb == NULL)
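
Both hamachi hunks grow each receive buffer by two bytes. This is presumably headroom for the driver's 2-byte skb_reserve, so that the 14-byte Ethernet header ends on a 4-byte boundary and the IP header lands aligned; the motivation is an assumption, not stated in the hunk. A quick standalone check of that arithmetic:

#include <stdio.h>

int main(void)
{
	const int eth_hlen = 14;	/* Ethernet header length */

	for (int pad = 0; pad <= 2; pad += 2) {
		int ip_off = pad + eth_hlen;
		printf("pad %d -> IP header at offset %d (%saligned)\n",
		       pad, ip_off, ip_off % 4 ? "mis" : "");
	}
	return 0;
}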
diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c
index 4e7d1d0a2340..7d9ced0738c5 100644
--- a/drivers/net/hamradio/yam.c
+++ b/drivers/net/hamradio/yam.c
@@ -396,7 +396,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
396 while (p) { 396 while (p) {
397 if (p->bitrate == bitrate) { 397 if (p->bitrate == bitrate) {
398 memcpy(p->bits, bits, YAM_FPGA_SIZE); 398 memcpy(p->bits, bits, YAM_FPGA_SIZE);
399 return p->bits; 399 goto out;
400 } 400 }
401 p = p->next; 401 p = p->next;
402 } 402 }
@@ -411,7 +411,7 @@ static unsigned char *add_mcs(unsigned char *bits, int bitrate,
411 p->bitrate = bitrate; 411 p->bitrate = bitrate;
412 p->next = yam_data; 412 p->next = yam_data;
413 yam_data = p; 413 yam_data = p;
414 414 out:
415 release_firmware(fw); 415 release_firmware(fw);
416 return p->bits; 416 return p->bits;
417} 417}
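
The yam change routes the early "bitrate already cached" return through the shared out: label so release_firmware() runs on that path as well, closing a leak. A generic user-space sketch of the same single-exit cleanup idiom, with a heap buffer standing in for the firmware blob; names and logic are illustrative only.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *cached;

static char *cache_or_reuse(const char *data, size_t len)
{
	char *blob = malloc(len);	/* stand-in for request_firmware() */
	char *ret = NULL;

	if (!blob)
		return NULL;
	memcpy(blob, data, len);

	if (cached) {			/* already cached: reuse it */
		ret = cached;
		goto out;
	}
	cached = malloc(len);
	if (!cached)
		goto out;
	memcpy(cached, blob, len);
	ret = cached;
out:
	free(blob);			/* stand-in for release_firmware(), runs on every path */
	return ret;
}

int main(void)
{
	printf("%s\n", cache_or_reuse("abc", 3) ? "cached" : "failed");
	printf("%s\n", cache_or_reuse("def", 3) ? "reused" : "failed");
	free(cached);
	return 0;
}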
diff --git a/drivers/net/hp.c b/drivers/net/hp.c
index d15d2f2ba78e..ef2014375e62 100644
--- a/drivers/net/hp.c
+++ b/drivers/net/hp.c
@@ -162,9 +162,9 @@ static int __init hp_probe1(struct net_device *dev, int ioaddr)
162 162
163 /* Snarf the interrupt now. Someday this could be moved to open(). */ 163 /* Snarf the interrupt now. Someday this could be moved to open(). */
164 if (dev->irq < 2) { 164 if (dev->irq < 2) {
165 int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0}; 165 static const int irq_16list[] = { 11, 10, 5, 3, 4, 7, 9, 0};
166 int irq_8list[] = { 7, 5, 3, 4, 9, 0}; 166 static const int irq_8list[] = { 7, 5, 3, 4, 9, 0};
167 int *irqp = wordmode ? irq_16list : irq_8list; 167 const int *irqp = wordmode ? irq_16list : irq_8list;
168 do { 168 do {
169 int irq = *irqp; 169 int irq = *irqp;
170 if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) { 170 if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 06bb9b799458..6d9275c52e05 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -1279,7 +1279,7 @@ static void emac_force_link_update(struct emac_instance *dev)
1279 netif_carrier_off(dev->ndev); 1279 netif_carrier_off(dev->ndev);
1280 smp_rmb(); 1280 smp_rmb();
1281 if (dev->link_polling) { 1281 if (dev->link_polling) {
1282 cancel_rearming_delayed_work(&dev->link_work); 1282 cancel_delayed_work_sync(&dev->link_work);
1283 if (dev->link_polling) 1283 if (dev->link_polling)
1284 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF); 1284 schedule_delayed_work(&dev->link_work, PHY_POLL_LINK_OFF);
1285 } 1285 }
@@ -1294,7 +1294,7 @@ static int emac_close(struct net_device *ndev)
1294 1294
1295 if (dev->phy.address >= 0) { 1295 if (dev->phy.address >= 0) {
1296 dev->link_polling = 0; 1296 dev->link_polling = 0;
1297 cancel_rearming_delayed_work(&dev->link_work); 1297 cancel_delayed_work_sync(&dev->link_work);
1298 } 1298 }
1299 mutex_lock(&dev->link_lock); 1299 mutex_lock(&dev->link_lock);
1300 emac_netif_stop(dev); 1300 emac_netif_stop(dev);
@@ -2950,7 +2950,7 @@ static int __devexit emac_remove(struct platform_device *ofdev)
2950 2950
2951 unregister_netdev(dev->ndev); 2951 unregister_netdev(dev->ndev);
2952 2952
2953 flush_scheduled_work(); 2953 cancel_work_sync(&dev->reset_work);
2954 2954
2955 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH)) 2955 if (emac_has_feature(dev, EMAC_FTR_HAS_TAH))
2956 tah_detach(dev->tah_dev, dev->tah_port); 2956 tah_detach(dev->tah_dev, dev->tah_port);
diff --git a/drivers/net/ibmveth.c b/drivers/net/ibmveth.c
index c454b45ca7ec..5522d459654c 100644
--- a/drivers/net/ibmveth.c
+++ b/drivers/net/ibmveth.c
@@ -729,11 +729,6 @@ static void netdev_get_drvinfo(struct net_device *dev,
729 sizeof(info->version) - 1); 729 sizeof(info->version) - 1);
730} 730}
731 731
732static u32 netdev_get_link(struct net_device *dev)
733{
734 return 1;
735}
736
737static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data) 732static void ibmveth_set_rx_csum_flags(struct net_device *dev, u32 data)
738{ 733{
739 struct ibmveth_adapter *adapter = netdev_priv(dev); 734 struct ibmveth_adapter *adapter = netdev_priv(dev);
@@ -918,7 +913,7 @@ static void ibmveth_get_ethtool_stats(struct net_device *dev,
918static const struct ethtool_ops netdev_ethtool_ops = { 913static const struct ethtool_ops netdev_ethtool_ops = {
919 .get_drvinfo = netdev_get_drvinfo, 914 .get_drvinfo = netdev_get_drvinfo,
920 .get_settings = netdev_get_settings, 915 .get_settings = netdev_get_settings,
921 .get_link = netdev_get_link, 916 .get_link = ethtool_op_get_link,
922 .set_tx_csum = ibmveth_set_tx_csum, 917 .set_tx_csum = ibmveth_set_tx_csum,
923 .get_rx_csum = ibmveth_get_rx_csum, 918 .get_rx_csum = ibmveth_get_rx_csum,
924 .set_rx_csum = ibmveth_set_rx_csum, 919 .set_rx_csum = ibmveth_set_rx_csum,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index fe337bd121aa..e07d487f015a 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -36,22 +36,10 @@
36#include <net/pkt_sched.h> 36#include <net/pkt_sched.h>
37#include <net/net_namespace.h> 37#include <net/net_namespace.h>
38 38
39#define TX_TIMEOUT (2*HZ)
40
41#define TX_Q_LIMIT 32 39#define TX_Q_LIMIT 32
42struct ifb_private { 40struct ifb_private {
43 struct tasklet_struct ifb_tasklet; 41 struct tasklet_struct ifb_tasklet;
44 int tasklet_pending; 42 int tasklet_pending;
45 /* mostly debug stats leave in for now */
46 unsigned long st_task_enter; /* tasklet entered */
47 unsigned long st_txq_refl_try; /* transmit queue refill attempt */
48 unsigned long st_rxq_enter; /* receive queue entered */
49 unsigned long st_rx2tx_tran; /* receive to trasmit transfers */
50 unsigned long st_rxq_notenter; /*receiveQ not entered, resched */
51 unsigned long st_rx_frm_egr; /* received from egress path */
52 unsigned long st_rx_frm_ing; /* received from ingress path */
53 unsigned long st_rxq_check;
54 unsigned long st_rxq_rsch;
55 struct sk_buff_head rq; 43 struct sk_buff_head rq;
56 struct sk_buff_head tq; 44 struct sk_buff_head tq;
57}; 45};
@@ -73,24 +61,17 @@ static void ri_tasklet(unsigned long dev)
73 struct sk_buff *skb; 61 struct sk_buff *skb;
74 62
75 txq = netdev_get_tx_queue(_dev, 0); 63 txq = netdev_get_tx_queue(_dev, 0);
76 dp->st_task_enter++;
77 if ((skb = skb_peek(&dp->tq)) == NULL) { 64 if ((skb = skb_peek(&dp->tq)) == NULL) {
78 dp->st_txq_refl_try++;
79 if (__netif_tx_trylock(txq)) { 65 if (__netif_tx_trylock(txq)) {
80 dp->st_rxq_enter++; 66 skb_queue_splice_tail_init(&dp->rq, &dp->tq);
81 while ((skb = skb_dequeue(&dp->rq)) != NULL) {
82 skb_queue_tail(&dp->tq, skb);
83 dp->st_rx2tx_tran++;
84 }
85 __netif_tx_unlock(txq); 67 __netif_tx_unlock(txq);
86 } else { 68 } else {
87 /* reschedule */ 69 /* reschedule */
88 dp->st_rxq_notenter++;
89 goto resched; 70 goto resched;
90 } 71 }
91 } 72 }
92 73
93 while ((skb = skb_dequeue(&dp->tq)) != NULL) { 74 while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
94 u32 from = G_TC_FROM(skb->tc_verd); 75 u32 from = G_TC_FROM(skb->tc_verd);
95 76
96 skb->tc_verd = 0; 77 skb->tc_verd = 0;
@@ -112,24 +93,20 @@ static void ri_tasklet(unsigned long dev)
112 skb->skb_iif = _dev->ifindex; 93 skb->skb_iif = _dev->ifindex;
113 94
114 if (from & AT_EGRESS) { 95 if (from & AT_EGRESS) {
115 dp->st_rx_frm_egr++;
116 dev_queue_xmit(skb); 96 dev_queue_xmit(skb);
117 } else if (from & AT_INGRESS) { 97 } else if (from & AT_INGRESS) {
118 dp->st_rx_frm_ing++;
119 skb_pull(skb, skb->dev->hard_header_len); 98 skb_pull(skb, skb->dev->hard_header_len);
120 netif_rx(skb); 99 netif_receive_skb(skb);
121 } else 100 } else
122 BUG(); 101 BUG();
123 } 102 }
124 103
125 if (__netif_tx_trylock(txq)) { 104 if (__netif_tx_trylock(txq)) {
126 dp->st_rxq_check++;
127 if ((skb = skb_peek(&dp->rq)) == NULL) { 105 if ((skb = skb_peek(&dp->rq)) == NULL) {
128 dp->tasklet_pending = 0; 106 dp->tasklet_pending = 0;
129 if (netif_queue_stopped(_dev)) 107 if (netif_queue_stopped(_dev))
130 netif_wake_queue(_dev); 108 netif_wake_queue(_dev);
131 } else { 109 } else {
132 dp->st_rxq_rsch++;
133 __netif_tx_unlock(txq); 110 __netif_tx_unlock(txq);
134 goto resched; 111 goto resched;
135 } 112 }
@@ -149,6 +126,10 @@ static const struct net_device_ops ifb_netdev_ops = {
149 .ndo_validate_addr = eth_validate_addr, 126 .ndo_validate_addr = eth_validate_addr,
150}; 127};
151 128
129#define IFB_FEATURES (NETIF_F_NO_CSUM | NETIF_F_SG | NETIF_F_FRAGLIST | \
130 NETIF_F_TSO_ECN | NETIF_F_TSO | NETIF_F_TSO6 | \
131 NETIF_F_HIGHDMA | NETIF_F_HW_VLAN_TX)
132
152static void ifb_setup(struct net_device *dev) 133static void ifb_setup(struct net_device *dev)
153{ 134{
154 /* Initialize the device structure. */ 135 /* Initialize the device structure. */
@@ -159,6 +140,9 @@ static void ifb_setup(struct net_device *dev)
159 ether_setup(dev); 140 ether_setup(dev);
160 dev->tx_queue_len = TX_Q_LIMIT; 141 dev->tx_queue_len = TX_Q_LIMIT;
161 142
143 dev->features |= IFB_FEATURES;
144 dev->vlan_features |= IFB_FEATURES;
145
162 dev->flags |= IFF_NOARP; 146 dev->flags |= IFF_NOARP;
163 dev->flags &= ~IFF_MULTICAST; 147 dev->flags &= ~IFF_MULTICAST;
164 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 148 dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
@@ -184,7 +168,7 @@ static netdev_tx_t ifb_xmit(struct sk_buff *skb, struct net_device *dev)
184 netif_stop_queue(dev); 168 netif_stop_queue(dev);
185 } 169 }
186 170
187 skb_queue_tail(&dp->rq, skb); 171 __skb_queue_tail(&dp->rq, skb);
188 if (!dp->tasklet_pending) { 172 if (!dp->tasklet_pending) {
189 dp->tasklet_pending = 1; 173 dp->tasklet_pending = 1;
190 tasklet_schedule(&dp->ifb_tasklet); 174 tasklet_schedule(&dp->ifb_tasklet);
@@ -199,8 +183,8 @@ static int ifb_close(struct net_device *dev)
199 183
200 tasklet_kill(&dp->ifb_tasklet); 184 tasklet_kill(&dp->ifb_tasklet);
201 netif_stop_queue(dev); 185 netif_stop_queue(dev);
202 skb_queue_purge(&dp->rq); 186 __skb_queue_purge(&dp->rq);
203 skb_queue_purge(&dp->tq); 187 __skb_queue_purge(&dp->tq);
204 return 0; 188 return 0;
205} 189}
206 190
@@ -209,8 +193,8 @@ static int ifb_open(struct net_device *dev)
209 struct ifb_private *dp = netdev_priv(dev); 193 struct ifb_private *dp = netdev_priv(dev);
210 194
211 tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev); 195 tasklet_init(&dp->ifb_tasklet, ri_tasklet, (unsigned long)dev);
212 skb_queue_head_init(&dp->rq); 196 __skb_queue_head_init(&dp->rq);
213 skb_queue_head_init(&dp->tq); 197 __skb_queue_head_init(&dp->tq);
214 netif_start_queue(dev); 198 netif_start_queue(dev);
215 199
216 return 0; 200 return 0;
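
The ifb rewrite drops the per-packet debug counters, moves the entire receive queue onto the transmit queue with skb_queue_splice_tail_init() while the tx lock is held, and switches to the unlocked __skb_* queue helpers since the tasklet serializes access. A user-space analogue with an invented singly linked queue type shows why the splice is constant time regardless of how many packets are pending.

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };
struct queue { struct node *head, *tail; };

/* append one element (O(1) thanks to the tail pointer) */
static void enqueue(struct queue *q, int val)
{
	struct node *n = malloc(sizeof(*n));

	n->val = val;
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
}

/* move every node of src onto the tail of dst and leave src empty - O(1) */
static void splice_tail_init(struct queue *src, struct queue *dst)
{
	if (!src->head)
		return;
	if (dst->tail)
		dst->tail->next = src->head;
	else
		dst->head = src->head;
	dst->tail = src->tail;
	src->head = src->tail = NULL;
}

int main(void)
{
	struct queue rq = { NULL, NULL }, tq = { NULL, NULL };
	struct node *n, *next;

	for (int i = 0; i < 3; i++)
		enqueue(&rq, i);

	splice_tail_init(&rq, &tq);	/* a few pointer fix-ups, not a per-packet loop */

	for (n = tq.head; n; n = next) {
		next = n->next;
		printf("%d ", n->val);
		free(n);
	}
	printf("\n");
	return 0;
}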
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index bc183f5487cb..0a2368fa6bc6 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -134,6 +134,8 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
134 case E1000_DEV_ID_82580_COPPER_DUAL: 134 case E1000_DEV_ID_82580_COPPER_DUAL:
135 case E1000_DEV_ID_DH89XXCC_SGMII: 135 case E1000_DEV_ID_DH89XXCC_SGMII:
136 case E1000_DEV_ID_DH89XXCC_SERDES: 136 case E1000_DEV_ID_DH89XXCC_SERDES:
137 case E1000_DEV_ID_DH89XXCC_BACKPLANE:
138 case E1000_DEV_ID_DH89XXCC_SFP:
137 mac->type = e1000_82580; 139 mac->type = e1000_82580;
138 break; 140 break;
139 case E1000_DEV_ID_I350_COPPER: 141 case E1000_DEV_ID_I350_COPPER:
@@ -1478,6 +1480,39 @@ out:
1478} 1480}
1479 1481
1480/** 1482/**
1483 * igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
1484 * @hw: pointer to the hardware struct
1485 * @enable: state to enter, either enabled or disabled
1486 * @pf: Physical Function pool - do not set anti-spoofing for the PF
1487 *
1488 * enables/disables L2 switch anti-spoofing functionality.
1489 **/
1490void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
1491{
1492 u32 dtxswc;
1493
1494 switch (hw->mac.type) {
1495 case e1000_82576:
1496 case e1000_i350:
1497 dtxswc = rd32(E1000_DTXSWC);
1498 if (enable) {
1499 dtxswc |= (E1000_DTXSWC_MAC_SPOOF_MASK |
1500 E1000_DTXSWC_VLAN_SPOOF_MASK);
1501 /* The PF can spoof - it has to in order to
1502 * support emulation mode NICs */
1503 dtxswc ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
1504 } else {
1505 dtxswc &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
1506 E1000_DTXSWC_VLAN_SPOOF_MASK);
1507 }
1508 wr32(E1000_DTXSWC, dtxswc);
1509 break;
1510 default:
1511 break;
1512 }
1513}
1514
1515/**
1481 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback 1516 * igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
1482 * @hw: pointer to the hardware struct 1517 * @hw: pointer to the hardware struct
1483 * @enable: state to enter, either enabled or disabled 1518 * @enable: state to enter, either enabled or disabled
@@ -1578,7 +1613,7 @@ static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
1578{ 1613{
1579 s32 ret_val = 0; 1614 s32 ret_val = 0;
1580 u32 mdicnfg; 1615 u32 mdicnfg;
1581 u16 nvm_data; 1616 u16 nvm_data = 0;
1582 1617
1583 if (hw->mac.type != e1000_82580) 1618 if (hw->mac.type != e1000_82580)
1584 goto out; 1619 goto out;
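
igb_vmdq_set_anti_spoofing_pf() above enables MAC and VLAN spoof checking for every pool and then XORs out the two bits that belong to the PF's own pool, since the PF must still be able to spoof to support emulated NICs. A standalone sketch of that bit arithmetic, reusing the mask values from the e1000_82575.h hunk that follows; the example pool numbers are invented.

#include <stdint.h>
#include <stdio.h>

#define DTXSWC_MAC_SPOOF_MASK	0x000000FFu	/* per-VF MAC spoof check */
#define DTXSWC_VLAN_SPOOF_MASK	0x0000FF00u	/* per-VF VLAN spoof check */
#define MAX_NUM_VFS		8

/* set every spoof-check bit, then clear the pair belonging to the PF pool */
static uint32_t anti_spoofing_bits(uint32_t dtxswc, int enable, int pf)
{
	if (enable) {
		dtxswc |= DTXSWC_MAC_SPOOF_MASK | DTXSWC_VLAN_SPOOF_MASK;
		dtxswc ^= (1u << pf) | (1u << (pf + MAX_NUM_VFS));
	} else {
		dtxswc &= ~(DTXSWC_MAC_SPOOF_MASK | DTXSWC_VLAN_SPOOF_MASK);
	}
	return dtxswc;
}

int main(void)
{
	/* e.g. 7 VFs in pools 0-6, PF in pool 7 */
	printf("DTXSWC = 0x%08x\n", (unsigned)anti_spoofing_bits(0, 1, 7));
	return 0;
}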
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index cbd1e1259e4d..1d01af2472e7 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -194,6 +194,10 @@ struct e1000_adv_tx_context_desc {
194#define E1000_NVM_APME_82575 0x0400 194#define E1000_NVM_APME_82575 0x0400
195#define MAX_NUM_VFS 8 195#define MAX_NUM_VFS 8
196 196
197#define E1000_DTXSWC_MAC_SPOOF_MASK 0x000000FF /* Per VF MAC spoof control */
198#define E1000_DTXSWC_VLAN_SPOOF_MASK 0x0000FF00 /* Per VF VLAN spoof control */
199#define E1000_DTXSWC_LLE_MASK 0x00FF0000 /* Per VF Local LB enables */
200#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
197#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ 201#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
198 202
199/* Easy defines for setting default pool, would normally be left a zero */ 203/* Easy defines for setting default pool, would normally be left a zero */
@@ -243,6 +247,7 @@ struct e1000_adv_tx_context_desc {
243 247
244/* RX packet buffer size defines */ 248/* RX packet buffer size defines */
245#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F 249#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
250void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
246void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); 251void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
247void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 252void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
248u16 igb_rxpbs_adjust_82580(u32 data); 253u16 igb_rxpbs_adjust_82580(u32 data);
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index 62222796a8b3..6319ed902bc0 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -419,6 +419,9 @@
419#define E1000_ERR_SWFW_SYNC 13 419#define E1000_ERR_SWFW_SYNC 13
420#define E1000_NOT_IMPLEMENTED 14 420#define E1000_NOT_IMPLEMENTED 14
421#define E1000_ERR_MBX 15 421#define E1000_ERR_MBX 15
422#define E1000_ERR_INVALID_ARGUMENT 16
423#define E1000_ERR_NO_SPACE 17
424#define E1000_ERR_NVM_PBA_SECTION 18
422 425
423/* Loop limit on how long we wait for auto-negotiation to complete */ 426/* Loop limit on how long we wait for auto-negotiation to complete */
424#define COPPER_LINK_UP_LIMIT 10 427#define COPPER_LINK_UP_LIMIT 10
@@ -580,11 +583,15 @@
580 583
581/* Mask bits for fields in Word 0x1a of the NVM */ 584/* Mask bits for fields in Word 0x1a of the NVM */
582 585
586/* length of string needed to store part num */
587#define E1000_PBANUM_LENGTH 11
588
583/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */ 589/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
584#define NVM_SUM 0xBABA 590#define NVM_SUM 0xBABA
585 591
586#define NVM_PBA_OFFSET_0 8 592#define NVM_PBA_OFFSET_0 8
587#define NVM_PBA_OFFSET_1 9 593#define NVM_PBA_OFFSET_1 9
594#define NVM_PBA_PTR_GUARD 0xFAFA
588#define NVM_WORD_SIZE_BASE_SHIFT 6 595#define NVM_WORD_SIZE_BASE_SHIFT 6
589 596
590/* NVM Commands - Microwire */ 597/* NVM Commands - Microwire */
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index c0b017f8d782..e2638afb8cdc 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -54,8 +54,10 @@ struct e1000_hw;
54#define E1000_DEV_ID_82580_SERDES 0x1510 54#define E1000_DEV_ID_82580_SERDES 0x1510
55#define E1000_DEV_ID_82580_SGMII 0x1511 55#define E1000_DEV_ID_82580_SGMII 0x1511
56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516 56#define E1000_DEV_ID_82580_COPPER_DUAL 0x1516
57#define E1000_DEV_ID_DH89XXCC_SGMII 0x0436 57#define E1000_DEV_ID_DH89XXCC_SGMII 0x0438
58#define E1000_DEV_ID_DH89XXCC_SERDES 0x0438 58#define E1000_DEV_ID_DH89XXCC_SERDES 0x043A
59#define E1000_DEV_ID_DH89XXCC_BACKPLANE 0x043C
60#define E1000_DEV_ID_DH89XXCC_SFP 0x0440
59#define E1000_DEV_ID_I350_COPPER 0x1521 61#define E1000_DEV_ID_I350_COPPER 0x1521
60#define E1000_DEV_ID_I350_FIBER 0x1522 62#define E1000_DEV_ID_I350_FIBER 0x1522
61#define E1000_DEV_ID_I350_SERDES 0x1523 63#define E1000_DEV_ID_I350_SERDES 0x1523
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index d83b77fa4038..6b5cc2cc453d 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -445,31 +445,112 @@ out:
445} 445}
446 446
447/** 447/**
448 * igb_read_part_num - Read device part number 448 * igb_read_part_string - Read device part number
449 * @hw: pointer to the HW structure 449 * @hw: pointer to the HW structure
450 * @part_num: pointer to device part number 450 * @part_num: pointer to device part number
451 * @part_num_size: size of part number buffer
451 * 452 *
452 * Reads the product board assembly (PBA) number from the EEPROM and stores 453 * Reads the product board assembly (PBA) number from the EEPROM and stores
453 * the value in part_num. 454 * the value in part_num.
454 **/ 455 **/
455s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num) 456s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
456{ 457{
457 s32 ret_val; 458 s32 ret_val;
458 u16 nvm_data; 459 u16 nvm_data;
460 u16 pointer;
461 u16 offset;
462 u16 length;
463
464 if (part_num == NULL) {
465 hw_dbg("PBA string buffer was null\n");
466 ret_val = E1000_ERR_INVALID_ARGUMENT;
467 goto out;
468 }
459 469
460 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data); 470 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
461 if (ret_val) { 471 if (ret_val) {
462 hw_dbg("NVM Read Error\n"); 472 hw_dbg("NVM Read Error\n");
463 goto out; 473 goto out;
464 } 474 }
465 *part_num = (u32)(nvm_data << 16);
466 475
467 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &nvm_data); 476 ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
477 if (ret_val) {
478 hw_dbg("NVM Read Error\n");
479 goto out;
480 }
481
482 /*
483 * if nvm_data is not ptr guard the PBA must be in legacy format which
484 * means pointer is actually our second data word for the PBA number
485 * and we can decode it into an ascii string
486 */
487 if (nvm_data != NVM_PBA_PTR_GUARD) {
488 hw_dbg("NVM PBA number is not stored as string\n");
489
490 /* we will need 11 characters to store the PBA */
491 if (part_num_size < 11) {
492 hw_dbg("PBA string buffer too small\n");
493 return E1000_ERR_NO_SPACE;
494 }
495
496 /* extract hex string from data and pointer */
497 part_num[0] = (nvm_data >> 12) & 0xF;
498 part_num[1] = (nvm_data >> 8) & 0xF;
499 part_num[2] = (nvm_data >> 4) & 0xF;
500 part_num[3] = nvm_data & 0xF;
501 part_num[4] = (pointer >> 12) & 0xF;
502 part_num[5] = (pointer >> 8) & 0xF;
503 part_num[6] = '-';
504 part_num[7] = 0;
505 part_num[8] = (pointer >> 4) & 0xF;
506 part_num[9] = pointer & 0xF;
507
508 /* put a null character on the end of our string */
509 part_num[10] = '\0';
510
511 /* switch all the data but the '-' to hex char */
512 for (offset = 0; offset < 10; offset++) {
513 if (part_num[offset] < 0xA)
514 part_num[offset] += '0';
515 else if (part_num[offset] < 0x10)
516 part_num[offset] += 'A' - 0xA;
517 }
518
519 goto out;
520 }
521
522 ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
468 if (ret_val) { 523 if (ret_val) {
469 hw_dbg("NVM Read Error\n"); 524 hw_dbg("NVM Read Error\n");
470 goto out; 525 goto out;
471 } 526 }
472 *part_num |= nvm_data; 527
528 if (length == 0xFFFF || length == 0) {
529 hw_dbg("NVM PBA number section invalid length\n");
530 ret_val = E1000_ERR_NVM_PBA_SECTION;
531 goto out;
532 }
533 /* check if part_num buffer is big enough */
534 if (part_num_size < (((u32)length * 2) - 1)) {
535 hw_dbg("PBA string buffer too small\n");
536 ret_val = E1000_ERR_NO_SPACE;
537 goto out;
538 }
539
540 /* trim pba length from start of string */
541 pointer++;
542 length--;
543
544 for (offset = 0; offset < length; offset++) {
545 ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
546 if (ret_val) {
547 hw_dbg("NVM Read Error\n");
548 goto out;
549 }
550 part_num[offset * 2] = (u8)(nvm_data >> 8);
551 part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
552 }
553 part_num[offset * 2] = '\0';
473 554
474out: 555out:
475 return ret_val; 556 return ret_val;
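
igb_read_part_string() above first checks whether the second PBA word is the NVM_PBA_PTR_GUARD value; if it is not, the two EEPROM words hold the part number directly and are expanded into an 11-character string. A standalone rendering of that legacy-format branch, with fabricated EEPROM words:

#include <stdint.h>
#include <stdio.h>

/* expand two 16-bit EEPROM words into the 11-character "xxxxxx-0xx" PBA string */
static void decode_legacy_pba(uint16_t word0, uint16_t word1, char out[11])
{
	out[0] = (word0 >> 12) & 0xF;
	out[1] = (word0 >>  8) & 0xF;
	out[2] = (word0 >>  4) & 0xF;
	out[3] =  word0        & 0xF;
	out[4] = (word1 >> 12) & 0xF;
	out[5] = (word1 >>  8) & 0xF;
	out[6] = '-';
	out[7] = 0;
	out[8] = (word1 >>  4) & 0xF;
	out[9] =  word1        & 0xF;
	out[10] = '\0';

	/* convert every nibble except the '-' to its ASCII hex digit */
	for (int i = 0; i < 10; i++) {
		if (out[i] < 0xA)
			out[i] += '0';
		else if (out[i] < 0x10)
			out[i] += 'A' - 0xA;
	}
}

int main(void)
{
	char pba[11];

	decode_legacy_pba(0xE468, 0x7504, pba);	/* fabricated EEPROM words */
	printf("PBA: %s\n", pba);
	return 0;
}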
diff --git a/drivers/net/igb/e1000_nvm.h b/drivers/net/igb/e1000_nvm.h
index 1041c34dcbe1..29c956a84bd0 100644
--- a/drivers/net/igb/e1000_nvm.h
+++ b/drivers/net/igb/e1000_nvm.h
@@ -32,6 +32,8 @@ s32 igb_acquire_nvm(struct e1000_hw *hw);
32void igb_release_nvm(struct e1000_hw *hw); 32void igb_release_nvm(struct e1000_hw *hw);
33s32 igb_read_mac_addr(struct e1000_hw *hw); 33s32 igb_read_mac_addr(struct e1000_hw *hw);
34s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num); 34s32 igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
35s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
36 u32 part_num_size);
35s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 37s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
36s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data); 38s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
37s32 igb_validate_nvm_checksum(struct e1000_hw *hw); 39s32 igb_validate_nvm_checksum(struct e1000_hw *hw);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ddd036a78999..6694bf3e5ad9 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -1757,11 +1757,12 @@ s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
1757 u16 phy_data, i, agc_value = 0; 1757 u16 phy_data, i, agc_value = 0;
1758 u16 cur_agc_index, max_agc_index = 0; 1758 u16 cur_agc_index, max_agc_index = 0;
1759 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1; 1759 u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
1760 u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = 1760 static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
1761 {IGP02E1000_PHY_AGC_A, 1761 IGP02E1000_PHY_AGC_A,
1762 IGP02E1000_PHY_AGC_B, 1762 IGP02E1000_PHY_AGC_B,
1763 IGP02E1000_PHY_AGC_C, 1763 IGP02E1000_PHY_AGC_C,
1764 IGP02E1000_PHY_AGC_D}; 1764 IGP02E1000_PHY_AGC_D
1765 };
1765 1766
1766 /* Read the AGC registers for all channels */ 1767 /* Read the AGC registers for all channels */
1767 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) { 1768 for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index abb7333a1fbf..8ac83c5190d5 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -301,6 +301,7 @@
301#define E1000_VFTE 0x00C90 /* VF Transmit Enables */ 301#define E1000_VFTE 0x00C90 /* VF Transmit Enables */
302#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ 302#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
303#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ 303#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
304#define E1000_WVBR 0x03554 /* VM Wrong Behavior - RWS */
304#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ 305#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
305#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */ 306#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
306#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ 307#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index edab9c442399..92a4ef09e55c 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -324,6 +324,7 @@ struct igb_adapter {
324 unsigned int vfs_allocated_count; 324 unsigned int vfs_allocated_count;
325 struct vf_data_storage *vf_data; 325 struct vf_data_storage *vf_data;
326 u32 rss_queues; 326 u32 rss_queues;
327 u32 wvbr;
327}; 328};
328 329
329#define IGB_FLAG_HAS_MSI (1 << 0) 330#define IGB_FLAG_HAS_MSI (1 << 0)
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 892d196f17ac..58c665b7513d 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -73,6 +73,8 @@ static DEFINE_PCI_DEVICE_TABLE(igb_pci_tbl) = {
73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 }, 73 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 }, 74 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 }, 75 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
76 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
77 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, 79 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
78 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 }, 80 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
@@ -1654,7 +1656,7 @@ void igb_reset(struct igb_adapter *adapter)
1654 if (adapter->vfs_allocated_count) { 1656 if (adapter->vfs_allocated_count) {
1655 int i; 1657 int i;
1656 for (i = 0 ; i < adapter->vfs_allocated_count; i++) 1658 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1657 adapter->vf_data[i].flags = 0; 1659 adapter->vf_data[i].flags &= IGB_VF_FLAG_PF_SET_MAC;
1658 1660
1659 /* ping all the active vfs to let them know we are going down */ 1661 /* ping all the active vfs to let them know we are going down */
1660 igb_ping_all_vfs(adapter); 1662 igb_ping_all_vfs(adapter);
@@ -1729,12 +1731,13 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1729 struct igb_adapter *adapter; 1731 struct igb_adapter *adapter;
1730 struct e1000_hw *hw; 1732 struct e1000_hw *hw;
1731 u16 eeprom_data = 0; 1733 u16 eeprom_data = 0;
1734 s32 ret_val;
1732 static int global_quad_port_a; /* global quad port a indication */ 1735 static int global_quad_port_a; /* global quad port a indication */
1733 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1736 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1734 unsigned long mmio_start, mmio_len; 1737 unsigned long mmio_start, mmio_len;
1735 int err, pci_using_dac; 1738 int err, pci_using_dac;
1736 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1739 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1737 u32 part_num; 1740 u8 part_str[E1000_PBANUM_LENGTH];
1738 1741
1739 /* Catch broken hardware that put the wrong VF device ID in 1742 /* Catch broken hardware that put the wrong VF device ID in
1740 * the PCIe SR-IOV capability. 1743 * the PCIe SR-IOV capability.
@@ -2000,10 +2003,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
2000 "unknown"), 2003 "unknown"),
2001 netdev->dev_addr); 2004 netdev->dev_addr);
2002 2005
2003 igb_read_part_num(hw, &part_num); 2006 ret_val = igb_read_part_string(hw, part_str, E1000_PBANUM_LENGTH);
2004 dev_info(&pdev->dev, "%s: PBA No: %06x-%03x\n", netdev->name, 2007 if (ret_val)
2005 (part_num >> 8), (part_num & 0xff)); 2008 strcpy(part_str, "Unknown");
2006 2009 dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
2007 dev_info(&pdev->dev, 2010 dev_info(&pdev->dev,
2008 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n", 2011 "Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
2009 adapter->msix_entries ? "MSI-X" : 2012 adapter->msix_entries ? "MSI-X" :
@@ -2049,13 +2052,16 @@ static void __devexit igb_remove(struct pci_dev *pdev)
2049 struct igb_adapter *adapter = netdev_priv(netdev); 2052 struct igb_adapter *adapter = netdev_priv(netdev);
2050 struct e1000_hw *hw = &adapter->hw; 2053 struct e1000_hw *hw = &adapter->hw;
2051 2054
2052 /* flush_scheduled work may reschedule our watchdog task, so 2055 /*
2053 * explicitly disable watchdog tasks from being rescheduled */ 2056 * The watchdog timer may be rescheduled, so explicitly
2057 * disable watchdog from being rescheduled.
2058 */
2054 set_bit(__IGB_DOWN, &adapter->state); 2059 set_bit(__IGB_DOWN, &adapter->state);
2055 del_timer_sync(&adapter->watchdog_timer); 2060 del_timer_sync(&adapter->watchdog_timer);
2056 del_timer_sync(&adapter->phy_info_timer); 2061 del_timer_sync(&adapter->phy_info_timer);
2057 2062
2058 flush_scheduled_work(); 2063 cancel_work_sync(&adapter->reset_task);
2064 cancel_work_sync(&adapter->watchdog_task);
2059 2065
2060#ifdef CONFIG_IGB_DCA 2066#ifdef CONFIG_IGB_DCA
2061 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 2067 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
@@ -2436,10 +2442,9 @@ int igb_setup_tx_resources(struct igb_ring *tx_ring)
2436 int size; 2442 int size;
2437 2443
2438 size = sizeof(struct igb_buffer) * tx_ring->count; 2444 size = sizeof(struct igb_buffer) * tx_ring->count;
2439 tx_ring->buffer_info = vmalloc(size); 2445 tx_ring->buffer_info = vzalloc(size);
2440 if (!tx_ring->buffer_info) 2446 if (!tx_ring->buffer_info)
2441 goto err; 2447 goto err;
2442 memset(tx_ring->buffer_info, 0, size);
2443 2448
2444 /* round up to nearest 4K */ 2449 /* round up to nearest 4K */
2445 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 2450 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -2587,10 +2592,9 @@ int igb_setup_rx_resources(struct igb_ring *rx_ring)
2587 int size, desc_len; 2592 int size, desc_len;
2588 2593
2589 size = sizeof(struct igb_buffer) * rx_ring->count; 2594 size = sizeof(struct igb_buffer) * rx_ring->count;
2590 rx_ring->buffer_info = vmalloc(size); 2595 rx_ring->buffer_info = vzalloc(size);
2591 if (!rx_ring->buffer_info) 2596 if (!rx_ring->buffer_info)
2592 goto err; 2597 goto err;
2593 memset(rx_ring->buffer_info, 0, size);
2594 2598
2595 desc_len = sizeof(union e1000_adv_rx_desc); 2599 desc_len = sizeof(union e1000_adv_rx_desc);
2596 2600
@@ -3362,6 +3366,45 @@ static void igb_set_rx_mode(struct net_device *netdev)
3362 igb_restore_vf_multicasts(adapter); 3366 igb_restore_vf_multicasts(adapter);
3363} 3367}
3364 3368
3369static void igb_check_wvbr(struct igb_adapter *adapter)
3370{
3371 struct e1000_hw *hw = &adapter->hw;
3372 u32 wvbr = 0;
3373
3374 switch (hw->mac.type) {
3375 case e1000_82576:
3376 case e1000_i350:
3377 if (!(wvbr = rd32(E1000_WVBR)))
3378 return;
3379 break;
3380 default:
3381 break;
3382 }
3383
3384 adapter->wvbr |= wvbr;
3385}
3386
3387#define IGB_STAGGERED_QUEUE_OFFSET 8
3388
3389static void igb_spoof_check(struct igb_adapter *adapter)
3390{
3391 int j;
3392
3393 if (!adapter->wvbr)
3394 return;
3395
3396 for(j = 0; j < adapter->vfs_allocated_count; j++) {
3397 if (adapter->wvbr & (1 << j) ||
3398 adapter->wvbr & (1 << (j + IGB_STAGGERED_QUEUE_OFFSET))) {
3399 dev_warn(&adapter->pdev->dev,
3400 "Spoof event(s) detected on VF %d\n", j);
3401 adapter->wvbr &=
3402 ~((1 << j) |
3403 (1 << (j + IGB_STAGGERED_QUEUE_OFFSET)));
3404 }
3405 }
3406}
3407
3365/* Need to wait a few seconds after link up to get diagnostic information from 3408/* Need to wait a few seconds after link up to get diagnostic information from
3366 * the phy */ 3409 * the phy */
3367static void igb_update_phy_info(unsigned long data) 3410static void igb_update_phy_info(unsigned long data)
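
igb_spoof_check() above walks the accumulated Wrong VM Behavior bits: each VF owns bit n plus a second bit staggered eight positions higher, and both are cleared once the event has been reported. A standalone walk over an invented bitmap:

#include <stdint.h>
#include <stdio.h>

#define STAGGERED_QUEUE_OFFSET	8	/* matches IGB_STAGGERED_QUEUE_OFFSET */

/* report and clear the bit pair for every VF that shows a spoof event */
static void spoof_check(uint32_t *wvbr, int vf_count)
{
	for (int vf = 0; vf < vf_count; vf++) {
		uint32_t bits = (1u << vf) | (1u << (vf + STAGGERED_QUEUE_OFFSET));

		if (*wvbr & bits) {
			printf("Spoof event(s) detected on VF %d\n", vf);
			*wvbr &= ~bits;
		}
	}
}

int main(void)
{
	uint32_t wvbr = (1u << 2) | (1u << (5 + STAGGERED_QUEUE_OFFSET));	/* VFs 2 and 5 */

	spoof_check(&wvbr, 7);
	printf("remaining bits: 0x%08x\n", (unsigned)wvbr);
	return 0;
}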
@@ -3521,6 +3564,8 @@ static void igb_watchdog_task(struct work_struct *work)
3521 wr32(E1000_ICS, E1000_ICS_RXDMT0); 3564 wr32(E1000_ICS, E1000_ICS_RXDMT0);
3522 } 3565 }
3523 3566
3567 igb_spoof_check(adapter);
3568
3524 /* Reset the timer */ 3569 /* Reset the timer */
3525 if (!test_bit(__IGB_DOWN, &adapter->state)) 3570 if (!test_bit(__IGB_DOWN, &adapter->state))
3526 mod_timer(&adapter->watchdog_timer, 3571 mod_timer(&adapter->watchdog_timer,
@@ -4517,6 +4562,10 @@ static irqreturn_t igb_msix_other(int irq, void *data)
4517 if (icr & E1000_ICR_DOUTSYNC) { 4562 if (icr & E1000_ICR_DOUTSYNC) {
4518 /* HW is reporting DMA is out of sync */ 4563 /* HW is reporting DMA is out of sync */
4519 adapter->stats.doosync++; 4564 adapter->stats.doosync++;
4565 /* The DMA Out of Sync is also indication of a spoof event
4566 * in IOV mode. Check the Wrong VM Behavior register to
4567 * see if it is really a spoof event. */
4568 igb_check_wvbr(adapter);
4520 } 4569 }
4521 4570
4522 /* Check for a mailbox event */ 4571 /* Check for a mailbox event */
@@ -4969,8 +5018,8 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4969 5018
4970static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf) 5019static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4971{ 5020{
4972 /* clear flags */ 5021 /* clear flags - except flag that indicates PF has set the MAC */
4973 adapter->vf_data[vf].flags &= ~(IGB_VF_FLAG_PF_SET_MAC); 5022 adapter->vf_data[vf].flags &= IGB_VF_FLAG_PF_SET_MAC;
4974 adapter->vf_data[vf].last_nack = jiffies; 5023 adapter->vf_data[vf].last_nack = jiffies;
4975 5024
4976 /* reset offloads to defaults */ 5025 /* reset offloads to defaults */
@@ -5024,7 +5073,7 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
5024 reg = rd32(E1000_VFRE); 5073 reg = rd32(E1000_VFRE);
5025 wr32(E1000_VFRE, reg | (1 << vf)); 5074 wr32(E1000_VFRE, reg | (1 << vf));
5026 5075
5027 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS; 5076 adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
5028 5077
5029 /* reply to reset with ack and vf mac address */ 5078 /* reply to reset with ack and vf mac address */
5030 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 5079 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -5103,7 +5152,14 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5103 5152
5104 switch ((msgbuf[0] & 0xFFFF)) { 5153 switch ((msgbuf[0] & 0xFFFF)) {
5105 case E1000_VF_SET_MAC_ADDR: 5154 case E1000_VF_SET_MAC_ADDR:
5106 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 5155 retval = -EINVAL;
5156 if (!(vf_data->flags & IGB_VF_FLAG_PF_SET_MAC))
5157 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
5158 else
5159 dev_warn(&pdev->dev,
5160 "VF %d attempted to override administratively "
5161 "set MAC address\nReload the VF driver to "
5162 "resume operations\n", vf);
5107 break; 5163 break;
5108 case E1000_VF_SET_PROMISC: 5164 case E1000_VF_SET_PROMISC:
5109 retval = igb_set_vf_promisc(adapter, msgbuf, vf); 5165 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
@@ -5115,8 +5171,12 @@ static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
5115 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf); 5171 retval = igb_set_vf_rlpml(adapter, msgbuf[1], vf);
5116 break; 5172 break;
5117 case E1000_VF_SET_VLAN: 5173 case E1000_VF_SET_VLAN:
5118 if (adapter->vf_data[vf].pf_vlan) 5174 retval = -1;
5119 retval = -1; 5175 if (vf_data->pf_vlan)
5176 dev_warn(&pdev->dev,
5177 "VF %d attempted to override administratively "
5178 "set VLAN tag\nReload the VF driver to "
5179 "resume operations\n", vf);
5120 else 5180 else
5121 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 5181 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
5122 break; 5182 break;
@@ -6580,6 +6640,8 @@ static void igb_vmm_control(struct igb_adapter *adapter)
6580 if (adapter->vfs_allocated_count) { 6640 if (adapter->vfs_allocated_count) {
6581 igb_vmdq_set_loopback_pf(hw, true); 6641 igb_vmdq_set_loopback_pf(hw, true);
6582 igb_vmdq_set_replication_pf(hw, true); 6642 igb_vmdq_set_replication_pf(hw, true);
6643 igb_vmdq_set_anti_spoofing_pf(hw, true,
6644 adapter->vfs_allocated_count);
6583 } else { 6645 } else {
6584 igb_vmdq_set_loopback_pf(hw, false); 6646 igb_vmdq_set_loopback_pf(hw, false);
6585 igb_vmdq_set_replication_pf(hw, false); 6647 igb_vmdq_set_replication_pf(hw, false);
diff --git a/drivers/net/igbvf/Makefile b/drivers/net/igbvf/Makefile
index c2f150d8f2d9..0fa3db3dd8b6 100644
--- a/drivers/net/igbvf/Makefile
+++ b/drivers/net/igbvf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel(R) 82576 Virtual Function Linux driver 3# Intel(R) 82576 Virtual Function Linux driver
4# Copyright(c) 2009 Intel Corporation. 4# Copyright(c) 2009 - 2010 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/defines.h b/drivers/net/igbvf/defines.h
index 88a47537518a..79f2604673fe 100644
--- a/drivers/net/igbvf/defines.h
+++ b/drivers/net/igbvf/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index 33add708bcbe..ed6e3d910247 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -110,11 +110,6 @@ static int igbvf_get_settings(struct net_device *netdev,
110 return 0; 110 return 0;
111} 111}
112 112
113static u32 igbvf_get_link(struct net_device *netdev)
114{
115 return netif_carrier_ok(netdev);
116}
117
118static int igbvf_set_settings(struct net_device *netdev, 113static int igbvf_set_settings(struct net_device *netdev,
119 struct ethtool_cmd *ecmd) 114 struct ethtool_cmd *ecmd)
120{ 115{
@@ -515,7 +510,7 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
515 .get_msglevel = igbvf_get_msglevel, 510 .get_msglevel = igbvf_get_msglevel,
516 .set_msglevel = igbvf_set_msglevel, 511 .set_msglevel = igbvf_set_msglevel,
517 .nway_reset = igbvf_nway_reset, 512 .nway_reset = igbvf_nway_reset,
518 .get_link = igbvf_get_link, 513 .get_link = ethtool_op_get_link,
519 .get_eeprom_len = igbvf_get_eeprom_len, 514 .get_eeprom_len = igbvf_get_eeprom_len,
520 .get_eeprom = igbvf_get_eeprom, 515 .get_eeprom = igbvf_get_eeprom,
521 .set_eeprom = igbvf_set_eeprom, 516 .set_eeprom = igbvf_set_eeprom,
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index debeee2dc717..990c329e6c3b 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -97,6 +97,7 @@ struct igbvf_adapter;
97 97
98enum igbvf_boards { 98enum igbvf_boards {
99 board_vf, 99 board_vf,
100 board_i350_vf,
100}; 101};
101 102
102struct igbvf_queue_stats { 103struct igbvf_queue_stats {
@@ -126,7 +127,6 @@ struct igbvf_buffer {
126 unsigned int page_offset; 127 unsigned int page_offset;
127 }; 128 };
128 }; 129 };
129 struct page *page;
130}; 130};
131 131
132union igbvf_desc { 132union igbvf_desc {
diff --git a/drivers/net/igbvf/mbx.c b/drivers/net/igbvf/mbx.c
index 819a8ec901dc..3d6f4cc3998a 100644
--- a/drivers/net/igbvf/mbx.c
+++ b/drivers/net/igbvf/mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/mbx.h b/drivers/net/igbvf/mbx.h
index 4938609dbfb5..c2883c45d477 100644
--- a/drivers/net/igbvf/mbx.h
+++ b/drivers/net/igbvf/mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 28af019c97bb..6352c8158e6d 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -44,12 +44,13 @@
44 44
45#include "igbvf.h" 45#include "igbvf.h"
46 46
47#define DRV_VERSION "1.0.0-k0" 47#define DRV_VERSION "1.0.8-k0"
48char igbvf_driver_name[] = "igbvf"; 48char igbvf_driver_name[] = "igbvf";
49const char igbvf_driver_version[] = DRV_VERSION; 49const char igbvf_driver_version[] = DRV_VERSION;
50static const char igbvf_driver_string[] = 50static const char igbvf_driver_string[] =
51 "Intel(R) Virtual Function Network Driver"; 51 "Intel(R) Virtual Function Network Driver";
52static const char igbvf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 52static const char igbvf_copyright[] =
53 "Copyright (c) 2009 - 2010 Intel Corporation.";
53 54
54static int igbvf_poll(struct napi_struct *napi, int budget); 55static int igbvf_poll(struct napi_struct *napi, int budget);
55static void igbvf_reset(struct igbvf_adapter *); 56static void igbvf_reset(struct igbvf_adapter *);
@@ -63,8 +64,16 @@ static struct igbvf_info igbvf_vf_info = {
63 .init_ops = e1000_init_function_pointers_vf, 64 .init_ops = e1000_init_function_pointers_vf,
64}; 65};
65 66
67static struct igbvf_info igbvf_i350_vf_info = {
68 .mac = e1000_vfadapt_i350,
69 .flags = 0,
70 .pba = 10,
71 .init_ops = e1000_init_function_pointers_vf,
72};
73
66static const struct igbvf_info *igbvf_info_tbl[] = { 74static const struct igbvf_info *igbvf_info_tbl[] = {
67 [board_vf] = &igbvf_vf_info, 75 [board_vf] = &igbvf_vf_info,
76 [board_i350_vf] = &igbvf_i350_vf_info,
68}; 77};
69 78
70/** 79/**
@@ -429,10 +438,9 @@ int igbvf_setup_tx_resources(struct igbvf_adapter *adapter,
429 int size; 438 int size;
430 439
431 size = sizeof(struct igbvf_buffer) * tx_ring->count; 440 size = sizeof(struct igbvf_buffer) * tx_ring->count;
432 tx_ring->buffer_info = vmalloc(size); 441 tx_ring->buffer_info = vzalloc(size);
433 if (!tx_ring->buffer_info) 442 if (!tx_ring->buffer_info)
434 goto err; 443 goto err;
435 memset(tx_ring->buffer_info, 0, size);
436 444
437 /* round up to nearest 4K */ 445 /* round up to nearest 4K */
438 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 446 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
@@ -469,10 +477,9 @@ int igbvf_setup_rx_resources(struct igbvf_adapter *adapter,
469 int size, desc_len; 477 int size, desc_len;
470 478
471 size = sizeof(struct igbvf_buffer) * rx_ring->count; 479 size = sizeof(struct igbvf_buffer) * rx_ring->count;
472 rx_ring->buffer_info = vmalloc(size); 480 rx_ring->buffer_info = vzalloc(size);
473 if (!rx_ring->buffer_info) 481 if (!rx_ring->buffer_info)
474 goto err; 482 goto err;
475 memset(rx_ring->buffer_info, 0, size);
476 483
477 desc_len = sizeof(union e1000_adv_rx_desc); 484 desc_len = sizeof(union e1000_adv_rx_desc);
478 485
@@ -1851,8 +1858,6 @@ static void igbvf_watchdog_task(struct work_struct *work)
1851 1858
1852 if (link) { 1859 if (link) {
1853 if (!netif_carrier_ok(netdev)) { 1860 if (!netif_carrier_ok(netdev)) {
1854 bool txb2b = 1;
1855
1856 mac->ops.get_link_up_info(&adapter->hw, 1861 mac->ops.get_link_up_info(&adapter->hw,
1857 &adapter->link_speed, 1862 &adapter->link_speed,
1858 &adapter->link_duplex); 1863 &adapter->link_duplex);
@@ -1862,11 +1867,9 @@ static void igbvf_watchdog_task(struct work_struct *work)
1862 adapter->tx_timeout_factor = 1; 1867 adapter->tx_timeout_factor = 1;
1863 switch (adapter->link_speed) { 1868 switch (adapter->link_speed) {
1864 case SPEED_10: 1869 case SPEED_10:
1865 txb2b = 0;
1866 adapter->tx_timeout_factor = 16; 1870 adapter->tx_timeout_factor = 16;
1867 break; 1871 break;
1868 case SPEED_100: 1872 case SPEED_100:
1869 txb2b = 0;
1870 /* maybe add some timeout factor ? */ 1873 /* maybe add some timeout factor ? */
1871 break; 1874 break;
1872 } 1875 }
@@ -2830,13 +2833,14 @@ static void __devexit igbvf_remove(struct pci_dev *pdev)
2830 struct e1000_hw *hw = &adapter->hw; 2833 struct e1000_hw *hw = &adapter->hw;
2831 2834
2832 /* 2835 /*
2833 * flush_scheduled work may reschedule our watchdog task, so 2836 * The watchdog timer may be rescheduled, so explicitly
2834 * explicitly disable watchdog tasks from being rescheduled 2837 * disable it from being rescheduled.
2835 */ 2838 */
2836 set_bit(__IGBVF_DOWN, &adapter->state); 2839 set_bit(__IGBVF_DOWN, &adapter->state);
2837 del_timer_sync(&adapter->watchdog_timer); 2840 del_timer_sync(&adapter->watchdog_timer);
2838 2841
2839 flush_scheduled_work(); 2842 cancel_work_sync(&adapter->reset_task);
2843 cancel_work_sync(&adapter->watchdog_task);
2840 2844
2841 unregister_netdev(netdev); 2845 unregister_netdev(netdev);
2842 2846
@@ -2869,6 +2873,7 @@ static struct pci_error_handlers igbvf_err_handler = {
2869 2873
2870static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = { 2874static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
2871 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf }, 2875 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
2876 { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
2872 { } /* terminate list */ 2877 { } /* terminate list */
2873}; 2878};
2874MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl); 2879MODULE_DEVICE_TABLE(pci, igbvf_pci_tbl);
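
Two of the hunks above (Tx and Rx resource setup) are the vmalloc()+memset() to vzalloc() conversion: vzalloc() hands back already-zeroed memory, so the explicit clear goes away. A minimal sketch of the pattern, with the ring and buffer names mirroring the driver's structures; example_setup_ring itself is illustrative:

    #include <linux/errno.h>
    #include <linux/vmalloc.h>

    /* vzalloc(size) behaves like vmalloc(size) followed by memset(ptr, 0, size):
     * one call instead of two, and no way to forget the clear.
     */
    static int example_setup_ring(struct igbvf_ring *ring)
    {
            int size = sizeof(struct igbvf_buffer) * ring->count;

            ring->buffer_info = vzalloc(size);
            if (!ring->buffer_info)
                    return -ENOMEM;
            return 0;
    }
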
diff --git a/drivers/net/igbvf/regs.h b/drivers/net/igbvf/regs.h
index b9e24ed70d0a..77e18d3d6b15 100644
--- a/drivers/net/igbvf/regs.h
+++ b/drivers/net/igbvf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/igbvf/vf.c b/drivers/net/igbvf/vf.c
index a9a61efa964c..74486a8b009a 100644
--- a/drivers/net/igbvf/vf.c
+++ b/drivers/net/igbvf/vf.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -362,8 +362,8 @@ static s32 e1000_check_for_link_vf(struct e1000_hw *hw)
362 * or a virtual function reset 362 * or a virtual function reset
363 */ 363 */
364 364
365 /* If we were hit with a reset drop the link */ 365 /* If we were hit with a reset or timeout drop the link */
366 if (!mbx->ops.check_for_rst(hw)) 366 if (!mbx->ops.check_for_rst(hw) || !mbx->timeout)
367 mac->get_link_status = true; 367 mac->get_link_status = true;
368 368
369 if (!mac->get_link_status) 369 if (!mac->get_link_status)
diff --git a/drivers/net/igbvf/vf.h b/drivers/net/igbvf/vf.h
index 1e8ce3741a67..d7ed58fcd9bb 100644
--- a/drivers/net/igbvf/vf.h
+++ b/drivers/net/igbvf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel(R) 82576 Virtual Function Linux driver 3 Intel(R) 82576 Virtual Function Linux driver
4 Copyright(c) 2009 Intel Corporation. 4 Copyright(c) 2009 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -39,6 +39,7 @@
39struct e1000_hw; 39struct e1000_hw;
40 40
41#define E1000_DEV_ID_82576_VF 0x10CA 41#define E1000_DEV_ID_82576_VF 0x10CA
42#define E1000_DEV_ID_I350_VF 0x1520
42#define E1000_REVISION_0 0 43#define E1000_REVISION_0 0
43#define E1000_REVISION_1 1 44#define E1000_REVISION_1 1
44#define E1000_REVISION_2 2 45#define E1000_REVISION_2 2
@@ -133,6 +134,7 @@ struct e1000_adv_tx_context_desc {
133enum e1000_mac_type { 134enum e1000_mac_type {
134 e1000_undefined = 0, 135 e1000_undefined = 0,
135 e1000_vfadapt, 136 e1000_vfadapt,
137 e1000_vfadapt_i350,
136 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */ 138 e1000_num_macs /* List is 1-based, so subtract 1 for true count. */
137}; 139};
138 140
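
Taken together, the igbvf hunks above show the full plumbing for the new i350 virtual function: a PCI device ID in vf.h, a new MAC type, a board enum entry, an igbvf_info instance, and a PCI table entry in netdev.c. Collected into one sketch for readability; every name is taken from the diff, only the grouping is new:

    #define E1000_DEV_ID_I350_VF    0x1520

    enum igbvf_boards {
            board_vf,
            board_i350_vf,
    };

    static struct igbvf_info igbvf_i350_vf_info = {
            .mac            = e1000_vfadapt_i350,
            .flags          = 0,
            .pba            = 10,
            .init_ops       = e1000_init_function_pointers_vf,
    };

    static const struct igbvf_info *igbvf_info_tbl[] = {
            [board_vf]      = &igbvf_vf_info,
            [board_i350_vf] = &igbvf_i350_vf_info,
    };

    static DEFINE_PCI_DEVICE_TABLE(igbvf_pci_tbl) = {
            { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_VF), board_vf },
            { PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_VF), board_i350_vf },
            { }     /* terminate list */
    };
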
diff --git a/drivers/net/irda/act200l-sir.c b/drivers/net/irda/act200l-sir.c
index 37ab8c855719..8ff084f1d236 100644
--- a/drivers/net/irda/act200l-sir.c
+++ b/drivers/net/irda/act200l-sir.c
@@ -199,7 +199,7 @@ static int act200l_reset(struct sir_dev *dev)
199{ 199{
200 unsigned state = dev->fsm.substate; 200 unsigned state = dev->fsm.substate;
201 unsigned delay = 0; 201 unsigned delay = 0;
202 u8 control[9] = { 202 static const u8 control[9] = {
203 ACT200L_REG15, 203 ACT200L_REG15,
204 ACT200L_REG13 | ACT200L_SHDW, 204 ACT200L_REG13 | ACT200L_SHDW,
205 ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL, 205 ACT200L_REG21 | ACT200L_EXCK | ACT200L_OSCL,
diff --git a/drivers/net/irda/bfin_sir.h b/drivers/net/irda/bfin_sir.h
index b54a6f08db45..e3b285a67734 100644
--- a/drivers/net/irda/bfin_sir.h
+++ b/drivers/net/irda/bfin_sir.h
@@ -26,6 +26,8 @@
26#include <asm/cacheflush.h> 26#include <asm/cacheflush.h>
27#include <asm/dma.h> 27#include <asm/dma.h>
28#include <asm/portmux.h> 28#include <asm/portmux.h>
29#include <mach/bfin_serial_5xx.h>
30#undef DRIVER_NAME
29 31
30#ifdef CONFIG_SIR_BFIN_DMA 32#ifdef CONFIG_SIR_BFIN_DMA
31struct dma_rx_buf { 33struct dma_rx_buf {
diff --git a/drivers/net/irda/donauboe.c b/drivers/net/irda/donauboe.c
index b626cccbccd1..f81d944fc360 100644
--- a/drivers/net/irda/donauboe.c
+++ b/drivers/net/irda/donauboe.c
@@ -818,9 +818,9 @@ toshoboe_probe (struct toshoboe_cb *self)
818{ 818{
819 int i, j, n; 819 int i, j, n;
820#ifdef USE_MIR 820#ifdef USE_MIR
821 int bauds[] = { 9600, 115200, 4000000, 1152000 }; 821 static const int bauds[] = { 9600, 115200, 4000000, 1152000 };
822#else 822#else
823 int bauds[] = { 9600, 115200, 4000000 }; 823 static const int bauds[] = { 9600, 115200, 4000000 };
824#endif 824#endif
825 unsigned long flags; 825 unsigned long flags;
826 826
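
The act200l and donauboe changes (like the ixgb_param tables further down) are the same micro-cleanup: lookup tables that are never written become static const, so they are emitted once in .rodata instead of being rebuilt on the stack on every call. A tiny illustrative sketch; the function name and index handling are made up:

    #include <linux/kernel.h>

    /* 'bauds' exists once at build time; a plain local array of the same values
     * would be copied onto the stack each time the function ran.
     */
    static int example_pick_baud(unsigned int i)
    {
            static const int bauds[] = { 9600, 115200, 4000000 };

            return bauds[i % ARRAY_SIZE(bauds)];
    }
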
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index 74b20f179cea..cc821de2c966 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -959,7 +959,7 @@ static void mcs_disconnect(struct usb_interface *intf)
959 if (!mcs) 959 if (!mcs)
960 return; 960 return;
961 961
962 flush_scheduled_work(); 962 cancel_work_sync(&mcs->work);
963 963
964 unregister_netdev(mcs->netdev); 964 unregister_netdev(mcs->netdev);
965 free_netdev(mcs->netdev); 965 free_netdev(mcs->netdev);
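
Like the igbvf and ixgb removal paths elsewhere in this series, mcs_disconnect() stops flushing the entire shared workqueue and instead cancels just its own work item, waiting for any in-flight run to finish. A minimal sketch of the pattern; example_dev and example_disconnect are placeholders:

    #include <linux/workqueue.h>

    struct example_dev {
            struct work_struct work;
    };

    /* cancel_work_sync() removes the item if it is still queued and waits for a
     * currently running instance to complete; unlike flush_scheduled_work(), it
     * leaves unrelated work queued by other drivers alone.
     */
    static void example_disconnect(struct example_dev *dev)
    {
            cancel_work_sync(&dev->work);
    }
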
diff --git a/drivers/net/irda/smsc-ircc2.c b/drivers/net/irda/smsc-ircc2.c
index 8c57bfb5f098..1c1677cfea29 100644
--- a/drivers/net/irda/smsc-ircc2.c
+++ b/drivers/net/irda/smsc-ircc2.c
@@ -376,7 +376,7 @@ MODULE_DEVICE_TABLE(pnp, smsc_ircc_pnp_table);
376static int pnp_driver_registered; 376static int pnp_driver_registered;
377 377
378#ifdef CONFIG_PNP 378#ifdef CONFIG_PNP
379static int __init smsc_ircc_pnp_probe(struct pnp_dev *dev, 379static int __devinit smsc_ircc_pnp_probe(struct pnp_dev *dev,
380 const struct pnp_device_id *dev_id) 380 const struct pnp_device_id *dev_id)
381{ 381{
382 unsigned int firbase, sirbase; 382 unsigned int firbase, sirbase;
diff --git a/drivers/net/iseries_veth.c b/drivers/net/iseries_veth.c
index 8df645e78f2e..9ece1fd9889d 100644
--- a/drivers/net/iseries_veth.c
+++ b/drivers/net/iseries_veth.c
@@ -885,17 +885,8 @@ static void veth_stop_connection(struct veth_lpar_connection *cnx)
885 veth_kick_statemachine(cnx); 885 veth_kick_statemachine(cnx);
886 spin_unlock_irq(&cnx->lock); 886 spin_unlock_irq(&cnx->lock);
887 887
888 /* There's a slim chance the reset code has just queued the 888 /* ensure the statemachine runs now and waits for its completion */
889 * statemachine to run in five seconds. If so we need to cancel 889 flush_delayed_work_sync(&cnx->statemachine_wq);
890 * that and requeue the work to run now. */
891 if (cancel_delayed_work(&cnx->statemachine_wq)) {
892 spin_lock_irq(&cnx->lock);
893 veth_kick_statemachine(cnx);
894 spin_unlock_irq(&cnx->lock);
895 }
896
897 /* Wait for the state machine to run. */
898 flush_scheduled_work();
899} 890}
900 891
901static void veth_destroy_connection(struct veth_lpar_connection *cnx) 892static void veth_destroy_connection(struct veth_lpar_connection *cnx)
@@ -1009,15 +1000,10 @@ static int veth_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
1009 return 0; 1000 return 0;
1010} 1001}
1011 1002
1012static u32 veth_get_link(struct net_device *dev)
1013{
1014 return 1;
1015}
1016
1017static const struct ethtool_ops ops = { 1003static const struct ethtool_ops ops = {
1018 .get_drvinfo = veth_get_drvinfo, 1004 .get_drvinfo = veth_get_drvinfo,
1019 .get_settings = veth_get_settings, 1005 .get_settings = veth_get_settings,
1020 .get_link = veth_get_link, 1006 .get_link = ethtool_op_get_link,
1021}; 1007};
1022 1008
1023static const struct net_device_ops veth_netdev_ops = { 1009static const struct net_device_ops veth_netdev_ops = {
@@ -1605,7 +1591,7 @@ static int veth_probe(struct vio_dev *vdev, const struct vio_device_id *id)
1605 } 1591 }
1606 veth_dev[i] = dev; 1592 veth_dev[i] = dev;
1607 1593
1608 port = (struct veth_port*)netdev_priv(dev); 1594 port = netdev_priv(dev);
1609 1595
1610 /* Start the state machine on each connection on this vlan. If we're 1596 /* Start the state machine on each connection on this vlan. If we're
1611 * the first dev to do so this will commence link negotiation */ 1597 * the first dev to do so this will commence link negotiation */
@@ -1658,15 +1644,14 @@ static void __exit veth_module_cleanup(void)
1658 /* Disconnect our "irq" to stop events coming from the Hypervisor. */ 1644 /* Disconnect our "irq" to stop events coming from the Hypervisor. */
1659 HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan); 1645 HvLpEvent_unregisterHandler(HvLpEvent_Type_VirtualLan);
1660 1646
1661 /* Make sure any work queued from Hypervisor callbacks is finished. */
1662 flush_scheduled_work();
1663
1664 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) { 1647 for (i = 0; i < HVMAXARCHITECTEDLPS; ++i) {
1665 cnx = veth_cnx[i]; 1648 cnx = veth_cnx[i];
1666 1649
1667 if (!cnx) 1650 if (!cnx)
1668 continue; 1651 continue;
1669 1652
1653 /* Cancel work queued from Hypervisor callbacks */
1654 cancel_delayed_work_sync(&cnx->statemachine_wq);
1670 /* Remove the connection from sysfs */ 1655 /* Remove the connection from sysfs */
1671 kobject_del(&cnx->kobject); 1656 kobject_del(&cnx->kobject);
1672 /* Drop the driver's reference to the connection */ 1657 /* Drop the driver's reference to the connection */
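
The veth_stop_connection() rewrite above leans on flush_delayed_work_sync(): per the new comment, it makes the pending state-machine work run now and waits for it to finish, which replaces both the cancel-and-requeue dance and the global flush_scheduled_work(). A hedged sketch of the shape, with the connection structure reduced to the one relevant field and example_* names being illustrative:

    #include <linux/workqueue.h>

    struct example_cnx {
            struct delayed_work statemachine_wq;
    };

    /* Ensure the state machine runs now and wait for its completion, exactly as
     * the new comment in veth_stop_connection() describes.
     */
    static void example_stop_connection(struct example_cnx *cnx)
    {
            flush_delayed_work_sync(&cnx->statemachine_wq);
    }
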
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index caa8192fff2a..5639cccb4935 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -98,6 +98,8 @@ static void ixgb_alloc_rx_buffers(struct ixgb_adapter *, int);
98static void ixgb_tx_timeout(struct net_device *dev); 98static void ixgb_tx_timeout(struct net_device *dev);
99static void ixgb_tx_timeout_task(struct work_struct *work); 99static void ixgb_tx_timeout_task(struct work_struct *work);
100 100
101static void ixgb_vlan_strip_enable(struct ixgb_adapter *adapter);
102static void ixgb_vlan_strip_disable(struct ixgb_adapter *adapter);
101static void ixgb_vlan_rx_register(struct net_device *netdev, 103static void ixgb_vlan_rx_register(struct net_device *netdev,
102 struct vlan_group *grp); 104 struct vlan_group *grp);
103static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid); 105static void ixgb_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
@@ -525,7 +527,7 @@ ixgb_remove(struct pci_dev *pdev)
525 struct net_device *netdev = pci_get_drvdata(pdev); 527 struct net_device *netdev = pci_get_drvdata(pdev);
526 struct ixgb_adapter *adapter = netdev_priv(netdev); 528 struct ixgb_adapter *adapter = netdev_priv(netdev);
527 529
528 flush_scheduled_work(); 530 cancel_work_sync(&adapter->tx_timeout_task);
529 531
530 unregister_netdev(netdev); 532 unregister_netdev(netdev);
531 533
@@ -669,13 +671,12 @@ ixgb_setup_tx_resources(struct ixgb_adapter *adapter)
669 int size; 671 int size;
670 672
671 size = sizeof(struct ixgb_buffer) * txdr->count; 673 size = sizeof(struct ixgb_buffer) * txdr->count;
672 txdr->buffer_info = vmalloc(size); 674 txdr->buffer_info = vzalloc(size);
673 if (!txdr->buffer_info) { 675 if (!txdr->buffer_info) {
674 netif_err(adapter, probe, adapter->netdev, 676 netif_err(adapter, probe, adapter->netdev,
675 "Unable to allocate transmit descriptor ring memory\n"); 677 "Unable to allocate transmit descriptor ring memory\n");
676 return -ENOMEM; 678 return -ENOMEM;
677 } 679 }
678 memset(txdr->buffer_info, 0, size);
679 680
680 /* round up to nearest 4K */ 681 /* round up to nearest 4K */
681 682
@@ -759,13 +760,12 @@ ixgb_setup_rx_resources(struct ixgb_adapter *adapter)
759 int size; 760 int size;
760 761
761 size = sizeof(struct ixgb_buffer) * rxdr->count; 762 size = sizeof(struct ixgb_buffer) * rxdr->count;
762 rxdr->buffer_info = vmalloc(size); 763 rxdr->buffer_info = vzalloc(size);
763 if (!rxdr->buffer_info) { 764 if (!rxdr->buffer_info) {
764 netif_err(adapter, probe, adapter->netdev, 765 netif_err(adapter, probe, adapter->netdev,
765 "Unable to allocate receive descriptor ring\n"); 766 "Unable to allocate receive descriptor ring\n");
766 return -ENOMEM; 767 return -ENOMEM;
767 } 768 }
768 memset(rxdr->buffer_info, 0, size);
769 769
770 /* Round up to nearest 4K */ 770 /* Round up to nearest 4K */
771 771
@@ -1078,6 +1078,8 @@ ixgb_set_multi(struct net_device *netdev)
1078 1078
1079 if (netdev->flags & IFF_PROMISC) { 1079 if (netdev->flags & IFF_PROMISC) {
1080 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1080 rctl |= (IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1081 /* disable VLAN filtering */
1082 rctl &= ~IXGB_RCTL_CFIEN;
1081 rctl &= ~IXGB_RCTL_VFE; 1083 rctl &= ~IXGB_RCTL_VFE;
1082 } else { 1084 } else {
1083 if (netdev->flags & IFF_ALLMULTI) { 1085 if (netdev->flags & IFF_ALLMULTI) {
@@ -1086,7 +1088,9 @@ ixgb_set_multi(struct net_device *netdev)
1086 } else { 1088 } else {
1087 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE); 1089 rctl &= ~(IXGB_RCTL_UPE | IXGB_RCTL_MPE);
1088 } 1090 }
1091 /* enable VLAN filtering */
1089 rctl |= IXGB_RCTL_VFE; 1092 rctl |= IXGB_RCTL_VFE;
1093 rctl &= ~IXGB_RCTL_CFIEN;
1090 } 1094 }
1091 1095
1092 if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) { 1096 if (netdev_mc_count(netdev) > IXGB_MAX_NUM_MULTICAST_ADDRESSES) {
@@ -1105,6 +1109,12 @@ ixgb_set_multi(struct net_device *netdev)
1105 1109
1106 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0); 1110 ixgb_mc_addr_list_update(hw, mta, netdev_mc_count(netdev), 0);
1107 } 1111 }
1112
1113 if (netdev->features & NETIF_F_HW_VLAN_RX)
1114 ixgb_vlan_strip_enable(adapter);
1115 else
1116 ixgb_vlan_strip_disable(adapter);
1117
1108} 1118}
1109 1119
1110/** 1120/**
@@ -1252,7 +1262,7 @@ ixgb_tx_csum(struct ixgb_adapter *adapter, struct sk_buff *skb)
1252 1262
1253 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 1263 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
1254 struct ixgb_buffer *buffer_info; 1264 struct ixgb_buffer *buffer_info;
1255 css = skb_transport_offset(skb); 1265 css = skb_checksum_start_offset(skb);
1256 cso = css + skb->csum_offset; 1266 cso = css + skb->csum_offset;
1257 1267
1258 i = adapter->tx_ring.next_to_use; 1268 i = adapter->tx_ring.next_to_use;
@@ -2152,33 +2162,30 @@ static void
2152ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp) 2162ixgb_vlan_rx_register(struct net_device *netdev, struct vlan_group *grp)
2153{ 2163{
2154 struct ixgb_adapter *adapter = netdev_priv(netdev); 2164 struct ixgb_adapter *adapter = netdev_priv(netdev);
2155 u32 ctrl, rctl;
2156 2165
2157 ixgb_irq_disable(adapter);
2158 adapter->vlgrp = grp; 2166 adapter->vlgrp = grp;
2167}
2159 2168
2160 if (grp) { 2169static void
2161 /* enable VLAN tag insert/strip */ 2170ixgb_vlan_strip_enable(struct ixgb_adapter *adapter)
2162 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); 2171{
2163 ctrl |= IXGB_CTRL0_VME; 2172 u32 ctrl;
2164 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2165
2166 /* enable VLAN receive filtering */
2167 2173
2168 rctl = IXGB_READ_REG(&adapter->hw, RCTL); 2174 /* enable VLAN tag insert/strip */
2169 rctl &= ~IXGB_RCTL_CFIEN; 2175 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2170 IXGB_WRITE_REG(&adapter->hw, RCTL, rctl); 2176 ctrl |= IXGB_CTRL0_VME;
2171 } else { 2177 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2172 /* disable VLAN tag insert/strip */ 2178}
2173 2179
2174 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0); 2180static void
2175 ctrl &= ~IXGB_CTRL0_VME; 2181ixgb_vlan_strip_disable(struct ixgb_adapter *adapter)
2176 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl); 2182{
2177 } 2183 u32 ctrl;
2178 2184
2179 /* don't enable interrupts unless we are UP */ 2185 /* disable VLAN tag insert/strip */
2180 if (adapter->netdev->flags & IFF_UP) 2186 ctrl = IXGB_READ_REG(&adapter->hw, CTRL0);
2181 ixgb_irq_enable(adapter); 2187 ctrl &= ~IXGB_CTRL0_VME;
2188 IXGB_WRITE_REG(&adapter->hw, CTRL0, ctrl);
2182} 2189}
2183 2190
2184static void 2191static void
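
The ixgb VLAN rework above pulls the CTRL0.VME (tag strip) toggling out of ixgb_vlan_rx_register() into dedicated helpers and drives them from ixgb_set_multi() based on the NETIF_F_HW_VLAN_RX feature bit, rather than on whether a vlan_group happens to be registered. A sketch of the new call site using only names from the diff; the wrapper function itself is illustrative:

    #include <linux/netdevice.h>

    /* Stripping now follows the advertised feature flag; the helpers are the
     * ixgb_vlan_strip_enable/disable pair added in the hunk above.
     */
    static void example_update_vlan_strip(struct ixgb_adapter *adapter,
                                          struct net_device *netdev)
    {
            if (netdev->features & NETIF_F_HW_VLAN_RX)
                    ixgb_vlan_strip_enable(adapter);
            else
                    ixgb_vlan_strip_disable(adapter);
    }
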
diff --git a/drivers/net/ixgb/ixgb_param.c b/drivers/net/ixgb/ixgb_param.c
index 88a08f056241..dd7fbeb1f7d1 100644
--- a/drivers/net/ixgb/ixgb_param.c
+++ b/drivers/net/ixgb/ixgb_param.c
@@ -191,9 +191,9 @@ struct ixgb_option {
191 } r; 191 } r;
192 struct { /* list_option info */ 192 struct { /* list_option info */
193 int nr; 193 int nr;
194 struct ixgb_opt_list { 194 const struct ixgb_opt_list {
195 int i; 195 int i;
196 char *str; 196 const char *str;
197 } *p; 197 } *p;
198 } l; 198 } l;
199 } arg; 199 } arg;
@@ -226,7 +226,7 @@ ixgb_validate_option(unsigned int *value, const struct ixgb_option *opt)
226 break; 226 break;
227 case list_option: { 227 case list_option: {
228 int i; 228 int i;
229 struct ixgb_opt_list *ent; 229 const struct ixgb_opt_list *ent;
230 230
231 for (i = 0; i < opt->arg.l.nr; i++) { 231 for (i = 0; i < opt->arg.l.nr; i++) {
232 ent = &opt->arg.l.p[i]; 232 ent = &opt->arg.l.p[i];
@@ -322,14 +322,15 @@ ixgb_check_options(struct ixgb_adapter *adapter)
322 } 322 }
323 { /* Flow Control */ 323 { /* Flow Control */
324 324
325 struct ixgb_opt_list fc_list[] = 325 static const struct ixgb_opt_list fc_list[] = {
326 {{ ixgb_fc_none, "Flow Control Disabled" }, 326 { ixgb_fc_none, "Flow Control Disabled" },
327 { ixgb_fc_rx_pause,"Flow Control Receive Only" }, 327 { ixgb_fc_rx_pause, "Flow Control Receive Only" },
328 { ixgb_fc_tx_pause,"Flow Control Transmit Only" }, 328 { ixgb_fc_tx_pause, "Flow Control Transmit Only" },
329 { ixgb_fc_full, "Flow Control Enabled" }, 329 { ixgb_fc_full, "Flow Control Enabled" },
330 { ixgb_fc_default, "Flow Control Hardware Default" }}; 330 { ixgb_fc_default, "Flow Control Hardware Default" }
331 };
331 332
332 const struct ixgb_option opt = { 333 static const struct ixgb_option opt = {
333 .type = list_option, 334 .type = list_option,
334 .name = "Flow Control", 335 .name = "Flow Control",
335 .err = "reading default settings from EEPROM", 336 .err = "reading default settings from EEPROM",
diff --git a/drivers/net/ixgbe/Makefile b/drivers/net/ixgbe/Makefile
index 8f81efb49169..7d7387fbdecd 100644
--- a/drivers/net/ixgbe/Makefile
+++ b/drivers/net/ixgbe/Makefile
@@ -34,7 +34,7 @@ obj-$(CONFIG_IXGBE) += ixgbe.o
34 34
35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ 35ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \
36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ 36 ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \
37 ixgbe_mbx.o 37 ixgbe_mbx.o ixgbe_x540.o
38 38
39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ 39ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \
40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o 40 ixgbe_dcb_82599.o ixgbe_dcb_nl.o
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index ed8703cfffb7..3b8c92463617 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -61,10 +61,8 @@
61#define IXGBE_MIN_RXD 64 61#define IXGBE_MIN_RXD 64
62 62
63/* flow control */ 63/* flow control */
64#define IXGBE_DEFAULT_FCRTL 0x10000
65#define IXGBE_MIN_FCRTL 0x40 64#define IXGBE_MIN_FCRTL 0x40
66#define IXGBE_MAX_FCRTL 0x7FF80 65#define IXGBE_MAX_FCRTL 0x7FF80
67#define IXGBE_DEFAULT_FCRTH 0x20000
68#define IXGBE_MIN_FCRTH 0x600 66#define IXGBE_MIN_FCRTH 0x600
69#define IXGBE_MAX_FCRTH 0x7FFF0 67#define IXGBE_MAX_FCRTH 0x7FFF0
70#define IXGBE_DEFAULT_FCPAUSE 0xFFFF 68#define IXGBE_DEFAULT_FCPAUSE 0xFFFF
@@ -130,7 +128,9 @@ struct ixgbe_tx_buffer {
130 unsigned long time_stamp; 128 unsigned long time_stamp;
131 u16 length; 129 u16 length;
132 u16 next_to_watch; 130 u16 next_to_watch;
133 u16 mapped_as_page; 131 unsigned int bytecount;
132 u16 gso_segs;
133 u8 mapped_as_page;
134}; 134};
135 135
136struct ixgbe_rx_buffer { 136struct ixgbe_rx_buffer {
@@ -146,12 +146,56 @@ struct ixgbe_queue_stats {
146 u64 bytes; 146 u64 bytes;
147}; 147};
148 148
149struct ixgbe_tx_queue_stats {
150 u64 restart_queue;
151 u64 tx_busy;
152 u64 completed;
153 u64 tx_done_old;
154};
155
156struct ixgbe_rx_queue_stats {
157 u64 rsc_count;
158 u64 rsc_flush;
159 u64 non_eop_descs;
160 u64 alloc_rx_page_failed;
161 u64 alloc_rx_buff_failed;
162};
163
164enum ixbge_ring_state_t {
165 __IXGBE_TX_FDIR_INIT_DONE,
166 __IXGBE_TX_DETECT_HANG,
167 __IXGBE_HANG_CHECK_ARMED,
168 __IXGBE_RX_PS_ENABLED,
169 __IXGBE_RX_RSC_ENABLED,
170};
171
172#define ring_is_ps_enabled(ring) \
173 test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
174#define set_ring_ps_enabled(ring) \
175 set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
176#define clear_ring_ps_enabled(ring) \
177 clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
178#define check_for_tx_hang(ring) \
179 test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
180#define set_check_for_tx_hang(ring) \
181 set_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
182#define clear_check_for_tx_hang(ring) \
183 clear_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state)
184#define ring_is_rsc_enabled(ring) \
185 test_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
186#define set_ring_rsc_enabled(ring) \
187 set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
188#define clear_ring_rsc_enabled(ring) \
189 clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state)
149struct ixgbe_ring { 190struct ixgbe_ring {
150 void *desc; /* descriptor ring memory */ 191 void *desc; /* descriptor ring memory */
192 struct device *dev; /* device for DMA mapping */
193 struct net_device *netdev; /* netdev ring belongs to */
151 union { 194 union {
152 struct ixgbe_tx_buffer *tx_buffer_info; 195 struct ixgbe_tx_buffer *tx_buffer_info;
153 struct ixgbe_rx_buffer *rx_buffer_info; 196 struct ixgbe_rx_buffer *rx_buffer_info;
154 }; 197 };
198 unsigned long state;
155 u8 atr_sample_rate; 199 u8 atr_sample_rate;
156 u8 atr_count; 200 u8 atr_count;
157 u16 count; /* amount of descriptors */ 201 u16 count; /* amount of descriptors */
@@ -160,38 +204,30 @@ struct ixgbe_ring {
160 u16 next_to_clean; 204 u16 next_to_clean;
161 205
162 u8 queue_index; /* needed for multiqueue queue management */ 206 u8 queue_index; /* needed for multiqueue queue management */
163 207 u8 reg_idx; /* holds the special value that gets
164#define IXGBE_RING_RX_PS_ENABLED (u8)(1)
165 u8 flags; /* per ring feature flags */
166 u16 head;
167 u16 tail;
168
169 unsigned int total_bytes;
170 unsigned int total_packets;
171
172#ifdef CONFIG_IXGBE_DCA
173 /* cpu for tx queue */
174 int cpu;
175#endif
176
177 u16 work_limit; /* max work per interrupt */
178 u16 reg_idx; /* holds the special value that gets
179 * the hardware register offset 208 * the hardware register offset
180 * associated with this ring, which is 209 * associated with this ring, which is
181 * different for DCB and RSS modes 210 * different for DCB and RSS modes
182 */ 211 */
183 212
213 u16 work_limit; /* max work per interrupt */
214
215 u8 __iomem *tail;
216
217 unsigned int total_bytes;
218 unsigned int total_packets;
219
184 struct ixgbe_queue_stats stats; 220 struct ixgbe_queue_stats stats;
185 struct u64_stats_sync syncp; 221 struct u64_stats_sync syncp;
222 union {
223 struct ixgbe_tx_queue_stats tx_stats;
224 struct ixgbe_rx_queue_stats rx_stats;
225 };
186 int numa_node; 226 int numa_node;
187 unsigned long reinit_state;
188 u64 rsc_count; /* stat for coalesced packets */
189 u64 rsc_flush; /* stats for flushed packets */
190 u32 restart_queue; /* track tx queue restarts */
191 u32 non_eop_descs; /* track hardware descriptor chaining */
192
193 unsigned int size; /* length in bytes */ 227 unsigned int size; /* length in bytes */
194 dma_addr_t dma; /* phys. address of descriptor ring */ 228 dma_addr_t dma; /* phys. address of descriptor ring */
229 struct rcu_head rcu;
230 struct ixgbe_q_vector *q_vector; /* back-pointer to host q_vector */
195} ____cacheline_internodealigned_in_smp; 231} ____cacheline_internodealigned_in_smp;
196 232
197enum ixgbe_ring_f_enum { 233enum ixgbe_ring_f_enum {
@@ -237,6 +273,9 @@ struct ixgbe_q_vector {
237 unsigned int v_idx; /* index of q_vector within array, also used for 273 unsigned int v_idx; /* index of q_vector within array, also used for
238 * finding the bit in EICR and friends that 274 * finding the bit in EICR and friends that
239 * represents the vector for this ring */ 275 * represents the vector for this ring */
276#ifdef CONFIG_IXGBE_DCA
277 int cpu; /* CPU for DCA */
278#endif
240 struct napi_struct napi; 279 struct napi_struct napi;
241 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */ 280 DECLARE_BITMAP(rxr_idx, MAX_RX_QUEUES); /* Rx ring indices */
242 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */ 281 DECLARE_BITMAP(txr_idx, MAX_TX_QUEUES); /* Tx ring indices */
@@ -246,6 +285,7 @@ struct ixgbe_q_vector {
246 u8 rx_itr; 285 u8 rx_itr;
247 u32 eitr; 286 u32 eitr;
248 cpumask_var_t affinity_mask; 287 cpumask_var_t affinity_mask;
288 char name[IFNAMSIZ + 9];
249}; 289};
250 290
251/* Helper macros to switch between ints/sec and what the register uses. 291/* Helper macros to switch between ints/sec and what the register uses.
@@ -294,7 +334,6 @@ struct ixgbe_adapter {
294 u16 bd_number; 334 u16 bd_number;
295 struct work_struct reset_task; 335 struct work_struct reset_task;
296 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; 336 struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
297 char name[MAX_MSIX_COUNT][IFNAMSIZ + 9];
298 struct ixgbe_dcb_config dcb_cfg; 337 struct ixgbe_dcb_config dcb_cfg;
299 struct ixgbe_dcb_config temp_dcb_cfg; 338 struct ixgbe_dcb_config temp_dcb_cfg;
300 u8 dcb_set_bitmap; 339 u8 dcb_set_bitmap;
@@ -417,6 +456,7 @@ struct ixgbe_adapter {
417 int node; 456 int node;
418 struct work_struct check_overtemp_task; 457 struct work_struct check_overtemp_task;
419 u32 interrupt_event; 458 u32 interrupt_event;
459 char lsc_int_name[IFNAMSIZ + 9];
420 460
421 /* SR-IOV */ 461 /* SR-IOV */
422 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); 462 DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS);
@@ -428,17 +468,25 @@ enum ixbge_state_t {
428 __IXGBE_TESTING, 468 __IXGBE_TESTING,
429 __IXGBE_RESETTING, 469 __IXGBE_RESETTING,
430 __IXGBE_DOWN, 470 __IXGBE_DOWN,
431 __IXGBE_FDIR_INIT_DONE,
432 __IXGBE_SFP_MODULE_NOT_FOUND 471 __IXGBE_SFP_MODULE_NOT_FOUND
433}; 472};
434 473
474struct ixgbe_rsc_cb {
475 dma_addr_t dma;
476 u16 skb_cnt;
477 bool delay_unmap;
478};
479#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
480
435enum ixgbe_boards { 481enum ixgbe_boards {
436 board_82598, 482 board_82598,
437 board_82599, 483 board_82599,
484 board_X540,
438}; 485};
439 486
440extern struct ixgbe_info ixgbe_82598_info; 487extern struct ixgbe_info ixgbe_82598_info;
441extern struct ixgbe_info ixgbe_82599_info; 488extern struct ixgbe_info ixgbe_82599_info;
489extern struct ixgbe_info ixgbe_X540_info;
442#ifdef CONFIG_IXGBE_DCB 490#ifdef CONFIG_IXGBE_DCB
443extern const struct dcbnl_rtnl_ops dcbnl_ops; 491extern const struct dcbnl_rtnl_ops dcbnl_ops;
444extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 492extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
@@ -454,50 +502,41 @@ extern void ixgbe_down(struct ixgbe_adapter *adapter);
454extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter); 502extern void ixgbe_reinit_locked(struct ixgbe_adapter *adapter);
455extern void ixgbe_reset(struct ixgbe_adapter *adapter); 503extern void ixgbe_reset(struct ixgbe_adapter *adapter);
456extern void ixgbe_set_ethtool_ops(struct net_device *netdev); 504extern void ixgbe_set_ethtool_ops(struct net_device *netdev);
457extern int ixgbe_setup_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 505extern int ixgbe_setup_rx_resources(struct ixgbe_ring *);
458extern int ixgbe_setup_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 506extern int ixgbe_setup_tx_resources(struct ixgbe_ring *);
459extern void ixgbe_free_rx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 507extern void ixgbe_free_rx_resources(struct ixgbe_ring *);
460extern void ixgbe_free_tx_resources(struct ixgbe_adapter *, struct ixgbe_ring *); 508extern void ixgbe_free_tx_resources(struct ixgbe_ring *);
461extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 509extern void ixgbe_configure_rx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
462extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *); 510extern void ixgbe_configure_tx_ring(struct ixgbe_adapter *,struct ixgbe_ring *);
511extern void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
512 struct ixgbe_ring *);
463extern void ixgbe_update_stats(struct ixgbe_adapter *adapter); 513extern void ixgbe_update_stats(struct ixgbe_adapter *adapter);
464extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); 514extern int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter);
465extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); 515extern void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter);
466extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, 516extern netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *,
467 struct net_device *,
468 struct ixgbe_adapter *, 517 struct ixgbe_adapter *,
469 struct ixgbe_ring *); 518 struct ixgbe_ring *);
470extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *, 519extern void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *,
471 struct ixgbe_tx_buffer *); 520 struct ixgbe_tx_buffer *);
472extern void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 521extern void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16);
473 struct ixgbe_ring *rx_ring,
474 int cleaned_count);
475extern void ixgbe_write_eitr(struct ixgbe_q_vector *); 522extern void ixgbe_write_eitr(struct ixgbe_q_vector *);
476extern int ethtool_ioctl(struct ifreq *ifr); 523extern int ethtool_ioctl(struct ifreq *ifr);
524extern u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index);
477extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); 525extern s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw);
478extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc); 526extern s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc);
479extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc); 527extern s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc);
480extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 528extern s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
481 struct ixgbe_atr_input *input, 529 union ixgbe_atr_hash_dword input,
530 union ixgbe_atr_hash_dword common,
482 u8 queue); 531 u8 queue);
483extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 532extern s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
484 struct ixgbe_atr_input *input, 533 union ixgbe_atr_input *input,
485 struct ixgbe_atr_input_masks *input_masks, 534 struct ixgbe_atr_input_masks *input_masks,
486 u16 soft_id, u8 queue); 535 u16 soft_id, u8 queue);
487extern s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, 536extern void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
488 u16 vlan_id); 537 struct ixgbe_ring *ring);
489extern s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, 538extern void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
490 u32 src_addr); 539 struct ixgbe_ring *ring);
491extern s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input,
492 u32 dst_addr);
493extern s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input,
494 u16 src_port);
495extern s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input,
496 u16 dst_port);
497extern s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input,
498 u16 flex_byte);
499extern s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input,
500 u8 l4type);
501extern void ixgbe_set_rx_mode(struct net_device *netdev); 540extern void ixgbe_set_rx_mode(struct net_device *netdev);
502#ifdef IXGBE_FCOE 541#ifdef IXGBE_FCOE
503extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); 542extern void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter);
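
The ixgbe.h churn above replaces the old per-ring u8 flags field with a state word manipulated through test_bit/set_bit/clear_bit, wrapped by the ring_is_*/set_*/clear_* macros. A small usage sketch built only from the macros and the struct ixgbe_ring state field introduced in the diff; example_configure_rsc is an illustrative caller:

    /* The macros expand to bit operations on ring->state, so the RSC and
     * packet-split flags can be flipped without a separate flags byte.
     */
    static bool example_configure_rsc(struct ixgbe_ring *rx_ring, bool enable)
    {
            if (enable)
                    set_ring_rsc_enabled(rx_ring);
            else
                    clear_ring_rsc_enabled(rx_ring);

            return ring_is_rsc_enabled(rx_ring);
    }
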
diff --git a/drivers/net/ixgbe/ixgbe_82598.c b/drivers/net/ixgbe/ixgbe_82598.c
index 9c02d6014cc4..d0f1d9d2c416 100644
--- a/drivers/net/ixgbe/ixgbe_82598.c
+++ b/drivers/net/ixgbe/ixgbe_82598.c
@@ -38,9 +38,6 @@
38#define IXGBE_82598_MC_TBL_SIZE 128 38#define IXGBE_82598_MC_TBL_SIZE 128
39#define IXGBE_82598_VFT_TBL_SIZE 128 39#define IXGBE_82598_VFT_TBL_SIZE 128
40 40
41static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
42 ixgbe_link_speed *speed,
43 bool *autoneg);
44static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, 41static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
45 ixgbe_link_speed speed, 42 ixgbe_link_speed speed,
46 bool autoneg, 43 bool autoneg,
@@ -156,7 +153,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
156 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 153 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
157 mac->ops.setup_link = &ixgbe_setup_copper_link_82598; 154 mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
158 mac->ops.get_link_capabilities = 155 mac->ops.get_link_capabilities =
159 &ixgbe_get_copper_link_capabilities_82598; 156 &ixgbe_get_copper_link_capabilities_generic;
160 } 157 }
161 158
162 switch (hw->phy.type) { 159 switch (hw->phy.type) {
@@ -274,37 +271,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
274} 271}
275 272
276/** 273/**
277 * ixgbe_get_copper_link_capabilities_82598 - Determines link capabilities
278 * @hw: pointer to hardware structure
279 * @speed: pointer to link speed
280 * @autoneg: boolean auto-negotiation value
281 *
282 * Determines the link capabilities by reading the AUTOC register.
283 **/
284static s32 ixgbe_get_copper_link_capabilities_82598(struct ixgbe_hw *hw,
285 ixgbe_link_speed *speed,
286 bool *autoneg)
287{
288 s32 status = IXGBE_ERR_LINK_SETUP;
289 u16 speed_ability;
290
291 *speed = 0;
292 *autoneg = true;
293
294 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
295 &speed_ability);
296
297 if (status == 0) {
298 if (speed_ability & MDIO_SPEED_10G)
299 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
300 if (speed_ability & MDIO_PMA_SPEED_1000)
301 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
302 }
303
304 return status;
305}
306
307/**
308 * ixgbe_get_media_type_82598 - Determines media type 274 * ixgbe_get_media_type_82598 - Determines media type
309 * @hw: pointer to hardware structure 275 * @hw: pointer to hardware structure
310 * 276 *
@@ -357,6 +323,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
357 u32 fctrl_reg; 323 u32 fctrl_reg;
358 u32 rmcs_reg; 324 u32 rmcs_reg;
359 u32 reg; 325 u32 reg;
326 u32 rx_pba_size;
360 u32 link_speed = 0; 327 u32 link_speed = 0;
361 bool link_up; 328 bool link_up;
362 329
@@ -459,16 +426,18 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw, s32 packetbuf_num)
459 426
460 /* Set up and enable Rx high/low water mark thresholds, enable XON. */ 427 /* Set up and enable Rx high/low water mark thresholds, enable XON. */
461 if (hw->fc.current_mode & ixgbe_fc_tx_pause) { 428 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
462 if (hw->fc.send_xon) { 429 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
463 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 430 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
464 (hw->fc.low_water | IXGBE_FCRTL_XONE)); 431
465 } else { 432 reg = (rx_pba_size - hw->fc.low_water) << 6;
466 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), 433 if (hw->fc.send_xon)
467 hw->fc.low_water); 434 reg |= IXGBE_FCRTL_XONE;
468 } 435 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);
436
437 reg = (rx_pba_size - hw->fc.high_water) << 10;
438 reg |= IXGBE_FCRTH_FCEN;
469 439
470 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), 440 IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
471 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
472 } 441 }
473 442
474 /* Configure pause time (2 TCs per register) */ 443 /* Configure pause time (2 TCs per register) */
@@ -1222,6 +1191,7 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
1222static struct ixgbe_eeprom_operations eeprom_ops_82598 = { 1191static struct ixgbe_eeprom_operations eeprom_ops_82598 = {
1223 .init_params = &ixgbe_init_eeprom_params_generic, 1192 .init_params = &ixgbe_init_eeprom_params_generic,
1224 .read = &ixgbe_read_eerd_generic, 1193 .read = &ixgbe_read_eerd_generic,
1194 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
1225 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 1195 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
1226 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 1196 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
1227}; 1197};
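
The 82598 flow-control hunk drops the fixed defaults (IXGBE_DEFAULT_FCRTL/FCRTH, also removed from ixgbe.h above) and derives the XON/XOFF thresholds from the actual Rx packet-buffer size. Restated as one helper with comments, using only registers and fields that appear in the diff; the shift factors are simply carried over from the new code, with no claim here about the exact register units:

    /* Thresholds become "packet-buffer size minus water mark", scaled as the
     * FCRTL/FCRTH registers require, instead of fixed compile-time defaults.
     */
    static void example_set_fc_thresholds(struct ixgbe_hw *hw, int packetbuf_num)
    {
            u32 rx_pba_size, reg;

            rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
            rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;

            reg = (rx_pba_size - hw->fc.low_water) << 6;
            if (hw->fc.send_xon)
                    reg |= IXGBE_FCRTL_XONE;
            IXGBE_WRITE_REG(hw, IXGBE_FCRTL(packetbuf_num), reg);

            reg = (rx_pba_size - hw->fc.high_water) << 10;
            reg |= IXGBE_FCRTH_FCEN;
            IXGBE_WRITE_REG(hw, IXGBE_FCRTH(packetbuf_num), reg);
    }
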
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 05e6b8cafb39..a21f5817685b 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -56,9 +56,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
56 ixgbe_link_speed speed, 56 ixgbe_link_speed speed,
57 bool autoneg, 57 bool autoneg,
58 bool autoneg_wait_to_complete); 58 bool autoneg_wait_to_complete);
59static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
60 ixgbe_link_speed *speed,
61 bool *autoneg);
62static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, 59static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw,
63 ixgbe_link_speed speed, 60 ixgbe_link_speed speed,
64 bool autoneg, 61 bool autoneg,
@@ -68,9 +65,9 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw);
68static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) 65static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
69{ 66{
70 struct ixgbe_mac_info *mac = &hw->mac; 67 struct ixgbe_mac_info *mac = &hw->mac;
71 if (hw->phy.multispeed_fiber) { 68
72 /* Set up dual speed SFP+ support */ 69 /* enable the laser control functions for SFP+ fiber */
73 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 70 if (mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) {
74 mac->ops.disable_tx_laser = 71 mac->ops.disable_tx_laser =
75 &ixgbe_disable_tx_laser_multispeed_fiber; 72 &ixgbe_disable_tx_laser_multispeed_fiber;
76 mac->ops.enable_tx_laser = 73 mac->ops.enable_tx_laser =
@@ -80,6 +77,12 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
80 mac->ops.disable_tx_laser = NULL; 77 mac->ops.disable_tx_laser = NULL;
81 mac->ops.enable_tx_laser = NULL; 78 mac->ops.enable_tx_laser = NULL;
82 mac->ops.flap_tx_laser = NULL; 79 mac->ops.flap_tx_laser = NULL;
80 }
81
82 if (hw->phy.multispeed_fiber) {
83 /* Set up dual speed SFP+ support */
84 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
85 } else {
83 if ((mac->ops.get_media_type(hw) == 86 if ((mac->ops.get_media_type(hw) ==
84 ixgbe_media_type_backplane) && 87 ixgbe_media_type_backplane) &&
85 (hw->phy.smart_speed == ixgbe_smart_speed_auto || 88 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
@@ -93,6 +96,8 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
93static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) 96static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
94{ 97{
95 s32 ret_val = 0; 98 s32 ret_val = 0;
99 u32 reg_anlp1 = 0;
100 u32 i = 0;
96 u16 list_offset, data_offset, data_value; 101 u16 list_offset, data_offset, data_value;
97 102
98 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { 103 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -119,14 +124,34 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
119 IXGBE_WRITE_FLUSH(hw); 124 IXGBE_WRITE_FLUSH(hw);
120 hw->eeprom.ops.read(hw, ++data_offset, &data_value); 125 hw->eeprom.ops.read(hw, ++data_offset, &data_value);
121 } 126 }
122 /* Now restart DSP by setting Restart_AN */
123 IXGBE_WRITE_REG(hw, IXGBE_AUTOC,
124 (IXGBE_READ_REG(hw, IXGBE_AUTOC) | IXGBE_AUTOC_AN_RESTART));
125 127
126 /* Release the semaphore */ 128 /* Release the semaphore */
127 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); 129 ixgbe_release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM);
128 /* Delay obtaining semaphore again to allow FW access */ 130 /* Delay obtaining semaphore again to allow FW access */
129 msleep(hw->eeprom.semaphore_delay); 131 msleep(hw->eeprom.semaphore_delay);
132
133 /* Now restart DSP by setting Restart_AN and clearing LMS */
134 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, ((IXGBE_READ_REG(hw,
135 IXGBE_AUTOC) & ~IXGBE_AUTOC_LMS_MASK) |
136 IXGBE_AUTOC_AN_RESTART));
137
138 /* Wait for AN to leave state 0 */
139 for (i = 0; i < 10; i++) {
140 msleep(4);
141 reg_anlp1 = IXGBE_READ_REG(hw, IXGBE_ANLP1);
142 if (reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)
143 break;
144 }
145 if (!(reg_anlp1 & IXGBE_ANLP1_AN_STATE_MASK)) {
146 hw_dbg(hw, "sfp module setup not complete\n");
147 ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
148 goto setup_sfp_out;
149 }
150
151 /* Restart DSP by setting Restart_AN and return to SFI mode */
152 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (IXGBE_READ_REG(hw,
153 IXGBE_AUTOC) | IXGBE_AUTOC_LMS_10G_SERIAL |
154 IXGBE_AUTOC_AN_RESTART));
130 } 155 }
131 156
132setup_sfp_out: 157setup_sfp_out:
@@ -174,7 +199,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
174 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { 199 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
175 mac->ops.setup_link = &ixgbe_setup_copper_link_82599; 200 mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
176 mac->ops.get_link_capabilities = 201 mac->ops.get_link_capabilities =
177 &ixgbe_get_copper_link_capabilities_82599; 202 &ixgbe_get_copper_link_capabilities_generic;
178 } 203 }
179 204
180 /* Set necessary function pointers based on phy type */ 205 /* Set necessary function pointers based on phy type */
@@ -184,6 +209,10 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
184 phy->ops.get_firmware_version = 209 phy->ops.get_firmware_version =
185 &ixgbe_get_phy_firmware_version_tnx; 210 &ixgbe_get_phy_firmware_version_tnx;
186 break; 211 break;
212 case ixgbe_phy_aq:
213 phy->ops.get_firmware_version =
214 &ixgbe_get_phy_firmware_version_generic;
215 break;
187 default: 216 default:
188 break; 217 break;
189 } 218 }
@@ -290,37 +319,6 @@ out:
290} 319}
291 320
292/** 321/**
293 * ixgbe_get_copper_link_capabilities_82599 - Determines link capabilities
294 * @hw: pointer to hardware structure
295 * @speed: pointer to link speed
296 * @autoneg: boolean auto-negotiation value
297 *
298 * Determines the link capabilities by reading the AUTOC register.
299 **/
300static s32 ixgbe_get_copper_link_capabilities_82599(struct ixgbe_hw *hw,
301 ixgbe_link_speed *speed,
302 bool *autoneg)
303{
304 s32 status = IXGBE_ERR_LINK_SETUP;
305 u16 speed_ability;
306
307 *speed = 0;
308 *autoneg = true;
309
310 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
311 &speed_ability);
312
313 if (status == 0) {
314 if (speed_ability & MDIO_SPEED_10G)
315 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
316 if (speed_ability & MDIO_PMA_SPEED_1000)
317 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
318 }
319
320 return status;
321}
322
323/**
324 * ixgbe_get_media_type_82599 - Get media type 322 * ixgbe_get_media_type_82599 - Get media type
325 * @hw: pointer to hardware structure 323 * @hw: pointer to hardware structure
326 * 324 *
@@ -332,7 +330,8 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
332 330
333 /* Detect if there is a copper PHY attached. */ 331 /* Detect if there is a copper PHY attached. */
334 if (hw->phy.type == ixgbe_phy_cu_unknown || 332 if (hw->phy.type == ixgbe_phy_cu_unknown ||
335 hw->phy.type == ixgbe_phy_tn) { 333 hw->phy.type == ixgbe_phy_tn ||
334 hw->phy.type == ixgbe_phy_aq) {
336 media_type = ixgbe_media_type_copper; 335 media_type = ixgbe_media_type_copper;
337 goto out; 336 goto out;
338 } 337 }
@@ -342,11 +341,13 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
342 case IXGBE_DEV_ID_82599_KX4_MEZZ: 341 case IXGBE_DEV_ID_82599_KX4_MEZZ:
343 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: 342 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
344 case IXGBE_DEV_ID_82599_KR: 343 case IXGBE_DEV_ID_82599_KR:
344 case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
345 case IXGBE_DEV_ID_82599_XAUI_LOM: 345 case IXGBE_DEV_ID_82599_XAUI_LOM:
346 /* Default device ID is mezzanine card KX/KX4 */ 346 /* Default device ID is mezzanine card KX/KX4 */
347 media_type = ixgbe_media_type_backplane; 347 media_type = ixgbe_media_type_backplane;
348 break; 348 break;
349 case IXGBE_DEV_ID_82599_SFP: 349 case IXGBE_DEV_ID_82599_SFP:
350 case IXGBE_DEV_ID_82599_SFP_FCOE:
350 case IXGBE_DEV_ID_82599_SFP_EM: 351 case IXGBE_DEV_ID_82599_SFP_EM:
351 media_type = ixgbe_media_type_fiber; 352 media_type = ixgbe_media_type_fiber;
352 break; 353 break;
@@ -1002,7 +1003,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw)
1002 udelay(10); 1003 udelay(10);
1003 } 1004 }
1004 if (i >= IXGBE_FDIRCMD_CMD_POLL) { 1005 if (i >= IXGBE_FDIRCMD_CMD_POLL) {
1005 hw_dbg(hw ,"Flow Director previous command isn't complete, " 1006 hw_dbg(hw, "Flow Director previous command isn't complete, "
1006 "aborting table re-initialization.\n"); 1007 "aborting table re-initialization.\n");
1007 return IXGBE_ERR_FDIR_REINIT_FAILED; 1008 return IXGBE_ERR_FDIR_REINIT_FAILED;
1008 } 1009 }
@@ -1112,13 +1113,10 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 pballoc)
1112 /* Move the flexible bytes to use the ethertype - shift 6 words */ 1113 /* Move the flexible bytes to use the ethertype - shift 6 words */
1113 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1114 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1114 1115
1115 fdirctrl |= IXGBE_FDIRCTRL_REPORT_STATUS;
1116 1116
1117 /* Prime the keys for hashing */ 1117 /* Prime the keys for hashing */
1118 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1118 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1119 htonl(IXGBE_ATR_BUCKET_HASH_KEY)); 1119 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1120 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1121 htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
1122 1120
1123 /* 1121 /*
1124 * Poll init-done after we write the register. Estimated times: 1122 * Poll init-done after we write the register. Estimated times:
@@ -1208,10 +1206,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1208 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT); 1206 fdirctrl |= (0x6 << IXGBE_FDIRCTRL_FLEX_SHIFT);
1209 1207
1210 /* Prime the keys for hashing */ 1208 /* Prime the keys for hashing */
1211 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, 1209 IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY);
1212 htonl(IXGBE_ATR_BUCKET_HASH_KEY)); 1210 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY);
1213 IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY,
1214 htonl(IXGBE_ATR_SIGNATURE_HASH_KEY));
1215 1211
1216 /* 1212 /*
1217 * Poll init-done after we write the register. Estimated times: 1213 * Poll init-done after we write the register. Estimated times:
@@ -1250,8 +1246,8 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 pballoc)
1250 * @stream: input bitstream to compute the hash on 1246 * @stream: input bitstream to compute the hash on
1251 * @key: 32-bit hash key 1247 * @key: 32-bit hash key
1252 **/ 1248 **/
1253static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input, 1249static u32 ixgbe_atr_compute_hash_82599(union ixgbe_atr_input *atr_input,
1254 u32 key) 1250 u32 key)
1255{ 1251{
1256 /* 1252 /*
1257 * The algorithm is as follows: 1253 * The algorithm is as follows:
@@ -1271,410 +1267,250 @@ static u16 ixgbe_atr_compute_hash_82599(struct ixgbe_atr_input *atr_input,
1271 * To simplify for programming, the algorithm is implemented 1267 * To simplify for programming, the algorithm is implemented
1272 * in software this way: 1268 * in software this way:
1273 * 1269 *
1274 * Key[31:0], Stream[335:0] 1270 * key[31:0], hi_hash_dword[31:0], lo_hash_dword[31:0], hash[15:0]
1275 * 1271 *
1276 * tmp_key[11 * 32 - 1:0] = 11{Key[31:0] = key concatenated 11 times 1272 * for (i = 0; i < 352; i+=32)
1277 * int_key[350:0] = tmp_key[351:1] 1273 * hi_hash_dword[31:0] ^= Stream[(i+31):i];
1278 * int_stream[365:0] = Stream[14:0] | Stream[335:0] | Stream[335:321]
1279 * 1274 *
1280 * hash[15:0] = 0; 1275 * lo_hash_dword[15:0] ^= Stream[15:0];
1281 * for (i = 0; i < 351; i++) { 1276 * lo_hash_dword[15:0] ^= hi_hash_dword[31:16];
1282 * if (int_key[i]) 1277 * lo_hash_dword[31:16] ^= hi_hash_dword[15:0];
1283 * hash ^= int_stream[(i + 15):i]; 1278 *
1279 * hi_hash_dword[31:0] ^= Stream[351:320];
1280 *
1281 * if(key[0])
1282 * hash[15:0] ^= Stream[15:0];
1283 *
1284 * for (i = 0; i < 16; i++) {
1285 * if (key[i])
1286 * hash[15:0] ^= lo_hash_dword[(i+15):i];
1287 * if (key[i + 16])
1288 * hash[15:0] ^= hi_hash_dword[(i+15):i];
1284 * } 1289 * }
1290 *
1285 */ 1291 */
1292 __be32 common_hash_dword = 0;
1293 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1294 u32 hash_result = 0;
1295 u8 i;
1286 1296
1287 union { 1297 /* record the flow_vm_vlan bits as they are a key part to the hash */
1288 u64 fill[6]; 1298 flow_vm_vlan = ntohl(atr_input->dword_stream[0]);
1289 u32 key[11];
1290 u8 key_stream[44];
1291 } tmp_key;
1292 1299
1293 u8 *stream = (u8 *)atr_input; 1300 /* generate common hash dword */
1294 u8 int_key[44]; /* upper-most bit unused */ 1301 for (i = 10; i; i -= 2)
1295 u8 hash_str[46]; /* upper-most 2 bits unused */ 1302 common_hash_dword ^= atr_input->dword_stream[i] ^
1296 u16 hash_result = 0; 1303 atr_input->dword_stream[i - 1];
1297 int i, j, k, h;
1298 1304
1299 /* 1305 hi_hash_dword = ntohl(common_hash_dword);
1300 * Initialize the fill member to prevent warnings
1301 * on some compilers
1302 */
1303 tmp_key.fill[0] = 0;
1304 1306
1305 /* First load the temporary key stream */ 1307 /* low dword is word swapped version of common */
1306 for (i = 0; i < 6; i++) { 1308 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1307 u64 fillkey = ((u64)key << 32) | key;
1308 tmp_key.fill[i] = fillkey;
1309 }
1310 1309
1311 /* 1310 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1312 * Set the interim key for the hashing. Bit 352 is unused, so we must 1311 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1313 * shift and compensate when building the key.
1314 */
1315 1312
1316 int_key[0] = tmp_key.key_stream[0] >> 1; 1313 /* Process bits 0 and 16 */
1317 for (i = 1, j = 0; i < 44; i++) { 1314 if (key & 0x0001) hash_result ^= lo_hash_dword;
1318 unsigned int this_key = tmp_key.key_stream[j] << 7; 1315 if (key & 0x00010000) hash_result ^= hi_hash_dword;
1319 j++;
1320 int_key[i] = (u8)(this_key | (tmp_key.key_stream[j] >> 1));
1321 }
1322 1316
1323 /* 1317 /*
1324 * Set the interim bit string for the hashing. Bits 368 and 367 are 1318 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1325 * unused, so shift and compensate when building the string. 1319 * delay this because bit 0 of the stream should not be processed
1320 * so we do not add the vlan until after bit 0 was processed
1326 */ 1321 */
1327 hash_str[0] = (stream[40] & 0x7f) >> 1; 1322 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1328 for (i = 1, j = 40; i < 46; i++) {
1329 unsigned int this_str = stream[j] << 7;
1330 j++;
1331 if (j > 41)
1332 j = 0;
1333 hash_str[i] = (u8)(this_str | (stream[j] >> 1));
1334 }
1335
1336 /*
1337 * Now compute the hash. i is the index into hash_str, j is into our
 1338 * key stream, k is counting the number of bits, and h iterates within
1339 * each byte.
1340 */
1341 for (i = 45, j = 43, k = 0; k < 351 && i >= 2 && j >= 0; i--, j--) {
1342 for (h = 0; h < 8 && k < 351; h++, k++) {
1343 if (int_key[j] & (1 << h)) {
1344 /*
1345 * Key bit is set, XOR in the current 16-bit
1346 * string. Example of processing:
1347 * h = 0,
1348 * tmp = (hash_str[i - 2] & 0 << 16) |
1349 * (hash_str[i - 1] & 0xff << 8) |
1350 * (hash_str[i] & 0xff >> 0)
1351 * So tmp = hash_str[15 + k:k], since the
1352 * i + 2 clause rolls off the 16-bit value
1353 * h = 7,
1354 * tmp = (hash_str[i - 2] & 0x7f << 9) |
1355 * (hash_str[i - 1] & 0xff << 1) |
1356 * (hash_str[i] & 0x80 >> 7)
1357 */
1358 int tmp = (hash_str[i] >> h);
1359 tmp |= (hash_str[i - 1] << (8 - h));
1360 tmp |= (int)(hash_str[i - 2] & ((1 << h) - 1))
1361 << (16 - h);
1362 hash_result ^= (u16)tmp;
1363 }
1364 }
1365 }
1366
1367 return hash_result;
1368}
1369
1370/**
1371 * ixgbe_atr_set_vlan_id_82599 - Sets the VLAN id in the ATR input stream
1372 * @input: input stream to modify
1373 * @vlan: the VLAN id to load
1374 **/
1375s32 ixgbe_atr_set_vlan_id_82599(struct ixgbe_atr_input *input, u16 vlan)
1376{
1377 input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] = vlan >> 8;
1378 input->byte_stream[IXGBE_ATR_VLAN_OFFSET] = vlan & 0xff;
1379
1380 return 0;
1381}
1382
1383/**
1384 * ixgbe_atr_set_src_ipv4_82599 - Sets the source IPv4 address
1385 * @input: input stream to modify
1386 * @src_addr: the IP address to load
1387 **/
1388s32 ixgbe_atr_set_src_ipv4_82599(struct ixgbe_atr_input *input, u32 src_addr)
1389{
1390 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] = src_addr >> 24;
1391 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] =
1392 (src_addr >> 16) & 0xff;
1393 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] =
1394 (src_addr >> 8) & 0xff;
1395 input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET] = src_addr & 0xff;
1396
1397 return 0;
1398}
1399
1400/**
1401 * ixgbe_atr_set_dst_ipv4_82599 - Sets the destination IPv4 address
1402 * @input: input stream to modify
1403 * @dst_addr: the IP address to load
1404 **/
1405s32 ixgbe_atr_set_dst_ipv4_82599(struct ixgbe_atr_input *input, u32 dst_addr)
1406{
1407 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] = dst_addr >> 24;
1408 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] =
1409 (dst_addr >> 16) & 0xff;
1410 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] =
1411 (dst_addr >> 8) & 0xff;
1412 input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET] = dst_addr & 0xff;
1413
1414 return 0;
1415}
1416
1417/**
1418 * ixgbe_atr_set_src_port_82599 - Sets the source port
1419 * @input: input stream to modify
1420 * @src_port: the source port to load
1421 **/
1422s32 ixgbe_atr_set_src_port_82599(struct ixgbe_atr_input *input, u16 src_port)
1423{
1424 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1] = src_port >> 8;
1425 input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] = src_port & 0xff;
1426
1427 return 0;
1428}
1429
1430/**
1431 * ixgbe_atr_set_dst_port_82599 - Sets the destination port
1432 * @input: input stream to modify
1433 * @dst_port: the destination port to load
1434 **/
1435s32 ixgbe_atr_set_dst_port_82599(struct ixgbe_atr_input *input, u16 dst_port)
1436{
1437 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1] = dst_port >> 8;
1438 input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] = dst_port & 0xff;
1439
1440 return 0;
1441}
1442
1443/**
1444 * ixgbe_atr_set_flex_byte_82599 - Sets the flexible bytes
1445 * @input: input stream to modify
1446 * @flex_bytes: the flexible bytes to load
1447 **/
1448s32 ixgbe_atr_set_flex_byte_82599(struct ixgbe_atr_input *input, u16 flex_byte)
1449{
1450 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] = flex_byte >> 8;
1451 input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET] = flex_byte & 0xff;
1452
1453 return 0;
1454}
1455
1456/**
1457 * ixgbe_atr_set_l4type_82599 - Sets the layer 4 packet type
1458 * @input: input stream to modify
1459 * @l4type: the layer 4 type value to load
1460 **/
1461s32 ixgbe_atr_set_l4type_82599(struct ixgbe_atr_input *input, u8 l4type)
1462{
1463 input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET] = l4type;
1464
1465 return 0;
1466}
1467
1468/**
1469 * ixgbe_atr_get_vlan_id_82599 - Gets the VLAN id from the ATR input stream
1470 * @input: input stream to search
1471 * @vlan: the VLAN id to load
1472 **/
1473static s32 ixgbe_atr_get_vlan_id_82599(struct ixgbe_atr_input *input, u16 *vlan)
1474{
1475 *vlan = input->byte_stream[IXGBE_ATR_VLAN_OFFSET];
1476 *vlan |= input->byte_stream[IXGBE_ATR_VLAN_OFFSET + 1] << 8;
1477 1323
1478 return 0;
1479}
1480 1324
1481/** 1325 /* process the remaining 30 bits in the key 2 bits at a time */
1482 * ixgbe_atr_get_src_ipv4_82599 - Gets the source IPv4 address 1326 for (i = 15; i; i-- ) {
1483 * @input: input stream to search 1327 if (key & (0x0001 << i)) hash_result ^= lo_hash_dword >> i;
1484 * @src_addr: the IP address to load 1328 if (key & (0x00010000 << i)) hash_result ^= hi_hash_dword >> i;
1485 **/ 1329 }
1486static s32 ixgbe_atr_get_src_ipv4_82599(struct ixgbe_atr_input *input,
1487 u32 *src_addr)
1488{
1489 *src_addr = input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET];
1490 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 1] << 8;
1491 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 2] << 16;
1492 *src_addr |= input->byte_stream[IXGBE_ATR_SRC_IPV4_OFFSET + 3] << 24;
1493 1330
1494 return 0; 1331 return hash_result & IXGBE_ATR_HASH_MASK;
1495} 1332}
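
For reference, a minimal standalone C sketch of the folded hash described in the comment above. The key value and the 15-bit mask are placeholders, and the special handling of the flow/VM-pool/VLAN dword in the driver is omitted here; this only illustrates the fold-then-select idea.

#include <stdint.h>

/* Fold an 11-dword stream into one dword, derive the word-swapped copy,
 * then XOR key-selected shifted copies into a 15-bit hash. */
static uint16_t atr_hash_sketch(const uint32_t stream[11], uint32_t key)
{
        uint32_t hi = 0, lo, hash = 0;
        int i;

        for (i = 0; i < 11; i++)        /* fold the whole stream */
                hi ^= stream[i];

        lo = (hi >> 16) | (hi << 16);   /* word-swapped copy */

        for (i = 0; i < 16; i++) {
                if (key & (1u << i))            /* low key half selects lo */
                        hash ^= lo >> i;
                if (key & (1u << (i + 16)))     /* high key half selects hi */
                        hash ^= hi >> i;
        }

        return hash & 0x7FFF;           /* 15-bit result */
}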
1496 1333
1497/** 1334/*
1498 * ixgbe_atr_get_dst_ipv4_82599 - Gets the destination IPv4 address 1335 * These defines allow us to quickly generate all of the necessary instructions
1499 * @input: input stream to search 1336 * in the function below by simply calling out IXGBE_COMPUTE_SIG_HASH_ITERATION
1500 * @dst_addr: the IP address to load 1337 * for values 0 through 15
1501 **/ 1338 */
1502static s32 ixgbe_atr_get_dst_ipv4_82599(struct ixgbe_atr_input *input, 1339#define IXGBE_ATR_COMMON_HASH_KEY \
1503 u32 *dst_addr) 1340 (IXGBE_ATR_BUCKET_HASH_KEY & IXGBE_ATR_SIGNATURE_HASH_KEY)
1504{ 1341#define IXGBE_COMPUTE_SIG_HASH_ITERATION(_n) \
1505 *dst_addr = input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET]; 1342do { \
1506 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 1] << 8; 1343 u32 n = (_n); \
1507 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 2] << 16; 1344 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << n)) \
1508 *dst_addr |= input->byte_stream[IXGBE_ATR_DST_IPV4_OFFSET + 3] << 24; 1345 common_hash ^= lo_hash_dword >> n; \
1509 1346 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << n)) \
1510 return 0; 1347 bucket_hash ^= lo_hash_dword >> n; \
1511} 1348 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << n)) \
1349 sig_hash ^= lo_hash_dword << (16 - n); \
1350 if (IXGBE_ATR_COMMON_HASH_KEY & (0x01 << (n + 16))) \
1351 common_hash ^= hi_hash_dword >> n; \
1352 else if (IXGBE_ATR_BUCKET_HASH_KEY & (0x01 << (n + 16))) \
1353 bucket_hash ^= hi_hash_dword >> n; \
1354 else if (IXGBE_ATR_SIGNATURE_HASH_KEY & (0x01 << (n + 16))) \
1355 sig_hash ^= hi_hash_dword << (16 - n); \
1356} while (0);
1512 1357
1513/** 1358/**
1514 * ixgbe_atr_get_src_ipv6_82599 - Gets the source IPv6 address 1359 * ixgbe_atr_compute_sig_hash_82599 - Compute the signature hash
1515 * @input: input stream to search 1360 * @stream: input bitstream to compute the hash on
1516 * @src_addr_1: the first 4 bytes of the IP address to load
1517 * @src_addr_2: the second 4 bytes of the IP address to load
1518 * @src_addr_3: the third 4 bytes of the IP address to load
1519 * @src_addr_4: the fourth 4 bytes of the IP address to load
1520 **/
1521static s32 ixgbe_atr_get_src_ipv6_82599(struct ixgbe_atr_input *input,
1522 u32 *src_addr_1, u32 *src_addr_2,
1523 u32 *src_addr_3, u32 *src_addr_4)
1524{
1525 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 12];
1526 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 13] << 8;
1527 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 14] << 16;
1528 *src_addr_1 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 15] << 24;
1529
1530 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 8];
1531 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 9] << 8;
1532 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 10] << 16;
1533 *src_addr_2 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 11] << 24;
1534
1535 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 4];
1536 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 5] << 8;
1537 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 6] << 16;
1538 *src_addr_3 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 7] << 24;
1539
1540 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET];
1541 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 1] << 8;
1542 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 2] << 16;
1543 *src_addr_4 = input->byte_stream[IXGBE_ATR_SRC_IPV6_OFFSET + 3] << 24;
1544
1545 return 0;
1546}
1547
1548/**
1549 * ixgbe_atr_get_src_port_82599 - Gets the source port
1550 * @input: input stream to modify
1551 * @src_port: the source port to load
1552 * 1361 *
1553 * Even though the input is given in big-endian, the FDIRPORT registers 1362 * This function is almost identical to the function above but contains
1554 * expect the ports to be programmed in little-endian. Hence the need to swap 1363 * several optomizations such as unwinding all of the loops, letting the
1555 * endianness when retrieving the data. This can be confusing since the 1364 * compiler work out all of the conditional ifs since the keys are static
1556 * internal hash engine expects it to be big-endian. 1365 * defines, and computing two keys at once since the hashed dword stream
1366 * will be the same for both keys.
1557 **/ 1367 **/
1558static s32 ixgbe_atr_get_src_port_82599(struct ixgbe_atr_input *input, 1368static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input,
1559 u16 *src_port) 1369 union ixgbe_atr_hash_dword common)
1560{ 1370{
1561 *src_port = input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET] << 8; 1371 u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan;
1562 *src_port |= input->byte_stream[IXGBE_ATR_SRC_PORT_OFFSET + 1]; 1372 u32 sig_hash = 0, bucket_hash = 0, common_hash = 0;
1563 1373
1564 return 0; 1374 /* record the flow_vm_vlan bits as they are a key part to the hash */
1565} 1375 flow_vm_vlan = ntohl(input.dword);
1566 1376
1567/** 1377 /* generate common hash dword */
1568 * ixgbe_atr_get_dst_port_82599 - Gets the destination port 1378 hi_hash_dword = ntohl(common.dword);
1569 * @input: input stream to modify
1570 * @dst_port: the destination port to load
1571 *
1572 * Even though the input is given in big-endian, the FDIRPORT registers
1573 * expect the ports to be programmed in little-endian. Hence the need to swap
1574 * endianness when retrieving the data. This can be confusing since the
1575 * internal hash engine expects it to be big-endian.
1576 **/
1577static s32 ixgbe_atr_get_dst_port_82599(struct ixgbe_atr_input *input,
1578 u16 *dst_port)
1579{
1580 *dst_port = input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET] << 8;
1581 *dst_port |= input->byte_stream[IXGBE_ATR_DST_PORT_OFFSET + 1];
1582 1379
1583 return 0; 1380 /* low dword is word swapped version of common */
1584} 1381 lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16);
1585 1382
1586/** 1383 /* apply flow ID/VM pool/VLAN ID bits to hash words */
1587 * ixgbe_atr_get_flex_byte_82599 - Gets the flexible bytes 1384 hi_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan >> 16);
1588 * @input: input stream to modify
1589 * @flex_bytes: the flexible bytes to load
1590 **/
1591static s32 ixgbe_atr_get_flex_byte_82599(struct ixgbe_atr_input *input,
1592 u16 *flex_byte)
1593{
1594 *flex_byte = input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET];
1595 *flex_byte |= input->byte_stream[IXGBE_ATR_FLEX_BYTE_OFFSET + 1] << 8;
1596 1385
1597 return 0; 1386 /* Process bits 0 and 16 */
1598} 1387 IXGBE_COMPUTE_SIG_HASH_ITERATION(0);
1599 1388
1600/** 1389 /*
1601 * ixgbe_atr_get_l4type_82599 - Gets the layer 4 packet type 1390 * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to
1602 * @input: input stream to modify 1391 * delay this because bit 0 of the stream should not be processed
1603 * @l4type: the layer 4 type value to load 1392 * so we do not add the vlan until after bit 0 was processed
1604 **/ 1393 */
1605static s32 ixgbe_atr_get_l4type_82599(struct ixgbe_atr_input *input, 1394 lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16);
1606 u8 *l4type) 1395
1607{ 1396 /* Process remaining 30 bit of the key */
1608 *l4type = input->byte_stream[IXGBE_ATR_L4TYPE_OFFSET]; 1397 IXGBE_COMPUTE_SIG_HASH_ITERATION(1);
1398 IXGBE_COMPUTE_SIG_HASH_ITERATION(2);
1399 IXGBE_COMPUTE_SIG_HASH_ITERATION(3);
1400 IXGBE_COMPUTE_SIG_HASH_ITERATION(4);
1401 IXGBE_COMPUTE_SIG_HASH_ITERATION(5);
1402 IXGBE_COMPUTE_SIG_HASH_ITERATION(6);
1403 IXGBE_COMPUTE_SIG_HASH_ITERATION(7);
1404 IXGBE_COMPUTE_SIG_HASH_ITERATION(8);
1405 IXGBE_COMPUTE_SIG_HASH_ITERATION(9);
1406 IXGBE_COMPUTE_SIG_HASH_ITERATION(10);
1407 IXGBE_COMPUTE_SIG_HASH_ITERATION(11);
1408 IXGBE_COMPUTE_SIG_HASH_ITERATION(12);
1409 IXGBE_COMPUTE_SIG_HASH_ITERATION(13);
1410 IXGBE_COMPUTE_SIG_HASH_ITERATION(14);
1411 IXGBE_COMPUTE_SIG_HASH_ITERATION(15);
1412
1413 /* combine common_hash result with signature and bucket hashes */
1414 bucket_hash ^= common_hash;
1415 bucket_hash &= IXGBE_ATR_HASH_MASK;
1609 1416
1610 return 0; 1417 sig_hash ^= common_hash << 16;
1418 sig_hash &= IXGBE_ATR_HASH_MASK << 16;
1419
1420 /* return completed signature hash */
1421 return sig_hash ^ bucket_hash;
1611} 1422}
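
A rough standalone sketch of the "two keys at once" idea the comment describes: bits set in both keys feed a shared accumulator, bits set in only one key feed that key's own accumulator, and each final hash is its accumulator XORed with the shared one. The key values are placeholders and the shift handling for the signature half is simplified here.

#include <stdint.h>

#define BUCKET_KEY      0x3DAD14E2u     /* placeholder bucket hash key */
#define SIG_KEY         0x174D3614u     /* placeholder signature hash key */
#define COMMON_KEY      (BUCKET_KEY & SIG_KEY)

static void split_hash_sketch(uint32_t lo, uint32_t hi,
                              uint16_t *bucket, uint16_t *sig)
{
        uint32_t common = 0, b = 0, s = 0;
        int n;

        for (n = 0; n < 16; n++) {
                if (COMMON_KEY & (1u << n))
                        common ^= lo >> n;      /* bit set in both keys */
                else if (BUCKET_KEY & (1u << n))
                        b ^= lo >> n;
                else if (SIG_KEY & (1u << n))
                        s ^= lo >> n;

                if (COMMON_KEY & (1u << (n + 16)))
                        common ^= hi >> n;
                else if (BUCKET_KEY & (1u << (n + 16)))
                        b ^= hi >> n;
                else if (SIG_KEY & (1u << (n + 16)))
                        s ^= hi >> n;
        }

        *bucket = (b ^ common) & 0x7FFF;
        *sig    = (s ^ common) & 0x7FFF;
}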
1612 1423
1613/** 1424/**
1614 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter 1425 * ixgbe_atr_add_signature_filter_82599 - Adds a signature hash filter
1615 * @hw: pointer to hardware structure 1426 * @hw: pointer to hardware structure
1616 * @stream: input bitstream 1427 * @input: unique input dword
1428 * @common: compressed common input dword
1617 * @queue: queue index to direct traffic to 1429 * @queue: queue index to direct traffic to
1618 **/ 1430 **/
1619s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, 1431s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1620 struct ixgbe_atr_input *input, 1432 union ixgbe_atr_hash_dword input,
1433 union ixgbe_atr_hash_dword common,
1621 u8 queue) 1434 u8 queue)
1622{ 1435{
1623 u64 fdirhashcmd; 1436 u64 fdirhashcmd;
1624 u64 fdircmd; 1437 u32 fdircmd;
1625 u32 fdirhash;
1626 u16 bucket_hash, sig_hash;
1627 u8 l4type;
1628
1629 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1630 IXGBE_ATR_BUCKET_HASH_KEY);
1631
1632 /* bucket_hash is only 15 bits */
1633 bucket_hash &= IXGBE_ATR_HASH_MASK;
1634
1635 sig_hash = ixgbe_atr_compute_hash_82599(input,
1636 IXGBE_ATR_SIGNATURE_HASH_KEY);
1637
1638 /* Get the l4type in order to program FDIRCMD properly */
1639 /* lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 */
1640 ixgbe_atr_get_l4type_82599(input, &l4type);
1641 1438
1642 /* 1439 /*
1643 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits 1440 * Get the flow_type in order to program FDIRCMD properly
1644 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH. 1441 * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6
1645 */ 1442 */
1646 fdirhash = sig_hash << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash; 1443 switch (input.formatted.flow_type) {
1647 1444 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1648 fdircmd = (IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | 1445 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1649 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN); 1446 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1650 1447 case IXGBE_ATR_FLOW_TYPE_TCPV6:
1651 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1448 case IXGBE_ATR_FLOW_TYPE_UDPV6:
1652 case IXGBE_ATR_L4TYPE_TCP: 1449 case IXGBE_ATR_FLOW_TYPE_SCTPV6:
1653 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP;
1654 break;
1655 case IXGBE_ATR_L4TYPE_UDP:
1656 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP;
1657 break;
1658 case IXGBE_ATR_L4TYPE_SCTP:
1659 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP;
1660 break; 1450 break;
1661 default: 1451 default:
1662 hw_dbg(hw, "Error on l4type input\n"); 1452 hw_dbg(hw, " Error on flow type input\n");
1663 return IXGBE_ERR_CONFIG; 1453 return IXGBE_ERR_CONFIG;
1664 } 1454 }
1665 1455
1666 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) 1456 /* configure FDIRCMD register */
1667 fdircmd |= IXGBE_FDIRCMD_IPV6; 1457 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1458 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1459 fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1460 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1668 1461
1669 fdircmd |= ((u64)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT); 1462 /*
1670 fdirhashcmd = ((fdircmd << 32) | fdirhash); 1463 * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits
1464 * is for FDIRCMD. Then do a 64-bit register write from FDIRHASH.
1465 */
1466 fdirhashcmd = (u64)fdircmd << 32;
1467 fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common);
1671 1468
1672 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); 1469 IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd);
1673 1470
1471 hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd);
1472
1674 return 0; 1473 return 0;
1675} 1474}
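
The 64-bit write at the end of this function packs both register images into one value; a trivial sketch of that packing, with illustrative names:

#include <stdint.h>

/* FDIRCMD goes in the upper dword, the combined signature/bucket hash in
 * the lower dword, so a single 64-bit write starting at FDIRHASH updates
 * both registers in one access. */
static uint64_t pack_fdirhashcmd(uint32_t fdircmd, uint16_t sig_hash,
                                 uint16_t bucket_hash)
{
        uint32_t fdirhash = ((uint32_t)sig_hash << 16) | bucket_hash;

        return ((uint64_t)fdircmd << 32) | fdirhash;
}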
1676 1475
1677/** 1476/**
1477 * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks
1478 * @input_mask: mask to be bit swapped
1479 *
1480 * The source and destination port masks for flow director are bit swapped
 1481 * in that bit 15 affects bit 0, 14 affects 1, 13 affects 2, and so on. In order to
1482 * generate a correctly swapped value we need to bit swap the mask and that
1483 * is what is accomplished by this function.
1484 **/
1485static u32 ixgbe_get_fdirtcpm_82599(struct ixgbe_atr_input_masks *input_masks)
1486{
1487 u32 mask = ntohs(input_masks->dst_port_mask);
1488 mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT;
1489 mask |= ntohs(input_masks->src_port_mask);
1490 mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1);
1491 mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2);
1492 mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4);
1493 return ((mask & 0x00FF00FF) << 8) | ((mask & 0xFF00FF00) >> 8);
1494}
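
A standalone sketch of the masked-swap bit reversal used above; it reverses the bit order within each 16-bit half of the word, so the destination-port mask stays in the upper half and the source-port mask in the lower half:

#include <stdint.h>
#include <assert.h>

static uint32_t reverse_bits_per_half(uint32_t v)
{
        v = ((v & 0x55555555u) << 1) | ((v & 0xAAAAAAAAu) >> 1);        /* bits */
        v = ((v & 0x33333333u) << 2) | ((v & 0xCCCCCCCCu) >> 2);        /* pairs */
        v = ((v & 0x0F0F0F0Fu) << 4) | ((v & 0xF0F0F0F0u) >> 4);        /* nibbles */
        return ((v & 0x00FF00FFu) << 8) | ((v & 0xFF00FF00u) >> 8);     /* bytes */
}

int main(void)
{
        assert(reverse_bits_per_half(0x00000001u) == 0x00008000u);
        assert(reverse_bits_per_half(0x00010000u) == 0x80000000u);
        return 0;
}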
1495
1496/*
1497 * These two macros are meant to address the fact that we have registers
1498 * that are either all or in part big-endian. As a result on big-endian
1499 * systems we will end up byte swapping the value to little-endian before
1500 * it is byte swapped again and written to the hardware in the original
1501 * big-endian format.
1502 */
1503#define IXGBE_STORE_AS_BE32(_value) \
1504 (((u32)(_value) >> 24) | (((u32)(_value) & 0x00FF0000) >> 8) | \
1505 (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24))
1506
1507#define IXGBE_WRITE_REG_BE32(a, reg, value) \
1508 IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value)))
1509
1510#define IXGBE_STORE_AS_BE16(_value) \
1511 (((u16)(_value) >> 8) | ((u16)(_value) << 8))
1512
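A small userspace sketch of the BE32 store helper: it is simply a full byte swap, so a value handed over in host order ends up with its bytes back in big-endian order when a little-endian CPU writes it out.

#include <stdint.h>
#include <assert.h>

static uint32_t store_as_be32(uint32_t v)
{
        return (v >> 24) | ((v & 0x00FF0000u) >> 8) |
               ((v & 0x0000FF00u) << 8) | (v << 24);
}

int main(void)
{
        /* byte order is reversed: 0A 00 00 01 becomes 01 00 00 0A */
        assert(store_as_be32(0x0A000001u) == 0x0100000Au);
        return 0;
}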
1513/**
1678 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter 1514 * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter
1679 * @hw: pointer to hardware structure 1515 * @hw: pointer to hardware structure
1680 * @input: input bitstream 1516 * @input: input bitstream
@@ -1686,135 +1522,139 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw,
1686 * hardware writes must be protected from one another. 1522 * hardware writes must be protected from one another.
1687 **/ 1523 **/
1688s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, 1524s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw,
1689 struct ixgbe_atr_input *input, 1525 union ixgbe_atr_input *input,
1690 struct ixgbe_atr_input_masks *input_masks, 1526 struct ixgbe_atr_input_masks *input_masks,
1691 u16 soft_id, u8 queue) 1527 u16 soft_id, u8 queue)
1692{ 1528{
1693 u32 fdircmd = 0;
1694 u32 fdirhash; 1529 u32 fdirhash;
1695 u32 src_ipv4 = 0, dst_ipv4 = 0; 1530 u32 fdircmd;
1696 u32 src_ipv6_1, src_ipv6_2, src_ipv6_3, src_ipv6_4; 1531 u32 fdirport, fdirtcpm;
1697 u16 src_port, dst_port, vlan_id, flex_bytes; 1532 u32 fdirvlan;
1698 u16 bucket_hash; 1533 /* start with VLAN, flex bytes, VM pool, and IPv6 destination masked */
1699 u8 l4type; 1534 u32 fdirm = IXGBE_FDIRM_VLANID | IXGBE_FDIRM_VLANP | IXGBE_FDIRM_FLEX |
1700 u8 fdirm = 0; 1535 IXGBE_FDIRM_POOL | IXGBE_FDIRM_DIPv6;
1701
1702 /* Get our input values */
1703 ixgbe_atr_get_l4type_82599(input, &l4type);
1704 1536
1705 /* 1537 /*
1706 * Check l4type formatting, and bail out before we touch the hardware 1538 * Check flow_type formatting, and bail out before we touch the hardware
1707 * if there's a configuration issue 1539 * if there's a configuration issue
1708 */ 1540 */
1709 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1541 switch (input->formatted.flow_type) {
1710 case IXGBE_ATR_L4TYPE_TCP: 1542 case IXGBE_ATR_FLOW_TYPE_IPV4:
1711 fdircmd |= IXGBE_FDIRCMD_L4TYPE_TCP; 1543 /* use the L4 protocol mask for raw IPv4/IPv6 traffic */
1712 break; 1544 fdirm |= IXGBE_FDIRM_L4P;
1713 case IXGBE_ATR_L4TYPE_UDP: 1545 case IXGBE_ATR_FLOW_TYPE_SCTPV4:
1714 fdircmd |= IXGBE_FDIRCMD_L4TYPE_UDP; 1546 if (input_masks->dst_port_mask || input_masks->src_port_mask) {
1715 break; 1547 hw_dbg(hw, " Error on src/dst port mask\n");
1716 case IXGBE_ATR_L4TYPE_SCTP: 1548 return IXGBE_ERR_CONFIG;
1717 fdircmd |= IXGBE_FDIRCMD_L4TYPE_SCTP; 1549 }
1550 case IXGBE_ATR_FLOW_TYPE_TCPV4:
1551 case IXGBE_ATR_FLOW_TYPE_UDPV4:
1718 break; 1552 break;
1719 default: 1553 default:
1720 hw_dbg(hw, "Error on l4type input\n"); 1554 hw_dbg(hw, " Error on flow type input\n");
1721 return IXGBE_ERR_CONFIG; 1555 return IXGBE_ERR_CONFIG;
1722 } 1556 }
1723 1557
1724 bucket_hash = ixgbe_atr_compute_hash_82599(input,
1725 IXGBE_ATR_BUCKET_HASH_KEY);
1726
1727 /* bucket_hash is only 15 bits */
1728 bucket_hash &= IXGBE_ATR_HASH_MASK;
1729
1730 ixgbe_atr_get_vlan_id_82599(input, &vlan_id);
1731 ixgbe_atr_get_src_port_82599(input, &src_port);
1732 ixgbe_atr_get_dst_port_82599(input, &dst_port);
1733 ixgbe_atr_get_flex_byte_82599(input, &flex_bytes);
1734
1735 fdirhash = soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT | bucket_hash;
1736
1737 /* Now figure out if we're IPv4 or IPv6 */
1738 if (l4type & IXGBE_ATR_L4TYPE_IPV6_MASK) {
1739 /* IPv6 */
1740 ixgbe_atr_get_src_ipv6_82599(input, &src_ipv6_1, &src_ipv6_2,
1741 &src_ipv6_3, &src_ipv6_4);
1742
1743 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(0), src_ipv6_1);
1744 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(1), src_ipv6_2);
1745 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIPv6(2), src_ipv6_3);
1746 /* The last 4 bytes is the same register as IPv4 */
1747 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv6_4);
1748
1749 fdircmd |= IXGBE_FDIRCMD_IPV6;
1750 fdircmd |= IXGBE_FDIRCMD_IPv6DMATCH;
1751 } else {
1752 /* IPv4 */
1753 ixgbe_atr_get_src_ipv4_82599(input, &src_ipv4);
1754 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPSA, src_ipv4);
1755 }
1756
1757 ixgbe_atr_get_dst_ipv4_82599(input, &dst_ipv4);
1758 IXGBE_WRITE_REG(hw, IXGBE_FDIRIPDA, dst_ipv4);
1759
1760 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, (vlan_id |
1761 (flex_bytes << IXGBE_FDIRVLAN_FLEX_SHIFT)));
1762 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, (src_port |
1763 (dst_port << IXGBE_FDIRPORT_DESTINATION_SHIFT)));
1764
1765 /* 1558 /*
1766 * Program the relevant mask registers. L4type cannot be 1559 * Program the relevant mask registers. If src/dst_port or src/dst_addr
1767 * masked out in this implementation. 1560 * are zero, then assume a full mask for that field. Also assume that
1561 * a VLAN of 0 is unspecified, so mask that out as well. L4type
1562 * cannot be masked out in this implementation.
1768 * 1563 *
1769 * This also assumes IPv4 only. IPv6 masking isn't supported at this 1564 * This also assumes IPv4 only. IPv6 masking isn't supported at this
1770 * point in time. 1565 * point in time.
1771 */ 1566 */
1772 IXGBE_WRITE_REG(hw, IXGBE_FDIRSIP4M, input_masks->src_ip_mask); 1567
1773 IXGBE_WRITE_REG(hw, IXGBE_FDIRDIP4M, input_masks->dst_ip_mask); 1568 /* Program FDIRM */
1774 1569 switch (ntohs(input_masks->vlan_id_mask) & 0xEFFF) {
1775 switch (l4type & IXGBE_ATR_L4TYPE_MASK) { 1570 case 0xEFFF:
1776 case IXGBE_ATR_L4TYPE_TCP: 1571 /* Unmask VLAN ID - bit 0 and fall through to unmask prio */
1777 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, input_masks->src_port_mask); 1572 fdirm &= ~IXGBE_FDIRM_VLANID;
1778 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 1573 case 0xE000:
1779 (IXGBE_READ_REG(hw, IXGBE_FDIRTCPM) | 1574 /* Unmask VLAN prio - bit 1 */
1780 (input_masks->dst_port_mask << 16))); 1575 fdirm &= ~IXGBE_FDIRM_VLANP;
1781 break; 1576 break;
1782 case IXGBE_ATR_L4TYPE_UDP: 1577 case 0x0FFF:
1783 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, input_masks->src_port_mask); 1578 /* Unmask VLAN ID - bit 0 */
1784 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 1579 fdirm &= ~IXGBE_FDIRM_VLANID;
1785 (IXGBE_READ_REG(hw, IXGBE_FDIRUDPM) |
1786 (input_masks->src_port_mask << 16)));
1787 break; 1580 break;
1788 default: 1581 case 0x0000:
1789 /* this already would have failed above */ 1582 /* do nothing, vlans already masked */
1790 break; 1583 break;
1584 default:
1585 hw_dbg(hw, " Error on VLAN mask\n");
1586 return IXGBE_ERR_CONFIG;
1791 } 1587 }
1792 1588
1793 /* Program the last mask register, FDIRM */ 1589 if (input_masks->flex_mask & 0xFFFF) {
1794 if (input_masks->vlan_id_mask) 1590 if ((input_masks->flex_mask & 0xFFFF) != 0xFFFF) {
1795 /* Mask both VLAN and VLANP - bits 0 and 1 */ 1591 hw_dbg(hw, " Error on flexible byte mask\n");
1796 fdirm |= 0x3; 1592 return IXGBE_ERR_CONFIG;
1797 1593 }
1798 if (input_masks->data_mask) 1594 /* Unmask Flex Bytes - bit 4 */
1799 /* Flex bytes need masking, so mask the whole thing - bit 4 */ 1595 fdirm &= ~IXGBE_FDIRM_FLEX;
1800 fdirm |= 0x10; 1596 }
1801 1597
1802 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ 1598 /* Now mask VM pool and destination IPv6 - bits 5 and 2 */
1803 fdirm |= 0x24;
1804
1805 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); 1599 IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm);
1806 1600
1807 fdircmd |= IXGBE_FDIRCMD_CMD_ADD_FLOW; 1601 /* store the TCP/UDP port masks, bit reversed from port layout */
1808 fdircmd |= IXGBE_FDIRCMD_FILTER_UPDATE; 1602 fdirtcpm = ixgbe_get_fdirtcpm_82599(input_masks);
1809 fdircmd |= IXGBE_FDIRCMD_LAST; 1603
1810 fdircmd |= IXGBE_FDIRCMD_QUEUE_EN; 1604 /* write both the same so that UDP and TCP use the same mask */
1811 fdircmd |= queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; 1605 IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm);
1606 IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm);
1607
 1608 /* store source and destination IP masks (big-endian) */
1609 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M,
1610 ~input_masks->src_ip_mask[0]);
1611 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M,
1612 ~input_masks->dst_ip_mask[0]);
1613
1614 /* Apply masks to input data */
1615 input->formatted.vlan_id &= input_masks->vlan_id_mask;
1616 input->formatted.flex_bytes &= input_masks->flex_mask;
1617 input->formatted.src_port &= input_masks->src_port_mask;
1618 input->formatted.dst_port &= input_masks->dst_port_mask;
1619 input->formatted.src_ip[0] &= input_masks->src_ip_mask[0];
1620 input->formatted.dst_ip[0] &= input_masks->dst_ip_mask[0];
1621
 1622 /* record vlan (little-endian) and flex_bytes (big-endian) */
1623 fdirvlan =
1624 IXGBE_STORE_AS_BE16(ntohs(input->formatted.flex_bytes));
1625 fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT;
1626 fdirvlan |= ntohs(input->formatted.vlan_id);
1627 IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan);
1628
 1629 /* record source and destination port (little-endian) */
1630 fdirport = ntohs(input->formatted.dst_port);
1631 fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT;
1632 fdirport |= ntohs(input->formatted.src_port);
1633 IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport);
1634
1635 /* record the first 32 bits of the destination address (big-endian) */
1636 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]);
1637
1638 /* record the source address (big-endian) */
1639 IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]);
1640
1641 /* configure FDIRCMD register */
1642 fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE |
1643 IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN;
1644 fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT;
1645 fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT;
1646
1647 /* we only want the bucket hash so drop the upper 16 bits */
1648 fdirhash = ixgbe_atr_compute_hash_82599(input,
1649 IXGBE_ATR_BUCKET_HASH_KEY);
1650 fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT;
1812 1651
1813 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); 1652 IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash);
1814 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); 1653 IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd);
1815 1654
1816 return 0; 1655 return 0;
1817} 1656}
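
A sketch of the port handling in this path: the ports arrive in network byte order, are ANDed with their masks, then packed little-endian into one register image with the destination port in the upper 16 bits. The function name and parameter layout are illustrative only.

#include <stdint.h>
#include <arpa/inet.h>

static uint32_t pack_fdirport(uint16_t src_port_be, uint16_t dst_port_be,
                              uint16_t src_mask_be, uint16_t dst_mask_be)
{
        uint16_t src = ntohs(src_port_be & src_mask_be);        /* to host order */
        uint16_t dst = ntohs(dst_port_be & dst_mask_be);

        return ((uint32_t)dst << 16) | src;
}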
1657
1818/** 1658/**
1819 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register 1659 * ixgbe_read_analog_reg8_82599 - Reads 8 bit Omer analog register
1820 * @hw: pointer to hardware structure 1660 * @hw: pointer to hardware structure
@@ -1924,6 +1764,7 @@ static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
1924 hw->phy.ops.identify(hw); 1764 hw->phy.ops.identify(hw);
1925 1765
1926 if (hw->phy.type == ixgbe_phy_tn || 1766 if (hw->phy.type == ixgbe_phy_tn ||
1767 hw->phy.type == ixgbe_phy_aq ||
1927 hw->phy.type == ixgbe_phy_cu_unknown) { 1768 hw->phy.type == ixgbe_phy_cu_unknown) {
1928 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, 1769 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
1929 &ext_ability); 1770 &ext_ability);
@@ -2125,51 +1966,6 @@ fw_version_out:
2125 return status; 1966 return status;
2126} 1967}
2127 1968
2128/**
2129 * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
2130 * the EEPROM
2131 * @hw: pointer to hardware structure
2132 * @wwnn_prefix: the alternative WWNN prefix
2133 * @wwpn_prefix: the alternative WWPN prefix
2134 *
2135 * This function will read the EEPROM from the alternative SAN MAC address
2136 * block to check the support for the alternative WWNN/WWPN prefix support.
2137 **/
2138static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2139 u16 *wwpn_prefix)
2140{
2141 u16 offset, caps;
2142 u16 alt_san_mac_blk_offset;
2143
2144 /* clear output first */
2145 *wwnn_prefix = 0xFFFF;
2146 *wwpn_prefix = 0xFFFF;
2147
2148 /* check if alternative SAN MAC is supported */
2149 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2150 &alt_san_mac_blk_offset);
2151
2152 if ((alt_san_mac_blk_offset == 0) ||
2153 (alt_san_mac_blk_offset == 0xFFFF))
2154 goto wwn_prefix_out;
2155
2156 /* check capability in alternative san mac address block */
2157 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2158 hw->eeprom.ops.read(hw, offset, &caps);
2159 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2160 goto wwn_prefix_out;
2161
2162 /* get the corresponding prefix for WWNN/WWPN */
2163 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2164 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2165
2166 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2167 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2168
2169wwn_prefix_out:
2170 return 0;
2171}
2172
2173static struct ixgbe_mac_operations mac_ops_82599 = { 1969static struct ixgbe_mac_operations mac_ops_82599 = {
2174 .init_hw = &ixgbe_init_hw_generic, 1970 .init_hw = &ixgbe_init_hw_generic,
2175 .reset_hw = &ixgbe_reset_hw_82599, 1971 .reset_hw = &ixgbe_reset_hw_82599,
@@ -2181,7 +1977,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2181 .get_mac_addr = &ixgbe_get_mac_addr_generic, 1977 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2182 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, 1978 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
2183 .get_device_caps = &ixgbe_get_device_caps_82599, 1979 .get_device_caps = &ixgbe_get_device_caps_82599,
2184 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599, 1980 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
2185 .stop_adapter = &ixgbe_stop_adapter_generic, 1981 .stop_adapter = &ixgbe_stop_adapter_generic,
2186 .get_bus_info = &ixgbe_get_bus_info_generic, 1982 .get_bus_info = &ixgbe_get_bus_info_generic,
2187 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 1983 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
@@ -2208,12 +2004,15 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2208 .fc_enable = &ixgbe_fc_enable_generic, 2004 .fc_enable = &ixgbe_fc_enable_generic,
2209 .init_uta_tables = &ixgbe_init_uta_tables_generic, 2005 .init_uta_tables = &ixgbe_init_uta_tables_generic,
2210 .setup_sfp = &ixgbe_setup_sfp_modules_82599, 2006 .setup_sfp = &ixgbe_setup_sfp_modules_82599,
2007 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
2008 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
2211}; 2009};
2212 2010
2213static struct ixgbe_eeprom_operations eeprom_ops_82599 = { 2011static struct ixgbe_eeprom_operations eeprom_ops_82599 = {
2214 .init_params = &ixgbe_init_eeprom_params_generic, 2012 .init_params = &ixgbe_init_eeprom_params_generic,
2215 .read = &ixgbe_read_eerd_generic, 2013 .read = &ixgbe_read_eerd_generic,
2216 .write = &ixgbe_write_eeprom_generic, 2014 .write = &ixgbe_write_eeprom_generic,
2015 .calc_checksum = &ixgbe_calc_eeprom_checksum_generic,
2217 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, 2016 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
2218 .update_checksum = &ixgbe_update_eeprom_checksum_generic, 2017 .update_checksum = &ixgbe_update_eeprom_checksum_generic,
2219}; 2018};
@@ -2240,5 +2039,5 @@ struct ixgbe_info ixgbe_82599_info = {
2240 .mac_ops = &mac_ops_82599, 2039 .mac_ops = &mac_ops_82599,
2241 .eeprom_ops = &eeprom_ops_82599, 2040 .eeprom_ops = &eeprom_ops_82599,
2242 .phy_ops = &phy_ops_82599, 2041 .phy_ops = &phy_ops_82599,
2243 .mbx_ops = &mbx_ops_82599, 2042 .mbx_ops = &mbx_ops_generic,
2244}; 2043};
diff --git a/drivers/net/ixgbe/ixgbe_common.c b/drivers/net/ixgbe/ixgbe_common.c
index e3eca1316389..d5ede2df3e42 100644
--- a/drivers/net/ixgbe/ixgbe_common.c
+++ b/drivers/net/ixgbe/ixgbe_common.c
@@ -45,14 +45,12 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count);
45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 45static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); 46static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec);
47static void ixgbe_release_eeprom(struct ixgbe_hw *hw); 47static void ixgbe_release_eeprom(struct ixgbe_hw *hw);
48static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw);
49 48
50static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index); 49static void ixgbe_enable_rar(struct ixgbe_hw *hw, u32 index);
51static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index); 50static void ixgbe_disable_rar(struct ixgbe_hw *hw, u32 index);
52static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); 51static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr);
53static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); 52static void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq);
54static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num); 53static s32 ixgbe_setup_fc(struct ixgbe_hw *hw, s32 packetbuf_num);
55static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
56 54
57/** 55/**
58 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx 56 * ixgbe_start_hw_generic - Prepare hardware for Tx/Rx
@@ -198,30 +196,110 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw)
198} 196}
199 197
200/** 198/**
201 * ixgbe_read_pba_num_generic - Reads part number from EEPROM 199 * ixgbe_read_pba_string_generic - Reads part number string from EEPROM
202 * @hw: pointer to hardware structure 200 * @hw: pointer to hardware structure
203 * @pba_num: stores the part number from the EEPROM 201 * @pba_num: stores the part number string from the EEPROM
202 * @pba_num_size: part number string buffer length
204 * 203 *
205 * Reads the part number from the EEPROM. 204 * Reads the part number string from the EEPROM.
206 **/ 205 **/
207s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num) 206s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
207 u32 pba_num_size)
208{ 208{
209 s32 ret_val; 209 s32 ret_val;
210 u16 data; 210 u16 data;
211 u16 pba_ptr;
212 u16 offset;
213 u16 length;
214
215 if (pba_num == NULL) {
216 hw_dbg(hw, "PBA string buffer was null\n");
217 return IXGBE_ERR_INVALID_ARGUMENT;
218 }
211 219
212 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); 220 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data);
213 if (ret_val) { 221 if (ret_val) {
214 hw_dbg(hw, "NVM Read Error\n"); 222 hw_dbg(hw, "NVM Read Error\n");
215 return ret_val; 223 return ret_val;
216 } 224 }
217 *pba_num = (u32)(data << 16);
218 225
219 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &data); 226 ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr);
220 if (ret_val) { 227 if (ret_val) {
221 hw_dbg(hw, "NVM Read Error\n"); 228 hw_dbg(hw, "NVM Read Error\n");
222 return ret_val; 229 return ret_val;
223 } 230 }
224 *pba_num |= data; 231
232 /*
 233 * if data is not the pointer guard, the PBA must be in legacy format which
 234 * means pba_ptr is actually our second data word for the PBA number
 235 * and we can decode it into an ASCII string
236 */
237 if (data != IXGBE_PBANUM_PTR_GUARD) {
238 hw_dbg(hw, "NVM PBA number is not stored as string\n");
239
240 /* we will need 11 characters to store the PBA */
241 if (pba_num_size < 11) {
242 hw_dbg(hw, "PBA string buffer too small\n");
243 return IXGBE_ERR_NO_SPACE;
244 }
245
246 /* extract hex string from data and pba_ptr */
247 pba_num[0] = (data >> 12) & 0xF;
248 pba_num[1] = (data >> 8) & 0xF;
249 pba_num[2] = (data >> 4) & 0xF;
250 pba_num[3] = data & 0xF;
251 pba_num[4] = (pba_ptr >> 12) & 0xF;
252 pba_num[5] = (pba_ptr >> 8) & 0xF;
253 pba_num[6] = '-';
254 pba_num[7] = 0;
255 pba_num[8] = (pba_ptr >> 4) & 0xF;
256 pba_num[9] = pba_ptr & 0xF;
257
258 /* put a null character on the end of our string */
259 pba_num[10] = '\0';
260
261 /* switch all the data but the '-' to hex char */
262 for (offset = 0; offset < 10; offset++) {
263 if (pba_num[offset] < 0xA)
264 pba_num[offset] += '0';
265 else if (pba_num[offset] < 0x10)
266 pba_num[offset] += 'A' - 0xA;
267 }
268
269 return 0;
270 }
271
272 ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length);
273 if (ret_val) {
274 hw_dbg(hw, "NVM Read Error\n");
275 return ret_val;
276 }
277
278 if (length == 0xFFFF || length == 0) {
279 hw_dbg(hw, "NVM PBA number section invalid length\n");
280 return IXGBE_ERR_PBA_SECTION;
281 }
282
283 /* check if pba_num buffer is big enough */
284 if (pba_num_size < (((u32)length * 2) - 1)) {
285 hw_dbg(hw, "PBA string buffer too small\n");
286 return IXGBE_ERR_NO_SPACE;
287 }
288
289 /* trim pba length from start of string */
290 pba_ptr++;
291 length--;
292
293 for (offset = 0; offset < length; offset++) {
294 ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data);
295 if (ret_val) {
296 hw_dbg(hw, "NVM Read Error\n");
297 return ret_val;
298 }
299 pba_num[offset * 2] = (u8)(data >> 8);
300 pba_num[(offset * 2) + 1] = (u8)(data & 0xFF);
301 }
302 pba_num[offset * 2] = '\0';
225 303
226 return 0; 304 return 0;
227} 305}
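
For the legacy (non-string) PBA format handled above, the two EEPROM data words are split into nibbles and printed as hex characters around a fixed '-' separator; a minimal userspace sketch with made-up word values:

#include <stdint.h>
#include <stdio.h>

static void pba_words_to_string(uint16_t w0, uint16_t w1, char out[11])
{
        const uint8_t nib[10] = {
                (w0 >> 12) & 0xF, (w0 >> 8) & 0xF, (w0 >> 4) & 0xF, w0 & 0xF,
                (w1 >> 12) & 0xF, (w1 >> 8) & 0xF,
                0, 0,                           /* '-' slot and fixed '0' digit */
                (w1 >> 4) & 0xF, w1 & 0xF,
        };
        int i;

        for (i = 0; i < 10; i++)                /* nibble -> ASCII hex */
                out[i] = (nib[i] < 0xA) ? '0' + nib[i] : 'A' + (nib[i] - 0xA);
        out[6] = '-';
        out[10] = '\0';
}

int main(void)
{
        char pba[11];

        pba_words_to_string(0x10D8, 0xB2F8, pba);
        printf("%s\n", pba);    /* "10D8B2-0F8" for these example words */
        return 0;
}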
@@ -638,7 +716,7 @@ out:
638 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the 716 * Polls the status bit (bit 1) of the EERD or EEWR to determine when the
639 * read or write is done respectively. 717 * read or write is done respectively.
640 **/ 718 **/
641static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) 719s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
642{ 720{
643 u32 i; 721 u32 i;
644 u32 reg; 722 u32 reg;
@@ -1009,7 +1087,7 @@ static void ixgbe_release_eeprom(struct ixgbe_hw *hw)
1009 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum 1087 * ixgbe_calc_eeprom_checksum - Calculates and returns the checksum
1010 * @hw: pointer to hardware structure 1088 * @hw: pointer to hardware structure
1011 **/ 1089 **/
1012static u16 ixgbe_calc_eeprom_checksum(struct ixgbe_hw *hw) 1090u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw)
1013{ 1091{
1014 u16 i; 1092 u16 i;
1015 u16 j; 1093 u16 j;
@@ -1072,7 +1150,7 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
1072 status = hw->eeprom.ops.read(hw, 0, &checksum); 1150 status = hw->eeprom.ops.read(hw, 0, &checksum);
1073 1151
1074 if (status == 0) { 1152 if (status == 0) {
1075 checksum = ixgbe_calc_eeprom_checksum(hw); 1153 checksum = hw->eeprom.ops.calc_checksum(hw);
1076 1154
1077 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); 1155 hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum);
1078 1156
@@ -1110,7 +1188,7 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw)
1110 status = hw->eeprom.ops.read(hw, 0, &checksum); 1188 status = hw->eeprom.ops.read(hw, 0, &checksum);
1111 1189
1112 if (status == 0) { 1190 if (status == 0) {
1113 checksum = ixgbe_calc_eeprom_checksum(hw); 1191 checksum = hw->eeprom.ops.calc_checksum(hw);
1114 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, 1192 status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM,
1115 checksum); 1193 checksum);
1116 } else { 1194 } else {
@@ -1595,6 +1673,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1595 u32 mflcn_reg, fccfg_reg; 1673 u32 mflcn_reg, fccfg_reg;
1596 u32 reg; 1674 u32 reg;
1597 u32 rx_pba_size; 1675 u32 rx_pba_size;
1676 u32 fcrtl, fcrth;
1598 1677
1599#ifdef CONFIG_DCB 1678#ifdef CONFIG_DCB
1600 if (hw->fc.requested_mode == ixgbe_fc_pfc) 1679 if (hw->fc.requested_mode == ixgbe_fc_pfc)
@@ -1671,41 +1750,21 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw, s32 packetbuf_num)
1671 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); 1750 IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg);
1672 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); 1751 IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg);
1673 1752
1674 reg = IXGBE_READ_REG(hw, IXGBE_MTQC); 1753 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1675 /* Thresholds are different for link flow control when in DCB mode */ 1754 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
1676 if (reg & IXGBE_MTQC_RT_ENA) {
1677 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(packetbuf_num));
1678 1755
1679 /* Always disable XON for LFC when in DCB mode */ 1756 fcrth = (rx_pba_size - hw->fc.high_water) << 10;
1680 reg = (rx_pba_size >> 5) & 0xFFE0; 1757 fcrtl = (rx_pba_size - hw->fc.low_water) << 10;
1681 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), reg);
1682 1758
1683 reg = (rx_pba_size >> 2) & 0xFFE0; 1759 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1684 if (hw->fc.current_mode & ixgbe_fc_tx_pause) 1760 fcrth |= IXGBE_FCRTH_FCEN;
1685 reg |= IXGBE_FCRTH_FCEN; 1761 if (hw->fc.send_xon)
1686 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), reg); 1762 fcrtl |= IXGBE_FCRTL_XONE;
1687 } else {
1688 /*
1689 * Set up and enable Rx high/low water mark thresholds,
1690 * enable XON.
1691 */
1692 if (hw->fc.current_mode & ixgbe_fc_tx_pause) {
1693 if (hw->fc.send_xon) {
1694 IXGBE_WRITE_REG(hw,
1695 IXGBE_FCRTL_82599(packetbuf_num),
1696 (hw->fc.low_water |
1697 IXGBE_FCRTL_XONE));
1698 } else {
1699 IXGBE_WRITE_REG(hw,
1700 IXGBE_FCRTL_82599(packetbuf_num),
1701 hw->fc.low_water);
1702 }
1703
1704 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num),
1705 (hw->fc.high_water | IXGBE_FCRTH_FCEN));
1706 }
1707 } 1763 }
1708 1764
1765 IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(packetbuf_num), fcrth);
1766 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(packetbuf_num), fcrtl);
1767
1709 /* Configure pause time (2 TCs per register) */ 1768 /* Configure pause time (2 TCs per register) */
1710 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2)); 1769 reg = IXGBE_READ_REG(hw, IXGBE_FCTTV(packetbuf_num / 2));
1711 if ((packetbuf_num & 1) == 0) 1770 if ((packetbuf_num & 1) == 0)
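
A sketch of the threshold arithmetic in the rewritten flow-control block above: the Rx packet-buffer size and the high/low water marks are in kilobyte units, and the << 10 converts the difference into the byte-granular register value. The flag values below are illustrative only.

#include <stdint.h>

#define FCRTH_FCEN      0x80000000u     /* illustrative "flow control enable" bit */
#define FCRTL_XONE      0x80000000u     /* illustrative "XON enable" bit */

static void fc_thresholds(uint32_t rx_pba_kb, uint32_t high_water_kb,
                          uint32_t low_water_kb, int tx_pause, int send_xon,
                          uint32_t *fcrth, uint32_t *fcrtl)
{
        *fcrth = (rx_pba_kb - high_water_kb) << 10;     /* KB -> bytes */
        *fcrtl = (rx_pba_kb - low_water_kb) << 10;

        if (tx_pause) {
                *fcrth |= FCRTH_FCEN;
                if (send_xon)
                        *fcrtl |= FCRTL_XONE;
        }
}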
@@ -2705,3 +2764,112 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
2705 2764
2706 return 0; 2765 return 0;
2707} 2766}
2767
2768/**
 2769 * ixgbe_get_wwn_prefix_generic - Get alternative WWNN/WWPN prefix from
2770 * the EEPROM
2771 * @hw: pointer to hardware structure
2772 * @wwnn_prefix: the alternative WWNN prefix
2773 * @wwpn_prefix: the alternative WWPN prefix
2774 *
2775 * This function will read the EEPROM from the alternative SAN MAC address
2776 * block to check the support for the alternative WWNN/WWPN prefix support.
2777 **/
2778s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2779 u16 *wwpn_prefix)
2780{
2781 u16 offset, caps;
2782 u16 alt_san_mac_blk_offset;
2783
2784 /* clear output first */
2785 *wwnn_prefix = 0xFFFF;
2786 *wwpn_prefix = 0xFFFF;
2787
2788 /* check if alternative SAN MAC is supported */
2789 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2790 &alt_san_mac_blk_offset);
2791
2792 if ((alt_san_mac_blk_offset == 0) ||
2793 (alt_san_mac_blk_offset == 0xFFFF))
2794 goto wwn_prefix_out;
2795
2796 /* check capability in alternative san mac address block */
2797 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2798 hw->eeprom.ops.read(hw, offset, &caps);
2799 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2800 goto wwn_prefix_out;
2801
2802 /* get the corresponding prefix for WWNN/WWPN */
2803 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2804 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2805
2806 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2807 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2808
2809wwn_prefix_out:
2810 return 0;
2811}
2812
2813/**
2814 * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing
2815 * @hw: pointer to hardware structure
2816 * @enable: enable or disable switch for anti-spoofing
2817 * @pf: Physical Function pool - do not enable anti-spoofing for the PF
2818 *
2819 **/
2820void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf)
2821{
2822 int j;
2823 int pf_target_reg = pf >> 3;
2824 int pf_target_shift = pf % 8;
2825 u32 pfvfspoof = 0;
2826
2827 if (hw->mac.type == ixgbe_mac_82598EB)
2828 return;
2829
2830 if (enable)
2831 pfvfspoof = IXGBE_SPOOF_MACAS_MASK;
2832
2833 /*
2834 * PFVFSPOOF register array is size 8 with 8 bits assigned to
2835 * MAC anti-spoof enables in each register array element.
2836 */
2837 for (j = 0; j < IXGBE_PFVFSPOOF_REG_COUNT; j++)
2838 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof);
2839
2840 /* If not enabling anti-spoofing then done */
2841 if (!enable)
2842 return;
2843
2844 /*
2845 * The PF should be allowed to spoof so that it can support
2846 * emulation mode NICs. Reset the bit assigned to the PF
2847 */
2848 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg));
2849 pfvfspoof ^= (1 << pf_target_shift);
2850 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(pf_target_reg), pfvfspoof);
2851}
2852
2853/**
2854 * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing
2855 * @hw: pointer to hardware structure
2856 * @enable: enable or disable switch for VLAN anti-spoofing
 2857 * @vf: Virtual Function pool - VF pool to set for VLAN anti-spoofing
2858 *
2859 **/
2860void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf)
2861{
2862 int vf_target_reg = vf >> 3;
2863 int vf_target_shift = vf % 8 + IXGBE_SPOOF_VLANAS_SHIFT;
2864 u32 pfvfspoof;
2865
2866 if (hw->mac.type == ixgbe_mac_82598EB)
2867 return;
2868
2869 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg));
2870 if (enable)
2871 pfvfspoof |= (1 << vf_target_shift);
2872 else
2873 pfvfspoof &= ~(1 << vf_target_shift);
2874 IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof);
2875}
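
Both anti-spoofing helpers above reduce to the same indexing: eight pools per 32-bit PFVFSPOOF element, with the VLAN enables sitting above the MAC enables. A small sketch of that index/bit computation; the VLAN shift value is an assumption.

#include <stdint.h>

#define SPOOF_VLANAS_SHIFT      8       /* assumed offset of the VLAN enable bits */

static void spoof_reg_bit(int pool, int vlan_half,
                          int *reg_index, uint32_t *bit_mask)
{
        int shift = pool % 8;           /* bit within the register */

        if (vlan_half)
                shift += SPOOF_VLANAS_SHIFT;

        *reg_index = pool >> 3;         /* which PFVFSPOOF[] element */
        *bit_mask = 1u << shift;
}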
diff --git a/drivers/net/ixgbe/ixgbe_common.h b/drivers/net/ixgbe/ixgbe_common.h
index 424c223437dc..66ed045a8cf0 100644
--- a/drivers/net/ixgbe/ixgbe_common.h
+++ b/drivers/net/ixgbe/ixgbe_common.h
@@ -35,7 +35,8 @@ s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw);
35s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw); 35s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw);
36s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw); 36s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw);
37s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); 37s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw);
38s32 ixgbe_read_pba_num_generic(struct ixgbe_hw *hw, u32 *pba_num); 38s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num,
39 u32 pba_num_size);
39s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); 40s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr);
40s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); 41s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw);
41void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); 42void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw);
@@ -49,9 +50,11 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data);
49s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data); 50s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data);
50s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, 51s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
51 u16 *data); 52 u16 *data);
53u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw);
52s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, 54s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw,
53 u16 *checksum_val); 55 u16 *checksum_val);
54s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); 56s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw);
57s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg);
55 58
56s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, 59s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq,
57 u32 enable_addr); 60 u32 enable_addr);
@@ -81,9 +84,12 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw);
81s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, 84s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw,
82 ixgbe_link_speed *speed, 85 ixgbe_link_speed *speed,
83 bool *link_up, bool link_up_wait_to_complete); 86 bool *link_up, bool link_up_wait_to_complete);
84 87s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
88 u16 *wwpn_prefix);
85s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); 89s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index);
86s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); 90s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index);
91void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf);
92void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf);
87 93
88#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg))) 94#define IXGBE_WRITE_REG(a, reg, value) writel((value), ((a)->hw_addr + (reg)))
89 95
diff --git a/drivers/net/ixgbe/ixgbe_dcb.c b/drivers/net/ixgbe/ixgbe_dcb.c
index 0d44c6470ca3..d16c260c1f50 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.c
+++ b/drivers/net/ixgbe/ixgbe_dcb.c
@@ -42,7 +42,8 @@
42 * It should be called only after the rules are checked by 42 * It should be called only after the rules are checked by
43 * ixgbe_dcb_check_config(). 43 * ixgbe_dcb_check_config().
44 */ 44 */
45s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config, 45s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
46 struct ixgbe_dcb_config *dcb_config,
46 int max_frame, u8 direction) 47 int max_frame, u8 direction)
47{ 48{
48 struct tc_bw_alloc *p; 49 struct tc_bw_alloc *p;
@@ -124,7 +125,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *dcb_config,
124 * credit may not be enough to send out a TSO 125 * credit may not be enough to send out a TSO
125 * packet in descriptor plane arbitration. 126 * packet in descriptor plane arbitration.
126 */ 127 */
127 if (credit_max && 128 if ((hw->mac.type == ixgbe_mac_82598EB) &&
129 credit_max &&
128 (credit_max < MINIMUM_CREDIT_FOR_TSO)) 130 (credit_max < MINIMUM_CREDIT_FOR_TSO))
129 credit_max = MINIMUM_CREDIT_FOR_TSO; 131 credit_max = MINIMUM_CREDIT_FOR_TSO;
130 132
@@ -150,10 +152,17 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
150 struct ixgbe_dcb_config *dcb_config) 152 struct ixgbe_dcb_config *dcb_config)
151{ 153{
152 s32 ret = 0; 154 s32 ret = 0;
153 if (hw->mac.type == ixgbe_mac_82598EB) 155 switch (hw->mac.type) {
156 case ixgbe_mac_82598EB:
154 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config); 157 ret = ixgbe_dcb_hw_config_82598(hw, dcb_config);
155 else if (hw->mac.type == ixgbe_mac_82599EB) 158 break;
159 case ixgbe_mac_82599EB:
160 case ixgbe_mac_X540:
156 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config); 161 ret = ixgbe_dcb_hw_config_82599(hw, dcb_config);
162 break;
163 default:
164 break;
165 }
157 return ret; 166 return ret;
158} 167}
159 168
diff --git a/drivers/net/ixgbe/ixgbe_dcb.h b/drivers/net/ixgbe/ixgbe_dcb.h
index 0208a87b129e..1cfe38ee1644 100644
--- a/drivers/net/ixgbe/ixgbe_dcb.h
+++ b/drivers/net/ixgbe/ixgbe_dcb.h
@@ -150,7 +150,8 @@ struct ixgbe_dcb_config {
150/* DCB driver APIs */ 150/* DCB driver APIs */
151 151
152/* DCB credits calculation */ 152/* DCB credits calculation */
153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_dcb_config *, int, u8); 153s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *,
154 struct ixgbe_dcb_config *, int, u8);
154 155
155/* DCB hw initialization */ 156/* DCB hw initialization */
156s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); 157s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *);
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ixgbe/ixgbe_dcb_82598.c
index 50288bcadc59..9a5e89c12e05 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82598.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82598.c
@@ -256,21 +256,17 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw,
256 * for each traffic class. 256 * for each traffic class.
257 */ 257 */
258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 258 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
259 if (dcb_config->rx_pba_cfg == pba_equal) { 259 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
260 rx_pba_size = IXGBE_RXPBSIZE_64KB; 260 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
261 } else { 261 reg = (rx_pba_size - hw->fc.low_water) << 10;
262 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB
263 : IXGBE_RXPBSIZE_48KB;
264 }
265 262
266 reg = ((rx_pba_size >> 5) & 0xFFF0);
267 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 263 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
268 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 264 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
269 reg |= IXGBE_FCRTL_XONE; 265 reg |= IXGBE_FCRTL_XONE;
270 266
271 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg); 267 IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), reg);
272 268
273 reg = ((rx_pba_size >> 2) & 0xFFF0); 269 reg = (rx_pba_size - hw->fc.high_water) << 10;
274 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx || 270 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx ||
275 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full) 271 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full)
276 reg |= IXGBE_FCRTH_FCEN; 272 reg |= IXGBE_FCRTH_FCEN;
diff --git a/drivers/net/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ixgbe/ixgbe_dcb_82599.c
index 05f224715073..374e1f74d0f5 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_82599.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_82599.c
@@ -251,19 +251,17 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw,
251 251
252 /* Configure PFC Tx thresholds per TC */ 252 /* Configure PFC Tx thresholds per TC */
253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { 253 for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
254 if (dcb_config->rx_pba_cfg == pba_equal) 254 rx_pba_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
255 rx_pba_size = IXGBE_RXPBSIZE_64KB; 255 rx_pba_size >>= IXGBE_RXPBSIZE_SHIFT;
256 else 256
257 rx_pba_size = (i < 4) ? IXGBE_RXPBSIZE_80KB 257 reg = (rx_pba_size - hw->fc.low_water) << 10;
258 : IXGBE_RXPBSIZE_48KB;
259 258
260 reg = ((rx_pba_size >> 5) & 0xFFE0);
261 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 259 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
262 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) 260 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
263 reg |= IXGBE_FCRTL_XONE; 261 reg |= IXGBE_FCRTL_XONE;
264 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg); 262 IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), reg);
265 263
266 reg = ((rx_pba_size >> 2) & 0xFFE0); 264 reg = (rx_pba_size - hw->fc.high_water) << 10;
267 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full || 265 if (dcb_config->tc_config[i].dcb_pfc == pfc_enabled_full ||
268 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx) 266 dcb_config->tc_config[i].dcb_pfc == pfc_enabled_tx)
269 reg |= IXGBE_FCRTH_FCEN; 267 reg |= IXGBE_FCRTH_FCEN;
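
Both PFC hunks above make the same change: rather than assuming a fixed packet-buffer split, the per-TC buffer size is read back from RXPBSIZE(i) and the XON/XOFF thresholds are derived from it and the configured water marks. Below is a self-contained sketch of that arithmetic, not driver code: the sample buffer size and water mark are placeholders, and the two constants are reproduced here only so the example compiles (they are believed to match ixgbe_type.h).

#include <stdint.h>
#include <stdio.h>

#define IXGBE_RXPBSIZE_SHIFT 10          /* RXPBSIZE holds bytes; >>10 gives KB */
#define IXGBE_FCRTL_XONE     0x80000000u /* enable XON frame transmission */

static uint32_t fcrtl_value(uint32_t rxpbsize_reg, uint32_t water_mark_kb)
{
        uint32_t pba_kb = rxpbsize_reg >> IXGBE_RXPBSIZE_SHIFT;

        /* threshold = (buffer size - water mark), converted back to bytes */
        return ((pba_kb - water_mark_kb) << 10) | IXGBE_FCRTL_XONE;
}

int main(void)
{
        /* e.g. an 80 KB (0x14000-byte) Rx packet buffer and an 8 KB water mark */
        printf("FCRTL = 0x%08x\n", (unsigned)fcrtl_value(0x14000, 8));
        return 0;
}
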
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index b53b465e24af..bf566e8a455e 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -130,15 +130,21 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
130 netdev->netdev_ops->ndo_stop(netdev); 130 netdev->netdev_ops->ndo_stop(netdev);
131 ixgbe_clear_interrupt_scheme(adapter); 131 ixgbe_clear_interrupt_scheme(adapter);
132 132
133 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 133 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
134 switch (adapter->hw.mac.type) {
135 case ixgbe_mac_82598EB:
134 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 136 adapter->last_lfc_mode = adapter->hw.fc.current_mode;
135 adapter->hw.fc.requested_mode = ixgbe_fc_none; 137 adapter->hw.fc.requested_mode = ixgbe_fc_none;
136 } 138 break;
137 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 139 case ixgbe_mac_82599EB:
138 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 140 case ixgbe_mac_X540:
139 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 141 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
140 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 142 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
143 break;
144 default:
145 break;
141 } 146 }
147
142 adapter->flags |= IXGBE_FLAG_DCB_ENABLED; 148 adapter->flags |= IXGBE_FLAG_DCB_ENABLED;
143 ixgbe_init_interrupt_scheme(adapter); 149 ixgbe_init_interrupt_scheme(adapter);
144 if (netif_running(netdev)) 150 if (netif_running(netdev))
@@ -155,8 +161,14 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
155 adapter->dcb_cfg.pfc_mode_enable = false; 161 adapter->dcb_cfg.pfc_mode_enable = false;
156 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 162 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
157 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 163 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
158 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 164 switch (adapter->hw.mac.type) {
165 case ixgbe_mac_82599EB:
166 case ixgbe_mac_X540:
159 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; 167 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
168 break;
169 default:
170 break;
171 }
160 172
161 ixgbe_init_interrupt_scheme(adapter); 173 ixgbe_init_interrupt_scheme(adapter);
162 if (netif_running(netdev)) 174 if (netif_running(netdev))
@@ -178,9 +190,14 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
178 for (i = 0; i < netdev->addr_len; i++) 190 for (i = 0; i < netdev->addr_len; i++)
179 perm_addr[i] = adapter->hw.mac.perm_addr[i]; 191 perm_addr[i] = adapter->hw.mac.perm_addr[i];
180 192
181 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 193 switch (adapter->hw.mac.type) {
194 case ixgbe_mac_82599EB:
195 case ixgbe_mac_X540:
182 for (j = 0; j < netdev->addr_len; j++, i++) 196 for (j = 0; j < netdev->addr_len; j++, i++)
183 perm_addr[i] = adapter->hw.mac.san_addr[j]; 197 perm_addr[i] = adapter->hw.mac.san_addr[j];
198 break;
199 default:
200 break;
184 } 201 }
185} 202}
186 203
@@ -366,15 +383,29 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
366 } 383 }
367 384
368 if (adapter->dcb_cfg.pfc_mode_enable) { 385 if (adapter->dcb_cfg.pfc_mode_enable) {
369 if ((adapter->hw.mac.type != ixgbe_mac_82598EB) && 386 switch (adapter->hw.mac.type) {
370 (adapter->hw.fc.current_mode != ixgbe_fc_pfc)) 387 case ixgbe_mac_82599EB:
371 adapter->last_lfc_mode = adapter->hw.fc.current_mode; 388 case ixgbe_mac_X540:
389 if (adapter->hw.fc.current_mode != ixgbe_fc_pfc)
390 adapter->last_lfc_mode =
391 adapter->hw.fc.current_mode;
392 break;
393 default:
394 break;
395 }
372 adapter->hw.fc.requested_mode = ixgbe_fc_pfc; 396 adapter->hw.fc.requested_mode = ixgbe_fc_pfc;
373 } else { 397 } else {
374 if (adapter->hw.mac.type != ixgbe_mac_82598EB) 398 switch (adapter->hw.mac.type) {
375 adapter->hw.fc.requested_mode = adapter->last_lfc_mode; 399 case ixgbe_mac_82598EB:
376 else
377 adapter->hw.fc.requested_mode = ixgbe_fc_none; 400 adapter->hw.fc.requested_mode = ixgbe_fc_none;
401 break;
402 case ixgbe_mac_82599EB:
403 case ixgbe_mac_X540:
404 adapter->hw.fc.requested_mode = adapter->last_lfc_mode;
405 break;
406 default:
407 break;
408 }
378 } 409 }
379 410
380 if (adapter->dcb_set_bitmap & BIT_RESETLINK) { 411 if (adapter->dcb_set_bitmap & BIT_RESETLINK) {
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 3dc731c22ff2..2002ea88ca2a 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -185,6 +185,16 @@ static int ixgbe_get_settings(struct net_device *netdev,
185 ADVERTISED_FIBRE); 185 ADVERTISED_FIBRE);
186 ecmd->port = PORT_FIBRE; 186 ecmd->port = PORT_FIBRE;
187 ecmd->autoneg = AUTONEG_DISABLE; 187 ecmd->autoneg = AUTONEG_DISABLE;
188 } else if ((hw->device_id == IXGBE_DEV_ID_82599_COMBO_BACKPLANE) ||
189 (hw->device_id == IXGBE_DEV_ID_82599_KX4_MEZZ)) {
190 ecmd->supported |= (SUPPORTED_1000baseT_Full |
191 SUPPORTED_Autoneg |
192 SUPPORTED_FIBRE);
193 ecmd->advertising = (ADVERTISED_10000baseT_Full |
194 ADVERTISED_1000baseT_Full |
195 ADVERTISED_Autoneg |
196 ADVERTISED_FIBRE);
197 ecmd->port = PORT_FIBRE;
188 } else { 198 } else {
189 ecmd->supported |= (SUPPORTED_1000baseT_Full | 199 ecmd->supported |= (SUPPORTED_1000baseT_Full |
190 SUPPORTED_FIBRE); 200 SUPPORTED_FIBRE);
@@ -204,6 +214,7 @@ static int ixgbe_get_settings(struct net_device *netdev,
204 /* Get PHY type */ 214 /* Get PHY type */
205 switch (adapter->hw.phy.type) { 215 switch (adapter->hw.phy.type) {
206 case ixgbe_phy_tn: 216 case ixgbe_phy_tn:
217 case ixgbe_phy_aq:
207 case ixgbe_phy_cu_unknown: 218 case ixgbe_phy_cu_unknown:
208 /* Copper 10G-BASET */ 219 /* Copper 10G-BASET */
209 ecmd->port = PORT_TP; 220 ecmd->port = PORT_TP;
@@ -332,13 +343,6 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
332 else 343 else
333 pause->autoneg = 1; 344 pause->autoneg = 1;
334 345
335#ifdef CONFIG_DCB
336 if (hw->fc.current_mode == ixgbe_fc_pfc) {
337 pause->rx_pause = 0;
338 pause->tx_pause = 0;
339 }
340
341#endif
342 if (hw->fc.current_mode == ixgbe_fc_rx_pause) { 346 if (hw->fc.current_mode == ixgbe_fc_rx_pause) {
343 pause->rx_pause = 1; 347 pause->rx_pause = 1;
344 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) { 348 } else if (hw->fc.current_mode == ixgbe_fc_tx_pause) {
@@ -346,6 +350,11 @@ static void ixgbe_get_pauseparam(struct net_device *netdev,
346 } else if (hw->fc.current_mode == ixgbe_fc_full) { 350 } else if (hw->fc.current_mode == ixgbe_fc_full) {
347 pause->rx_pause = 1; 351 pause->rx_pause = 1;
348 pause->tx_pause = 1; 352 pause->tx_pause = 1;
353#ifdef CONFIG_DCB
354 } else if (hw->fc.current_mode == ixgbe_fc_pfc) {
355 pause->rx_pause = 0;
356 pause->tx_pause = 0;
357#endif
349 } 358 }
350} 359}
351 360
@@ -363,7 +372,6 @@ static int ixgbe_set_pauseparam(struct net_device *netdev,
363 return -EINVAL; 372 return -EINVAL;
364 373
365#endif 374#endif
366
367 fc = hw->fc; 375 fc = hw->fc;
368 376
369 if (pause->autoneg != AUTONEG_ENABLE) 377 if (pause->autoneg != AUTONEG_ENABLE)
@@ -412,11 +420,6 @@ static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data)
412 else 420 else
413 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED; 421 adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;
414 422
415 if (netif_running(netdev))
416 ixgbe_reinit_locked(adapter);
417 else
418 ixgbe_reset(adapter);
419
420 return 0; 423 return 0;
421} 424}
422 425
@@ -428,16 +431,21 @@ static u32 ixgbe_get_tx_csum(struct net_device *netdev)
428static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) 431static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data)
429{ 432{
430 struct ixgbe_adapter *adapter = netdev_priv(netdev); 433 struct ixgbe_adapter *adapter = netdev_priv(netdev);
434 u32 feature_list;
431 435
432 if (data) { 436 feature_list = (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
433 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 437 switch (adapter->hw.mac.type) {
434 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 438 case ixgbe_mac_82599EB:
435 netdev->features |= NETIF_F_SCTP_CSUM; 439 case ixgbe_mac_X540:
436 } else { 440 feature_list |= NETIF_F_SCTP_CSUM;
437 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 441 break;
438 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 442 default:
439 netdev->features &= ~NETIF_F_SCTP_CSUM; 443 break;
440 } 444 }
445 if (data)
446 netdev->features |= feature_list;
447 else
448 netdev->features &= ~feature_list;
441 449
442 return 0; 450 return 0;
443} 451}
@@ -530,10 +538,20 @@ static void ixgbe_get_regs(struct net_device *netdev,
530 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); 538 regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
531 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); 539 regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
532 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); 540 regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
533 for (i = 0; i < 8; i++) 541 for (i = 0; i < 8; i++) {
534 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); 542 switch (hw->mac.type) {
535 for (i = 0; i < 8; i++) 543 case ixgbe_mac_82598EB:
536 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); 544 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
545 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
546 break;
547 case ixgbe_mac_82599EB:
548 regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i));
549 regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i));
550 break;
551 default:
552 break;
553 }
554 }
537 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); 555 regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
538 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); 556 regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);
539 557
@@ -615,6 +633,7 @@ static void ixgbe_get_regs(struct net_device *netdev,
615 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); 633 regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
616 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); 634 regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));
617 635
636 /* DCB */
618 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); 637 regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
619 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); 638 regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
620 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); 639 regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
@@ -820,9 +839,10 @@ static void ixgbe_get_drvinfo(struct net_device *netdev,
820 struct ixgbe_adapter *adapter = netdev_priv(netdev); 839 struct ixgbe_adapter *adapter = netdev_priv(netdev);
821 char firmware_version[32]; 840 char firmware_version[32];
822 841
823 strncpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); 842 strncpy(drvinfo->driver, ixgbe_driver_name,
843 sizeof(drvinfo->driver) - 1);
824 strncpy(drvinfo->version, ixgbe_driver_version, 844 strncpy(drvinfo->version, ixgbe_driver_version,
825 sizeof(drvinfo->version)); 845 sizeof(drvinfo->version) - 1);
826 846
827 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d", 847 snprintf(firmware_version, sizeof(firmware_version), "%d.%d-%d",
828 (adapter->eeprom_version & 0xF000) >> 12, 848 (adapter->eeprom_version & 0xF000) >> 12,
@@ -905,13 +925,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
905 memcpy(&temp_tx_ring[i], adapter->tx_ring[i], 925 memcpy(&temp_tx_ring[i], adapter->tx_ring[i],
906 sizeof(struct ixgbe_ring)); 926 sizeof(struct ixgbe_ring));
907 temp_tx_ring[i].count = new_tx_count; 927 temp_tx_ring[i].count = new_tx_count;
908 err = ixgbe_setup_tx_resources(adapter, 928 err = ixgbe_setup_tx_resources(&temp_tx_ring[i]);
909 &temp_tx_ring[i]);
910 if (err) { 929 if (err) {
911 while (i) { 930 while (i) {
912 i--; 931 i--;
913 ixgbe_free_tx_resources(adapter, 932 ixgbe_free_tx_resources(&temp_tx_ring[i]);
914 &temp_tx_ring[i]);
915 } 933 }
916 goto clear_reset; 934 goto clear_reset;
917 } 935 }
@@ -930,13 +948,11 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
930 memcpy(&temp_rx_ring[i], adapter->rx_ring[i], 948 memcpy(&temp_rx_ring[i], adapter->rx_ring[i],
931 sizeof(struct ixgbe_ring)); 949 sizeof(struct ixgbe_ring));
932 temp_rx_ring[i].count = new_rx_count; 950 temp_rx_ring[i].count = new_rx_count;
933 err = ixgbe_setup_rx_resources(adapter, 951 err = ixgbe_setup_rx_resources(&temp_rx_ring[i]);
934 &temp_rx_ring[i]);
935 if (err) { 952 if (err) {
936 while (i) { 953 while (i) {
937 i--; 954 i--;
938 ixgbe_free_rx_resources(adapter, 955 ixgbe_free_rx_resources(&temp_rx_ring[i]);
939 &temp_rx_ring[i]);
940 } 956 }
941 goto err_setup; 957 goto err_setup;
942 } 958 }
@@ -951,8 +967,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
951 /* tx */ 967 /* tx */
952 if (new_tx_count != adapter->tx_ring_count) { 968 if (new_tx_count != adapter->tx_ring_count) {
953 for (i = 0; i < adapter->num_tx_queues; i++) { 969 for (i = 0; i < adapter->num_tx_queues; i++) {
954 ixgbe_free_tx_resources(adapter, 970 ixgbe_free_tx_resources(adapter->tx_ring[i]);
955 adapter->tx_ring[i]);
956 memcpy(adapter->tx_ring[i], &temp_tx_ring[i], 971 memcpy(adapter->tx_ring[i], &temp_tx_ring[i],
957 sizeof(struct ixgbe_ring)); 972 sizeof(struct ixgbe_ring));
958 } 973 }
@@ -962,8 +977,7 @@ static int ixgbe_set_ringparam(struct net_device *netdev,
962 /* rx */ 977 /* rx */
963 if (new_rx_count != adapter->rx_ring_count) { 978 if (new_rx_count != adapter->rx_ring_count) {
964 for (i = 0; i < adapter->num_rx_queues; i++) { 979 for (i = 0; i < adapter->num_rx_queues; i++) {
965 ixgbe_free_rx_resources(adapter, 980 ixgbe_free_rx_resources(adapter->rx_ring[i]);
966 adapter->rx_ring[i]);
967 memcpy(adapter->rx_ring[i], &temp_rx_ring[i], 981 memcpy(adapter->rx_ring[i], &temp_rx_ring[i],
968 sizeof(struct ixgbe_ring)); 982 sizeof(struct ixgbe_ring));
969 } 983 }
@@ -1144,7 +1158,7 @@ struct ixgbe_reg_test {
1144#define TABLE64_TEST_HI 6 1158#define TABLE64_TEST_HI 6
1145 1159
1146/* default 82599 register test */ 1160/* default 82599 register test */
1147static struct ixgbe_reg_test reg_test_82599[] = { 1161static const struct ixgbe_reg_test reg_test_82599[] = {
1148 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1162 { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1149 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1163 { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1150 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1164 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1168,7 +1182,7 @@ static struct ixgbe_reg_test reg_test_82599[] = {
1168}; 1182};
1169 1183
1170/* default 82598 register test */ 1184/* default 82598 register test */
1171static struct ixgbe_reg_test reg_test_82598[] = { 1185static const struct ixgbe_reg_test reg_test_82598[] = {
1172 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1186 { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1173 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, 1187 { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 },
1174 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 1188 { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
@@ -1195,18 +1209,22 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1195 { 0, 0, 0, 0 } 1209 { 0, 0, 0, 0 }
1196}; 1210};
1197 1211
1212static const u32 register_test_patterns[] = {
1213 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
1214};
1215
1198#define REG_PATTERN_TEST(R, M, W) \ 1216#define REG_PATTERN_TEST(R, M, W) \
1199{ \ 1217{ \
1200 u32 pat, val, before; \ 1218 u32 pat, val, before; \
1201 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 1219 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
1202 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
1203 before = readl(adapter->hw.hw_addr + R); \ 1220 before = readl(adapter->hw.hw_addr + R); \
1204 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ 1221 writel((register_test_patterns[pat] & W), \
1222 (adapter->hw.hw_addr + R)); \
1205 val = readl(adapter->hw.hw_addr + R); \ 1223 val = readl(adapter->hw.hw_addr + R); \
1206 if (val != (_test[pat] & W & M)) { \ 1224 if (val != (register_test_patterns[pat] & W & M)) { \
1207 e_err(drv, "pattern test reg %04X failed: got " \ 1225 e_err(drv, "pattern test reg %04X failed: got " \
1208 "0x%08X expected 0x%08X\n", \ 1226 "0x%08X expected 0x%08X\n", \
1209 R, val, (_test[pat] & W & M)); \ 1227 R, val, (register_test_patterns[pat] & W & M)); \
1210 *data = R; \ 1228 *data = R; \
1211 writel(before, adapter->hw.hw_addr + R); \ 1229 writel(before, adapter->hw.hw_addr + R); \
1212 return 1; \ 1230 return 1; \
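
The REG_PATTERN_TEST macro above writes each canned pattern through a write mask W, reads the register back, and compares against the pattern filtered by both W and the read mask M, restoring the previous value on failure. A standalone sketch of that check, operating on an ordinary variable instead of readl()/writel() on a memory-mapped register (all names here are illustrative only):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const uint32_t test_patterns[] = {
        0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
};

/* Stand-in for a device register; the driver uses writel()/readl() instead. */
static uint32_t fake_reg;

static bool pattern_test(uint32_t write_mask, uint32_t read_mask)
{
        uint32_t before = fake_reg;
        size_t i;

        for (i = 0; i < ARRAY_SIZE(test_patterns); i++) {
                uint32_t val;

                fake_reg = test_patterns[i] & write_mask;       /* writel() */
                val = fake_reg;                                  /* readl()  */
                if (val != (test_patterns[i] & write_mask & read_mask)) {
                        fprintf(stderr,
                                "pattern test failed: got 0x%08X expected 0x%08X\n",
                                (unsigned)val,
                                (unsigned)(test_patterns[i] & write_mask & read_mask));
                        fake_reg = before;                       /* restore  */
                        return false;
                }
        }
        fake_reg = before;
        return true;
}

int main(void)
{
        /* e.g. a register where only the low 16 bits are writable and readable */
        printf("%s\n", pattern_test(0x0000FFFF, 0x0000FFFF) ? "pass" : "fail");
        return 0;
}
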
@@ -1233,16 +1251,24 @@ static struct ixgbe_reg_test reg_test_82598[] = {
1233 1251
1234static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) 1252static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
1235{ 1253{
1236 struct ixgbe_reg_test *test; 1254 const struct ixgbe_reg_test *test;
1237 u32 value, before, after; 1255 u32 value, before, after;
1238 u32 i, toggle; 1256 u32 i, toggle;
1239 1257
1240 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1258 switch (adapter->hw.mac.type) {
1241 toggle = 0x7FFFF30F; 1259 case ixgbe_mac_82598EB:
1242 test = reg_test_82599;
1243 } else {
1244 toggle = 0x7FFFF3FF; 1260 toggle = 0x7FFFF3FF;
1245 test = reg_test_82598; 1261 test = reg_test_82598;
1262 break;
1263 case ixgbe_mac_82599EB:
1264 case ixgbe_mac_X540:
1265 toggle = 0x7FFFF30F;
1266 test = reg_test_82599;
1267 break;
1268 default:
1269 *data = 1;
1270 return 1;
1271 break;
1246 } 1272 }
1247 1273
1248 /* 1274 /*
@@ -1451,25 +1477,28 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter)
1451 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 1477 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
1452 reg_ctl &= ~IXGBE_RXCTRL_RXEN; 1478 reg_ctl &= ~IXGBE_RXCTRL_RXEN;
1453 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); 1479 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl);
1454 reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx)); 1480 ixgbe_disable_rx_queue(adapter, rx_ring);
1455 reg_ctl &= ~IXGBE_RXDCTL_ENABLE;
1456 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(rx_ring->reg_idx), reg_ctl);
1457 1481
1458 /* now Tx */ 1482 /* now Tx */
1459 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); 1483 reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx));
1460 reg_ctl &= ~IXGBE_TXDCTL_ENABLE; 1484 reg_ctl &= ~IXGBE_TXDCTL_ENABLE;
1461 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); 1485 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl);
1462 1486
1463 if (hw->mac.type == ixgbe_mac_82599EB) { 1487 switch (hw->mac.type) {
1488 case ixgbe_mac_82599EB:
1489 case ixgbe_mac_X540:
1464 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); 1490 reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL);
1465 reg_ctl &= ~IXGBE_DMATXCTL_TE; 1491 reg_ctl &= ~IXGBE_DMATXCTL_TE;
1466 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); 1492 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl);
1493 break;
1494 default:
1495 break;
1467 } 1496 }
1468 1497
1469 ixgbe_reset(adapter); 1498 ixgbe_reset(adapter);
1470 1499
1471 ixgbe_free_tx_resources(adapter, &adapter->test_tx_ring); 1500 ixgbe_free_tx_resources(&adapter->test_tx_ring);
1472 ixgbe_free_rx_resources(adapter, &adapter->test_rx_ring); 1501 ixgbe_free_rx_resources(&adapter->test_rx_ring);
1473} 1502}
1474 1503
1475static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) 1504static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
@@ -1483,17 +1512,24 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1483 /* Setup Tx descriptor ring and Tx buffers */ 1512 /* Setup Tx descriptor ring and Tx buffers */
1484 tx_ring->count = IXGBE_DEFAULT_TXD; 1513 tx_ring->count = IXGBE_DEFAULT_TXD;
1485 tx_ring->queue_index = 0; 1514 tx_ring->queue_index = 0;
1515 tx_ring->dev = &adapter->pdev->dev;
1516 tx_ring->netdev = adapter->netdev;
1486 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; 1517 tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx;
1487 tx_ring->numa_node = adapter->node; 1518 tx_ring->numa_node = adapter->node;
1488 1519
1489 err = ixgbe_setup_tx_resources(adapter, tx_ring); 1520 err = ixgbe_setup_tx_resources(tx_ring);
1490 if (err) 1521 if (err)
1491 return 1; 1522 return 1;
1492 1523
1493 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1524 switch (adapter->hw.mac.type) {
1525 case ixgbe_mac_82599EB:
1526 case ixgbe_mac_X540:
1494 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL); 1527 reg_data = IXGBE_READ_REG(&adapter->hw, IXGBE_DMATXCTL);
1495 reg_data |= IXGBE_DMATXCTL_TE; 1528 reg_data |= IXGBE_DMATXCTL_TE;
1496 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); 1529 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data);
1530 break;
1531 default:
1532 break;
1497 } 1533 }
1498 1534
1499 ixgbe_configure_tx_ring(adapter, tx_ring); 1535 ixgbe_configure_tx_ring(adapter, tx_ring);
@@ -1501,11 +1537,13 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter)
1501 /* Setup Rx Descriptor ring and Rx buffers */ 1537 /* Setup Rx Descriptor ring and Rx buffers */
1502 rx_ring->count = IXGBE_DEFAULT_RXD; 1538 rx_ring->count = IXGBE_DEFAULT_RXD;
1503 rx_ring->queue_index = 0; 1539 rx_ring->queue_index = 0;
1540 rx_ring->dev = &adapter->pdev->dev;
1541 rx_ring->netdev = adapter->netdev;
1504 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; 1542 rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx;
1505 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048; 1543 rx_ring->rx_buf_len = IXGBE_RXBUFFER_2048;
1506 rx_ring->numa_node = adapter->node; 1544 rx_ring->numa_node = adapter->node;
1507 1545
1508 err = ixgbe_setup_rx_resources(adapter, rx_ring); 1546 err = ixgbe_setup_rx_resources(rx_ring);
1509 if (err) { 1547 if (err) {
1510 ret_val = 4; 1548 ret_val = 4;
1511 goto err_nomem; 1549 goto err_nomem;
@@ -1604,8 +1642,7 @@ static int ixgbe_check_lbtest_frame(struct sk_buff *skb,
1604 return 13; 1642 return 13;
1605} 1643}
1606 1644
1607static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter, 1645static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring,
1608 struct ixgbe_ring *rx_ring,
1609 struct ixgbe_ring *tx_ring, 1646 struct ixgbe_ring *tx_ring,
1610 unsigned int size) 1647 unsigned int size)
1611{ 1648{
@@ -1627,7 +1664,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1627 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc]; 1664 rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
1628 1665
1629 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */ 1666 /* unmap Rx buffer, will be remapped by alloc_rx_buffers */
1630 dma_unmap_single(&adapter->pdev->dev, 1667 dma_unmap_single(rx_ring->dev,
1631 rx_buffer_info->dma, 1668 rx_buffer_info->dma,
1632 bufsz, 1669 bufsz,
1633 DMA_FROM_DEVICE); 1670 DMA_FROM_DEVICE);
@@ -1639,7 +1676,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1639 1676
1640 /* unmap buffer on Tx side */ 1677 /* unmap buffer on Tx side */
1641 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc]; 1678 tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
1642 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 1679 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
1643 1680
1644 /* increment Rx/Tx next to clean counters */ 1681 /* increment Rx/Tx next to clean counters */
1645 rx_ntc++; 1682 rx_ntc++;
@@ -1655,7 +1692,7 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_adapter *adapter,
1655 } 1692 }
1656 1693
1657 /* re-map buffers to ring, store next to clean values */ 1694 /* re-map buffers to ring, store next to clean values */
1658 ixgbe_alloc_rx_buffers(adapter, rx_ring, count); 1695 ixgbe_alloc_rx_buffers(rx_ring, count);
1659 rx_ring->next_to_clean = rx_ntc; 1696 rx_ring->next_to_clean = rx_ntc;
1660 tx_ring->next_to_clean = tx_ntc; 1697 tx_ring->next_to_clean = tx_ntc;
1661 1698
@@ -1699,7 +1736,6 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1699 for (i = 0; i < 64; i++) { 1736 for (i = 0; i < 64; i++) {
1700 skb_get(skb); 1737 skb_get(skb);
1701 tx_ret_val = ixgbe_xmit_frame_ring(skb, 1738 tx_ret_val = ixgbe_xmit_frame_ring(skb,
1702 adapter->netdev,
1703 adapter, 1739 adapter,
1704 tx_ring); 1740 tx_ring);
1705 if (tx_ret_val == NETDEV_TX_OK) 1741 if (tx_ret_val == NETDEV_TX_OK)
@@ -1714,8 +1750,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter)
1714 /* allow 200 milliseconds for packets to go from Tx to Rx */ 1750 /* allow 200 milliseconds for packets to go from Tx to Rx */
1715 msleep(200); 1751 msleep(200);
1716 1752
1717 good_cnt = ixgbe_clean_test_rings(adapter, rx_ring, 1753 good_cnt = ixgbe_clean_test_rings(rx_ring, tx_ring, size);
1718 tx_ring, size);
1719 if (good_cnt != 64) { 1754 if (good_cnt != 64) {
1720 ret_val = 13; 1755 ret_val = 13;
1721 break; 1756 break;
@@ -1847,7 +1882,25 @@ static int ixgbe_wol_exclusion(struct ixgbe_adapter *adapter,
1847 struct ixgbe_hw *hw = &adapter->hw; 1882 struct ixgbe_hw *hw = &adapter->hw;
1848 int retval = 1; 1883 int retval = 1;
1849 1884
1885 /* WOL not supported except for the following */
1850 switch(hw->device_id) { 1886 switch(hw->device_id) {
1887 case IXGBE_DEV_ID_82599_SFP:
1888 /* Only this subdevice supports WOL */
1889 if (hw->subsystem_device_id != IXGBE_SUBDEV_ID_82599_SFP) {
1890 wol->supported = 0;
1891 break;
1892 }
1893 retval = 0;
1894 break;
1895 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
1896 /* All except this subdevice support WOL */
1897 if (hw->subsystem_device_id ==
1898 IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
1899 wol->supported = 0;
1900 break;
1901 }
1902 retval = 0;
1903 break;
1851 case IXGBE_DEV_ID_82599_KX4: 1904 case IXGBE_DEV_ID_82599_KX4:
1852 retval = 0; 1905 retval = 0;
1853 break; 1906 break;
@@ -1985,6 +2038,41 @@ static int ixgbe_get_coalesce(struct net_device *netdev,
1985 return 0; 2038 return 0;
1986} 2039}
1987 2040
2041/*
2042 * this function must be called before setting the new value of
2043 * rx_itr_setting
2044 */
2045static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter,
2046 struct ethtool_coalesce *ec)
2047{
2048 struct net_device *netdev = adapter->netdev;
2049
2050 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
2051 return false;
2052
2053 /* if interrupt rate is too high then disable RSC */
2054 if (ec->rx_coalesce_usecs != 1 &&
2055 ec->rx_coalesce_usecs <= 1000000/IXGBE_MAX_RSC_INT_RATE) {
2056 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2057 e_info(probe, "rx-usecs set too low, "
2058 "disabling RSC\n");
2059 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2060 return true;
2061 }
2062 } else {
2063 /* check the feature flag value and enable RSC if necessary */
2064 if ((netdev->features & NETIF_F_LRO) &&
2065 !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) {
2066 e_info(probe, "rx-usecs set to %d, "
2067 "re-enabling RSC\n",
2068 ec->rx_coalesce_usecs);
2069 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
2070 return true;
2071 }
2072 }
2073 return false;
2074}
2075
1988static int ixgbe_set_coalesce(struct net_device *netdev, 2076static int ixgbe_set_coalesce(struct net_device *netdev,
1989 struct ethtool_coalesce *ec) 2077 struct ethtool_coalesce *ec)
1990{ 2078{
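
ixgbe_update_rsc() above couples RSC to the requested interrupt moderation: rx-usecs of 1 selects dynamic ITR and leaves RSC eligible, while any other value is converted to an interrupt rate and checked against the RSC limit. The sketch below shows only that threshold arithmetic; the limit value is an assumption quoted from memory of ixgbe.h, not from this patch.

#include <stdbool.h>
#include <stdio.h>

/* assumed limit, believed to match ixgbe.h; not part of this patch */
#define IXGBE_MAX_RSC_INT_RATE 162760

/*
 * True when an rx-usecs setting leaves RSC usable: 1 selects dynamic
 * moderation, larger values must keep the interrupt rate at or below
 * the RSC limit, and 0 (moderation off) rules RSC out entirely.
 */
static bool rsc_allowed(unsigned int rx_coalesce_usecs)
{
        if (rx_coalesce_usecs == 1)
                return true;
        if (rx_coalesce_usecs == 0)
                return false;
        return (1000000 / rx_coalesce_usecs) <= IXGBE_MAX_RSC_INT_RATE;
}

int main(void)
{
        printf("rx-usecs=1   -> %d\n", rsc_allowed(1));   /* dynamic ITR: ok  */
        printf("rx-usecs=3   -> %d\n", rsc_allowed(3));   /* ~333k ints/s: no */
        printf("rx-usecs=100 -> %d\n", rsc_allowed(100)); /* 10k ints/s: ok   */
        return 0;
}
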
@@ -2002,17 +2090,14 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2002 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq; 2090 adapter->tx_ring[0]->work_limit = ec->tx_max_coalesced_frames_irq;
2003 2091
2004 if (ec->rx_coalesce_usecs > 1) { 2092 if (ec->rx_coalesce_usecs > 1) {
2005 u32 max_int;
2006 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
2007 max_int = IXGBE_MAX_RSC_INT_RATE;
2008 else
2009 max_int = IXGBE_MAX_INT_RATE;
2010
2011 /* check the limits */ 2093 /* check the limits */
2012 if ((1000000/ec->rx_coalesce_usecs > max_int) || 2094 if ((1000000/ec->rx_coalesce_usecs > IXGBE_MAX_INT_RATE) ||
2013 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE)) 2095 (1000000/ec->rx_coalesce_usecs < IXGBE_MIN_INT_RATE))
2014 return -EINVAL; 2096 return -EINVAL;
2015 2097
2098 /* check the old value and enable RSC if necessary */
2099 need_reset = ixgbe_update_rsc(adapter, ec);
2100
2016 /* store the value in ints/second */ 2101 /* store the value in ints/second */
2017 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs; 2102 adapter->rx_eitr_param = 1000000/ec->rx_coalesce_usecs;
2018 2103
@@ -2021,32 +2106,21 @@ static int ixgbe_set_coalesce(struct net_device *netdev,
2021 /* clear the lower bit as its used for dynamic state */ 2106 /* clear the lower bit as its used for dynamic state */
2022 adapter->rx_itr_setting &= ~1; 2107 adapter->rx_itr_setting &= ~1;
2023 } else if (ec->rx_coalesce_usecs == 1) { 2108 } else if (ec->rx_coalesce_usecs == 1) {
2109 /* check the old value and enable RSC if necessary */
2110 need_reset = ixgbe_update_rsc(adapter, ec);
2111
2024 /* 1 means dynamic mode */ 2112 /* 1 means dynamic mode */
2025 adapter->rx_eitr_param = 20000; 2113 adapter->rx_eitr_param = 20000;
2026 adapter->rx_itr_setting = 1; 2114 adapter->rx_itr_setting = 1;
2027 } else { 2115 } else {
2116 /* check the old value and enable RSC if necessary */
2117 need_reset = ixgbe_update_rsc(adapter, ec);
2028 /* 2118 /*
2029 * any other value means disable eitr, which is best 2119 * any other value means disable eitr, which is best
2030 * served by setting the interrupt rate very high 2120 * served by setting the interrupt rate very high
2031 */ 2121 */
2032 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE; 2122 adapter->rx_eitr_param = IXGBE_MAX_INT_RATE;
2033 adapter->rx_itr_setting = 0; 2123 adapter->rx_itr_setting = 0;
2034
2035 /*
2036 * if hardware RSC is enabled, disable it when
2037 * setting low latency mode, to avoid errata, assuming
2038 * that when the user set low latency mode they want
2039 * it at the cost of anything else
2040 */
2041 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
2042 adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED;
2043 if (netdev->features & NETIF_F_LRO) {
2044 netdev->features &= ~NETIF_F_LRO;
2045 e_info(probe, "rx-usecs set to 0, "
2046 "disabling RSC\n");
2047 }
2048 need_reset = true;
2049 }
2050 } 2124 }
2051 2125
2052 if (ec->tx_coalesce_usecs > 1) { 2126 if (ec->tx_coalesce_usecs > 1) {
@@ -2127,34 +2201,45 @@ static int ixgbe_set_flags(struct net_device *netdev, u32 data)
2127 need_reset = (data & ETH_FLAG_RXVLAN) != 2201 need_reset = (data & ETH_FLAG_RXVLAN) !=
2128 (netdev->features & NETIF_F_HW_VLAN_RX); 2202 (netdev->features & NETIF_F_HW_VLAN_RX);
2129 2203
2130 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | 2204 rc = ethtool_op_set_flags(netdev, data, ETH_FLAG_LRO | ETH_FLAG_NTUPLE |
2131 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN); 2205 ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN);
2132 if (rc) 2206 if (rc)
2133 return rc; 2207 return rc;
2134 2208
2135 /* if state changes we need to update adapter->flags and reset */ 2209 /* if state changes we need to update adapter->flags and reset */
2136 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { 2210 if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) &&
2137 /* 2211 (!!(data & ETH_FLAG_LRO) !=
2138 * cast both to bool and verify if they are set the same 2212 !!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) {
2139 * but only enable RSC if itr is non-zero, as 2213 if ((data & ETH_FLAG_LRO) &&
2140 * itr=0 and RSC are mutually exclusive 2214 (!adapter->rx_itr_setting ||
2141 */ 2215 (adapter->rx_itr_setting > IXGBE_MAX_RSC_INT_RATE))) {
2142 if (((!!(data & ETH_FLAG_LRO)) != 2216 e_info(probe, "rx-usecs set too low, "
2143 (!!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))) && 2217 "not enabling RSC.\n");
2144 adapter->rx_itr_setting) { 2218 } else {
2145 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED; 2219 adapter->flags2 ^= IXGBE_FLAG2_RSC_ENABLED;
2146 switch (adapter->hw.mac.type) { 2220 switch (adapter->hw.mac.type) {
2147 case ixgbe_mac_82599EB: 2221 case ixgbe_mac_82599EB:
2148 need_reset = true; 2222 need_reset = true;
2149 break; 2223 break;
2224 case ixgbe_mac_X540: {
2225 int i;
2226 for (i = 0; i < adapter->num_rx_queues; i++) {
2227 struct ixgbe_ring *ring =
2228 adapter->rx_ring[i];
2229 if (adapter->flags2 &
2230 IXGBE_FLAG2_RSC_ENABLED) {
2231 ixgbe_configure_rscctl(adapter,
2232 ring);
2233 } else {
2234 ixgbe_clear_rscctl(adapter,
2235 ring);
2236 }
2237 }
2238 }
2239 break;
2150 default: 2240 default:
2151 break; 2241 break;
2152 } 2242 }
2153 } else if (!adapter->rx_itr_setting) {
2154 netdev->features &= ~NETIF_F_LRO;
2155 if (data & ETH_FLAG_LRO)
2156 e_info(probe, "rx-usecs set to 0, "
2157 "LRO/RSC cannot be enabled.\n");
2158 } 2243 }
2159 } 2244 }
2160 2245
@@ -2192,10 +2277,11 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
2192 struct ethtool_rx_ntuple *cmd) 2277 struct ethtool_rx_ntuple *cmd)
2193{ 2278{
2194 struct ixgbe_adapter *adapter = netdev_priv(dev); 2279 struct ixgbe_adapter *adapter = netdev_priv(dev);
2195 struct ethtool_rx_ntuple_flow_spec fs = cmd->fs; 2280 struct ethtool_rx_ntuple_flow_spec *fs = &cmd->fs;
2196 struct ixgbe_atr_input input_struct; 2281 union ixgbe_atr_input input_struct;
2197 struct ixgbe_atr_input_masks input_masks; 2282 struct ixgbe_atr_input_masks input_masks;
2198 int target_queue; 2283 int target_queue;
2284 int err;
2199 2285
2200 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 2286 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
2201 return -EOPNOTSUPP; 2287 return -EOPNOTSUPP;
@@ -2204,67 +2290,122 @@ static int ixgbe_set_rx_ntuple(struct net_device *dev,
2204 * Don't allow programming if the action is a queue greater than 2290 * Don't allow programming if the action is a queue greater than
2205 * the number of online Tx queues. 2291 * the number of online Tx queues.
2206 */ 2292 */
2207 if ((fs.action >= adapter->num_tx_queues) || 2293 if ((fs->action >= adapter->num_tx_queues) ||
2208 (fs.action < ETHTOOL_RXNTUPLE_ACTION_DROP)) 2294 (fs->action < ETHTOOL_RXNTUPLE_ACTION_DROP))
2209 return -EINVAL; 2295 return -EINVAL;
2210 2296
2211 memset(&input_struct, 0, sizeof(struct ixgbe_atr_input)); 2297 memset(&input_struct, 0, sizeof(union ixgbe_atr_input));
2212 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks)); 2298 memset(&input_masks, 0, sizeof(struct ixgbe_atr_input_masks));
2213 2299
2214 input_masks.src_ip_mask = fs.m_u.tcp_ip4_spec.ip4src; 2300 /* record flow type */
2215 input_masks.dst_ip_mask = fs.m_u.tcp_ip4_spec.ip4dst; 2301 switch (fs->flow_type) {
2216 input_masks.src_port_mask = fs.m_u.tcp_ip4_spec.psrc; 2302 case IPV4_FLOW:
2217 input_masks.dst_port_mask = fs.m_u.tcp_ip4_spec.pdst; 2303 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_IPV4;
2218 input_masks.vlan_id_mask = fs.vlan_tag_mask; 2304 break;
2219 /* only use the lowest 2 bytes for flex bytes */
2220 input_masks.data_mask = (fs.data_mask & 0xffff);
2221
2222 switch (fs.flow_type) {
2223 case TCP_V4_FLOW: 2305 case TCP_V4_FLOW:
2224 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_TCP); 2306 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
2225 break; 2307 break;
2226 case UDP_V4_FLOW: 2308 case UDP_V4_FLOW:
2227 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_UDP); 2309 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_UDPV4;
2228 break; 2310 break;
2229 case SCTP_V4_FLOW: 2311 case SCTP_V4_FLOW:
2230 ixgbe_atr_set_l4type_82599(&input_struct, IXGBE_ATR_L4TYPE_SCTP); 2312 input_struct.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_SCTPV4;
2231 break; 2313 break;
2232 default: 2314 default:
2233 return -1; 2315 return -1;
2234 } 2316 }
2235 2317
2236 /* Mask bits from the inputs based on user-supplied mask */ 2318 /* copy vlan tag minus the CFI bit */
2237 ixgbe_atr_set_src_ipv4_82599(&input_struct, 2319 if ((fs->vlan_tag & 0xEFFF) || (~fs->vlan_tag_mask & 0xEFFF)) {
2238 (fs.h_u.tcp_ip4_spec.ip4src & ~fs.m_u.tcp_ip4_spec.ip4src)); 2320 input_struct.formatted.vlan_id = htons(fs->vlan_tag & 0xEFFF);
2239 ixgbe_atr_set_dst_ipv4_82599(&input_struct, 2321 if (!fs->vlan_tag_mask) {
2240 (fs.h_u.tcp_ip4_spec.ip4dst & ~fs.m_u.tcp_ip4_spec.ip4dst)); 2322 input_masks.vlan_id_mask = htons(0xEFFF);
2241 /* 82599 expects these to be byte-swapped for perfect filtering */ 2323 } else {
2242 ixgbe_atr_set_src_port_82599(&input_struct, 2324 switch (~fs->vlan_tag_mask & 0xEFFF) {
2243 ((ntohs(fs.h_u.tcp_ip4_spec.psrc)) & ~fs.m_u.tcp_ip4_spec.psrc)); 2325 /* all of these are valid vlan-mask values */
2244 ixgbe_atr_set_dst_port_82599(&input_struct, 2326 case 0xEFFF:
2245 ((ntohs(fs.h_u.tcp_ip4_spec.pdst)) & ~fs.m_u.tcp_ip4_spec.pdst)); 2327 case 0xE000:
2246 2328 case 0x0FFF:
2247 /* VLAN and Flex bytes are either completely masked or not */ 2329 case 0x0000:
2248 if (!fs.vlan_tag_mask) 2330 input_masks.vlan_id_mask =
2249 ixgbe_atr_set_vlan_id_82599(&input_struct, fs.vlan_tag); 2331 htons(~fs->vlan_tag_mask);
2250 2332 break;
2251 if (!input_masks.data_mask) 2333 /* exit with error if vlan-mask is invalid */
2252 /* make sure we only use the first 2 bytes of user data */ 2334 default:
2253 ixgbe_atr_set_flex_byte_82599(&input_struct, 2335 e_err(drv, "Partial VLAN ID or "
2254 (fs.data & 0xffff)); 2336 "priority mask in vlan-mask is not "
2337 "supported by hardware\n");
2338 return -1;
2339 }
2340 }
2341 }
2342
2343 /* make sure we only use the first 2 bytes of user data */
2344 if ((fs->data & 0xFFFF) || (~fs->data_mask & 0xFFFF)) {
2345 input_struct.formatted.flex_bytes = htons(fs->data & 0xFFFF);
2346 if (!(fs->data_mask & 0xFFFF)) {
2347 input_masks.flex_mask = 0xFFFF;
2348 } else if (~fs->data_mask & 0xFFFF) {
2349 e_err(drv, "Partial user-def-mask is not "
2350 "supported by hardware\n");
2351 return -1;
2352 }
2353 }
2354
2355 /*
2356 * Copy input into formatted structures
2357 *
2358 * These assignments are based on the following logic
2359 * If neither input or mask are set assume value is masked out.
2360 * If input is set, but mask is not mask should default to accept all.
2361 * If input is not set, but mask is set then mask likely results in 0.
2362 * If input is set and mask is set then assign both.
2363 */
2364 if (fs->h_u.tcp_ip4_spec.ip4src || ~fs->m_u.tcp_ip4_spec.ip4src) {
2365 input_struct.formatted.src_ip[0] = fs->h_u.tcp_ip4_spec.ip4src;
2366 if (!fs->m_u.tcp_ip4_spec.ip4src)
2367 input_masks.src_ip_mask[0] = 0xFFFFFFFF;
2368 else
2369 input_masks.src_ip_mask[0] =
2370 ~fs->m_u.tcp_ip4_spec.ip4src;
2371 }
2372 if (fs->h_u.tcp_ip4_spec.ip4dst || ~fs->m_u.tcp_ip4_spec.ip4dst) {
2373 input_struct.formatted.dst_ip[0] = fs->h_u.tcp_ip4_spec.ip4dst;
2374 if (!fs->m_u.tcp_ip4_spec.ip4dst)
2375 input_masks.dst_ip_mask[0] = 0xFFFFFFFF;
2376 else
2377 input_masks.dst_ip_mask[0] =
2378 ~fs->m_u.tcp_ip4_spec.ip4dst;
2379 }
2380 if (fs->h_u.tcp_ip4_spec.psrc || ~fs->m_u.tcp_ip4_spec.psrc) {
2381 input_struct.formatted.src_port = fs->h_u.tcp_ip4_spec.psrc;
2382 if (!fs->m_u.tcp_ip4_spec.psrc)
2383 input_masks.src_port_mask = 0xFFFF;
2384 else
2385 input_masks.src_port_mask = ~fs->m_u.tcp_ip4_spec.psrc;
2386 }
2387 if (fs->h_u.tcp_ip4_spec.pdst || ~fs->m_u.tcp_ip4_spec.pdst) {
2388 input_struct.formatted.dst_port = fs->h_u.tcp_ip4_spec.pdst;
2389 if (!fs->m_u.tcp_ip4_spec.pdst)
2390 input_masks.dst_port_mask = 0xFFFF;
2391 else
2392 input_masks.dst_port_mask = ~fs->m_u.tcp_ip4_spec.pdst;
2393 }
2255 2394
2256 /* determine if we need to drop or route the packet */ 2395 /* determine if we need to drop or route the packet */
2257 if (fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) 2396 if (fs->action == ETHTOOL_RXNTUPLE_ACTION_DROP)
2258 target_queue = MAX_RX_QUEUES - 1; 2397 target_queue = MAX_RX_QUEUES - 1;
2259 else 2398 else
2260 target_queue = fs.action; 2399 target_queue = fs->action;
2261 2400
2262 spin_lock(&adapter->fdir_perfect_lock); 2401 spin_lock(&adapter->fdir_perfect_lock);
2263 ixgbe_fdir_add_perfect_filter_82599(&adapter->hw, &input_struct, 2402 err = ixgbe_fdir_add_perfect_filter_82599(&adapter->hw,
2264 &input_masks, 0, target_queue); 2403 &input_struct,
2404 &input_masks, 0,
2405 target_queue);
2265 spin_unlock(&adapter->fdir_perfect_lock); 2406 spin_unlock(&adapter->fdir_perfect_lock);
2266 2407
2267 return 0; 2408 return err ? -1 : 0;
2268} 2409}
2269 2410
2270static const struct ethtool_ops ixgbe_ethtool_ops = { 2411static const struct ethtool_ops ixgbe_ethtool_ops = {
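
The rewritten ixgbe_set_rx_ntuple() above translates each ethtool field/mask pair into the flow director's formatted value plus compare mask: the ethtool mask flags bits to ignore, the hardware mask flags bits to compare, and a field supplied without a mask defaults to a full compare. A small sketch of that conversion for one 32-bit field (struct and function names are invented for the example):

#include <stdint.h>
#include <stdio.h>

/*
 * 'ignore' plays the role of the ethtool fs->m_u mask (set bits are ignored);
 * the return value plays the role of the hardware compare mask (set bits are
 * compared against the supplied value).
 */
struct field {
        uint32_t value;   /* fs->h_u.tcp_ip4_spec.*        */
        uint32_t ignore;  /* fs->m_u.tcp_ip4_spec.* (mask) */
};

static uint32_t hw_compare_mask(const struct field *f)
{
        if (!f->value && f->ignore == 0xFFFFFFFF)
                return 0;                /* neither set: field not used       */
        if (!f->ignore)
                return 0xFFFFFFFF;       /* value without mask: compare all   */
        return ~f->ignore;               /* otherwise: invert the ignore mask */
}

int main(void)
{
        /* match 192.168.0.x: compare the upper 24 bits, ignore the host byte */
        struct field src_ip = { .value = 0xC0A80001, .ignore = 0x000000FF };

        printf("hw mask = 0x%08X\n", (unsigned)hw_compare_mask(&src_ip));
        return 0;
}
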
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 05efa6a8ce8e..6342d4859790 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -68,7 +68,7 @@ static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
68static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp) 68static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
69{ 69{
70 ddp->len = 0; 70 ddp->len = 0;
71 ddp->err = 0; 71 ddp->err = 1;
72 ddp->udl = NULL; 72 ddp->udl = NULL;
73 ddp->udp = 0UL; 73 ddp->udp = 0UL;
74 ddp->sgl = NULL; 74 ddp->sgl = NULL;
@@ -92,6 +92,7 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
92 struct ixgbe_fcoe *fcoe; 92 struct ixgbe_fcoe *fcoe;
93 struct ixgbe_adapter *adapter; 93 struct ixgbe_adapter *adapter;
94 struct ixgbe_fcoe_ddp *ddp; 94 struct ixgbe_fcoe_ddp *ddp;
95 u32 fcbuff;
95 96
96 if (!netdev) 97 if (!netdev)
97 goto out_ddp_put; 98 goto out_ddp_put;
@@ -115,7 +116,14 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
115 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); 116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
116 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, 117 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
117 (xid | IXGBE_FCDMARW_WE)); 118 (xid | IXGBE_FCDMARW_WE));
119
120 /* guaranteed to be invalidated after 100us */
121 IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
122 (xid | IXGBE_FCDMARW_RE));
123 fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
118 spin_unlock_bh(&fcoe->lock); 124 spin_unlock_bh(&fcoe->lock);
125 if (fcbuff & IXGBE_FCBUFF_VALID)
126 udelay(100);
119 } 127 }
120 if (ddp->sgl) 128 if (ddp->sgl)
121 pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc, 129 pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
@@ -168,6 +176,11 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
168 return 0; 176 return 0;
169 } 177 }
170 178
179 /* no DDP if we are already down or resetting */
180 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
181 test_bit(__IXGBE_RESETTING, &adapter->state))
182 return 0;
183
171 fcoe = &adapter->fcoe; 184 fcoe = &adapter->fcoe;
172 if (!fcoe->pool) { 185 if (!fcoe->pool) {
173 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid); 186 e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index eee0b298bd36..a060610a42db 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,13 +52,14 @@ char ixgbe_driver_name[] = "ixgbe";
52static const char ixgbe_driver_string[] = 52static const char ixgbe_driver_string[] =
53 "Intel(R) 10 Gigabit PCI Express Network Driver"; 53 "Intel(R) 10 Gigabit PCI Express Network Driver";
54 54
55#define DRV_VERSION "2.0.84-k2" 55#define DRV_VERSION "3.0.12-k2"
56const char ixgbe_driver_version[] = DRV_VERSION; 56const char ixgbe_driver_version[] = DRV_VERSION;
57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation."; 57static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
58 58
59static const struct ixgbe_info *ixgbe_info_tbl[] = { 59static const struct ixgbe_info *ixgbe_info_tbl[] = {
60 [board_82598] = &ixgbe_82598_info, 60 [board_82598] = &ixgbe_82598_info,
61 [board_82599] = &ixgbe_82599_info, 61 [board_82599] = &ixgbe_82599_info,
62 [board_X540] = &ixgbe_X540_info,
62}; 63};
63 64
64/* ixgbe_pci_tbl - PCI Device ID Table 65/* ixgbe_pci_tbl - PCI Device ID Table
@@ -108,10 +109,16 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
108 board_82599 }, 109 board_82599 },
109 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 110 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
110 board_82599 }, 111 board_82599 },
112 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE),
113 board_82599 },
114 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE),
115 board_82599 },
111 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 116 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM),
112 board_82599 }, 117 board_82599 },
113 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 118 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
114 board_82599 }, 119 board_82599 },
120 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
121 board_X540 },
115 122
116 /* required last entry */ 123 /* required last entry */
117 {0, } 124 {0, }
@@ -560,6 +567,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
560 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar); 567 IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
561 break; 568 break;
562 case ixgbe_mac_82599EB: 569 case ixgbe_mac_82599EB:
570 case ixgbe_mac_X540:
563 if (direction == -1) { 571 if (direction == -1) {
564 /* other causes */ 572 /* other causes */
565 msix_vector |= IXGBE_IVAR_ALLOC_VAL; 573 msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,29 +597,34 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
589{ 597{
590 u32 mask; 598 u32 mask;
591 599
592 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 600 switch (adapter->hw.mac.type) {
601 case ixgbe_mac_82598EB:
593 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 602 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
594 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask); 603 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
595 } else { 604 break;
605 case ixgbe_mac_82599EB:
606 case ixgbe_mac_X540:
596 mask = (qmask & 0xFFFFFFFF); 607 mask = (qmask & 0xFFFFFFFF);
597 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); 608 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
598 mask = (qmask >> 32); 609 mask = (qmask >> 32);
599 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask); 610 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
611 break;
612 default:
613 break;
600 } 614 }
601} 615}
602 616
603void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter, 617void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
604 struct ixgbe_tx_buffer 618 struct ixgbe_tx_buffer *tx_buffer_info)
605 *tx_buffer_info)
606{ 619{
607 if (tx_buffer_info->dma) { 620 if (tx_buffer_info->dma) {
608 if (tx_buffer_info->mapped_as_page) 621 if (tx_buffer_info->mapped_as_page)
609 dma_unmap_page(&adapter->pdev->dev, 622 dma_unmap_page(tx_ring->dev,
610 tx_buffer_info->dma, 623 tx_buffer_info->dma,
611 tx_buffer_info->length, 624 tx_buffer_info->length,
612 DMA_TO_DEVICE); 625 DMA_TO_DEVICE);
613 else 626 else
614 dma_unmap_single(&adapter->pdev->dev, 627 dma_unmap_single(tx_ring->dev,
615 tx_buffer_info->dma, 628 tx_buffer_info->dma,
616 tx_buffer_info->length, 629 tx_buffer_info->length,
617 DMA_TO_DEVICE); 630 DMA_TO_DEVICE);
@@ -626,92 +639,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
626} 639}
627 640
628/** 641/**
629 * ixgbe_tx_xon_state - check the tx ring xon state 642 * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
630 * @adapter: the ixgbe adapter 643 * @adapter: driver private struct
631 * @tx_ring: the corresponding tx_ring 644 * @index: reg idx of queue to query (0-127)
632 * 645 *
633 * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the 646 * Helper function to determine the traffic index for a paticular
634 * corresponding TC of this tx_ring when checking TFCS. 647 * register index.
635 * 648 *
636 * Returns : true if in xon state (currently not paused) 649 * Returns : a tc index for use in range 0-7, or 0-3
637 */ 650 */
638static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter, 651u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
639 struct ixgbe_ring *tx_ring)
640{ 652{
641 u32 txoff = IXGBE_TFCS_TXOFF; 653 int tc = -1;
654 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
642 655
643#ifdef CONFIG_IXGBE_DCB 656 /* if DCB is not enabled the queues have no TC */
644 if (adapter->dcb_cfg.pfc_mode_enable) { 657 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
645 int tc; 658 return tc;
646 int reg_idx = tx_ring->reg_idx; 659
647 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 660 /* check valid range */
661 if (reg_idx >= adapter->hw.mac.max_tx_queues)
662 return tc;
663
664 switch (adapter->hw.mac.type) {
665 case ixgbe_mac_82598EB:
666 tc = reg_idx >> 2;
667 break;
668 default:
669 if (dcb_i != 4 && dcb_i != 8)
670 break;
671
672 /* if VMDq is enabled the lowest order bits determine TC */
673 if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
674 IXGBE_FLAG_VMDQ_ENABLED)) {
675 tc = reg_idx & (dcb_i - 1);
676 break;
677 }
678
679 /*
680 * Convert the reg_idx into the correct TC. This bitmask
681 * targets the last full 32 ring traffic class and assigns
682 * it a value of 1. From there the rest of the rings are
683 * based on shifting the mask further up to include the
684 * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
685 * will only ever be 8 or 4 and that reg_idx will never
686 * be greater than 128. The code without the power of 2
687 * optimizations would be:
688 * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
689 */
690 tc = ((reg_idx & 0X1F) + 0x20) * dcb_i;
691 tc >>= 9 - (reg_idx >> 5);
692 }
648 693
649 switch (adapter->hw.mac.type) { 694 return tc;
695}
696
697static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
698{
699 struct ixgbe_hw *hw = &adapter->hw;
700 struct ixgbe_hw_stats *hwstats = &adapter->stats;
701 u32 data = 0;
702 u32 xoff[8] = {0};
703 int i;
704
705 if ((hw->fc.current_mode == ixgbe_fc_full) ||
706 (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
707 switch (hw->mac.type) {
650 case ixgbe_mac_82598EB: 708 case ixgbe_mac_82598EB:
651 tc = reg_idx >> 2; 709 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
652 txoff = IXGBE_TFCS_TXOFF0;
653 break; 710 break;
654 case ixgbe_mac_82599EB: 711 default:
655 tc = 0; 712 data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
656 txoff = IXGBE_TFCS_TXOFF; 713 }
657 if (dcb_i == 8) { 714 hwstats->lxoffrxc += data;
658 /* TC0, TC1 */ 715
659 tc = reg_idx >> 5; 716 /* refill credits (no tx hang) if we received xoff */
660 if (tc == 2) /* TC2, TC3 */ 717 if (!data)
661 tc += (reg_idx - 64) >> 4; 718 return;
662 else if (tc == 3) /* TC4, TC5, TC6, TC7 */ 719
663 tc += 1 + ((reg_idx - 96) >> 3); 720 for (i = 0; i < adapter->num_tx_queues; i++)
664 } else if (dcb_i == 4) { 721 clear_bit(__IXGBE_HANG_CHECK_ARMED,
665 /* TC0, TC1 */ 722 &adapter->tx_ring[i]->state);
666 tc = reg_idx >> 6; 723 return;
667 if (tc == 1) { 724 } else if (!(adapter->dcb_cfg.pfc_mode_enable))
668 tc += (reg_idx - 64) >> 5; 725 return;
669 if (tc == 2) /* TC2, TC3 */ 726
670 tc += (reg_idx - 96) >> 4; 727 /* update stats for each tc, only valid with PFC enabled */
671 } 728 for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
672 } 729 switch (hw->mac.type) {
730 case ixgbe_mac_82598EB:
731 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
673 break; 732 break;
674 default: 733 default:
675 tc = 0; 734 xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
676 } 735 }
677 txoff <<= tc; 736 hwstats->pxoffrxc[i] += xoff[i];
678 } 737 }
679#endif 738
680 return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff; 739 /* disarm tx queues that have received xoff frames */
740 for (i = 0; i < adapter->num_tx_queues; i++) {
741 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
742 u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
743
744 if (xoff[tc])
745 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
746 }
747}
748
749static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
750{
751 return ring->tx_stats.completed;
681} 752}
682 753
683 static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
684 struct ixgbe_ring *tx_ring,
685 unsigned int eop)
686{
687 struct ixgbe_hw *hw = &adapter->hw;
688
689 /* Detect a transmit hang in hardware, this serializes the
690 * check with the clearing of time_stamp and movement of eop */
691 adapter->detect_tx_hung = false;
692 if (tx_ring->tx_buffer_info[eop].time_stamp &&
693 time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
694 ixgbe_tx_xon_state(adapter, tx_ring)) {
695 /* detected Tx unit hang */
696 union ixgbe_adv_tx_desc *tx_desc;
697 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
698 e_err(drv, "Detected Tx Unit Hang\n"
699 " Tx Queue <%d>\n"
700 " TDH, TDT <%x>, <%x>\n"
701 " next_to_use <%x>\n"
702 " next_to_clean <%x>\n"
703 "tx_buffer_info[next_to_clean]\n"
704 " time_stamp <%lx>\n"
705 " jiffies <%lx>\n",
706 tx_ring->queue_index,
707 IXGBE_READ_REG(hw, tx_ring->head),
708 IXGBE_READ_REG(hw, tx_ring->tail),
709 tx_ring->next_to_use, eop,
710 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
711 return true;
712 }
713
714 return false;
715}
754 static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
755{
756 struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
757 struct ixgbe_hw *hw = &adapter->hw;
758
759 u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
760 u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
761
762 if (head != tail)
763 return (head < tail) ?
764 tail - head : (tail + ring->count - head);
765
766 return 0;
767}
768
769 static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
770{
771 u32 tx_done = ixgbe_get_tx_completed(tx_ring);
772 u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
773 u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
774 bool ret = false;
775
776 clear_check_for_tx_hang(tx_ring);
777
778 /*
779 * Check for a hung queue, but be thorough. This verifies
780 * that a transmit has been completed since the previous
781 * check AND there is at least one packet pending. The
782 * ARMED bit is set to indicate a potential hang. The
783 * bit is cleared if a pause frame is received to remove
784 * false hang detection due to PFC or 802.3x frames. By
785 * requiring this to fail twice we avoid races with
786 * pfc clearing the ARMED bit and conditions where we
787 * run the check_tx_hang logic with a transmit completion
788 * pending but without time to complete it yet.
789 */
790 if ((tx_done_old == tx_done) && tx_pending) {
791 /* make sure it is true for two checks in a row */
792 ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
793 &tx_ring->state);
794 } else {
795 /* update completed stats and continue */
796 tx_ring->tx_stats.tx_done_old = tx_done;
797 /* reset the countdown */
798 clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
799 }
800
801 return ret;
802}
716 803
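The new hang check above replaces the old timestamp heuristic with a two-strike scheme: a ring is only reported hung if no descriptor completed between two consecutive checks while work is still pending, with the ARMED bit as the first strike. A minimal sketch of the same pattern outside the driver (all names here are illustrative, not ixgbe API):

#include <linux/types.h>
#include <linux/bitops.h>

#define MY_RING_HANG_ARMED	0	/* illustrative state bit */

struct my_ring {
	unsigned long	state;
	u64		done;		/* completions observed so far */
	u64		done_old;	/* snapshot from the previous check */
	u32		pending;	/* descriptors still queued to hardware */
};

/* Returns true only when two consecutive checks see no progress. */
static bool my_ring_check_hang(struct my_ring *r)
{
	if (r->done == r->done_old && r->pending)
		return test_and_set_bit(MY_RING_HANG_ARMED, &r->state);

	/* progress was made (or nothing pending): reset the countdown */
	r->done_old = r->done;
	clear_bit(MY_RING_HANG_ARMED, &r->state);
	return false;
}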
717#define IXGBE_MAX_TXD_PWR 14 804#define IXGBE_MAX_TXD_PWR 14
@@ -734,11 +821,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
734 struct ixgbe_ring *tx_ring) 821 struct ixgbe_ring *tx_ring)
735{ 822{
736 struct ixgbe_adapter *adapter = q_vector->adapter; 823 struct ixgbe_adapter *adapter = q_vector->adapter;
737 struct net_device *netdev = adapter->netdev;
738 union ixgbe_adv_tx_desc *tx_desc, *eop_desc; 824 union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
739 struct ixgbe_tx_buffer *tx_buffer_info; 825 struct ixgbe_tx_buffer *tx_buffer_info;
740 unsigned int i, eop, count = 0;
741 unsigned int total_bytes = 0, total_packets = 0; 826 unsigned int total_bytes = 0, total_packets = 0;
827 u16 i, eop, count = 0;
742 828
743 i = tx_ring->next_to_clean; 829 i = tx_ring->next_to_clean;
744 eop = tx_ring->tx_buffer_info[i].next_to_watch; 830 eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,148 +835,182 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
749 bool cleaned = false; 835 bool cleaned = false;
750 rmb(); /* read buffer_info after eop_desc */ 836 rmb(); /* read buffer_info after eop_desc */
751 for ( ; !cleaned; count++) { 837 for ( ; !cleaned; count++) {
752 struct sk_buff *skb;
753 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i); 838 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
754 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 839 tx_buffer_info = &tx_ring->tx_buffer_info[i];
755 cleaned = (i == eop);
756 skb = tx_buffer_info->skb;
757
758 if (cleaned && skb) {
759 unsigned int segs, bytecount;
760 unsigned int hlen = skb_headlen(skb);
761
762 /* gso_segs is currently only valid for tcp */
763 segs = skb_shinfo(skb)->gso_segs ?: 1;
764#ifdef IXGBE_FCOE
765 /* adjust for FCoE Sequence Offload */
766 if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
767 && skb_is_gso(skb)
768 && vlan_get_protocol(skb) ==
769 htons(ETH_P_FCOE)) {
770 hlen = skb_transport_offset(skb) +
771 sizeof(struct fc_frame_header) +
772 sizeof(struct fcoe_crc_eof);
773 segs = DIV_ROUND_UP(skb->len - hlen,
774 skb_shinfo(skb)->gso_size);
775 }
776#endif /* IXGBE_FCOE */
777 /* multiply data chunks by size of headers */
778 bytecount = ((segs - 1) * hlen) + skb->len;
779 total_packets += segs;
780 total_bytes += bytecount;
781 }
782
783 ixgbe_unmap_and_free_tx_resource(adapter,
784 tx_buffer_info);
785 840
786 tx_desc->wb.status = 0; 841 tx_desc->wb.status = 0;
842 cleaned = (i == eop);
787 843
788 i++; 844 i++;
789 if (i == tx_ring->count) 845 if (i == tx_ring->count)
790 i = 0; 846 i = 0;
847
848 if (cleaned && tx_buffer_info->skb) {
849 total_bytes += tx_buffer_info->bytecount;
850 total_packets += tx_buffer_info->gso_segs;
851 }
852
853 ixgbe_unmap_and_free_tx_resource(tx_ring,
854 tx_buffer_info);
791 } 855 }
792 856
857 tx_ring->tx_stats.completed++;
793 eop = tx_ring->tx_buffer_info[i].next_to_watch; 858 eop = tx_ring->tx_buffer_info[i].next_to_watch;
794 eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop); 859 eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
795 } 860 }
796 861
797 tx_ring->next_to_clean = i; 862 tx_ring->next_to_clean = i;
863 tx_ring->total_bytes += total_bytes;
864 tx_ring->total_packets += total_packets;
865 u64_stats_update_begin(&tx_ring->syncp);
866 tx_ring->stats.packets += total_packets;
867 tx_ring->stats.bytes += total_bytes;
868 u64_stats_update_end(&tx_ring->syncp);
869
870 if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
871 /* schedule immediate reset if we believe we hung */
872 struct ixgbe_hw *hw = &adapter->hw;
873 tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
874 e_err(drv, "Detected Tx Unit Hang\n"
875 " Tx Queue <%d>\n"
876 " TDH, TDT <%x>, <%x>\n"
877 " next_to_use <%x>\n"
878 " next_to_clean <%x>\n"
879 "tx_buffer_info[next_to_clean]\n"
880 " time_stamp <%lx>\n"
881 " jiffies <%lx>\n",
882 tx_ring->queue_index,
883 IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
884 IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
885 tx_ring->next_to_use, eop,
886 tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
887
888 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
889
890 e_info(probe,
891 "tx hang %d detected on queue %d, resetting adapter\n",
892 adapter->tx_timeout_count + 1, tx_ring->queue_index);
893
894 /* schedule immediate reset if we believe we hung */
895 ixgbe_tx_timeout(adapter->netdev);
896
897 /* the adapter is about to reset, no point in enabling stuff */
898 return true;
899 }
798 900
799#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) 901#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
800 if (unlikely(count && netif_carrier_ok(netdev) && 902 if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
801 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) { 903 (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
802 /* Make sure that anybody stopping the queue after this 904 /* Make sure that anybody stopping the queue after this
803 * sees the new next_to_clean. 905 * sees the new next_to_clean.
804 */ 906 */
805 smp_mb(); 907 smp_mb();
806 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 908 if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
807 !test_bit(__IXGBE_DOWN, &adapter->state)) { 909 !test_bit(__IXGBE_DOWN, &adapter->state)) {
808 netif_wake_subqueue(netdev, tx_ring->queue_index); 910 netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
809 ++tx_ring->restart_queue; 911 ++tx_ring->tx_stats.restart_queue;
810 }
811 }
812
813 if (adapter->detect_tx_hung) {
814 if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
815 /* schedule immediate reset if we believe we hung */
816 e_info(probe, "tx hang %d detected, resetting "
817 "adapter\n", adapter->tx_timeout_count + 1);
818 ixgbe_tx_timeout(adapter->netdev);
819 } 912 }
820 } 913 }
821 914
822 /* re-arm the interrupt */
823 if (count >= tx_ring->work_limit)
824 ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
825
826 tx_ring->total_bytes += total_bytes;
827 tx_ring->total_packets += total_packets;
828 u64_stats_update_begin(&tx_ring->syncp);
829 tx_ring->stats.packets += total_packets;
830 tx_ring->stats.bytes += total_bytes;
831 u64_stats_update_end(&tx_ring->syncp);
832 return count < tx_ring->work_limit; 915 return count < tx_ring->work_limit;
833} 916}
834 917
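The Tx clean path now publishes byte/packet totals under a u64_stats_sync sequence, so 64-bit counters read consistently even on 32-bit SMP where the two halves cannot be loaded atomically. A sketch of the writer/reader pairing, with illustrative struct and field names:

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_ring_stats {
	u64			packets;
	u64			bytes;
	struct u64_stats_sync	syncp;
};

/* writer side: called from the ring's NAPI context */
static void my_stats_add(struct my_ring_stats *s, u64 pkts, u64 bytes)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += pkts;
	s->bytes += bytes;
	u64_stats_update_end(&s->syncp);
}

/* reader side: retries if a writer raced with the snapshot */
static void my_stats_read(struct my_ring_stats *s, u64 *pkts, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&s->syncp);
		*pkts = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry(&s->syncp, start));
}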
835#ifdef CONFIG_IXGBE_DCA 918#ifdef CONFIG_IXGBE_DCA
836static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, 919static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
837 struct ixgbe_ring *rx_ring) 920 struct ixgbe_ring *rx_ring,
921 int cpu)
838{ 922{
923 struct ixgbe_hw *hw = &adapter->hw;
839 u32 rxctrl; 924 u32 rxctrl;
840 int cpu = get_cpu();
841 int q = rx_ring->reg_idx;
842
843 if (rx_ring->cpu != cpu) {
844 rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
845 if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
846 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
847 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
848 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
849 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
850 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
851 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
852 }
853 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
854 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
855 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
856 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
857 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
858 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
859 rx_ring->cpu = cpu;
860 }
861 put_cpu();
925 u8 reg_idx = rx_ring->reg_idx;
926
927 rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
928 switch (hw->mac.type) {
929 case ixgbe_mac_82598EB:
930 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
931 rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
932 break;
933 case ixgbe_mac_82599EB:
934 case ixgbe_mac_X540:
935 rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
936 rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
937 IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
938 break;
939 default:
940 break;
941 }
942 rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
943 rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
944 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
945 rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
946 IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
947 IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
862} 948}
863 949
864static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, 950static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
865 struct ixgbe_ring *tx_ring) 951 struct ixgbe_ring *tx_ring,
952 int cpu)
866{ 953{
954 struct ixgbe_hw *hw = &adapter->hw;
867 u32 txctrl; 955 u32 txctrl;
956 u8 reg_idx = tx_ring->reg_idx;
957
958 switch (hw->mac.type) {
959 case ixgbe_mac_82598EB:
960 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
961 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
962 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
963 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
964 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
965 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
966 break;
967 case ixgbe_mac_82599EB:
968 case ixgbe_mac_X540:
969 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
970 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
971 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
972 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
973 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
974 txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
975 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
976 break;
977 default:
978 break;
979 }
980}
981
982static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
983{
984 struct ixgbe_adapter *adapter = q_vector->adapter;
868 int cpu = get_cpu(); 985 int cpu = get_cpu();
869 int q = tx_ring->reg_idx; 986 long r_idx;
870 struct ixgbe_hw *hw = &adapter->hw; 987 int i;
871 988
872 if (tx_ring->cpu != cpu) { 989 if (q_vector->cpu == cpu)
873 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 990 goto out_no_update;
874 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q)); 991
875 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK; 992 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
876 txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 993 for (i = 0; i < q_vector->txr_count; i++) {
877 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 994 ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
878 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl); 995 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
879 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 996 r_idx + 1);
880 txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q)); 997 }
881 txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599; 998
882 txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) << 999 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
883 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599); 1000 for (i = 0; i < q_vector->rxr_count; i++) {
884 txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN; 1001 ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
885 IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl); 1002 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
886 } 1003 r_idx + 1);
887 tx_ring->cpu = cpu;
888 } 1004 }
1005
1006 q_vector->cpu = cpu;
1007out_no_update:
889 put_cpu(); 1008 put_cpu();
890} 1009}
891 1010
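ixgbe_update_dca walks each vector's Tx and Rx ring bitmaps with find_first_bit/find_next_bit, retargeting every ring owned by the vector to the current CPU in one pass. The same iteration pattern in isolation (array size and helper names are illustrative):

#include <linux/bitops.h>

#define MY_MAX_RINGS	64

/* Visit every ring index set in @idx, in ascending order. */
static void my_for_each_ring(const unsigned long *idx, unsigned int count,
			     void (*fn)(unsigned long ring))
{
	unsigned int i;
	unsigned long r = find_first_bit(idx, MY_MAX_RINGS);

	for (i = 0; i < count; i++) {
		fn(r);
		r = find_next_bit(idx, MY_MAX_RINGS, r + 1);
	}
}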
892static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) 1011static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
893{ 1012{
1013 int num_q_vectors;
894 int i; 1014 int i;
895 1015
896 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) 1016 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
@@ -899,22 +1019,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
899 /* always use CB2 mode, difference is masked in the CB driver */ 1019 /* always use CB2 mode, difference is masked in the CB driver */
900 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); 1020 IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
901 1021
902 for (i = 0; i < adapter->num_tx_queues; i++) {
903 adapter->tx_ring[i]->cpu = -1;
904 ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
905 }
906 for (i = 0; i < adapter->num_rx_queues; i++) {
907 adapter->rx_ring[i]->cpu = -1;
908 ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
909 }
1022 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
1023 num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
1024 else
1025 num_q_vectors = 1;
1026
1027 for (i = 0; i < num_q_vectors; i++) {
1028 adapter->q_vector[i]->cpu = -1;
1029 ixgbe_update_dca(adapter->q_vector[i]);
1030 }
910} 1031}
911 1032
912static int __ixgbe_notify_dca(struct device *dev, void *data) 1033static int __ixgbe_notify_dca(struct device *dev, void *data)
913{ 1034{
914 struct net_device *netdev = dev_get_drvdata(dev); 1035 struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
915 struct ixgbe_adapter *adapter = netdev_priv(netdev);
916 unsigned long event = *(unsigned long *)data; 1036 unsigned long event = *(unsigned long *)data;
917 1037
1038 if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
1039 return 0;
1040
918 switch (event) { 1041 switch (event) {
919 case DCA_PROVIDER_ADD: 1042 case DCA_PROVIDER_ADD:
920 /* if we're already enabled, don't do it again */ 1043 /* if we're already enabled, don't do it again */
@@ -1013,8 +1136,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
1013 skb->ip_summed = CHECKSUM_UNNECESSARY; 1136 skb->ip_summed = CHECKSUM_UNNECESSARY;
1014} 1137}
1015 1138
1016static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw, 1139static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
1017 struct ixgbe_ring *rx_ring, u32 val)
1018{ 1140{
1019 /* 1141 /*
1020 * Force memory writes to complete before letting h/w 1142 * Force memory writes to complete before letting h/w
@@ -1023,72 +1145,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
1023 * such as IA-64). 1145 * such as IA-64).
1024 */ 1146 */
1025 wmb(); 1147 wmb();
1026 IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val); 1148 writel(val, rx_ring->tail);
1027} 1149}
1028 1150
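ixgbe_release_rx_desc now bumps the RX tail through the ring's cached MMIO address with writel() instead of recomputing IXGBE_RDT from the register index each time; the write barrier still precedes the doorbell so the device never sees a tail that points at descriptors whose memory writes have not completed. The general producer pattern, reduced to a sketch (the ring layout is illustrative):

#include <linux/types.h>
#include <linux/io.h>

struct my_rx_ring {
	void __iomem	*tail;	/* mapped tail/doorbell register */
};

static void my_release_rx_desc(struct my_rx_ring *ring, u32 val)
{
	/*
	 * Descriptor writes must be globally visible before the device
	 * is told about them, hence the barrier before the doorbell.
	 */
	wmb();
	writel(val, ring->tail);
}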
1029/** 1151/**
1030 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split 1152 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
1031 * @adapter: address of board private structure 1153 * @rx_ring: ring to place buffers on
1154 * @cleaned_count: number of buffers to replace
1032 **/ 1155 **/
1033void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter, 1156void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
1034 struct ixgbe_ring *rx_ring,
1035 int cleaned_count)
1036{ 1157{
1037 struct net_device *netdev = adapter->netdev;
1038 struct pci_dev *pdev = adapter->pdev;
1039 union ixgbe_adv_rx_desc *rx_desc; 1158 union ixgbe_adv_rx_desc *rx_desc;
1040 struct ixgbe_rx_buffer *bi; 1159 struct ixgbe_rx_buffer *bi;
1041 unsigned int i; 1160 struct sk_buff *skb;
1042 unsigned int bufsz = rx_ring->rx_buf_len; 1161 u16 i = rx_ring->next_to_use;
1043 1162
1044 i = rx_ring->next_to_use; 1163 /* do nothing if no valid netdev defined */
1045 bi = &rx_ring->rx_buffer_info[i]; 1164 if (!rx_ring->netdev)
1165 return;
1046 1166
1047 while (cleaned_count--) { 1167 while (cleaned_count--) {
1048 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); 1168 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1169 bi = &rx_ring->rx_buffer_info[i];
1170 skb = bi->skb;
1049 1171
1050 if (!bi->page_dma && 1172 if (!skb) {
1051 (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) { 1173 skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
1052 if (!bi->page) { 1174 rx_ring->rx_buf_len);
1053 bi->page = netdev_alloc_page(netdev);
1054 if (!bi->page) {
1055 adapter->alloc_rx_page_failed++;
1056 goto no_buffers;
1057 }
1058 bi->page_offset = 0;
1059 } else {
1060 /* use a half page if we're re-using */
1061 bi->page_offset ^= (PAGE_SIZE / 2);
1062 }
1063
1064 bi->page_dma = dma_map_page(&pdev->dev, bi->page,
1065 bi->page_offset,
1066 (PAGE_SIZE / 2),
1067 DMA_FROM_DEVICE);
1068 }
1069
1070 if (!bi->skb) {
1071 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
1072 bufsz);
1073 bi->skb = skb;
1074
1075 if (!skb) { 1175 if (!skb) {
1076 adapter->alloc_rx_buff_failed++; 1176 rx_ring->rx_stats.alloc_rx_buff_failed++;
1077 goto no_buffers; 1177 goto no_buffers;
1078 } 1178 }
1079 /* initialize queue mapping */ 1179 /* initialize queue mapping */
1080 skb_record_rx_queue(skb, rx_ring->queue_index); 1180 skb_record_rx_queue(skb, rx_ring->queue_index);
1181 bi->skb = skb;
1081 } 1182 }
1082 1183
1083 if (!bi->dma) { 1184 if (!bi->dma) {
1084 bi->dma = dma_map_single(&pdev->dev, 1185 bi->dma = dma_map_single(rx_ring->dev,
1085 bi->skb->data, 1186 skb->data,
1086 rx_ring->rx_buf_len, 1187 rx_ring->rx_buf_len,
1087 DMA_FROM_DEVICE); 1188 DMA_FROM_DEVICE);
1189 if (dma_mapping_error(rx_ring->dev, bi->dma)) {
1190 rx_ring->rx_stats.alloc_rx_buff_failed++;
1191 bi->dma = 0;
1192 goto no_buffers;
1193 }
1088 } 1194 }
1089 /* Refresh the desc even if buffer_addrs didn't change because 1195
1090 * each write-back erases this info. */ 1196 if (ring_is_ps_enabled(rx_ring)) {
1091 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 1197 if (!bi->page) {
1198 bi->page = netdev_alloc_page(rx_ring->netdev);
1199 if (!bi->page) {
1200 rx_ring->rx_stats.alloc_rx_page_failed++;
1201 goto no_buffers;
1202 }
1203 }
1204
1205 if (!bi->page_dma) {
1206 /* use a half page if we're re-using */
1207 bi->page_offset ^= PAGE_SIZE / 2;
1208 bi->page_dma = dma_map_page(rx_ring->dev,
1209 bi->page,
1210 bi->page_offset,
1211 PAGE_SIZE / 2,
1212 DMA_FROM_DEVICE);
1213 if (dma_mapping_error(rx_ring->dev,
1214 bi->page_dma)) {
1215 rx_ring->rx_stats.alloc_rx_page_failed++;
1216 bi->page_dma = 0;
1217 goto no_buffers;
1218 }
1219 }
1220
1221 /* Refresh the desc even if buffer_addrs didn't change
1222 * because each write-back erases this info. */
1092 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma); 1223 rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
1093 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma); 1224 rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
1094 } else { 1225 } else {
@@ -1099,56 +1230,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
1099 i++; 1230 i++;
1100 if (i == rx_ring->count) 1231 if (i == rx_ring->count)
1101 i = 0; 1232 i = 0;
1102 bi = &rx_ring->rx_buffer_info[i];
1103 } 1233 }
1104 1234
1105no_buffers: 1235no_buffers:
1106 if (rx_ring->next_to_use != i) { 1236 if (rx_ring->next_to_use != i) {
1107 rx_ring->next_to_use = i; 1237 rx_ring->next_to_use = i;
1108 if (i-- == 0) 1238 ixgbe_release_rx_desc(rx_ring, i);
1109 i = (rx_ring->count - 1);
1110
1111 ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
1112 } 1239 }
1113} 1240}
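The refill path above now checks the result of dma_map_single()/dma_map_page() with dma_mapping_error() and backs out of the allocation instead of handing a bad bus address to the hardware. The basic pattern, reduced to its core (function and parameter names are illustrative):

#include <linux/errno.h>
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Map an skb's header area for device RX; returns 0 on success. */
static int my_map_rx_skb(struct device *dev, struct sk_buff *skb,
			 unsigned int len, dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma)) {
		*dma = 0;
		/* caller counts the failure and retries on a later refill */
		return -ENOMEM;
	}
	return 0;
}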
1114 1241
1115static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc) 1242static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
1116{ 1243{
1117 return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info; 1244 /* HW will not DMA in data larger than the given buffer, even if it
1118} 1245 * parses the (NFS, of course) header to be larger. In that case, it
1119 1246 * fills the header buffer and spills the rest into the page.
1120static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc) 1247 */
1121{ 1248 u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
1122 return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; 1249 u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1123} 1250 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1124 1251 if (hlen > IXGBE_RX_HDR_SIZE)
1125static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc) 1252 hlen = IXGBE_RX_HDR_SIZE;
1126{ 1253 return hlen;
1127 return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
1128 IXGBE_RXDADV_RSCCNT_MASK) >>
1129 IXGBE_RXDADV_RSCCNT_SHIFT;
1130} 1254}
1131 1255
1132/** 1256/**
1133 * ixgbe_transform_rsc_queue - change rsc queue into a full packet 1257 * ixgbe_transform_rsc_queue - change rsc queue into a full packet
1134 * @skb: pointer to the last skb in the rsc queue 1258 * @skb: pointer to the last skb in the rsc queue
1135 * @count: pointer to number of packets coalesced in this context
1136 * 1259 *
1137 * This function changes a queue full of hw rsc buffers into a completed 1260 * This function changes a queue full of hw rsc buffers into a completed
1138 * packet. It uses the ->prev pointers to find the first packet and then 1261 * packet. It uses the ->prev pointers to find the first packet and then
1139 * turns it into the frag list owner. 1262 * turns it into the frag list owner.
1140 **/ 1263 **/
1141static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb, 1264static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
1142 u64 *count)
1143{ 1265{
1144 unsigned int frag_list_size = 0; 1266 unsigned int frag_list_size = 0;
1267 unsigned int skb_cnt = 1;
1145 1268
1146 while (skb->prev) { 1269 while (skb->prev) {
1147 struct sk_buff *prev = skb->prev; 1270 struct sk_buff *prev = skb->prev;
1148 frag_list_size += skb->len; 1271 frag_list_size += skb->len;
1149 skb->prev = NULL; 1272 skb->prev = NULL;
1150 skb = prev; 1273 skb = prev;
1151 *count += 1; 1274 skb_cnt++;
1152 } 1275 }
1153 1276
1154 skb_shinfo(skb)->frag_list = skb->next; 1277 skb_shinfo(skb)->frag_list = skb->next;
@@ -1156,68 +1279,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
1156 skb->len += frag_list_size; 1279 skb->len += frag_list_size;
1157 skb->data_len += frag_list_size; 1280 skb->data_len += frag_list_size;
1158 skb->truesize += frag_list_size; 1281 skb->truesize += frag_list_size;
1282 IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
1283
1159 return skb; 1284 return skb;
1160} 1285}
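When an RSC chain is collapsed, the first skb takes ownership of the rest through skb_shinfo(skb)->frag_list, so its length fields must absorb the chained data, as the function above does for the whole ->prev chain. A reduced sketch of that bookkeeping for a single child skb (illustrative, list-append handling omitted):

#include <linux/skbuff.h>

/* Attach @tail as the first frag_list child of @head and fix the totals. */
static void my_chain_skb(struct sk_buff *head, struct sk_buff *tail)
{
	if (!skb_shinfo(head)->frag_list)
		skb_shinfo(head)->frag_list = tail;

	head->len	+= tail->len;
	head->data_len	+= tail->len;
	head->truesize	+= tail->truesize;
}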
1161 1286
1162struct ixgbe_rsc_cb { 1287static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
1163 dma_addr_t dma; 1288{
1164 bool delay_unmap; 1289 return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
1165}; 1290 IXGBE_RXDADV_RSCCNT_MASK);
1166 1291}
1167#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
1168 1292
1169static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, 1293static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1170 struct ixgbe_ring *rx_ring, 1294 struct ixgbe_ring *rx_ring,
1171 int *work_done, int work_to_do) 1295 int *work_done, int work_to_do)
1172{ 1296{
1173 struct ixgbe_adapter *adapter = q_vector->adapter; 1297 struct ixgbe_adapter *adapter = q_vector->adapter;
1174 struct pci_dev *pdev = adapter->pdev;
1175 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 1298 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
1176 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 1299 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
1177 struct sk_buff *skb; 1300 struct sk_buff *skb;
1178 unsigned int i, rsc_count = 0;
1179 u32 len, staterr;
1180 u16 hdr_info;
1181 bool cleaned = false;
1182 int cleaned_count = 0;
1183 unsigned int total_rx_bytes = 0, total_rx_packets = 0; 1301 unsigned int total_rx_bytes = 0, total_rx_packets = 0;
1302 const int current_node = numa_node_id();
1184#ifdef IXGBE_FCOE 1303#ifdef IXGBE_FCOE
1185 int ddp_bytes = 0; 1304 int ddp_bytes = 0;
1186#endif /* IXGBE_FCOE */ 1305#endif /* IXGBE_FCOE */
1306 u32 staterr;
1307 u16 i;
1308 u16 cleaned_count = 0;
1309 bool pkt_is_rsc = false;
1187 1310
1188 i = rx_ring->next_to_clean; 1311 i = rx_ring->next_to_clean;
1189 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i); 1312 rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
1190 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1313 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1191 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1192 1314
1193 while (staterr & IXGBE_RXD_STAT_DD) { 1315 while (staterr & IXGBE_RXD_STAT_DD) {
1194 u32 upper_len = 0; 1316 u32 upper_len = 0;
1195 if (*work_done >= work_to_do)
1196 break;
1197 (*work_done)++;
1198 1317
1199 rmb(); /* read descriptor and rx_buffer_info after status DD */ 1318 rmb(); /* read descriptor and rx_buffer_info after status DD */
1200 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1201 hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
1202 len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
1203 IXGBE_RXDADV_HDRBUFLEN_SHIFT;
1204 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1205 if ((len > IXGBE_RX_HDR_SIZE) ||
1206 (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
1207 len = IXGBE_RX_HDR_SIZE;
1208 } else {
1209 len = le16_to_cpu(rx_desc->wb.upper.length);
1210 }
1211 1319
1212 cleaned = true; 1320 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1321
1213 skb = rx_buffer_info->skb; 1322 skb = rx_buffer_info->skb;
1214 prefetch(skb->data);
1215 rx_buffer_info->skb = NULL; 1323 rx_buffer_info->skb = NULL;
1324 prefetch(skb->data);
1325
1326 if (ring_is_rsc_enabled(rx_ring))
1327 pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
1216 1328
1329 /* if this is a skb from previous receive DMA will be 0 */
1217 if (rx_buffer_info->dma) { 1330 if (rx_buffer_info->dma) {
1218 if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && 1331 u16 hlen;
1219 (!(staterr & IXGBE_RXD_STAT_EOP)) && 1332 if (pkt_is_rsc &&
1220 (!(skb->prev))) { 1333 !(staterr & IXGBE_RXD_STAT_EOP) &&
1334 !skb->prev) {
1221 /* 1335 /*
1222 * When HWRSC is enabled, delay unmapping 1336 * When HWRSC is enabled, delay unmapping
1223 * of the first packet. It carries the 1337 * of the first packet. It carries the
@@ -1228,29 +1342,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1228 IXGBE_RSC_CB(skb)->delay_unmap = true; 1342 IXGBE_RSC_CB(skb)->delay_unmap = true;
1229 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma; 1343 IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
1230 } else { 1344 } else {
1231 dma_unmap_single(&pdev->dev, 1345 dma_unmap_single(rx_ring->dev,
1232 rx_buffer_info->dma, 1346 rx_buffer_info->dma,
1233 rx_ring->rx_buf_len, 1347 rx_ring->rx_buf_len,
1234 DMA_FROM_DEVICE); 1348 DMA_FROM_DEVICE);
1235 } 1349 }
1236 rx_buffer_info->dma = 0; 1350 rx_buffer_info->dma = 0;
1237 skb_put(skb, len); 1351
1352 if (ring_is_ps_enabled(rx_ring)) {
1353 hlen = ixgbe_get_hlen(rx_desc);
1354 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1355 } else {
1356 hlen = le16_to_cpu(rx_desc->wb.upper.length);
1357 }
1358
1359 skb_put(skb, hlen);
1360 } else {
1361 /* assume packet split since header is unmapped */
1362 upper_len = le16_to_cpu(rx_desc->wb.upper.length);
1238 } 1363 }
1239 1364
1240 if (upper_len) { 1365 if (upper_len) {
1241 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 1366 dma_unmap_page(rx_ring->dev,
1242 PAGE_SIZE / 2, DMA_FROM_DEVICE); 1367 rx_buffer_info->page_dma,
1368 PAGE_SIZE / 2,
1369 DMA_FROM_DEVICE);
1243 rx_buffer_info->page_dma = 0; 1370 rx_buffer_info->page_dma = 0;
1244 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags, 1371 skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1245 rx_buffer_info->page, 1372 rx_buffer_info->page,
1246 rx_buffer_info->page_offset, 1373 rx_buffer_info->page_offset,
1247 upper_len); 1374 upper_len);
1248 1375
1249 if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) || 1376 if ((page_count(rx_buffer_info->page) == 1) &&
1250 (page_count(rx_buffer_info->page) != 1)) 1377 (page_to_nid(rx_buffer_info->page) == current_node))
1251 rx_buffer_info->page = NULL;
1252 else
1253 get_page(rx_buffer_info->page); 1378 get_page(rx_buffer_info->page);
1379 else
1380 rx_buffer_info->page = NULL;
1254 1381
1255 skb->len += upper_len; 1382 skb->len += upper_len;
1256 skb->data_len += upper_len; 1383 skb->data_len += upper_len;
@@ -1265,10 +1392,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1265 prefetch(next_rxd); 1392 prefetch(next_rxd);
1266 cleaned_count++; 1393 cleaned_count++;
1267 1394
1268 if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) 1395 if (pkt_is_rsc) {
1269 rsc_count = ixgbe_get_rsc_count(rx_desc);
1270
1271 if (rsc_count) {
1272 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >> 1396 u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
1273 IXGBE_RXDADV_NEXTP_SHIFT; 1397 IXGBE_RXDADV_NEXTP_SHIFT;
1274 next_buffer = &rx_ring->rx_buffer_info[nextp]; 1398 next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1276,32 +1400,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1276 next_buffer = &rx_ring->rx_buffer_info[i]; 1400 next_buffer = &rx_ring->rx_buffer_info[i];
1277 } 1401 }
1278 1402
1279 if (staterr & IXGBE_RXD_STAT_EOP) { 1403 if (!(staterr & IXGBE_RXD_STAT_EOP)) {
1280 if (skb->prev) 1404 if (ring_is_ps_enabled(rx_ring)) {
1281 skb = ixgbe_transform_rsc_queue(skb,
1282 &(rx_ring->rsc_count));
1283 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
1284 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1285 dma_unmap_single(&pdev->dev,
1286 IXGBE_RSC_CB(skb)->dma,
1287 rx_ring->rx_buf_len,
1288 DMA_FROM_DEVICE);
1289 IXGBE_RSC_CB(skb)->dma = 0;
1290 IXGBE_RSC_CB(skb)->delay_unmap = false;
1291 }
1292 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
1293 rx_ring->rsc_count +=
1294 skb_shinfo(skb)->nr_frags;
1295 else
1296 rx_ring->rsc_count++;
1297 rx_ring->rsc_flush++;
1298 }
1299 u64_stats_update_begin(&rx_ring->syncp);
1300 rx_ring->stats.packets++;
1301 rx_ring->stats.bytes += skb->len;
1302 u64_stats_update_end(&rx_ring->syncp);
1303 } else {
1304 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
1305 rx_buffer_info->skb = next_buffer->skb; 1405 rx_buffer_info->skb = next_buffer->skb;
1306 rx_buffer_info->dma = next_buffer->dma; 1406 rx_buffer_info->dma = next_buffer->dma;
1307 next_buffer->skb = skb; 1407 next_buffer->skb = skb;
@@ -1310,12 +1410,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1310 skb->next = next_buffer->skb; 1410 skb->next = next_buffer->skb;
1311 skb->next->prev = skb; 1411 skb->next->prev = skb;
1312 } 1412 }
1313 rx_ring->non_eop_descs++; 1413 rx_ring->rx_stats.non_eop_descs++;
1314 goto next_desc; 1414 goto next_desc;
1315 } 1415 }
1316 1416
1417 if (skb->prev) {
1418 skb = ixgbe_transform_rsc_queue(skb);
1419 /* if we got here without RSC the packet is invalid */
1420 if (!pkt_is_rsc) {
1421 __pskb_trim(skb, 0);
1422 rx_buffer_info->skb = skb;
1423 goto next_desc;
1424 }
1425 }
1426
1427 if (ring_is_rsc_enabled(rx_ring)) {
1428 if (IXGBE_RSC_CB(skb)->delay_unmap) {
1429 dma_unmap_single(rx_ring->dev,
1430 IXGBE_RSC_CB(skb)->dma,
1431 rx_ring->rx_buf_len,
1432 DMA_FROM_DEVICE);
1433 IXGBE_RSC_CB(skb)->dma = 0;
1434 IXGBE_RSC_CB(skb)->delay_unmap = false;
1435 }
1436 }
1437 if (pkt_is_rsc) {
1438 if (ring_is_ps_enabled(rx_ring))
1439 rx_ring->rx_stats.rsc_count +=
1440 skb_shinfo(skb)->nr_frags;
1441 else
1442 rx_ring->rx_stats.rsc_count +=
1443 IXGBE_RSC_CB(skb)->skb_cnt;
1444 rx_ring->rx_stats.rsc_flush++;
1445 }
1446
1447 /* ERR_MASK will only have valid bits if EOP set */
1317 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) { 1448 if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
1318 dev_kfree_skb_irq(skb); 1449 /* trim packet back to size 0 and recycle it */
1450 __pskb_trim(skb, 0);
1451 rx_buffer_info->skb = skb;
1319 goto next_desc; 1452 goto next_desc;
1320 } 1453 }
1321 1454
@@ -1325,7 +1458,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1325 total_rx_bytes += skb->len; 1458 total_rx_bytes += skb->len;
1326 total_rx_packets++; 1459 total_rx_packets++;
1327 1460
1328 skb->protocol = eth_type_trans(skb, adapter->netdev); 1461 skb->protocol = eth_type_trans(skb, rx_ring->netdev);
1329#ifdef IXGBE_FCOE 1462#ifdef IXGBE_FCOE
1330 /* if ddp, not passing to ULD unless for FCP_RSP or error */ 1463 /* if ddp, not passing to ULD unless for FCP_RSP or error */
1331 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { 1464 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1339,16 +1472,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
1339next_desc: 1472next_desc:
1340 rx_desc->wb.upper.status_error = 0; 1473 rx_desc->wb.upper.status_error = 0;
1341 1474
1475 (*work_done)++;
1476 if (*work_done >= work_to_do)
1477 break;
1478
1342 /* return some buffers to hardware, one at a time is too slow */ 1479 /* return some buffers to hardware, one at a time is too slow */
1343 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { 1480 if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
1344 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 1481 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1345 cleaned_count = 0; 1482 cleaned_count = 0;
1346 } 1483 }
1347 1484
1348 /* use prefetched values */ 1485 /* use prefetched values */
1349 rx_desc = next_rxd; 1486 rx_desc = next_rxd;
1350 rx_buffer_info = &rx_ring->rx_buffer_info[i];
1351
1352 staterr = le32_to_cpu(rx_desc->wb.upper.status_error); 1487 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1353 } 1488 }
1354 1489
@@ -1356,14 +1491,14 @@ next_desc:
1356 cleaned_count = IXGBE_DESC_UNUSED(rx_ring); 1491 cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
1357 1492
1358 if (cleaned_count) 1493 if (cleaned_count)
1359 ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count); 1494 ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
1360 1495
1361#ifdef IXGBE_FCOE 1496#ifdef IXGBE_FCOE
1362 /* include DDPed FCoE data */ 1497 /* include DDPed FCoE data */
1363 if (ddp_bytes > 0) { 1498 if (ddp_bytes > 0) {
1364 unsigned int mss; 1499 unsigned int mss;
1365 1500
1366 mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) - 1501 mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
1367 sizeof(struct fc_frame_header) - 1502 sizeof(struct fc_frame_header) -
1368 sizeof(struct fcoe_crc_eof); 1503 sizeof(struct fcoe_crc_eof);
1369 if (mss > 512) 1504 if (mss > 512)
@@ -1375,8 +1510,10 @@ next_desc:
1375 1510
1376 rx_ring->total_packets += total_rx_packets; 1511 rx_ring->total_packets += total_rx_packets;
1377 rx_ring->total_bytes += total_rx_bytes; 1512 rx_ring->total_bytes += total_rx_bytes;
1378 1513 u64_stats_update_begin(&rx_ring->syncp);
1379 return cleaned; 1514 rx_ring->stats.packets += total_rx_packets;
1515 rx_ring->stats.bytes += total_rx_bytes;
1516 u64_stats_update_end(&rx_ring->syncp);
1380} 1517}
1381 1518
1382static int ixgbe_clean_rxonly(struct napi_struct *, int); 1519static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1390,7 +1527,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
1390static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) 1527static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1391{ 1528{
1392 struct ixgbe_q_vector *q_vector; 1529 struct ixgbe_q_vector *q_vector;
1393 int i, j, q_vectors, v_idx, r_idx; 1530 int i, q_vectors, v_idx, r_idx;
1394 u32 mask; 1531 u32 mask;
1395 1532
1396 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 1533 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1406,8 +1543,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1406 adapter->num_rx_queues); 1543 adapter->num_rx_queues);
1407 1544
1408 for (i = 0; i < q_vector->rxr_count; i++) { 1545 for (i = 0; i < q_vector->rxr_count; i++) {
1409 j = adapter->rx_ring[r_idx]->reg_idx; 1546 u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
1410 ixgbe_set_ivar(adapter, 0, j, v_idx); 1547 ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
1411 r_idx = find_next_bit(q_vector->rxr_idx, 1548 r_idx = find_next_bit(q_vector->rxr_idx,
1412 adapter->num_rx_queues, 1549 adapter->num_rx_queues,
1413 r_idx + 1); 1550 r_idx + 1);
@@ -1416,8 +1553,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1416 adapter->num_tx_queues); 1553 adapter->num_tx_queues);
1417 1554
1418 for (i = 0; i < q_vector->txr_count; i++) { 1555 for (i = 0; i < q_vector->txr_count; i++) {
1419 j = adapter->tx_ring[r_idx]->reg_idx; 1556 u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
1420 ixgbe_set_ivar(adapter, 1, j, v_idx); 1557 ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
1421 r_idx = find_next_bit(q_vector->txr_idx, 1558 r_idx = find_next_bit(q_vector->txr_idx,
1422 adapter->num_tx_queues, 1559 adapter->num_tx_queues,
1423 r_idx + 1); 1560 r_idx + 1);
@@ -1448,11 +1585,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
1448 } 1585 }
1449 } 1586 }
1450 1587
1451 if (adapter->hw.mac.type == ixgbe_mac_82598EB) 1588 switch (adapter->hw.mac.type) {
1589 case ixgbe_mac_82598EB:
1452 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX, 1590 ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
1453 v_idx); 1591 v_idx);
1454 else if (adapter->hw.mac.type == ixgbe_mac_82599EB) 1592 break;
1593 case ixgbe_mac_82599EB:
1594 case ixgbe_mac_X540:
1455 ixgbe_set_ivar(adapter, -1, 1, v_idx); 1595 ixgbe_set_ivar(adapter, -1, 1, v_idx);
1596 break;
1597
1598 default:
1599 break;
1600 }
1456 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950); 1601 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
1457 1602
1458 /* set up to autoclear timer, and the vectors */ 1603 /* set up to autoclear timer, and the vectors */
@@ -1548,12 +1693,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1548 int v_idx = q_vector->v_idx; 1693 int v_idx = q_vector->v_idx;
1549 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr); 1694 u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
1550 1695
1551 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1696 switch (adapter->hw.mac.type) {
1697 case ixgbe_mac_82598EB:
1552 /* must write high and low 16 bits to reset counter */ 1698 /* must write high and low 16 bits to reset counter */
1553 itr_reg |= (itr_reg << 16); 1699 itr_reg |= (itr_reg << 16);
1554 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 1700 break;
1701 case ixgbe_mac_82599EB:
1702 case ixgbe_mac_X540:
1555 /* 1703 /*
1556 * 82599 can support a value of zero, so allow it for 1704 * 82599 and X540 can support a value of zero, so allow it for
1557 * max interrupt rate, but there is an errata where it can 1705 * max interrupt rate, but there is an errata where it can
1558 * not be zero with RSC 1706 * not be zero with RSC
1559 */ 1707 */
@@ -1566,6 +1714,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1566 * immediate assertion of the interrupt 1714 * immediate assertion of the interrupt
1567 */ 1715 */
1568 itr_reg |= IXGBE_EITR_CNT_WDIS; 1716 itr_reg |= IXGBE_EITR_CNT_WDIS;
1717 break;
1718 default:
1719 break;
1569 } 1720 }
1570 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg); 1721 IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
1571} 1722}
@@ -1573,14 +1724,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
1573static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector) 1724static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1574{ 1725{
1575 struct ixgbe_adapter *adapter = q_vector->adapter; 1726 struct ixgbe_adapter *adapter = q_vector->adapter;
1727 int i, r_idx;
1576 u32 new_itr; 1728 u32 new_itr;
1577 u8 current_itr, ret_itr; 1729 u8 current_itr, ret_itr;
1578 int i, r_idx;
1579 struct ixgbe_ring *rx_ring, *tx_ring;
1580 1730
1581 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 1731 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1582 for (i = 0; i < q_vector->txr_count; i++) { 1732 for (i = 0; i < q_vector->txr_count; i++) {
1583 tx_ring = adapter->tx_ring[r_idx]; 1733 struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
1584 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1734 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1585 q_vector->tx_itr, 1735 q_vector->tx_itr,
1586 tx_ring->total_packets, 1736 tx_ring->total_packets,
@@ -1595,7 +1745,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1595 1745
1596 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 1746 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1597 for (i = 0; i < q_vector->rxr_count; i++) { 1747 for (i = 0; i < q_vector->rxr_count; i++) {
1598 rx_ring = adapter->rx_ring[r_idx]; 1748 struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
1599 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr, 1749 ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
1600 q_vector->rx_itr, 1750 q_vector->rx_itr,
1601 rx_ring->total_packets, 1751 rx_ring->total_packets,
@@ -1626,7 +1776,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
1626 1776
1627 if (new_itr != q_vector->eitr) { 1777 if (new_itr != q_vector->eitr) {
1628 /* do an exponential smoothing */ 1778 /* do an exponential smoothing */
1629 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 1779 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
1630 1780
1631 /* save the algorithm value here, not the smoothed one */ 1781 /* save the algorithm value here, not the smoothed one */
1632 q_vector->eitr = new_itr; 1782 q_vector->eitr = new_itr;
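The smoothing update folds the two percentage terms into a single rounded expression, which divides once instead of twice. For example, with eitr = 101 and new_itr = 9 the old form gives (101*90)/100 + (9*10)/100 = 90 + 0 = 90, while the new form gives (101*9 + 9)/10 = 918/10 = 91: both keep roughly a 90/10 weighting, but the combined expression loses less to integer truncation because it only truncates the final quotient.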
@@ -1694,17 +1844,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
1694{ 1844{
1695 struct ixgbe_hw *hw = &adapter->hw; 1845 struct ixgbe_hw *hw = &adapter->hw;
1696 1846
1847 if (eicr & IXGBE_EICR_GPI_SDP2) {
1848 /* Clear the interrupt */
1849 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1850 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1851 schedule_work(&adapter->sfp_config_module_task);
1852 }
1853
1697 if (eicr & IXGBE_EICR_GPI_SDP1) { 1854 if (eicr & IXGBE_EICR_GPI_SDP1) {
1698 /* Clear the interrupt */ 1855 /* Clear the interrupt */
1699 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1); 1856 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
1700 schedule_work(&adapter->multispeed_fiber_task); 1857 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1701 } else if (eicr & IXGBE_EICR_GPI_SDP2) { 1858 schedule_work(&adapter->multispeed_fiber_task);
1702 /* Clear the interrupt */
1703 IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
1704 schedule_work(&adapter->sfp_config_module_task);
1705 } else {
1706 /* Interrupt isn't for us... */
1707 return;
1708 } 1859 }
1709} 1860}
1710 1861
@@ -1744,16 +1895,16 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1744 if (eicr & IXGBE_EICR_MAILBOX) 1895 if (eicr & IXGBE_EICR_MAILBOX)
1745 ixgbe_msg_task(adapter); 1896 ixgbe_msg_task(adapter);
1746 1897
1747 if (hw->mac.type == ixgbe_mac_82598EB) 1898 switch (hw->mac.type) {
1748 ixgbe_check_fan_failure(adapter, eicr); 1899 case ixgbe_mac_82599EB:
1749
1750 if (hw->mac.type == ixgbe_mac_82599EB) {
1751 ixgbe_check_sfp_event(adapter, eicr); 1900 ixgbe_check_sfp_event(adapter, eicr);
1752 adapter->interrupt_event = eicr;
1753 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && 1901 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
1754 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) 1902 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
1903 adapter->interrupt_event = eicr;
1755 schedule_work(&adapter->check_overtemp_task); 1904 schedule_work(&adapter->check_overtemp_task);
1756 1905 }
1906 /* now fallthrough to handle Flow Director */
1907 case ixgbe_mac_X540:
1757 /* Handle Flow Director Full threshold interrupt */ 1908 /* Handle Flow Director Full threshold interrupt */
1758 if (eicr & IXGBE_EICR_FLOW_DIR) { 1909 if (eicr & IXGBE_EICR_FLOW_DIR) {
1759 int i; 1910 int i;
@@ -1763,12 +1914,18 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
1763 for (i = 0; i < adapter->num_tx_queues; i++) { 1914 for (i = 0; i < adapter->num_tx_queues; i++) {
1764 struct ixgbe_ring *tx_ring = 1915 struct ixgbe_ring *tx_ring =
1765 adapter->tx_ring[i]; 1916 adapter->tx_ring[i];
1766 if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE, 1917 if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
1767 &tx_ring->reinit_state)) 1918 &tx_ring->state))
1768 schedule_work(&adapter->fdir_reinit_task); 1919 schedule_work(&adapter->fdir_reinit_task);
1769 } 1920 }
1770 } 1921 }
1922 break;
1923 default:
1924 break;
1771 } 1925 }
1926
1927 ixgbe_check_fan_failure(adapter, eicr);
1928
1772 if (!test_bit(__IXGBE_DOWN, &adapter->state)) 1929 if (!test_bit(__IXGBE_DOWN, &adapter->state))
1773 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER); 1930 IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
1774 1931
@@ -1779,15 +1936,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
1779 u64 qmask) 1936 u64 qmask)
1780{ 1937{
1781 u32 mask; 1938 u32 mask;
1939 struct ixgbe_hw *hw = &adapter->hw;
1782 1940
1783 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1941 switch (hw->mac.type) {
1942 case ixgbe_mac_82598EB:
1784 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1943 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1785 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask); 1944 IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
1786 } else { 1945 break;
1946 case ixgbe_mac_82599EB:
1947 case ixgbe_mac_X540:
1787 mask = (qmask & 0xFFFFFFFF); 1948 mask = (qmask & 0xFFFFFFFF);
1788 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask); 1949 if (mask)
1950 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
1789 mask = (qmask >> 32); 1951 mask = (qmask >> 32);
1790 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask); 1952 if (mask)
1953 IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
1954 break;
1955 default:
1956 break;
1791 } 1957 }
1792 /* skip the flush */ 1958 /* skip the flush */
1793} 1959}
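On 82599/X540 the per-queue interrupt mask is 64 bits wide and is programmed through two 32-bit EIMS_EX/EIMC_EX registers, and the reworked code also skips a register write when its half of the mask is zero. The split itself is simply the low and high 32-bit halves (sketch, illustrative helper):

#include <linux/types.h>

/* Split a 64-bit queue mask into the two 32-bit register halves. */
static void my_split_qmask(u64 qmask, u32 *lo, u32 *hi)
{
	*lo = (u32)(qmask & 0xFFFFFFFF);	/* queues  0..31 -> EX(0) */
	*hi = (u32)(qmask >> 32);		/* queues 32..63 -> EX(1) */
}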
@@ -1796,15 +1962,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
1796 u64 qmask) 1962 u64 qmask)
1797{ 1963{
1798 u32 mask; 1964 u32 mask;
1965 struct ixgbe_hw *hw = &adapter->hw;
1799 1966
1800 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 1967 switch (hw->mac.type) {
1968 case ixgbe_mac_82598EB:
1801 mask = (IXGBE_EIMS_RTX_QUEUE & qmask); 1969 mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
1802 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask); 1970 IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
1803 } else { 1971 break;
1972 case ixgbe_mac_82599EB:
1973 case ixgbe_mac_X540:
1804 mask = (qmask & 0xFFFFFFFF); 1974 mask = (qmask & 0xFFFFFFFF);
1805 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask); 1975 if (mask)
1976 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
1806 mask = (qmask >> 32); 1977 mask = (qmask >> 32);
1807 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask); 1978 if (mask)
1979 IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
1980 break;
1981 default:
1982 break;
1808 } 1983 }
1809 /* skip the flush */ 1984 /* skip the flush */
1810} 1985}
@@ -1847,8 +2022,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1847 int r_idx; 2022 int r_idx;
1848 int i; 2023 int i;
1849 2024
2025#ifdef CONFIG_IXGBE_DCA
2026 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2027 ixgbe_update_dca(q_vector);
2028#endif
2029
1850 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2030 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1851 for (i = 0; i < q_vector->rxr_count; i++) { 2031 for (i = 0; i < q_vector->rxr_count; i++) {
1852 rx_ring = adapter->rx_ring[r_idx]; 2032 rx_ring = adapter->rx_ring[r_idx];
1853 rx_ring->total_bytes = 0; 2033 rx_ring->total_bytes = 0;
1854 rx_ring->total_packets = 0; 2034 rx_ring->total_packets = 0;
@@ -1859,7 +2039,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1859 if (!q_vector->rxr_count) 2039 if (!q_vector->rxr_count)
1860 return IRQ_HANDLED; 2040 return IRQ_HANDLED;
1861 2041
1862 /* disable interrupts on this vector only */
1863 /* EIAM disabled interrupts (on this vector) for us */ 2042 /* EIAM disabled interrupts (on this vector) for us */
1864 napi_schedule(&q_vector->napi); 2043 napi_schedule(&q_vector->napi);
1865 2044
@@ -1918,13 +2097,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
1918 int work_done = 0; 2097 int work_done = 0;
1919 long r_idx; 2098 long r_idx;
1920 2099
1921 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1922 rx_ring = adapter->rx_ring[r_idx];
1923#ifdef CONFIG_IXGBE_DCA 2100#ifdef CONFIG_IXGBE_DCA
1924 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2101 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1925 ixgbe_update_rx_dca(adapter, rx_ring); 2102 ixgbe_update_dca(q_vector);
1926#endif 2103#endif
1927 2104
2105 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
2106 rx_ring = adapter->rx_ring[r_idx];
2107
1928 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget); 2108 ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
1929 2109
1930 /* If all Rx work done, exit the polling mode */ 2110 /* If all Rx work done, exit the polling mode */
@@ -1958,13 +2138,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1958 long r_idx; 2138 long r_idx;
1959 bool tx_clean_complete = true; 2139 bool tx_clean_complete = true;
1960 2140
2141#ifdef CONFIG_IXGBE_DCA
2142 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2143 ixgbe_update_dca(q_vector);
2144#endif
2145
1961 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues); 2146 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
1962 for (i = 0; i < q_vector->txr_count; i++) { 2147 for (i = 0; i < q_vector->txr_count; i++) {
1963 ring = adapter->tx_ring[r_idx]; 2148 ring = adapter->tx_ring[r_idx];
1964#ifdef CONFIG_IXGBE_DCA
1965 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1966 ixgbe_update_tx_dca(adapter, ring);
1967#endif
1968 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); 2149 tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
1969 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues, 2150 r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
1970 r_idx + 1); 2151 r_idx + 1);
@@ -1977,10 +2158,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
1977 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues); 2158 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1978 for (i = 0; i < q_vector->rxr_count; i++) { 2159 for (i = 0; i < q_vector->rxr_count; i++) {
1979 ring = adapter->rx_ring[r_idx]; 2160 ring = adapter->rx_ring[r_idx];
1980#ifdef CONFIG_IXGBE_DCA
1981 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
1982 ixgbe_update_rx_dca(adapter, ring);
1983#endif
1984 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget); 2161 ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
1985 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues, 2162 r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
1986 r_idx + 1); 2163 r_idx + 1);
@@ -2019,13 +2196,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
2019 int work_done = 0; 2196 int work_done = 0;
2020 long r_idx; 2197 long r_idx;
2021 2198
2022 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2023 tx_ring = adapter->tx_ring[r_idx];
2024#ifdef CONFIG_IXGBE_DCA 2199#ifdef CONFIG_IXGBE_DCA
2025 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) 2200 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
2026 ixgbe_update_tx_dca(adapter, tx_ring); 2201 ixgbe_update_dca(q_vector);
2027#endif 2202#endif
2028 2203
2204 r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
2205 tx_ring = adapter->tx_ring[r_idx];
2206
2029 if (!ixgbe_clean_tx_irq(q_vector, tx_ring)) 2207 if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
2030 work_done = budget; 2208 work_done = budget;
2031 2209
@@ -2046,24 +2224,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
2046 int r_idx) 2224 int r_idx)
2047{ 2225{
2048 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2226 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2227 struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
2049 2228
2050 set_bit(r_idx, q_vector->rxr_idx); 2229 set_bit(r_idx, q_vector->rxr_idx);
2051 q_vector->rxr_count++; 2230 q_vector->rxr_count++;
2231 rx_ring->q_vector = q_vector;
2052} 2232}
2053 2233
2054static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx, 2234static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2055 int t_idx) 2235 int t_idx)
2056{ 2236{
2057 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx]; 2237 struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
2238 struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
2058 2239
2059 set_bit(t_idx, q_vector->txr_idx); 2240 set_bit(t_idx, q_vector->txr_idx);
2060 q_vector->txr_count++; 2241 q_vector->txr_count++;
2242 tx_ring->q_vector = q_vector;
2061} 2243}
2062 2244
2063/** 2245/**
2064 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors 2246 * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
2065 * @adapter: board private structure to initialize 2247 * @adapter: board private structure to initialize
2066 * @vectors: allotted vector count for descriptor rings
2067 * 2248 *
2068 * This function maps descriptor rings to the queue-specific vectors 2249 * This function maps descriptor rings to the queue-specific vectors
2069 * we were allotted through the MSI-X enabling code. Ideally, we'd have 2250 * we were allotted through the MSI-X enabling code. Ideally, we'd have
@@ -2071,9 +2252,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
2071 * group the rings as "efficiently" as possible. You would add new 2252 * group the rings as "efficiently" as possible. You would add new
2072 * mapping configurations in here. 2253 * mapping configurations in here.
2073 **/ 2254 **/
2074static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter, 2255static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
2075 int vectors)
2076{ 2256{
2257 int q_vectors;
2077 int v_start = 0; 2258 int v_start = 0;
2078 int rxr_idx = 0, txr_idx = 0; 2259 int rxr_idx = 0, txr_idx = 0;
2079 int rxr_remaining = adapter->num_rx_queues; 2260 int rxr_remaining = adapter->num_rx_queues;
@@ -2086,11 +2267,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2086 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) 2267 if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
2087 goto out; 2268 goto out;
2088 2269
2270 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2271
2089 /* 2272 /*
2090 * The ideal configuration... 2273 * The ideal configuration...
2091 * We have enough vectors to map one per queue. 2274 * We have enough vectors to map one per queue.
2092 */ 2275 */
2093 if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) { 2276 if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
2094 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++) 2277 for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
2095 map_vector_to_rxq(adapter, v_start, rxr_idx); 2278 map_vector_to_rxq(adapter, v_start, rxr_idx);
2096 2279
@@ -2106,23 +2289,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
2106 * multiple queues per vector. 2289 * multiple queues per vector.
2107 */ 2290 */
2108 /* Re-adjusting *qpv takes care of the remainder. */ 2291 /* Re-adjusting *qpv takes care of the remainder. */
2109 for (i = v_start; i < vectors; i++) { 2292 for (i = v_start; i < q_vectors; i++) {
2110 rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i); 2293 rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
2111 for (j = 0; j < rqpv; j++) { 2294 for (j = 0; j < rqpv; j++) {
2112 map_vector_to_rxq(adapter, i, rxr_idx); 2295 map_vector_to_rxq(adapter, i, rxr_idx);
2113 rxr_idx++; 2296 rxr_idx++;
2114 rxr_remaining--; 2297 rxr_remaining--;
2115 } 2298 }
2116 } 2299 tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
2117 for (i = v_start; i < vectors; i++) {
2118 tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
2119 for (j = 0; j < tqpv; j++) { 2300 for (j = 0; j < tqpv; j++) {
2120 map_vector_to_txq(adapter, i, txr_idx); 2301 map_vector_to_txq(adapter, i, txr_idx);
2121 txr_idx++; 2302 txr_idx++;
2122 txr_remaining--; 2303 txr_remaining--;
2123 } 2304 }
2124 } 2305 }
2125
2126out: 2306out:
2127 return err; 2307 return err;
2128} 2308}
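The ring-to-vector mapping above spreads the remaining queues over the remaining vectors with DIV_ROUND_UP, so earlier vectors absorb the remainder and no vector ends up with more than one extra queue. A standalone sketch of the same distribution (array sizing is the caller's problem; names are illustrative):

#include <linux/kernel.h>	/* DIV_ROUND_UP */

/*
 * Distribute @nqueues over @nvectors; per_vector[v] receives the number
 * of queues assigned to vector v.  E.g. 10 queues on 4 vectors -> 3,3,2,2.
 */
static void my_spread_queues(unsigned int nqueues, unsigned int nvectors,
			     unsigned int *per_vector)
{
	unsigned int v, remaining = nqueues;

	for (v = 0; v < nvectors; v++) {
		per_vector[v] = DIV_ROUND_UP(remaining, nvectors - v);
		remaining -= per_vector[v];
	}
}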
@@ -2144,30 +2324,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2144 /* Decrement for Other and TCP Timer vectors */ 2324 /* Decrement for Other and TCP Timer vectors */
2145 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 2325 q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
2146 2326
2147 /* Map the Tx/Rx rings to the vectors we were allotted. */ 2327 err = ixgbe_map_rings_to_vectors(adapter);
2148 err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
2149 if (err) 2328 if (err)
2150 goto out; 2329 return err;
2151 2330
2152#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \ 2331#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count) \
2153 (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \ 2332 ? &ixgbe_msix_clean_many : \
2154 &ixgbe_msix_clean_many) 2333 (_v)->rxr_count ? &ixgbe_msix_clean_rx : \
2334 (_v)->txr_count ? &ixgbe_msix_clean_tx : \
2335 NULL)
2155 for (vector = 0; vector < q_vectors; vector++) { 2336 for (vector = 0; vector < q_vectors; vector++) {
2156 handler = SET_HANDLER(adapter->q_vector[vector]); 2337 struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
2338 handler = SET_HANDLER(q_vector);
2157 2339
2158 if (handler == &ixgbe_msix_clean_rx) { 2340 if (handler == &ixgbe_msix_clean_rx) {
2159 sprintf(adapter->name[vector], "%s-%s-%d", 2341 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2160 netdev->name, "rx", ri++); 2342 "%s-%s-%d", netdev->name, "rx", ri++);
2161 } else if (handler == &ixgbe_msix_clean_tx) { 2343 } else if (handler == &ixgbe_msix_clean_tx) {
2162 sprintf(adapter->name[vector], "%s-%s-%d", 2344 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2163 netdev->name, "tx", ti++); 2345 "%s-%s-%d", netdev->name, "tx", ti++);
2164 } else 2346 } else if (handler == &ixgbe_msix_clean_many) {
2165 sprintf(adapter->name[vector], "%s-%s-%d", 2347 snprintf(q_vector->name, sizeof(q_vector->name) - 1,
2166 netdev->name, "TxRx", vector); 2348 "%s-%s-%d", netdev->name, "TxRx", ri++);
2167 2349 ti++;
2350 } else {
2351 /* skip this unused q_vector */
2352 continue;
2353 }
2168 err = request_irq(adapter->msix_entries[vector].vector, 2354 err = request_irq(adapter->msix_entries[vector].vector,
2169 handler, 0, adapter->name[vector], 2355 handler, 0, q_vector->name,
2170 adapter->q_vector[vector]); 2356 q_vector);
2171 if (err) { 2357 if (err) {
2172 e_err(probe, "request_irq failed for MSIX interrupt " 2358 e_err(probe, "request_irq failed for MSIX interrupt "
2173 "Error: %d\n", err); 2359 "Error: %d\n", err);
@@ -2175,9 +2361,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
2175 } 2361 }
2176 } 2362 }
2177 2363
2178 sprintf(adapter->name[vector], "%s:lsc", netdev->name); 2364 sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
2179 err = request_irq(adapter->msix_entries[vector].vector, 2365 err = request_irq(adapter->msix_entries[vector].vector,
2180 ixgbe_msix_lsc, 0, adapter->name[vector], netdev); 2366 ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
2181 if (err) { 2367 if (err) {
2182 e_err(probe, "request_irq for msix_lsc failed: %d\n", err); 2368 e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
2183 goto free_queue_irqs; 2369 goto free_queue_irqs;
@@ -2193,17 +2379,16 @@ free_queue_irqs:
2193 pci_disable_msix(adapter->pdev); 2379 pci_disable_msix(adapter->pdev);
2194 kfree(adapter->msix_entries); 2380 kfree(adapter->msix_entries);
2195 adapter->msix_entries = NULL; 2381 adapter->msix_entries = NULL;
2196out:
2197 return err; 2382 return err;
2198} 2383}
2199 2384
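
One detail worth calling out in the hunk above: request_irq() keeps the name pointer it is handed rather than copying the string, so the buffer has to outlive the IRQ registration; that is why the per-vector names move from adapter->name[] into the q_vector itself. A small sketch of the idiom, assuming the usual kernel headers are in scope; struct my_vector and request_named_irq() are illustrative names, not part of the driver:

/*
 * Sketch: give each vector a stable, per-vector name buffer and hand
 * that buffer to request_irq(), which stores the pointer as-is.
 */
struct my_vector {
	char name[IFNAMSIZ + 9];	/* room for a "-TxRx-NN" suffix */
};

static int request_named_irq(unsigned int irq, irq_handler_t handler,
			     struct my_vector *v, const char *dev, int idx)
{
	snprintf(v->name, sizeof(v->name), "%s-TxRx-%d", dev, idx);
	return request_irq(irq, handler, 0, v->name, v);
}
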
2200static void ixgbe_set_itr(struct ixgbe_adapter *adapter) 2385static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2201{ 2386{
2202 struct ixgbe_q_vector *q_vector = adapter->q_vector[0]; 2387 struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
2203 u8 current_itr;
2204 u32 new_itr = q_vector->eitr;
2205 struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; 2388 struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
2206 struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; 2389 struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
2390 u32 new_itr = q_vector->eitr;
2391 u8 current_itr;
2207 2392
2208 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr, 2393 q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
2209 q_vector->tx_itr, 2394 q_vector->tx_itr,
@@ -2233,9 +2418,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
2233 2418
2234 if (new_itr != q_vector->eitr) { 2419 if (new_itr != q_vector->eitr) {
2235 /* do an exponential smoothing */ 2420 /* do an exponential smoothing */
2236 new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100); 2421 new_itr = ((q_vector->eitr * 9) + new_itr)/10;
2237 2422
2238 /* save the algorithm value here, not the smoothed one */ 2423 /* save the algorithm value here */
2239 q_vector->eitr = new_itr; 2424 q_vector->eitr = new_itr;
2240 2425
2241 ixgbe_write_eitr(q_vector); 2426 ixgbe_write_eitr(q_vector);
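
The smoothing change above is worth a second look: both forms compute the same 90/10 weighted average of the previous EITR value and the new sample, but the new expression performs a single division, so less precision is thrown away in integer arithmetic. A small comparison sketch (illustrative helpers, not driver code):

/*
 * Sketch: old vs. new EITR exponential smoothing.  Same 90/10 weighting,
 * but the new form divides once instead of twice.
 */
static unsigned int smooth_old(unsigned int eitr, unsigned int new_itr)
{
	return ((eitr * 90) / 100) + ((new_itr * 10) / 100);
}

static unsigned int smooth_new(unsigned int eitr, unsigned int new_itr)
{
	return ((eitr * 9) + new_itr) / 10;
}
/* e.g. eitr = 8000, new_itr = 956: both yield 7295;
 * eitr = 101, new_itr = 3: old -> 90, new -> 91 (one rounding step fewer). */
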
@@ -2256,12 +2441,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
2256 mask |= IXGBE_EIMS_GPI_SDP0; 2441 mask |= IXGBE_EIMS_GPI_SDP0;
2257 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) 2442 if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
2258 mask |= IXGBE_EIMS_GPI_SDP1; 2443 mask |= IXGBE_EIMS_GPI_SDP1;
2259 if (adapter->hw.mac.type == ixgbe_mac_82599EB) { 2444 switch (adapter->hw.mac.type) {
2445 case ixgbe_mac_82599EB:
2446 case ixgbe_mac_X540:
2260 mask |= IXGBE_EIMS_ECC; 2447 mask |= IXGBE_EIMS_ECC;
2261 mask |= IXGBE_EIMS_GPI_SDP1; 2448 mask |= IXGBE_EIMS_GPI_SDP1;
2262 mask |= IXGBE_EIMS_GPI_SDP2; 2449 mask |= IXGBE_EIMS_GPI_SDP2;
2263 if (adapter->num_vfs) 2450 if (adapter->num_vfs)
2264 mask |= IXGBE_EIMS_MAILBOX; 2451 mask |= IXGBE_EIMS_MAILBOX;
2452 break;
2453 default:
2454 break;
2265 } 2455 }
2266 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 2456 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
2267 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 2457 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2317,13 +2507,20 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
2317 if (eicr & IXGBE_EICR_LSC) 2507 if (eicr & IXGBE_EICR_LSC)
2318 ixgbe_check_lsc(adapter); 2508 ixgbe_check_lsc(adapter);
2319 2509
2320 if (hw->mac.type == ixgbe_mac_82599EB) 2510 switch (hw->mac.type) {
2511 case ixgbe_mac_82599EB:
2321 ixgbe_check_sfp_event(adapter, eicr); 2512 ixgbe_check_sfp_event(adapter, eicr);
2513 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2514 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
2515 adapter->interrupt_event = eicr;
2516 schedule_work(&adapter->check_overtemp_task);
2517 }
2518 break;
2519 default:
2520 break;
2521 }
2322 2522
2323 ixgbe_check_fan_failure(adapter, eicr); 2523 ixgbe_check_fan_failure(adapter, eicr);
2324 if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
2325 ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
2326 schedule_work(&adapter->check_overtemp_task);
2327 2524
2328 if (napi_schedule_prep(&(q_vector->napi))) { 2525 if (napi_schedule_prep(&(q_vector->napi))) {
2329 adapter->tx_ring[0]->total_packets = 0; 2526 adapter->tx_ring[0]->total_packets = 0;
@@ -2416,14 +2613,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
2416 **/ 2613 **/
2417static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) 2614static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
2418{ 2615{
2419 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2616 switch (adapter->hw.mac.type) {
2617 case ixgbe_mac_82598EB:
2420 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0); 2618 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
2421 } else { 2619 break;
2620 case ixgbe_mac_82599EB:
2621 case ixgbe_mac_X540:
2422 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); 2622 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
2423 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0); 2623 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
2424 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); 2624 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
2425 if (adapter->num_vfs > 32) 2625 if (adapter->num_vfs > 32)
2426 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0); 2626 IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
2627 break;
2628 default:
2629 break;
2427 } 2630 }
2428 IXGBE_WRITE_FLUSH(&adapter->hw); 2631 IXGBE_WRITE_FLUSH(&adapter->hw);
2429 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { 2632 if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2469,7 +2672,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2469 u64 tdba = ring->dma; 2672 u64 tdba = ring->dma;
2470 int wait_loop = 10; 2673 int wait_loop = 10;
2471 u32 txdctl; 2674 u32 txdctl;
2472 u16 reg_idx = ring->reg_idx; 2675 u8 reg_idx = ring->reg_idx;
2473 2676
2474 /* disable queue to avoid issues while updating state */ 2677 /* disable queue to avoid issues while updating state */
2475 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); 2678 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2484,8 +2687,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2484 ring->count * sizeof(union ixgbe_adv_tx_desc)); 2687 ring->count * sizeof(union ixgbe_adv_tx_desc));
2485 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); 2688 IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
2486 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); 2689 IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
2487 ring->head = IXGBE_TDH(reg_idx); 2690 ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
2488 ring->tail = IXGBE_TDT(reg_idx);
2489 2691
2490 /* configure fetching thresholds */ 2692 /* configure fetching thresholds */
2491 if (adapter->rx_itr_setting == 0) { 2693 if (adapter->rx_itr_setting == 0) {
@@ -2501,7 +2703,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
2501 } 2703 }
2502 2704
2503 /* reinitialize flowdirector state */ 2705 /* reinitialize flowdirector state */
2504 set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state); 2706 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
2707 adapter->atr_sample_rate) {
2708 ring->atr_sample_rate = adapter->atr_sample_rate;
2709 ring->atr_count = 0;
2710 set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
2711 } else {
2712 ring->atr_sample_rate = 0;
2713 }
2714
2715 clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
2505 2716
2506 /* enable queue */ 2717 /* enable queue */
2507 txdctl |= IXGBE_TXDCTL_ENABLE; 2718 txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2592,16 +2803,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2592 struct ixgbe_ring *rx_ring) 2803 struct ixgbe_ring *rx_ring)
2593{ 2804{
2594 u32 srrctl; 2805 u32 srrctl;
2595 int index; 2806 u8 reg_idx = rx_ring->reg_idx;
2596 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2597 2807
2598 index = rx_ring->reg_idx; 2808 switch (adapter->hw.mac.type) {
2599 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 2809 case ixgbe_mac_82598EB: {
2600 unsigned long mask; 2810 struct ixgbe_ring_feature *feature = adapter->ring_feature;
2601 mask = (unsigned long) feature[RING_F_RSS].mask; 2811 const int mask = feature[RING_F_RSS].mask;
2602 index = index & mask; 2812 reg_idx = reg_idx & mask;
2603 } 2813 }
2604 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index)); 2814 break;
2815 case ixgbe_mac_82599EB:
2816 case ixgbe_mac_X540:
2817 default:
2818 break;
2819 }
2820
2821 srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
2605 2822
2606 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK; 2823 srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
2607 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK; 2824 srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2611,7 +2828,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2611 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) & 2828 srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
2612 IXGBE_SRRCTL_BSIZEHDR_MASK; 2829 IXGBE_SRRCTL_BSIZEHDR_MASK;
2613 2830
2614 if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2831 if (ring_is_ps_enabled(rx_ring)) {
2615#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER 2832#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
2616 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; 2833 srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
2617#else 2834#else
@@ -2624,7 +2841,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
2624 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; 2841 srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
2625 } 2842 }
2626 2843
2627 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl); 2844 IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
2628} 2845}
2629 2846
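
As a side note on the SRRCTL writes above: the packet-buffer size field is programmed in 1 KB units, which is what the right shift by IXGBE_SRRCTL_BSIZEPKT_SHIFT accomplishes before the value is or-ed into the register. A sketch of that encoding, assuming the ixgbe register definitions are in scope (the helper name is illustrative):

/*
 * Sketch: encode a buffer length in bytes into the SRRCTL packet-buffer
 * size field, which the hardware interprets in 1 KB granularity.
 */
static u32 srrctl_bsizepkt(u32 buf_len_bytes)
{
	return (buf_len_bytes >> IXGBE_SRRCTL_BSIZEPKT_SHIFT) &
	       IXGBE_SRRCTL_BSIZEPKT_MASK;
}
/* e.g. a 2048-byte buffer becomes the value 2 in the register field */
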
2630static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) 2847static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2694,19 +2911,36 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
2694} 2911}
2695 2912
2696/** 2913/**
2914 * ixgbe_clear_rscctl - disable RSC for the indicated ring
2915 * @adapter: address of board private structure
2916 * @ring: structure containing ring specific data
2917 **/
2918void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
2919 struct ixgbe_ring *ring)
2920{
2921 struct ixgbe_hw *hw = &adapter->hw;
2922 u32 rscctrl;
2923 u8 reg_idx = ring->reg_idx;
2924
2925 rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
2926 rscctrl &= ~IXGBE_RSCCTL_RSCEN;
2927 IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
2928}
2929
2930/**
2697 * ixgbe_configure_rscctl - enable RSC for the indicated ring 2931 * ixgbe_configure_rscctl - enable RSC for the indicated ring
2698 * @adapter: address of board private structure 2932 * @adapter: address of board private structure
2699 * @index: index of ring to set 2933 * @index: index of ring to set
2700 **/ 2934 **/
2701static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, 2935void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2702 struct ixgbe_ring *ring) 2936 struct ixgbe_ring *ring)
2703{ 2937{
2704 struct ixgbe_hw *hw = &adapter->hw; 2938 struct ixgbe_hw *hw = &adapter->hw;
2705 u32 rscctrl; 2939 u32 rscctrl;
2706 int rx_buf_len; 2940 int rx_buf_len;
2707 u16 reg_idx = ring->reg_idx; 2941 u8 reg_idx = ring->reg_idx;
2708 2942
2709 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) 2943 if (!ring_is_rsc_enabled(ring))
2710 return; 2944 return;
2711 2945
2712 rx_buf_len = ring->rx_buf_len; 2946 rx_buf_len = ring->rx_buf_len;
@@ -2717,7 +2951,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
2717 * total size of max desc * buf_len is not greater 2951 * total size of max desc * buf_len is not greater
2718 * than 65535 2952 * than 65535
2719 */ 2953 */
2720 if (ring->flags & IXGBE_RING_RX_PS_ENABLED) { 2954 if (ring_is_ps_enabled(ring)) {
2721#if (MAX_SKB_FRAGS > 16) 2955#if (MAX_SKB_FRAGS > 16)
2722 rscctrl |= IXGBE_RSCCTL_MAXDESC_16; 2956 rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
2723#elif (MAX_SKB_FRAGS > 8) 2957#elif (MAX_SKB_FRAGS > 8)
@@ -2770,9 +3004,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2770 struct ixgbe_ring *ring) 3004 struct ixgbe_ring *ring)
2771{ 3005{
2772 struct ixgbe_hw *hw = &adapter->hw; 3006 struct ixgbe_hw *hw = &adapter->hw;
2773 int reg_idx = ring->reg_idx;
2774 int wait_loop = IXGBE_MAX_RX_DESC_POLL; 3007 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
2775 u32 rxdctl; 3008 u32 rxdctl;
3009 u8 reg_idx = ring->reg_idx;
2776 3010
2777 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ 3011 /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
2778 if (hw->mac.type == ixgbe_mac_82598EB && 3012 if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2790,19 +3024,47 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
2790 } 3024 }
2791} 3025}
2792 3026
3027void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter,
3028 struct ixgbe_ring *ring)
3029{
3030 struct ixgbe_hw *hw = &adapter->hw;
3031 int wait_loop = IXGBE_MAX_RX_DESC_POLL;
3032 u32 rxdctl;
3033 u8 reg_idx = ring->reg_idx;
3034
3035 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3036 rxdctl &= ~IXGBE_RXDCTL_ENABLE;
3037
3038 /* write value back with RXDCTL.ENABLE bit cleared */
3039 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
3040
3041 if (hw->mac.type == ixgbe_mac_82598EB &&
3042 !(IXGBE_READ_REG(hw, IXGBE_LINKS) & IXGBE_LINKS_UP))
3043 return;
3044
3045 /* the hardware may take up to 100us to really disable the rx queue */
3046 do {
3047 udelay(10);
3048 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
3049 } while (--wait_loop && (rxdctl & IXGBE_RXDCTL_ENABLE));
3050
3051 if (!wait_loop) {
3052 e_err(drv, "RXDCTL.ENABLE on Rx queue %d not cleared within "
3053 "the polling period\n", reg_idx);
3054 }
3055}
3056
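
The new ixgbe_disable_rx_queue() above follows a write-then-poll shape: clear RXDCTL.ENABLE, then spin in a bounded udelay loop until the hardware acknowledges the disable. A generic sketch of that idiom, assuming kernel context; read_reg()/write_reg() stand in for the real register accessors and are not driver functions:

/*
 * Sketch: bounded poll-for-clear on a hardware bit.  The 10us step and
 * small iteration budget mirror the IXGBE_MAX_RX_DESC_POLL style above.
 */
static int wait_bit_cleared(u32 reg, u32 bit, int max_polls)
{
	u32 val = read_reg(reg);

	write_reg(reg, val & ~bit);	/* request the disable */

	do {
		udelay(10);
		val = read_reg(reg);
	} while (--max_polls && (val & bit));

	return max_polls ? 0 : -ETIMEDOUT;	/* 0 on success */
}
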
2793void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, 3057void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2794 struct ixgbe_ring *ring) 3058 struct ixgbe_ring *ring)
2795{ 3059{
2796 struct ixgbe_hw *hw = &adapter->hw; 3060 struct ixgbe_hw *hw = &adapter->hw;
2797 u64 rdba = ring->dma; 3061 u64 rdba = ring->dma;
2798 u32 rxdctl; 3062 u32 rxdctl;
2799 u16 reg_idx = ring->reg_idx; 3063 u8 reg_idx = ring->reg_idx;
2800 3064
2801 /* disable queue to avoid issues while updating state */ 3065 /* disable queue to avoid issues while updating state */
2802 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); 3066 rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
2803 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), 3067 ixgbe_disable_rx_queue(adapter, ring);
2804 rxdctl & ~IXGBE_RXDCTL_ENABLE);
2805 IXGBE_WRITE_FLUSH(hw);
2806 3068
2807 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); 3069 IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32)));
2808 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); 3070 IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32));
@@ -2810,8 +3072,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2810 ring->count * sizeof(union ixgbe_adv_rx_desc)); 3072 ring->count * sizeof(union ixgbe_adv_rx_desc));
2811 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); 3073 IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
2812 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); 3074 IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
2813 ring->head = IXGBE_RDH(reg_idx); 3075 ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
2814 ring->tail = IXGBE_RDT(reg_idx);
2815 3076
2816 ixgbe_configure_srrctl(adapter, ring); 3077 ixgbe_configure_srrctl(adapter, ring);
2817 ixgbe_configure_rscctl(adapter, ring); 3078 ixgbe_configure_rscctl(adapter, ring);
@@ -2833,7 +3094,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
2833 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); 3094 IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
2834 3095
2835 ixgbe_rx_desc_queue_enable(adapter, ring); 3096 ixgbe_rx_desc_queue_enable(adapter, ring);
2836 ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring)); 3097 ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
2837} 3098}
2838 3099
2839static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) 3100static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2899,6 +3160,9 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter)
2899 3160
2900 /* enable Tx loopback for VF/PF communication */ 3161 /* enable Tx loopback for VF/PF communication */
2901 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); 3162 IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN);
3163 /* Enable MAC Anti-Spoofing */
3164 hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0),
3165 adapter->num_vfs);
2902} 3166}
2903 3167
2904static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) 3168static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
@@ -2956,24 +3220,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
2956 rx_ring->rx_buf_len = rx_buf_len; 3220 rx_ring->rx_buf_len = rx_buf_len;
2957 3221
2958 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED) 3222 if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
2959 rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED; 3223 set_ring_ps_enabled(rx_ring);
3224 else
3225 clear_ring_ps_enabled(rx_ring);
3226
3227 if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
3228 set_ring_rsc_enabled(rx_ring);
2960 else 3229 else
2961 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 3230 clear_ring_rsc_enabled(rx_ring);
2962 3231
2963#ifdef IXGBE_FCOE 3232#ifdef IXGBE_FCOE
2964 if (netdev->features & NETIF_F_FCOE_MTU) { 3233 if (netdev->features & NETIF_F_FCOE_MTU) {
2965 struct ixgbe_ring_feature *f; 3234 struct ixgbe_ring_feature *f;
2966 f = &adapter->ring_feature[RING_F_FCOE]; 3235 f = &adapter->ring_feature[RING_F_FCOE];
2967 if ((i >= f->mask) && (i < f->mask + f->indices)) { 3236 if ((i >= f->mask) && (i < f->mask + f->indices)) {
2968 rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED; 3237 clear_ring_ps_enabled(rx_ring);
2969 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE) 3238 if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
2970 rx_ring->rx_buf_len = 3239 rx_ring->rx_buf_len =
2971 IXGBE_FCOE_JUMBO_FRAME_SIZE; 3240 IXGBE_FCOE_JUMBO_FRAME_SIZE;
3241 } else if (!ring_is_rsc_enabled(rx_ring) &&
3242 !ring_is_ps_enabled(rx_ring)) {
3243 rx_ring->rx_buf_len =
3244 IXGBE_FCOE_JUMBO_FRAME_SIZE;
2972 } 3245 }
2973 } 3246 }
2974#endif /* IXGBE_FCOE */ 3247#endif /* IXGBE_FCOE */
2975 } 3248 }
2976
2977} 3249}
2978 3250
2979static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) 3251static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -2996,6 +3268,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
2996 rdrxctl |= IXGBE_RDRXCTL_MVMEN; 3268 rdrxctl |= IXGBE_RDRXCTL_MVMEN;
2997 break; 3269 break;
2998 case ixgbe_mac_82599EB: 3270 case ixgbe_mac_82599EB:
3271 case ixgbe_mac_X540:
2999 /* Disable RSC for ACK packets */ 3272 /* Disable RSC for ACK packets */
3000 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, 3273 IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
3001 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); 3274 (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3123,6 +3396,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
3123 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3396 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3124 break; 3397 break;
3125 case ixgbe_mac_82599EB: 3398 case ixgbe_mac_82599EB:
3399 case ixgbe_mac_X540:
3126 for (i = 0; i < adapter->num_rx_queues; i++) { 3400 for (i = 0; i < adapter->num_rx_queues; i++) {
3127 j = adapter->rx_ring[i]->reg_idx; 3401 j = adapter->rx_ring[i]->reg_idx;
3128 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3402 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3152,6 +3426,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
3152 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); 3426 IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
3153 break; 3427 break;
3154 case ixgbe_mac_82599EB: 3428 case ixgbe_mac_82599EB:
3429 case ixgbe_mac_X540:
3155 for (i = 0; i < adapter->num_rx_queues; i++) { 3430 for (i = 0; i < adapter->num_rx_queues; i++) {
3156 j = adapter->rx_ring[i]->reg_idx; 3431 j = adapter->rx_ring[i]->reg_idx;
3157 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); 3432 vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3349,8 +3624,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3349{ 3624{
3350 struct ixgbe_hw *hw = &adapter->hw; 3625 struct ixgbe_hw *hw = &adapter->hw;
3351 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 3626 int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
3352 u32 txdctl;
3353 int i, j;
3354 3627
3355 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { 3628 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
3356 if (hw->mac.type == ixgbe_mac_82598EB) 3629 if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3366,25 +3639,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3366 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); 3639 max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
3367#endif 3640#endif
3368 3641
3369 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, 3642 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3370 DCB_TX_CONFIG); 3643 DCB_TX_CONFIG);
3371 ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame, 3644 ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
3372 DCB_RX_CONFIG); 3645 DCB_RX_CONFIG);
3373 3646
3374 /* reconfigure the hardware */
3375 ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
3376
3377 for (i = 0; i < adapter->num_tx_queues; i++) {
3378 j = adapter->tx_ring[i]->reg_idx;
3379 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
3380 /* PThresh workaround for Tx hang with DFP enabled. */
3381 txdctl |= 32;
3382 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
3383 }
3384 /* Enable VLAN tag insert/strip */ 3647 /* Enable VLAN tag insert/strip */
3385 adapter->netdev->features |= NETIF_F_HW_VLAN_RX; 3648 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3386 3649
3387 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3650 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3651
3652 /* reconfigure the hardware */
3653 ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
3388} 3654}
3389 3655
3390#endif 3656#endif
@@ -3516,8 +3782,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
3516 case ixgbe_mac_82598EB: 3782 case ixgbe_mac_82598EB:
3517 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); 3783 IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
3518 break; 3784 break;
3519 default:
3520 case ixgbe_mac_82599EB: 3785 case ixgbe_mac_82599EB:
3786 case ixgbe_mac_X540:
3787 default:
3521 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); 3788 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
3522 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); 3789 IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
3523 break; 3790 break;
@@ -3561,13 +3828,24 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3561 else 3828 else
3562 ixgbe_configure_msi_and_legacy(adapter); 3829 ixgbe_configure_msi_and_legacy(adapter);
3563 3830
 3564 /* enable the optics */ 3831 /* enable the optics for both multi-speed fiber and 82599 SFP+ fiber */
3565 if (hw->phy.multispeed_fiber) 3832 if (hw->mac.ops.enable_tx_laser &&
3833 ((hw->phy.multispeed_fiber) ||
3834 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
3835 (hw->mac.type == ixgbe_mac_82599EB))))
3566 hw->mac.ops.enable_tx_laser(hw); 3836 hw->mac.ops.enable_tx_laser(hw);
3567 3837
3568 clear_bit(__IXGBE_DOWN, &adapter->state); 3838 clear_bit(__IXGBE_DOWN, &adapter->state);
3569 ixgbe_napi_enable_all(adapter); 3839 ixgbe_napi_enable_all(adapter);
3570 3840
3841 if (ixgbe_is_sfp(hw)) {
3842 ixgbe_sfp_link_config(adapter);
3843 } else {
3844 err = ixgbe_non_sfp_link_config(hw);
3845 if (err)
3846 e_err(probe, "link_config FAILED %d\n", err);
3847 }
3848
3571 /* clear any pending interrupts, may auto mask */ 3849 /* clear any pending interrupts, may auto mask */
3572 IXGBE_READ_REG(hw, IXGBE_EICR); 3850 IXGBE_READ_REG(hw, IXGBE_EICR);
3573 ixgbe_irq_enable(adapter, true, true); 3851 ixgbe_irq_enable(adapter, true, true);
@@ -3590,26 +3868,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
3590 * If we're not hot-pluggable SFP+, we just need to configure link 3868 * If we're not hot-pluggable SFP+, we just need to configure link
3591 * and bring it up. 3869 * and bring it up.
3592 */ 3870 */
3593 if (hw->phy.type == ixgbe_phy_unknown) { 3871 if (hw->phy.type == ixgbe_phy_unknown)
3594 err = hw->phy.ops.identify(hw); 3872 schedule_work(&adapter->sfp_config_module_task);
3595 if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
3596 /*
3597 * Take the device down and schedule the sfp tasklet
3598 * which will unregister_netdev and log it.
3599 */
3600 ixgbe_down(adapter);
3601 schedule_work(&adapter->sfp_config_module_task);
3602 return err;
3603 }
3604 }
3605
3606 if (ixgbe_is_sfp(hw)) {
3607 ixgbe_sfp_link_config(adapter);
3608 } else {
3609 err = ixgbe_non_sfp_link_config(hw);
3610 if (err)
3611 e_err(probe, "link_config FAILED %d\n", err);
3612 }
3613 3873
3614 /* enable transmits */ 3874 /* enable transmits */
3615 netif_tx_start_all_queues(adapter->netdev); 3875 netif_tx_start_all_queues(adapter->netdev);
@@ -3687,15 +3947,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
3687 3947
3688/** 3948/**
3689 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue 3949 * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
3690 * @adapter: board private structure
3691 * @rx_ring: ring to free buffers from 3950 * @rx_ring: ring to free buffers from
3692 **/ 3951 **/
3693static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter, 3952static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
3694 struct ixgbe_ring *rx_ring)
3695{ 3953{
3696 struct pci_dev *pdev = adapter->pdev; 3954 struct device *dev = rx_ring->dev;
3697 unsigned long size; 3955 unsigned long size;
3698 unsigned int i; 3956 u16 i;
3699 3957
3700 /* ring already cleared, nothing to do */ 3958 /* ring already cleared, nothing to do */
3701 if (!rx_ring->rx_buffer_info) 3959 if (!rx_ring->rx_buffer_info)
@@ -3707,7 +3965,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3707 3965
3708 rx_buffer_info = &rx_ring->rx_buffer_info[i]; 3966 rx_buffer_info = &rx_ring->rx_buffer_info[i];
3709 if (rx_buffer_info->dma) { 3967 if (rx_buffer_info->dma) {
3710 dma_unmap_single(&pdev->dev, rx_buffer_info->dma, 3968 dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
3711 rx_ring->rx_buf_len, 3969 rx_ring->rx_buf_len,
3712 DMA_FROM_DEVICE); 3970 DMA_FROM_DEVICE);
3713 rx_buffer_info->dma = 0; 3971 rx_buffer_info->dma = 0;
@@ -3718,7 +3976,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3718 do { 3976 do {
3719 struct sk_buff *this = skb; 3977 struct sk_buff *this = skb;
3720 if (IXGBE_RSC_CB(this)->delay_unmap) { 3978 if (IXGBE_RSC_CB(this)->delay_unmap) {
3721 dma_unmap_single(&pdev->dev, 3979 dma_unmap_single(dev,
3722 IXGBE_RSC_CB(this)->dma, 3980 IXGBE_RSC_CB(this)->dma,
3723 rx_ring->rx_buf_len, 3981 rx_ring->rx_buf_len,
3724 DMA_FROM_DEVICE); 3982 DMA_FROM_DEVICE);
@@ -3732,7 +3990,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3732 if (!rx_buffer_info->page) 3990 if (!rx_buffer_info->page)
3733 continue; 3991 continue;
3734 if (rx_buffer_info->page_dma) { 3992 if (rx_buffer_info->page_dma) {
3735 dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma, 3993 dma_unmap_page(dev, rx_buffer_info->page_dma,
3736 PAGE_SIZE / 2, DMA_FROM_DEVICE); 3994 PAGE_SIZE / 2, DMA_FROM_DEVICE);
3737 rx_buffer_info->page_dma = 0; 3995 rx_buffer_info->page_dma = 0;
3738 } 3996 }
@@ -3749,24 +4007,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
3749 4007
3750 rx_ring->next_to_clean = 0; 4008 rx_ring->next_to_clean = 0;
3751 rx_ring->next_to_use = 0; 4009 rx_ring->next_to_use = 0;
3752
3753 if (rx_ring->head)
3754 writel(0, adapter->hw.hw_addr + rx_ring->head);
3755 if (rx_ring->tail)
3756 writel(0, adapter->hw.hw_addr + rx_ring->tail);
3757} 4010}
3758 4011
3759/** 4012/**
3760 * ixgbe_clean_tx_ring - Free Tx Buffers 4013 * ixgbe_clean_tx_ring - Free Tx Buffers
3761 * @adapter: board private structure
3762 * @tx_ring: ring to be cleaned 4014 * @tx_ring: ring to be cleaned
3763 **/ 4015 **/
3764static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter, 4016static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
3765 struct ixgbe_ring *tx_ring)
3766{ 4017{
3767 struct ixgbe_tx_buffer *tx_buffer_info; 4018 struct ixgbe_tx_buffer *tx_buffer_info;
3768 unsigned long size; 4019 unsigned long size;
3769 unsigned int i; 4020 u16 i;
3770 4021
3771 /* ring already cleared, nothing to do */ 4022 /* ring already cleared, nothing to do */
3772 if (!tx_ring->tx_buffer_info) 4023 if (!tx_ring->tx_buffer_info)
@@ -3775,7 +4026,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3775 /* Free all the Tx ring sk_buffs */ 4026 /* Free all the Tx ring sk_buffs */
3776 for (i = 0; i < tx_ring->count; i++) { 4027 for (i = 0; i < tx_ring->count; i++) {
3777 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 4028 tx_buffer_info = &tx_ring->tx_buffer_info[i];
3778 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 4029 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
3779 } 4030 }
3780 4031
3781 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 4032 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3786,11 +4037,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
3786 4037
3787 tx_ring->next_to_use = 0; 4038 tx_ring->next_to_use = 0;
3788 tx_ring->next_to_clean = 0; 4039 tx_ring->next_to_clean = 0;
3789
3790 if (tx_ring->head)
3791 writel(0, adapter->hw.hw_addr + tx_ring->head);
3792 if (tx_ring->tail)
3793 writel(0, adapter->hw.hw_addr + tx_ring->tail);
3794} 4040}
3795 4041
3796/** 4042/**
@@ -3802,7 +4048,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
3802 int i; 4048 int i;
3803 4049
3804 for (i = 0; i < adapter->num_rx_queues; i++) 4050 for (i = 0; i < adapter->num_rx_queues; i++)
3805 ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]); 4051 ixgbe_clean_rx_ring(adapter->rx_ring[i]);
3806} 4052}
3807 4053
3808/** 4054/**
@@ -3814,7 +4060,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
3814 int i; 4060 int i;
3815 4061
3816 for (i = 0; i < adapter->num_tx_queues; i++) 4062 for (i = 0; i < adapter->num_tx_queues; i++)
3817 ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]); 4063 ixgbe_clean_tx_ring(adapter->tx_ring[i]);
3818} 4064}
3819 4065
3820void ixgbe_down(struct ixgbe_adapter *adapter) 4066void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3823,7 +4069,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3823 struct ixgbe_hw *hw = &adapter->hw; 4069 struct ixgbe_hw *hw = &adapter->hw;
3824 u32 rxctrl; 4070 u32 rxctrl;
3825 u32 txdctl; 4071 u32 txdctl;
3826 int i, j; 4072 int i;
3827 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS; 4073 int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
3828 4074
3829 /* signal that we are down to the interrupt handler */ 4075 /* signal that we are down to the interrupt handler */
@@ -3846,7 +4092,11 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3846 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); 4092 rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
3847 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); 4093 IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN);
3848 4094
3849 IXGBE_WRITE_FLUSH(hw); 4095 /* disable all enabled rx queues */
4096 for (i = 0; i < adapter->num_rx_queues; i++)
4097 /* this call also flushes the previous write */
4098 ixgbe_disable_rx_queue(adapter, adapter->rx_ring[i]);
4099
3850 msleep(10); 4100 msleep(10);
3851 4101
3852 netif_tx_stop_all_queues(netdev); 4102 netif_tx_stop_all_queues(netdev);
@@ -3881,26 +4131,36 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
3881 4131
3882 /* disable transmits in the hardware now that interrupts are off */ 4132 /* disable transmits in the hardware now that interrupts are off */
3883 for (i = 0; i < adapter->num_tx_queues; i++) { 4133 for (i = 0; i < adapter->num_tx_queues; i++) {
3884 j = adapter->tx_ring[i]->reg_idx; 4134 u8 reg_idx = adapter->tx_ring[i]->reg_idx;
3885 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j)); 4135 txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
3886 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), 4136 IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
3887 (txdctl & ~IXGBE_TXDCTL_ENABLE)); 4137 (txdctl & ~IXGBE_TXDCTL_ENABLE));
3888 } 4138 }
3889 /* Disable the Tx DMA engine on 82599 */ 4139 /* Disable the Tx DMA engine on 82599 */
3890 if (hw->mac.type == ixgbe_mac_82599EB) 4140 switch (hw->mac.type) {
4141 case ixgbe_mac_82599EB:
4142 case ixgbe_mac_X540:
3891 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, 4143 IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
3892 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & 4144 (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
3893 ~IXGBE_DMATXCTL_TE)); 4145 ~IXGBE_DMATXCTL_TE));
3894 4146 break;
3895 /* power down the optics */ 4147 default:
3896 if (hw->phy.multispeed_fiber) 4148 break;
3897 hw->mac.ops.disable_tx_laser(hw); 4149 }
3898 4150
3899 /* clear n-tuple filters that are cached */ 4151 /* clear n-tuple filters that are cached */
3900 ethtool_ntuple_flush(netdev); 4152 ethtool_ntuple_flush(netdev);
3901 4153
3902 if (!pci_channel_offline(adapter->pdev)) 4154 if (!pci_channel_offline(adapter->pdev))
3903 ixgbe_reset(adapter); 4155 ixgbe_reset(adapter);
4156
4157 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
4158 if (hw->mac.ops.disable_tx_laser &&
4159 ((hw->phy.multispeed_fiber) ||
4160 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
4161 (hw->mac.type == ixgbe_mac_82599EB))))
4162 hw->mac.ops.disable_tx_laser(hw);
4163
3904 ixgbe_clean_all_tx_rings(adapter); 4164 ixgbe_clean_all_tx_rings(adapter);
3905 ixgbe_clean_all_rx_rings(adapter); 4165 ixgbe_clean_all_rx_rings(adapter);
3906 4166
@@ -3925,10 +4185,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
3925 int tx_clean_complete, work_done = 0; 4185 int tx_clean_complete, work_done = 0;
3926 4186
3927#ifdef CONFIG_IXGBE_DCA 4187#ifdef CONFIG_IXGBE_DCA
3928 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 4188 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
3929 ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]); 4189 ixgbe_update_dca(q_vector);
3930 ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
3931 }
3932#endif 4190#endif
3933 4191
3934 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]); 4192 tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -3956,6 +4214,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
3956{ 4214{
3957 struct ixgbe_adapter *adapter = netdev_priv(netdev); 4215 struct ixgbe_adapter *adapter = netdev_priv(netdev);
3958 4216
4217 adapter->tx_timeout_count++;
4218
3959 /* Do the reset outside of interrupt context */ 4219 /* Do the reset outside of interrupt context */
3960 schedule_work(&adapter->reset_task); 4220 schedule_work(&adapter->reset_task);
3961} 4221}
@@ -3970,8 +4230,6 @@ static void ixgbe_reset_task(struct work_struct *work)
3970 test_bit(__IXGBE_RESETTING, &adapter->state)) 4230 test_bit(__IXGBE_RESETTING, &adapter->state))
3971 return; 4231 return;
3972 4232
3973 adapter->tx_timeout_count++;
3974
3975 ixgbe_dump(adapter); 4233 ixgbe_dump(adapter);
3976 netdev_err(adapter->netdev, "Reset adapter\n"); 4234 netdev_err(adapter->netdev, "Reset adapter\n");
3977 ixgbe_reinit_locked(adapter); 4235 ixgbe_reinit_locked(adapter);
@@ -4221,19 +4479,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
4221static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) 4479static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
4222{ 4480{
4223 int i; 4481 int i;
4224 bool ret = false;
4225 4482
4226 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4483 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
4227 for (i = 0; i < adapter->num_rx_queues; i++) 4484 return false;
4228 adapter->rx_ring[i]->reg_idx = i;
4229 for (i = 0; i < adapter->num_tx_queues; i++)
4230 adapter->tx_ring[i]->reg_idx = i;
4231 ret = true;
4232 } else {
4233 ret = false;
4234 }
4235 4485
4236 return ret; 4486 for (i = 0; i < adapter->num_rx_queues; i++)
4487 adapter->rx_ring[i]->reg_idx = i;
4488 for (i = 0; i < adapter->num_tx_queues; i++)
4489 adapter->tx_ring[i]->reg_idx = i;
4490
4491 return true;
4237} 4492}
4238 4493
4239#ifdef CONFIG_IXGBE_DCB 4494#ifdef CONFIG_IXGBE_DCB
@@ -4250,71 +4505,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
4250 bool ret = false; 4505 bool ret = false;
4251 int dcb_i = adapter->ring_feature[RING_F_DCB].indices; 4506 int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
4252 4507
4253 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4508 if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
4254 if (adapter->hw.mac.type == ixgbe_mac_82598EB) { 4509 return false;
4255 /* the number of queues is assumed to be symmetric */
4256 for (i = 0; i < dcb_i; i++) {
4257 adapter->rx_ring[i]->reg_idx = i << 3;
4258 adapter->tx_ring[i]->reg_idx = i << 2;
4259 }
4260 ret = true;
4261 } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
4262 if (dcb_i == 8) {
4263 /*
4264 * Tx TC0 starts at: descriptor queue 0
4265 * Tx TC1 starts at: descriptor queue 32
4266 * Tx TC2 starts at: descriptor queue 64
4267 * Tx TC3 starts at: descriptor queue 80
4268 * Tx TC4 starts at: descriptor queue 96
4269 * Tx TC5 starts at: descriptor queue 104
4270 * Tx TC6 starts at: descriptor queue 112
4271 * Tx TC7 starts at: descriptor queue 120
4272 *
4273 * Rx TC0-TC7 are offset by 16 queues each
4274 */
4275 for (i = 0; i < 3; i++) {
4276 adapter->tx_ring[i]->reg_idx = i << 5;
4277 adapter->rx_ring[i]->reg_idx = i << 4;
4278 }
4279 for ( ; i < 5; i++) {
4280 adapter->tx_ring[i]->reg_idx =
4281 ((i + 2) << 4);
4282 adapter->rx_ring[i]->reg_idx = i << 4;
4283 }
4284 for ( ; i < dcb_i; i++) {
4285 adapter->tx_ring[i]->reg_idx =
4286 ((i + 8) << 3);
4287 adapter->rx_ring[i]->reg_idx = i << 4;
4288 }
4289 4510
4290 ret = true; 4511 /* the number of queues is assumed to be symmetric */
4291 } else if (dcb_i == 4) { 4512 switch (adapter->hw.mac.type) {
4292 /* 4513 case ixgbe_mac_82598EB:
4293 * Tx TC0 starts at: descriptor queue 0 4514 for (i = 0; i < dcb_i; i++) {
4294 * Tx TC1 starts at: descriptor queue 64 4515 adapter->rx_ring[i]->reg_idx = i << 3;
4295 * Tx TC2 starts at: descriptor queue 96 4516 adapter->tx_ring[i]->reg_idx = i << 2;
4296 * Tx TC3 starts at: descriptor queue 112 4517 }
4297 * 4518 ret = true;
4298 * Rx TC0-TC3 are offset by 32 queues each 4519 break;
4299 */ 4520 case ixgbe_mac_82599EB:
4300 adapter->tx_ring[0]->reg_idx = 0; 4521 case ixgbe_mac_X540:
4301 adapter->tx_ring[1]->reg_idx = 64; 4522 if (dcb_i == 8) {
4302 adapter->tx_ring[2]->reg_idx = 96; 4523 /*
4303 adapter->tx_ring[3]->reg_idx = 112; 4524 * Tx TC0 starts at: descriptor queue 0
4304 for (i = 0 ; i < dcb_i; i++) 4525 * Tx TC1 starts at: descriptor queue 32
4305 adapter->rx_ring[i]->reg_idx = i << 5; 4526 * Tx TC2 starts at: descriptor queue 64
4306 4527 * Tx TC3 starts at: descriptor queue 80
4307 ret = true; 4528 * Tx TC4 starts at: descriptor queue 96
4308 } else { 4529 * Tx TC5 starts at: descriptor queue 104
4309 ret = false; 4530 * Tx TC6 starts at: descriptor queue 112
4531 * Tx TC7 starts at: descriptor queue 120
4532 *
4533 * Rx TC0-TC7 are offset by 16 queues each
4534 */
4535 for (i = 0; i < 3; i++) {
4536 adapter->tx_ring[i]->reg_idx = i << 5;
4537 adapter->rx_ring[i]->reg_idx = i << 4;
4310 } 4538 }
4311 } else { 4539 for ( ; i < 5; i++) {
4312 ret = false; 4540 adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
4541 adapter->rx_ring[i]->reg_idx = i << 4;
4542 }
4543 for ( ; i < dcb_i; i++) {
4544 adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
4545 adapter->rx_ring[i]->reg_idx = i << 4;
4546 }
4547 ret = true;
4548 } else if (dcb_i == 4) {
4549 /*
4550 * Tx TC0 starts at: descriptor queue 0
4551 * Tx TC1 starts at: descriptor queue 64
4552 * Tx TC2 starts at: descriptor queue 96
4553 * Tx TC3 starts at: descriptor queue 112
4554 *
4555 * Rx TC0-TC3 are offset by 32 queues each
4556 */
4557 adapter->tx_ring[0]->reg_idx = 0;
4558 adapter->tx_ring[1]->reg_idx = 64;
4559 adapter->tx_ring[2]->reg_idx = 96;
4560 adapter->tx_ring[3]->reg_idx = 112;
4561 for (i = 0 ; i < dcb_i; i++)
4562 adapter->rx_ring[i]->reg_idx = i << 5;
4563 ret = true;
4313 } 4564 }
4314 } else { 4565 break;
4315 ret = false; 4566 default:
4567 break;
4316 } 4568 }
4317
4318 return ret; 4569 return ret;
4319} 4570}
4320#endif 4571#endif
@@ -4354,55 +4605,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
4354 */ 4605 */
4355static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter) 4606static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
4356{ 4607{
4357 int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
4358 bool ret = false;
4359 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE]; 4608 struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
4609 int i;
4610 u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
4611
4612 if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
4613 return false;
4360 4614
4361 if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
4362#ifdef CONFIG_IXGBE_DCB 4615#ifdef CONFIG_IXGBE_DCB
4363 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { 4616 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
4364 struct ixgbe_fcoe *fcoe = &adapter->fcoe; 4617 struct ixgbe_fcoe *fcoe = &adapter->fcoe;
4365 4618
4366 ixgbe_cache_ring_dcb(adapter); 4619 ixgbe_cache_ring_dcb(adapter);
4367 /* find out queues in TC for FCoE */ 4620 /* find out queues in TC for FCoE */
4368 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1; 4621 fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
4369 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1; 4622 fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
4370 /* 4623 /*
4371 * In 82599, the number of Tx queues for each traffic 4624 * In 82599, the number of Tx queues for each traffic
4372 * class for both 8-TC and 4-TC modes are: 4625 * class for both 8-TC and 4-TC modes are:
4373 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7 4626 * TCs : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
4374 * 8 TCs: 32 32 16 16 8 8 8 8 4627 * 8 TCs: 32 32 16 16 8 8 8 8
4375 * 4 TCs: 64 64 32 32 4628 * 4 TCs: 64 64 32 32
 4376 * We have max 8 queues for FCoE, where 8 is the 4629 * We have max 8 queues for FCoE, where 8 is the
4377 * FCoE redirection table size. If TC for FCoE is 4630 * FCoE redirection table size. If TC for FCoE is
4378 * less than or equal to TC3, we have enough queues 4631 * less than or equal to TC3, we have enough queues
4379 * to add max of 8 queues for FCoE, so we start FCoE 4632 * to add max of 8 queues for FCoE, so we start FCoE
4380 * tx descriptor from the next one, i.e., reg_idx + 1. 4633 * Tx queue from the next one, i.e., reg_idx + 1.
4381 * If TC for FCoE is above TC3, implying 8 TC mode, 4634 * If TC for FCoE is above TC3, implying 8 TC mode,
4382 * and we need 8 for FCoE, we have to take all queues 4635 * and we need 8 for FCoE, we have to take all queues
4383 * in that traffic class for FCoE. 4636 * in that traffic class for FCoE.
4384 */ 4637 */
4385 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3)) 4638 if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
4386 fcoe_tx_i--; 4639 fcoe_tx_i--;
4387 } 4640 }
4388#endif /* CONFIG_IXGBE_DCB */ 4641#endif /* CONFIG_IXGBE_DCB */
4389 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) { 4642 if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
4390 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) || 4643 if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
4391 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) 4644 (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
4392 ixgbe_cache_ring_fdir(adapter); 4645 ixgbe_cache_ring_fdir(adapter);
4393 else 4646 else
4394 ixgbe_cache_ring_rss(adapter); 4647 ixgbe_cache_ring_rss(adapter);
4395 4648
4396 fcoe_rx_i = f->mask; 4649 fcoe_rx_i = f->mask;
4397 fcoe_tx_i = f->mask; 4650 fcoe_tx_i = f->mask;
4398 }
4399 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4400 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4401 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4402 }
4403 ret = true;
4404 } 4651 }
4405 return ret; 4652 for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
4653 adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
4654 adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
4655 }
4656 return true;
4406} 4657}
4407 4658
4408#endif /* IXGBE_FCOE */ 4659#endif /* IXGBE_FCOE */
@@ -4471,65 +4722,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
4471 **/ 4722 **/
4472static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter) 4723static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
4473{ 4724{
4474 int i; 4725 int rx = 0, tx = 0, nid = adapter->node;
4475 int orig_node = adapter->node;
4476 4726
4477 for (i = 0; i < adapter->num_tx_queues; i++) { 4727 if (nid < 0 || !node_online(nid))
4478 struct ixgbe_ring *ring = adapter->tx_ring[i]; 4728 nid = first_online_node;
4479 if (orig_node == -1) { 4729
4480 int cur_node = next_online_node(adapter->node); 4730 for (; tx < adapter->num_tx_queues; tx++) {
4481 if (cur_node == MAX_NUMNODES) 4731 struct ixgbe_ring *ring;
4482 cur_node = first_online_node; 4732
4483 adapter->node = cur_node; 4733 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4484 }
4485 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4486 adapter->node);
4487 if (!ring) 4734 if (!ring)
4488 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4735 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4489 if (!ring) 4736 if (!ring)
4490 goto err_tx_ring_allocation; 4737 goto err_allocation;
4491 ring->count = adapter->tx_ring_count; 4738 ring->count = adapter->tx_ring_count;
4492 ring->queue_index = i; 4739 ring->queue_index = tx;
4493 ring->numa_node = adapter->node; 4740 ring->numa_node = nid;
4741 ring->dev = &adapter->pdev->dev;
4742 ring->netdev = adapter->netdev;
4494 4743
4495 adapter->tx_ring[i] = ring; 4744 adapter->tx_ring[tx] = ring;
4496 } 4745 }
4497 4746
4498 /* Restore the adapter's original node */ 4747 for (; rx < adapter->num_rx_queues; rx++) {
4499 adapter->node = orig_node; 4748 struct ixgbe_ring *ring;
4500 4749
4501 for (i = 0; i < adapter->num_rx_queues; i++) { 4750 ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
4502 struct ixgbe_ring *ring = adapter->rx_ring[i];
4503 if (orig_node == -1) {
4504 int cur_node = next_online_node(adapter->node);
4505 if (cur_node == MAX_NUMNODES)
4506 cur_node = first_online_node;
4507 adapter->node = cur_node;
4508 }
4509 ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
4510 adapter->node);
4511 if (!ring) 4751 if (!ring)
4512 ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL); 4752 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
4513 if (!ring) 4753 if (!ring)
4514 goto err_rx_ring_allocation; 4754 goto err_allocation;
4515 ring->count = adapter->rx_ring_count; 4755 ring->count = adapter->rx_ring_count;
4516 ring->queue_index = i; 4756 ring->queue_index = rx;
4517 ring->numa_node = adapter->node; 4757 ring->numa_node = nid;
4758 ring->dev = &adapter->pdev->dev;
4759 ring->netdev = adapter->netdev;
4518 4760
4519 adapter->rx_ring[i] = ring; 4761 adapter->rx_ring[rx] = ring;
4520 } 4762 }
4521 4763
4522 /* Restore the adapter's original node */
4523 adapter->node = orig_node;
4524
4525 ixgbe_cache_ring_register(adapter); 4764 ixgbe_cache_ring_register(adapter);
4526 4765
4527 return 0; 4766 return 0;
4528 4767
4529err_rx_ring_allocation: 4768err_allocation:
4530 for (i = 0; i < adapter->num_tx_queues; i++) 4769 while (tx)
4531 kfree(adapter->tx_ring[i]); 4770 kfree(adapter->tx_ring[--tx]);
4532err_tx_ring_allocation: 4771
4772 while (rx)
4773 kfree(adapter->rx_ring[--rx]);
4533 return -ENOMEM; 4774 return -ENOMEM;
4534} 4775}
4535 4776
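
The rewritten ixgbe_alloc_queues() above settles the NUMA node once, then follows a prefer-local, fall-back-to-anywhere allocation and, on failure, unwinds only what was actually allocated. A compact sketch of that pattern, assuming kernel context; alloc_array_on_node() is an illustrative helper, not part of the driver:

/*
 * Sketch: allocate n objects close to 'nid' when possible, fall back to
 * any node, and free only the successfully allocated entries on error.
 */
static int alloc_array_on_node(void **slots, int n, size_t sz, int nid)
{
	int i;

	for (i = 0; i < n; i++) {
		void *p = kzalloc_node(sz, GFP_KERNEL, nid);

		if (!p)
			p = kzalloc(sz, GFP_KERNEL);
		if (!p)
			goto err;
		slots[i] = p;
	}
	return 0;

err:
	while (i)
		kfree(slots[--i]);
	return -ENOMEM;
}
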
@@ -4580,6 +4821,12 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
4580 4821
4581 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; 4822 adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED;
4582 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED; 4823 adapter->flags &= ~IXGBE_FLAG_RSS_ENABLED;
4824 if (adapter->flags & (IXGBE_FLAG_FDIR_HASH_CAPABLE |
4825 IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) {
4826 e_err(probe,
4827 "Flow Director is not supported while multiple "
4828 "queues are disabled. Disabling Flow Director\n");
4829 }
4583 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; 4830 adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE;
4584 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 4831 adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE;
4585 adapter->atr_sample_rate = 0; 4832 adapter->atr_sample_rate = 0;
@@ -4751,6 +4998,11 @@ err_set_interrupt:
4751 return err; 4998 return err;
4752} 4999}
4753 5000
5001static void ring_free_rcu(struct rcu_head *head)
5002{
5003 kfree(container_of(head, struct ixgbe_ring, rcu));
5004}
5005
4754/** 5006/**
4755 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings 5007 * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
4756 * @adapter: board private structure to clear interrupt scheme on 5008 * @adapter: board private structure to clear interrupt scheme on
@@ -4767,7 +5019,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
4767 adapter->tx_ring[i] = NULL; 5019 adapter->tx_ring[i] = NULL;
4768 } 5020 }
4769 for (i = 0; i < adapter->num_rx_queues; i++) { 5021 for (i = 0; i < adapter->num_rx_queues; i++) {
4770 kfree(adapter->rx_ring[i]); 5022 struct ixgbe_ring *ring = adapter->rx_ring[i];
5023
5024 /* ixgbe_get_stats64() might access this ring, we must wait
5025 * a grace period before freeing it.
5026 */
5027 call_rcu(&ring->rcu, ring_free_rcu);
4771 adapter->rx_ring[i] = NULL; 5028 adapter->rx_ring[i] = NULL;
4772 } 5029 }
4773 5030
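
The call_rcu() change above exists because ixgbe_get_stats64() may still be walking the Rx rings under rcu_read_lock() when the interrupt scheme is torn down, so each ring must only be freed after a grace period. A sketch of the pattern, assuming kernel context; struct example_ring and its helpers are illustrative, not driver code:

/*
 * Sketch: unpublish the object first, then defer the kfree() through
 * call_rcu() so lockless readers already inside rcu_read_lock()
 * sections finish before the memory is released.
 */
struct example_ring {
	struct rcu_head rcu;
	/* ... state read by lockless stats readers ... */
};

static void example_ring_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct example_ring, rcu));
}

static void example_ring_release(struct example_ring **slot)
{
	struct example_ring *ring = *slot;

	*slot = NULL;			/* no new readers can find it */
	if (ring)
		call_rcu(&ring->rcu, example_ring_free_rcu);
}
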
@@ -4847,6 +5104,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4847 int j; 5104 int j;
4848 struct tc_configuration *tc; 5105 struct tc_configuration *tc;
4849#endif 5106#endif
5107 int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
4850 5108
4851 /* PCI config space info */ 5109 /* PCI config space info */
4852 5110
@@ -4861,26 +5119,24 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4861 adapter->ring_feature[RING_F_RSS].indices = rss; 5119 adapter->ring_feature[RING_F_RSS].indices = rss;
4862 adapter->flags |= IXGBE_FLAG_RSS_ENABLED; 5120 adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
4863 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES; 5121 adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
4864 if (hw->mac.type == ixgbe_mac_82598EB) { 5122 switch (hw->mac.type) {
5123 case ixgbe_mac_82598EB:
4865 if (hw->device_id == IXGBE_DEV_ID_82598AT) 5124 if (hw->device_id == IXGBE_DEV_ID_82598AT)
4866 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; 5125 adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
4867 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598; 5126 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
4868 } else if (hw->mac.type == ixgbe_mac_82599EB) { 5127 break;
5128 case ixgbe_mac_82599EB:
5129 case ixgbe_mac_X540:
4869 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599; 5130 adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
4870 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; 5131 adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
4871 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; 5132 adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
4872 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) 5133 if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM)
4873 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; 5134 adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE;
4874 if (dev->features & NETIF_F_NTUPLE) { 5135 /* n-tuple support exists, always init our spinlock */
4875 /* Flow Director perfect filter enabled */ 5136 spin_lock_init(&adapter->fdir_perfect_lock);
4876 adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; 5137 /* Flow Director hash filters enabled */
4877 adapter->atr_sample_rate = 0; 5138 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4878 spin_lock_init(&adapter->fdir_perfect_lock); 5139 adapter->atr_sample_rate = 20;
4879 } else {
4880 /* Flow Director hash filters enabled */
4881 adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE;
4882 adapter->atr_sample_rate = 20;
4883 }
4884 adapter->ring_feature[RING_F_FDIR].indices = 5140 adapter->ring_feature[RING_F_FDIR].indices =
4885 IXGBE_MAX_FDIR_INDICES; 5141 IXGBE_MAX_FDIR_INDICES;
4886 adapter->fdir_pballoc = 0; 5142 adapter->fdir_pballoc = 0;
@@ -4894,6 +5150,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4894 adapter->fcoe.up = IXGBE_FCOE_DEFTC; 5150 adapter->fcoe.up = IXGBE_FCOE_DEFTC;
4895#endif 5151#endif
4896#endif /* IXGBE_FCOE */ 5152#endif /* IXGBE_FCOE */
5153 break;
5154 default:
5155 break;
4897 } 5156 }
4898 5157
4899#ifdef CONFIG_IXGBE_DCB 5158#ifdef CONFIG_IXGBE_DCB
@@ -4923,8 +5182,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4923#ifdef CONFIG_DCB 5182#ifdef CONFIG_DCB
4924 adapter->last_lfc_mode = hw->fc.current_mode; 5183 adapter->last_lfc_mode = hw->fc.current_mode;
4925#endif 5184#endif
4926 hw->fc.high_water = IXGBE_DEFAULT_FCRTH; 5185 hw->fc.high_water = FC_HIGH_WATER(max_frame);
4927 hw->fc.low_water = IXGBE_DEFAULT_FCRTL; 5186 hw->fc.low_water = FC_LOW_WATER(max_frame);
4928 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; 5187 hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
4929 hw->fc.send_xon = true; 5188 hw->fc.send_xon = true;
4930 hw->fc.disable_fc_autoneg = false; 5189 hw->fc.disable_fc_autoneg = false;
@@ -4962,30 +5221,27 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
4962 5221
4963/** 5222/**
4964 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors) 5223 * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
4965 * @adapter: board private structure
4966 * @tx_ring: tx descriptor ring (for a specific queue) to setup 5224 * @tx_ring: tx descriptor ring (for a specific queue) to setup
4967 * 5225 *
4968 * Return 0 on success, negative on failure 5226 * Return 0 on success, negative on failure
4969 **/ 5227 **/
4970int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter, 5228int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
4971 struct ixgbe_ring *tx_ring)
4972{ 5229{
4973 struct pci_dev *pdev = adapter->pdev; 5230 struct device *dev = tx_ring->dev;
4974 int size; 5231 int size;
4975 5232
4976 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count; 5233 size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
4977 tx_ring->tx_buffer_info = vmalloc_node(size, tx_ring->numa_node); 5234 tx_ring->tx_buffer_info = vzalloc_node(size, tx_ring->numa_node);
4978 if (!tx_ring->tx_buffer_info) 5235 if (!tx_ring->tx_buffer_info)
4979 tx_ring->tx_buffer_info = vmalloc(size); 5236 tx_ring->tx_buffer_info = vzalloc(size);
4980 if (!tx_ring->tx_buffer_info) 5237 if (!tx_ring->tx_buffer_info)
4981 goto err; 5238 goto err;
4982 memset(tx_ring->tx_buffer_info, 0, size);
4983 5239
4984 /* round up to nearest 4K */ 5240 /* round up to nearest 4K */
4985 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 5241 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
4986 tx_ring->size = ALIGN(tx_ring->size, 4096); 5242 tx_ring->size = ALIGN(tx_ring->size, 4096);
4987 5243
4988 tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size, 5244 tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
4989 &tx_ring->dma, GFP_KERNEL); 5245 &tx_ring->dma, GFP_KERNEL);
4990 if (!tx_ring->desc) 5246 if (!tx_ring->desc)
4991 goto err; 5247 goto err;
@@ -4998,7 +5254,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
4998err: 5254err:
4999 vfree(tx_ring->tx_buffer_info); 5255 vfree(tx_ring->tx_buffer_info);
5000 tx_ring->tx_buffer_info = NULL; 5256 tx_ring->tx_buffer_info = NULL;
5001 e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n"); 5257 dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
5002 return -ENOMEM; 5258 return -ENOMEM;
5003} 5259}
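The allocation pattern above, a NUMA-aware vzalloc_node() with a plain vzalloc() fallback for the software buffer array plus a 4 KiB-aligned coherent DMA block for the descriptors, can be sketched on its own. The helper below is illustrative only and is not part of this patch; example_alloc_ring() and its parameters are made up for the sketch.

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/vmalloc.h>
#include <linux/dma-mapping.h>

/* Illustrative only: zeroed, node-local buffer array plus a 4 KiB-aligned
 * descriptor block, mirroring the shape of ixgbe_setup_tx_resources(). */
static int example_alloc_ring(struct device *dev, int node, unsigned int count,
			      size_t buf_sz, size_t desc_sz, void **bufs,
			      void **desc, dma_addr_t *dma, size_t *ring_bytes)
{
	*bufs = vzalloc_node(count * buf_sz, node);
	if (!*bufs)
		*bufs = vzalloc(count * buf_sz);	/* fall back to any node */
	if (!*bufs)
		return -ENOMEM;

	*ring_bytes = ALIGN(count * desc_sz, 4096);	/* round up to 4 KiB */
	*desc = dma_alloc_coherent(dev, *ring_bytes, dma, GFP_KERNEL);
	if (!*desc) {
		vfree(*bufs);
		*bufs = NULL;
		return -ENOMEM;
	}

	return 0;
}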
5004 5260
@@ -5017,7 +5273,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5017 int i, err = 0; 5273 int i, err = 0;
5018 5274
5019 for (i = 0; i < adapter->num_tx_queues; i++) { 5275 for (i = 0; i < adapter->num_tx_queues; i++) {
5020 err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]); 5276 err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
5021 if (!err) 5277 if (!err)
5022 continue; 5278 continue;
5023 e_err(probe, "Allocation for Tx Queue %u failed\n", i); 5279 e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5029,48 +5285,40 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
5029 5285
5030/** 5286/**
5031 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors) 5287 * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
5032 * @adapter: board private structure
5033 * @rx_ring: rx descriptor ring (for a specific queue) to setup 5288 * @rx_ring: rx descriptor ring (for a specific queue) to setup
5034 * 5289 *
5035 * Returns 0 on success, negative on failure 5290 * Returns 0 on success, negative on failure
5036 **/ 5291 **/
5037int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter, 5292int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
5038 struct ixgbe_ring *rx_ring)
5039{ 5293{
5040 struct pci_dev *pdev = adapter->pdev; 5294 struct device *dev = rx_ring->dev;
5041 int size; 5295 int size;
5042 5296
5043 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; 5297 size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
5044 rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node); 5298 rx_ring->rx_buffer_info = vzalloc_node(size, rx_ring->numa_node);
5045 if (!rx_ring->rx_buffer_info) 5299 if (!rx_ring->rx_buffer_info)
5046 rx_ring->rx_buffer_info = vmalloc(size); 5300 rx_ring->rx_buffer_info = vzalloc(size);
5047 if (!rx_ring->rx_buffer_info) { 5301 if (!rx_ring->rx_buffer_info)
5048 e_err(probe, "vmalloc allocation failed for the Rx " 5302 goto err;
5049 "descriptor ring\n");
5050 goto alloc_failed;
5051 }
5052 memset(rx_ring->rx_buffer_info, 0, size);
5053 5303
5054 /* Round up to nearest 4K */ 5304 /* Round up to nearest 4K */
5055 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 5305 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
5056 rx_ring->size = ALIGN(rx_ring->size, 4096); 5306 rx_ring->size = ALIGN(rx_ring->size, 4096);
5057 5307
5058 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 5308 rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
5059 &rx_ring->dma, GFP_KERNEL); 5309 &rx_ring->dma, GFP_KERNEL);
5060 5310
5061 if (!rx_ring->desc) { 5311 if (!rx_ring->desc)
5062 e_err(probe, "Memory allocation failed for the Rx " 5312 goto err;
5063 "descriptor ring\n");
5064 vfree(rx_ring->rx_buffer_info);
5065 goto alloc_failed;
5066 }
5067 5313
5068 rx_ring->next_to_clean = 0; 5314 rx_ring->next_to_clean = 0;
5069 rx_ring->next_to_use = 0; 5315 rx_ring->next_to_use = 0;
5070 5316
5071 return 0; 5317 return 0;
5072 5318err:
5073alloc_failed: 5319 vfree(rx_ring->rx_buffer_info);
5320 rx_ring->rx_buffer_info = NULL;
5321 dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
5074 return -ENOMEM; 5322 return -ENOMEM;
5075} 5323}
5076 5324
@@ -5084,13 +5332,12 @@ alloc_failed:
5084 * 5332 *
5085 * Return 0 on success, negative on failure 5333 * Return 0 on success, negative on failure
5086 **/ 5334 **/
5087
5088static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) 5335static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5089{ 5336{
5090 int i, err = 0; 5337 int i, err = 0;
5091 5338
5092 for (i = 0; i < adapter->num_rx_queues; i++) { 5339 for (i = 0; i < adapter->num_rx_queues; i++) {
5093 err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]); 5340 err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
5094 if (!err) 5341 if (!err)
5095 continue; 5342 continue;
5096 e_err(probe, "Allocation for Rx Queue %u failed\n", i); 5343 e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5102,23 +5349,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
5102 5349
5103/** 5350/**
5104 * ixgbe_free_tx_resources - Free Tx Resources per Queue 5351 * ixgbe_free_tx_resources - Free Tx Resources per Queue
5105 * @adapter: board private structure
5106 * @tx_ring: Tx descriptor ring for a specific queue 5352 * @tx_ring: Tx descriptor ring for a specific queue
5107 * 5353 *
5108 * Free all transmit software resources 5354 * Free all transmit software resources
5109 **/ 5355 **/
5110void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter, 5356void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
5111 struct ixgbe_ring *tx_ring)
5112{ 5357{
5113 struct pci_dev *pdev = adapter->pdev; 5358 ixgbe_clean_tx_ring(tx_ring);
5114
5115 ixgbe_clean_tx_ring(adapter, tx_ring);
5116 5359
5117 vfree(tx_ring->tx_buffer_info); 5360 vfree(tx_ring->tx_buffer_info);
5118 tx_ring->tx_buffer_info = NULL; 5361 tx_ring->tx_buffer_info = NULL;
5119 5362
5120 dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc, 5363 /* if not set, then don't free */
5121 tx_ring->dma); 5364 if (!tx_ring->desc)
5365 return;
5366
5367 dma_free_coherent(tx_ring->dev, tx_ring->size,
5368 tx_ring->desc, tx_ring->dma);
5122 5369
5123 tx_ring->desc = NULL; 5370 tx_ring->desc = NULL;
5124} 5371}
@@ -5135,28 +5382,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
5135 5382
5136 for (i = 0; i < adapter->num_tx_queues; i++) 5383 for (i = 0; i < adapter->num_tx_queues; i++)
5137 if (adapter->tx_ring[i]->desc) 5384 if (adapter->tx_ring[i]->desc)
5138 ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]); 5385 ixgbe_free_tx_resources(adapter->tx_ring[i]);
5139} 5386}
5140 5387
5141/** 5388/**
5142 * ixgbe_free_rx_resources - Free Rx Resources 5389 * ixgbe_free_rx_resources - Free Rx Resources
5143 * @adapter: board private structure
5144 * @rx_ring: ring to clean the resources from 5390 * @rx_ring: ring to clean the resources from
5145 * 5391 *
5146 * Free all receive software resources 5392 * Free all receive software resources
5147 **/ 5393 **/
5148void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter, 5394void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
5149 struct ixgbe_ring *rx_ring)
5150{ 5395{
5151 struct pci_dev *pdev = adapter->pdev; 5396 ixgbe_clean_rx_ring(rx_ring);
5152
5153 ixgbe_clean_rx_ring(adapter, rx_ring);
5154 5397
5155 vfree(rx_ring->rx_buffer_info); 5398 vfree(rx_ring->rx_buffer_info);
5156 rx_ring->rx_buffer_info = NULL; 5399 rx_ring->rx_buffer_info = NULL;
5157 5400
5158 dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc, 5401 /* if not set, then don't free */
5159 rx_ring->dma); 5402 if (!rx_ring->desc)
5403 return;
5404
5405 dma_free_coherent(rx_ring->dev, rx_ring->size,
5406 rx_ring->desc, rx_ring->dma);
5160 5407
5161 rx_ring->desc = NULL; 5408 rx_ring->desc = NULL;
5162} 5409}
@@ -5173,7 +5420,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5173 5420
5174 for (i = 0; i < adapter->num_rx_queues; i++) 5421 for (i = 0; i < adapter->num_rx_queues; i++)
5175 if (adapter->rx_ring[i]->desc) 5422 if (adapter->rx_ring[i]->desc)
5176 ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]); 5423 ixgbe_free_rx_resources(adapter->rx_ring[i]);
5177} 5424}
5178 5425
5179/** 5426/**
@@ -5186,6 +5433,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
5186static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) 5433static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5187{ 5434{
5188 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5435 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5436 struct ixgbe_hw *hw = &adapter->hw;
5189 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 5437 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
5190 5438
5191 /* MTU < 68 is an error and causes problems on some kernels */ 5439 /* MTU < 68 is an error and causes problems on some kernels */
@@ -5196,6 +5444,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
5196 /* must set new MTU before calling down or up */ 5444 /* must set new MTU before calling down or up */
5197 netdev->mtu = new_mtu; 5445 netdev->mtu = new_mtu;
5198 5446
5447 hw->fc.high_water = FC_HIGH_WATER(max_frame);
5448 hw->fc.low_water = FC_LOW_WATER(max_frame);
5449
5199 if (netif_running(netdev)) 5450 if (netif_running(netdev))
5200 ixgbe_reinit_locked(adapter); 5451 ixgbe_reinit_locked(adapter);
5201 5452
@@ -5291,8 +5542,8 @@ static int ixgbe_close(struct net_device *netdev)
5291#ifdef CONFIG_PM 5542#ifdef CONFIG_PM
5292static int ixgbe_resume(struct pci_dev *pdev) 5543static int ixgbe_resume(struct pci_dev *pdev)
5293{ 5544{
5294 struct net_device *netdev = pci_get_drvdata(pdev); 5545 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5295 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5546 struct net_device *netdev = adapter->netdev;
5296 u32 err; 5547 u32 err;
5297 5548
5298 pci_set_power_state(pdev, PCI_D0); 5549 pci_set_power_state(pdev, PCI_D0);
@@ -5323,7 +5574,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
5323 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 5574 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
5324 5575
5325 if (netif_running(netdev)) { 5576 if (netif_running(netdev)) {
5326 err = ixgbe_open(adapter->netdev); 5577 err = ixgbe_open(netdev);
5327 if (err) 5578 if (err)
5328 return err; 5579 return err;
5329 } 5580 }
@@ -5336,8 +5587,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
5336 5587
5337static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) 5588static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5338{ 5589{
5339 struct net_device *netdev = pci_get_drvdata(pdev); 5590 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
5340 struct ixgbe_adapter *adapter = netdev_priv(netdev); 5591 struct net_device *netdev = adapter->netdev;
5341 struct ixgbe_hw *hw = &adapter->hw; 5592 struct ixgbe_hw *hw = &adapter->hw;
5342 u32 ctrl, fctrl; 5593 u32 ctrl, fctrl;
5343 u32 wufc = adapter->wol; 5594 u32 wufc = adapter->wol;
@@ -5354,6 +5605,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5354 ixgbe_free_all_rx_resources(adapter); 5605 ixgbe_free_all_rx_resources(adapter);
5355 } 5606 }
5356 5607
5608 ixgbe_clear_interrupt_scheme(adapter);
5609
5357#ifdef CONFIG_PM 5610#ifdef CONFIG_PM
5358 retval = pci_save_state(pdev); 5611 retval = pci_save_state(pdev);
5359 if (retval) 5612 if (retval)
@@ -5380,15 +5633,20 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
5380 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0); 5633 IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
5381 } 5634 }
5382 5635
5383 if (wufc && hw->mac.type == ixgbe_mac_82599EB) 5636 switch (hw->mac.type) {
5384 pci_wake_from_d3(pdev, true); 5637 case ixgbe_mac_82598EB:
5385 else
5386 pci_wake_from_d3(pdev, false); 5638 pci_wake_from_d3(pdev, false);
5639 break;
5640 case ixgbe_mac_82599EB:
5641 case ixgbe_mac_X540:
5642 pci_wake_from_d3(pdev, !!wufc);
5643 break;
5644 default:
5645 break;
5646 }
5387 5647
5388 *enable_wake = !!wufc; 5648 *enable_wake = !!wufc;
5389 5649
5390 ixgbe_clear_interrupt_scheme(adapter);
5391
5392 ixgbe_release_hw_control(adapter); 5650 ixgbe_release_hw_control(adapter);
5393 5651
5394 pci_disable_device(pdev); 5652 pci_disable_device(pdev);
@@ -5437,10 +5695,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5437{ 5695{
5438 struct net_device *netdev = adapter->netdev; 5696 struct net_device *netdev = adapter->netdev;
5439 struct ixgbe_hw *hw = &adapter->hw; 5697 struct ixgbe_hw *hw = &adapter->hw;
5698 struct ixgbe_hw_stats *hwstats = &adapter->stats;
5440 u64 total_mpc = 0; 5699 u64 total_mpc = 0;
5441 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 5700 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
5442 u64 non_eop_descs = 0, restart_queue = 0; 5701 u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
5443 struct ixgbe_hw_stats *hwstats = &adapter->stats; 5702 u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
5703 u64 bytes = 0, packets = 0;
5444 5704
5445 if (test_bit(__IXGBE_DOWN, &adapter->state) || 5705 if (test_bit(__IXGBE_DOWN, &adapter->state) ||
5446 test_bit(__IXGBE_RESETTING, &adapter->state)) 5706 test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5453,21 +5713,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5453 adapter->hw_rx_no_dma_resources += 5713 adapter->hw_rx_no_dma_resources +=
5454 IXGBE_READ_REG(hw, IXGBE_QPRDC(i)); 5714 IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5455 for (i = 0; i < adapter->num_rx_queues; i++) { 5715 for (i = 0; i < adapter->num_rx_queues; i++) {
5456 rsc_count += adapter->rx_ring[i]->rsc_count; 5716 rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
5457 rsc_flush += adapter->rx_ring[i]->rsc_flush; 5717 rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
5458 } 5718 }
5459 adapter->rsc_total_count = rsc_count; 5719 adapter->rsc_total_count = rsc_count;
5460 adapter->rsc_total_flush = rsc_flush; 5720 adapter->rsc_total_flush = rsc_flush;
5461 } 5721 }
5462 5722
5723 for (i = 0; i < adapter->num_rx_queues; i++) {
5724 struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
5725 non_eop_descs += rx_ring->rx_stats.non_eop_descs;
5726 alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
5727 alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
5728 bytes += rx_ring->stats.bytes;
5729 packets += rx_ring->stats.packets;
5730 }
5731 adapter->non_eop_descs = non_eop_descs;
5732 adapter->alloc_rx_page_failed = alloc_rx_page_failed;
5733 adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
5734 netdev->stats.rx_bytes = bytes;
5735 netdev->stats.rx_packets = packets;
5736
5737 bytes = 0;
5738 packets = 0;
5463 /* gather some stats to the adapter struct that are per queue */ 5739 /* gather some stats to the adapter struct that are per queue */
5464 for (i = 0; i < adapter->num_tx_queues; i++) 5740 for (i = 0; i < adapter->num_tx_queues; i++) {
5465 restart_queue += adapter->tx_ring[i]->restart_queue; 5741 struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
5742 restart_queue += tx_ring->tx_stats.restart_queue;
5743 tx_busy += tx_ring->tx_stats.tx_busy;
5744 bytes += tx_ring->stats.bytes;
5745 packets += tx_ring->stats.packets;
5746 }
5466 adapter->restart_queue = restart_queue; 5747 adapter->restart_queue = restart_queue;
5467 5748 adapter->tx_busy = tx_busy;
5468 for (i = 0; i < adapter->num_rx_queues; i++) 5749 netdev->stats.tx_bytes = bytes;
5469 non_eop_descs += adapter->rx_ring[i]->non_eop_descs; 5750 netdev->stats.tx_packets = packets;
5470 adapter->non_eop_descs = non_eop_descs;
5471 5751
5472 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); 5752 hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
5473 for (i = 0; i < 8; i++) { 5753 for (i = 0; i < 8; i++) {
@@ -5482,17 +5762,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5482 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i)); 5762 hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
5483 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); 5763 hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
5484 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i)); 5764 hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
5485 if (hw->mac.type == ixgbe_mac_82599EB) { 5765 switch (hw->mac.type) {
5486 hwstats->pxonrxc[i] += 5766 case ixgbe_mac_82598EB:
5487 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5488 hwstats->pxoffrxc[i] +=
5489 IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
5490 hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
5491 } else {
5492 hwstats->pxonrxc[i] += 5767 hwstats->pxonrxc[i] +=
5493 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i)); 5768 IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
5494 hwstats->pxoffrxc[i] += 5769 break;
5495 IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); 5770 case ixgbe_mac_82599EB:
5771 case ixgbe_mac_X540:
5772 hwstats->pxonrxc[i] +=
5773 IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
5774 break;
5775 default:
5776 break;
5496 } 5777 }
5497 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i)); 5778 hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
5498 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i)); 5779 hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5501,21 +5782,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5501 /* work around hardware counting issue */ 5782 /* work around hardware counting issue */
5502 hwstats->gprc -= missed_rx; 5783 hwstats->gprc -= missed_rx;
5503 5784
5785 ixgbe_update_xoff_received(adapter);
5786
5504 /* 82598 hardware only has a 32 bit counter in the high register */ 5787 /* 82598 hardware only has a 32 bit counter in the high register */
5505 if (hw->mac.type == ixgbe_mac_82599EB) { 5788 switch (hw->mac.type) {
5506 u64 tmp; 5789 case ixgbe_mac_82598EB:
5790 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
5791 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5792 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5793 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5794 break;
5795 case ixgbe_mac_82599EB:
5796 case ixgbe_mac_X540:
5507 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL); 5797 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
5508 tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF; 5798 IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
5509 /* 4 high bits of GORC */
5510 hwstats->gorc += (tmp << 32);
5511 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL); 5799 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
5512 tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF; 5800 IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
5513 /* 4 high bits of GOTC */
5514 hwstats->gotc += (tmp << 32);
5515 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); 5801 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
5516 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ 5802 IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
5517 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); 5803 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
5518 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
5519 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); 5804 hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
5520 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); 5805 hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
5521#ifdef IXGBE_FCOE 5806#ifdef IXGBE_FCOE
@@ -5526,12 +5811,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
5526 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); 5811 hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
5527 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); 5812 hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
5528#endif /* IXGBE_FCOE */ 5813#endif /* IXGBE_FCOE */
5529 } else { 5814 break;
5530 hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC); 5815 default:
5531 hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC); 5816 break;
5532 hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
5533 hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
5534 hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
5535 } 5817 }
5536 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC); 5818 bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
5537 hwstats->bprc += bprc; 5819 hwstats->bprc += bprc;
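For reference, the 82599 branch removed above reconstructed a 36-bit octet counter from the GORCL/GORCH register pair (32 low bits plus a 4-bit extension). A generic sketch of that split-register accumulation is shown below; it is illustrative only, and read_reg32() is a stand-in for the driver's register accessor, not a real ixgbe helper.

#include <linux/types.h>

/* Illustrative only: fold a hardware counter that is split across a 32-bit
 * low register and a 4-bit extension held in the high register. */
static inline u64 example_read_36bit_counter(u32 (*read_reg32)(u32 reg),
					     u32 lo_reg, u32 hi_reg)
{
	u64 count = read_reg32(lo_reg);			/* low 32 bits */

	count |= (u64)(read_reg32(hi_reg) & 0xF) << 32;	/* 4 high bits */
	return count;
}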
@@ -5704,8 +5986,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5704 5986
5705 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { 5987 if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
5706 for (i = 0; i < adapter->num_tx_queues; i++) 5988 for (i = 0; i < adapter->num_tx_queues; i++)
5707 set_bit(__IXGBE_FDIR_INIT_DONE, 5989 set_bit(__IXGBE_TX_FDIR_INIT_DONE,
5708 &(adapter->tx_ring[i]->reinit_state)); 5990 &(adapter->tx_ring[i]->state));
5709 } else { 5991 } else {
5710 e_err(probe, "failed to finish FDIR re-initialization, " 5992 e_err(probe, "failed to finish FDIR re-initialization, "
5711 "ignored adding FDIR ATR filters\n"); 5993 "ignored adding FDIR ATR filters\n");
@@ -5714,6 +5996,26 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
5714 netif_tx_start_all_queues(adapter->netdev); 5996 netif_tx_start_all_queues(adapter->netdev);
5715} 5997}
5716 5998
5999static void ixgbe_spoof_check(struct ixgbe_adapter *adapter)
6000{
6001 u32 ssvpc;
6002
6003 /* Do not perform spoof check for 82598 */
6004 if (adapter->hw.mac.type == ixgbe_mac_82598EB)
6005 return;
6006
6007 ssvpc = IXGBE_READ_REG(&adapter->hw, IXGBE_SSVPC);
6008
6009 /*
6010	 * The SSVPC register is cleared on read; if it reads zero, no
6011	 * spoofed packets were seen in the last interval.
6012 */
6013 if (!ssvpc)
6014 return;
6015
6016 e_warn(drv, "%d Spoofed packets detected\n", ssvpc);
6017}
6018
5717static DEFINE_MUTEX(ixgbe_watchdog_lock); 6019static DEFINE_MUTEX(ixgbe_watchdog_lock);
5718 6020
5719/** 6021/**
@@ -5767,17 +6069,27 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5767 if (!netif_carrier_ok(netdev)) { 6069 if (!netif_carrier_ok(netdev)) {
5768 bool flow_rx, flow_tx; 6070 bool flow_rx, flow_tx;
5769 6071
5770 if (hw->mac.type == ixgbe_mac_82599EB) { 6072 switch (hw->mac.type) {
5771 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); 6073 case ixgbe_mac_82598EB: {
5772 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
5773 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
5774 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
5775 } else {
5776 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL); 6074 u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
5777 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS); 6075 u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
5778 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE); 6076 flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
5779 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); 6077 flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
5780 } 6078 }
6079 break;
6080 case ixgbe_mac_82599EB:
6081 case ixgbe_mac_X540: {
6082 u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
6083 u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
6084 flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
6085 flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
6086 }
6087 break;
6088 default:
6089 flow_tx = false;
6090 flow_rx = false;
6091 break;
6092 }
5781 6093
5782 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", 6094 e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
5783 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? 6095 (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5791,7 +6103,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5791 netif_carrier_on(netdev); 6103 netif_carrier_on(netdev);
5792 } else { 6104 } else {
5793 /* Force detection of hung controller */ 6105 /* Force detection of hung controller */
5794 adapter->detect_tx_hung = true; 6106 for (i = 0; i < adapter->num_tx_queues; i++) {
6107 tx_ring = adapter->tx_ring[i];
6108 set_check_for_tx_hang(tx_ring);
6109 }
5795 } 6110 }
5796 } else { 6111 } else {
5797 adapter->link_up = false; 6112 adapter->link_up = false;
@@ -5821,6 +6136,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
5821 } 6136 }
5822 } 6137 }
5823 6138
6139 ixgbe_spoof_check(adapter);
5824 ixgbe_update_stats(adapter); 6140 ixgbe_update_stats(adapter);
5825 mutex_unlock(&ixgbe_watchdog_lock); 6141 mutex_unlock(&ixgbe_watchdog_lock);
5826} 6142}
@@ -6003,15 +6319,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
6003static int ixgbe_tx_map(struct ixgbe_adapter *adapter, 6319static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6004 struct ixgbe_ring *tx_ring, 6320 struct ixgbe_ring *tx_ring,
6005 struct sk_buff *skb, u32 tx_flags, 6321 struct sk_buff *skb, u32 tx_flags,
6006 unsigned int first) 6322 unsigned int first, const u8 hdr_len)
6007{ 6323{
6008 struct pci_dev *pdev = adapter->pdev; 6324 struct device *dev = tx_ring->dev;
6009 struct ixgbe_tx_buffer *tx_buffer_info; 6325 struct ixgbe_tx_buffer *tx_buffer_info;
6010 unsigned int len; 6326 unsigned int len;
6011 unsigned int total = skb->len; 6327 unsigned int total = skb->len;
6012 unsigned int offset = 0, size, count = 0, i; 6328 unsigned int offset = 0, size, count = 0, i;
6013 unsigned int nr_frags = skb_shinfo(skb)->nr_frags; 6329 unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
6014 unsigned int f; 6330 unsigned int f;
6331 unsigned int bytecount = skb->len;
6332 u16 gso_segs = 1;
6015 6333
6016 i = tx_ring->next_to_use; 6334 i = tx_ring->next_to_use;
6017 6335
@@ -6026,10 +6344,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6026 6344
6027 tx_buffer_info->length = size; 6345 tx_buffer_info->length = size;
6028 tx_buffer_info->mapped_as_page = false; 6346 tx_buffer_info->mapped_as_page = false;
6029 tx_buffer_info->dma = dma_map_single(&pdev->dev, 6347 tx_buffer_info->dma = dma_map_single(dev,
6030 skb->data + offset, 6348 skb->data + offset,
6031 size, DMA_TO_DEVICE); 6349 size, DMA_TO_DEVICE);
6032 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 6350 if (dma_mapping_error(dev, tx_buffer_info->dma))
6033 goto dma_error; 6351 goto dma_error;
6034 tx_buffer_info->time_stamp = jiffies; 6352 tx_buffer_info->time_stamp = jiffies;
6035 tx_buffer_info->next_to_watch = i; 6353 tx_buffer_info->next_to_watch = i;
@@ -6062,12 +6380,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6062 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD); 6380 size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
6063 6381
6064 tx_buffer_info->length = size; 6382 tx_buffer_info->length = size;
6065 tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev, 6383 tx_buffer_info->dma = dma_map_page(dev,
6066 frag->page, 6384 frag->page,
6067 offset, size, 6385 offset, size,
6068 DMA_TO_DEVICE); 6386 DMA_TO_DEVICE);
6069 tx_buffer_info->mapped_as_page = true; 6387 tx_buffer_info->mapped_as_page = true;
6070 if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma)) 6388 if (dma_mapping_error(dev, tx_buffer_info->dma))
6071 goto dma_error; 6389 goto dma_error;
6072 tx_buffer_info->time_stamp = jiffies; 6390 tx_buffer_info->time_stamp = jiffies;
6073 tx_buffer_info->next_to_watch = i; 6391 tx_buffer_info->next_to_watch = i;
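Each buffer mapped above follows the same pattern: map it, then confirm the handle with dma_mapping_error() before it is handed to hardware. A standalone, illustrative version of that check (example_map_skb_head() is made up for the sketch) looks like this:

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Illustrative only: map an skb's linear data for transmit and verify the
 * mapping before use, as ixgbe_tx_map() does for every buffer. */
static int example_map_skb_head(struct device *dev, struct sk_buff *skb,
				dma_addr_t *dma)
{
	*dma = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -EIO;	/* caller must unwind any earlier mappings */

	return 0;
}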
@@ -6081,6 +6399,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
6081 break; 6399 break;
6082 } 6400 }
6083 6401
6402 if (tx_flags & IXGBE_TX_FLAGS_TSO)
6403 gso_segs = skb_shinfo(skb)->gso_segs;
6404#ifdef IXGBE_FCOE
6405 /* adjust for FCoE Sequence Offload */
6406 else if (tx_flags & IXGBE_TX_FLAGS_FSO)
6407 gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
6408 skb_shinfo(skb)->gso_size);
6409#endif /* IXGBE_FCOE */
6410 bytecount += (gso_segs - 1) * hdr_len;
6411
6412 /* multiply data chunks by size of headers */
6413 tx_ring->tx_buffer_info[i].bytecount = bytecount;
6414 tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
6084 tx_ring->tx_buffer_info[i].skb = skb; 6415 tx_ring->tx_buffer_info[i].skb = skb;
6085 tx_ring->tx_buffer_info[first].next_to_watch = i; 6416 tx_ring->tx_buffer_info[first].next_to_watch = i;
6086 6417
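As a hypothetical illustration of the bytecount adjustment above: a TSO skb carrying 14,480 payload bytes behind a 66-byte header (skb->len = 14,546) with gso_size = 1448 is cut into gso_segs = 10 wire segments, so the queue is charged 14,546 + 9 * 66 = 15,140 bytes, counting the header once per segment rather than once per skb.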
@@ -6102,14 +6433,13 @@ dma_error:
6102 i += tx_ring->count; 6433 i += tx_ring->count;
6103 i--; 6434 i--;
6104 tx_buffer_info = &tx_ring->tx_buffer_info[i]; 6435 tx_buffer_info = &tx_ring->tx_buffer_info[i];
6105 ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info); 6436 ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
6106 } 6437 }
6107 6438
6108 return 0; 6439 return 0;
6109} 6440}
6110 6441
6111static void ixgbe_tx_queue(struct ixgbe_adapter *adapter, 6442static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
6112 struct ixgbe_ring *tx_ring,
6113 int tx_flags, int count, u32 paylen, u8 hdr_len) 6443 int tx_flags, int count, u32 paylen, u8 hdr_len)
6114{ 6444{
6115 union ixgbe_adv_tx_desc *tx_desc = NULL; 6445 union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6174,60 +6504,100 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
6174 wmb(); 6504 wmb();
6175 6505
6176 tx_ring->next_to_use = i; 6506 tx_ring->next_to_use = i;
6177 writel(i, adapter->hw.hw_addr + tx_ring->tail); 6507 writel(i, tx_ring->tail);
6178} 6508}
6179 6509
6180static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb, 6510static void ixgbe_atr(struct ixgbe_ring *ring, struct sk_buff *skb,
6181 int queue, u32 tx_flags, __be16 protocol) 6511 u32 tx_flags, __be16 protocol)
6182{ 6512{
6183 struct ixgbe_atr_input atr_input; 6513 struct ixgbe_q_vector *q_vector = ring->q_vector;
6514 union ixgbe_atr_hash_dword input = { .dword = 0 };
6515 union ixgbe_atr_hash_dword common = { .dword = 0 };
6516 union {
6517 unsigned char *network;
6518 struct iphdr *ipv4;
6519 struct ipv6hdr *ipv6;
6520 } hdr;
6184 struct tcphdr *th; 6521 struct tcphdr *th;
6185 struct iphdr *iph = ip_hdr(skb); 6522 __be16 vlan_id;
6186 struct ethhdr *eth = (struct ethhdr *)skb->data; 6523
6187 u16 vlan_id, src_port, dst_port, flex_bytes; 6524 /* if ring doesn't have an interrupt vector, cannot perform ATR */
6188 u32 src_ipv4_addr, dst_ipv4_addr; 6525 if (!q_vector)
6189 u8 l4type = 0;
6190
6191 /* Right now, we support IPv4 only */
6192 if (protocol != htons(ETH_P_IP))
6193 return; 6526 return;
6194 /* check if we're UDP or TCP */ 6527
6195 if (iph->protocol == IPPROTO_TCP) { 6528 /* do nothing if sampling is disabled */
6196 th = tcp_hdr(skb); 6529 if (!ring->atr_sample_rate)
6197 src_port = th->source;
6198 dst_port = th->dest;
6199 l4type |= IXGBE_ATR_L4TYPE_TCP;
6200 /* l4type IPv4 type is 0, no need to assign */
6201 } else {
6202 /* Unsupported L4 header, just bail here */
6203 return; 6530 return;
6204 }
6205 6531
6206 memset(&atr_input, 0, sizeof(struct ixgbe_atr_input)); 6532 ring->atr_count++;
6207 6533
6208 vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >> 6534 /* snag network header to get L4 type and address */
6209 IXGBE_TX_FLAGS_VLAN_SHIFT; 6535 hdr.network = skb_network_header(skb);
6210 src_ipv4_addr = iph->saddr;
6211 dst_ipv4_addr = iph->daddr;
6212 flex_bytes = eth->h_proto;
6213 6536
6214 ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id); 6537 /* Currently only IPv4/IPv6 with TCP is supported */
6215 ixgbe_atr_set_src_port_82599(&atr_input, dst_port); 6538 if ((protocol != __constant_htons(ETH_P_IPV6) ||
6216 ixgbe_atr_set_dst_port_82599(&atr_input, src_port); 6539 hdr.ipv6->nexthdr != IPPROTO_TCP) &&
6217 ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes); 6540 (protocol != __constant_htons(ETH_P_IP) ||
6218 ixgbe_atr_set_l4type_82599(&atr_input, l4type); 6541 hdr.ipv4->protocol != IPPROTO_TCP))
6219 /* src and dst are inverted, think how the receiver sees them */ 6542 return;
6220 ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr); 6543
6221 ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr); 6544 th = tcp_hdr(skb);
6545
6546 /* skip this packet since the socket is closing */
6547 if (th->fin)
6548 return;
6549
6550 /* sample on all syn packets or once every atr sample count */
6551 if (!th->syn && (ring->atr_count < ring->atr_sample_rate))
6552 return;
6553
6554 /* reset sample count */
6555 ring->atr_count = 0;
6556
6557 vlan_id = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT);
6558
6559 /*
6560 * src and dst are inverted, think how the receiver sees them
6561 *
6562 * The input is broken into two sections, a non-compressed section
6563 * containing vm_pool, vlan_id, and flow_type. The rest of the data
6564 * is XORed together and stored in the compressed dword.
6565 */
6566 input.formatted.vlan_id = vlan_id;
6567
6568 /*
6569 * since src port and flex bytes occupy the same word XOR them together
6570 * and write the value to source port portion of compressed dword
6571 */
6572 if (vlan_id)
6573 common.port.src ^= th->dest ^ __constant_htons(ETH_P_8021Q);
6574 else
6575 common.port.src ^= th->dest ^ protocol;
6576 common.port.dst ^= th->source;
6577
6578 if (protocol == __constant_htons(ETH_P_IP)) {
6579 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4;
6580 common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr;
6581 } else {
6582 input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6;
6583 common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^
6584 hdr.ipv6->saddr.s6_addr32[1] ^
6585 hdr.ipv6->saddr.s6_addr32[2] ^
6586 hdr.ipv6->saddr.s6_addr32[3] ^
6587 hdr.ipv6->daddr.s6_addr32[0] ^
6588 hdr.ipv6->daddr.s6_addr32[1] ^
6589 hdr.ipv6->daddr.s6_addr32[2] ^
6590 hdr.ipv6->daddr.s6_addr32[3];
6591 }
6222 6592
6223 /* This assumes the Rx queue and Tx queue are bound to the same CPU */ 6593 /* This assumes the Rx queue and Tx queue are bound to the same CPU */
6224 ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue); 6594 ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw,
6595 input, common, ring->queue_index);
6225} 6596}
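The sampling path above compresses the flow tuple into two dwords (a non-compressed vlan/flow-type word and an XOR-folded word) before handing them to the Flow Director signature filter. The standalone sketch below mimics that folding for an IPv4/TCP frame; the helpers are made up for illustration and simplify the driver's ixgbe_atr_hash_dword unions, so do not read them as the hardware's exact hash input.

#include <linux/types.h>
#include <linux/ip.h>
#include <linux/tcp.h>

/* Illustrative only: fold a sampled IPv4/TCP transmit flow the way the
 * ATR code above does, with ports swapped so the resulting filter matches
 * the receive direction of the same flow. */
static u32 example_atr_port_dword(const struct tcphdr *th, __be16 protocol)
{
	u16 src_half = ntohs(th->dest) ^ ntohs(protocol); /* flex bytes folded in */
	u16 dst_half = ntohs(th->source);

	return ((u32)src_half << 16) | dst_half;
}

static u32 example_atr_ip_dword(const struct iphdr *iph)
{
	return ntohl(iph->saddr) ^ ntohl(iph->daddr);
}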
6226 6597
6227static int __ixgbe_maybe_stop_tx(struct net_device *netdev, 6598static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
6228 struct ixgbe_ring *tx_ring, int size)
6229{ 6599{
6230 netif_stop_subqueue(netdev, tx_ring->queue_index); 6600 netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
6231 /* Herbert's original patch had: 6601 /* Herbert's original patch had:
6232 * smp_mb__after_netif_stop_queue(); 6602 * smp_mb__after_netif_stop_queue();
6233 * but since that doesn't exist yet, just open code it. */ 6603 * but since that doesn't exist yet, just open code it. */
@@ -6239,17 +6609,16 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
6239 return -EBUSY; 6609 return -EBUSY;
6240 6610
6241 /* A reprieve! - use start_queue because it doesn't call schedule */ 6611 /* A reprieve! - use start_queue because it doesn't call schedule */
6242 netif_start_subqueue(netdev, tx_ring->queue_index); 6612 netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
6243 ++tx_ring->restart_queue; 6613 ++tx_ring->tx_stats.restart_queue;
6244 return 0; 6614 return 0;
6245} 6615}
6246 6616
6247static int ixgbe_maybe_stop_tx(struct net_device *netdev, 6617static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
6248 struct ixgbe_ring *tx_ring, int size)
6249{ 6618{
6250 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size)) 6619 if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
6251 return 0; 6620 return 0;
6252 return __ixgbe_maybe_stop_tx(netdev, tx_ring, size); 6621 return __ixgbe_maybe_stop_tx(tx_ring, size);
6253} 6622}
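The helper rewritten above relies on a general stop/recheck idiom: stop the subqueue, issue a full memory barrier so the completion path observes the stopped state before the free-descriptor count is re-read, then restart the queue if space reappeared in the meantime. A generic, illustrative version follows; free_descs() stands in for IXGBE_DESC_UNUSED and is not a real helper.

#include <linux/netdevice.h>

/* Illustrative only: generic "maybe stop" helper for a multiqueue driver. */
static int example_maybe_stop_queue(struct net_device *netdev, u16 queue,
				    unsigned int (*free_descs)(u16 queue),
				    unsigned int needed)
{
	if (likely(free_descs(queue) >= needed))
		return 0;

	netif_stop_subqueue(netdev, queue);
	smp_mb();	/* order the stop against the completion path's check */

	if (likely(free_descs(queue) < needed))
		return -EBUSY;

	/* a reprieve: completions freed descriptors while we were stopping */
	netif_start_subqueue(netdev, queue);
	return 0;
}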
6254 6623
6255static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) 6624static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
@@ -6294,10 +6663,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
6294 return skb_tx_hash(dev, skb); 6663 return skb_tx_hash(dev, skb);
6295} 6664}
6296 6665
6297netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev, 6666netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
6298 struct ixgbe_adapter *adapter, 6667 struct ixgbe_adapter *adapter,
6299 struct ixgbe_ring *tx_ring) 6668 struct ixgbe_ring *tx_ring)
6300{ 6669{
6670 struct net_device *netdev = tx_ring->netdev;
6301 struct netdev_queue *txq; 6671 struct netdev_queue *txq;
6302 unsigned int first; 6672 unsigned int first;
6303 unsigned int tx_flags = 0; 6673 unsigned int tx_flags = 0;
@@ -6355,8 +6725,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6355 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++) 6725 for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
6356 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size); 6726 count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
6357 6727
6358 if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) { 6728 if (ixgbe_maybe_stop_tx(tx_ring, count)) {
6359 adapter->tx_busy++; 6729 tx_ring->tx_stats.tx_busy++;
6360 return NETDEV_TX_BUSY; 6730 return NETDEV_TX_BUSY;
6361 } 6731 }
6362 6732
@@ -6390,25 +6760,16 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
6390 tx_flags |= IXGBE_TX_FLAGS_CSUM; 6760 tx_flags |= IXGBE_TX_FLAGS_CSUM;
6391 } 6761 }
6392 6762
6393 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first); 6763 count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
6394 if (count) { 6764 if (count) {
6395 /* add the ATR filter if ATR is on */ 6765 /* add the ATR filter if ATR is on */
6396 if (tx_ring->atr_sample_rate) { 6766 if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state))
6397 ++tx_ring->atr_count; 6767 ixgbe_atr(tx_ring, skb, tx_flags, protocol);
6398 if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
6399 test_bit(__IXGBE_FDIR_INIT_DONE,
6400 &tx_ring->reinit_state)) {
6401 ixgbe_atr(adapter, skb, tx_ring->queue_index,
6402 tx_flags, protocol);
6403 tx_ring->atr_count = 0;
6404 }
6405 }
6406 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index); 6768 txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
6407 txq->tx_bytes += skb->len; 6769 txq->tx_bytes += skb->len;
6408 txq->tx_packets++; 6770 txq->tx_packets++;
6409 ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len, 6771 ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
6410 hdr_len); 6772 ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
6411 ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
6412 6773
6413 } else { 6774 } else {
6414 dev_kfree_skb_any(skb); 6775 dev_kfree_skb_any(skb);
@@ -6425,7 +6786,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
6425 struct ixgbe_ring *tx_ring; 6786 struct ixgbe_ring *tx_ring;
6426 6787
6427 tx_ring = adapter->tx_ring[skb->queue_mapping]; 6788 tx_ring = adapter->tx_ring[skb->queue_mapping];
6428 return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring); 6789 return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
6429} 6790}
6430 6791
6431/** 6792/**
@@ -6566,20 +6927,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
6566 6927
6567 /* accurate rx/tx bytes/packets stats */ 6928 /* accurate rx/tx bytes/packets stats */
6568 dev_txq_stats_fold(netdev, stats); 6929 dev_txq_stats_fold(netdev, stats);
6930 rcu_read_lock();
6569 for (i = 0; i < adapter->num_rx_queues; i++) { 6931 for (i = 0; i < adapter->num_rx_queues; i++) {
6570 struct ixgbe_ring *ring = adapter->rx_ring[i]; 6932 struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
6571 u64 bytes, packets; 6933 u64 bytes, packets;
6572 unsigned int start; 6934 unsigned int start;
6573 6935
6574 do { 6936 if (ring) {
6575 start = u64_stats_fetch_begin_bh(&ring->syncp); 6937 do {
6576 packets = ring->stats.packets; 6938 start = u64_stats_fetch_begin_bh(&ring->syncp);
6577 bytes = ring->stats.bytes; 6939 packets = ring->stats.packets;
6578 } while (u64_stats_fetch_retry_bh(&ring->syncp, start)); 6940 bytes = ring->stats.bytes;
6579 stats->rx_packets += packets; 6941 } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
6580 stats->rx_bytes += bytes; 6942 stats->rx_packets += packets;
6943 stats->rx_bytes += bytes;
6944 }
6581 } 6945 }
6582 6946 rcu_read_unlock();
6583 /* following stats updated by ixgbe_watchdog_task() */ 6947 /* following stats updated by ixgbe_watchdog_task() */
6584 stats->multicast = netdev->stats.multicast; 6948 stats->multicast = netdev->stats.multicast;
6585 stats->rx_errors = netdev->stats.rx_errors; 6949 stats->rx_errors = netdev->stats.rx_errors;
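The per-ring byte and packet counters read above are protected by a u64_stats_sync sequence counter rather than a lock. A minimal, illustrative writer/reader pair is shown below; struct example_ring_stats and its helpers are invented for the sketch and are not part of the driver.

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

/* Illustrative only: 64-bit per-ring counters updated from the datapath and
 * read without locks through the u64_stats sequence counter. */
struct example_ring_stats {
	u64 packets;
	u64 bytes;
	struct u64_stats_sync syncp;
};

static void example_stats_add(struct example_ring_stats *s,
			      unsigned int pkts, unsigned int len)
{
	u64_stats_update_begin(&s->syncp);
	s->packets += pkts;
	s->bytes += len;
	u64_stats_update_end(&s->syncp);
}

static void example_stats_read(struct example_ring_stats *s,
			       u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin_bh(&s->syncp);
		*packets = s->packets;
		*bytes = s->bytes;
	} while (u64_stats_fetch_retry_bh(&s->syncp, start));
}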
@@ -6628,7 +6992,7 @@ static void __devinit ixgbe_probe_vf(struct ixgbe_adapter *adapter,
6628 struct ixgbe_hw *hw = &adapter->hw; 6992 struct ixgbe_hw *hw = &adapter->hw;
6629 int err; 6993 int err;
6630 6994
6631 if (hw->mac.type != ixgbe_mac_82599EB || !max_vfs) 6995 if (hw->mac.type == ixgbe_mac_82598EB || !max_vfs)
6632 return; 6996 return;
6633 6997
6634 /* The 82599 supports up to 64 VFs per physical function 6998 /* The 82599 supports up to 64 VFs per physical function
@@ -6694,11 +7058,12 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6694 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; 7058 const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data];
6695 static int cards_found; 7059 static int cards_found;
6696 int i, err, pci_using_dac; 7060 int i, err, pci_using_dac;
7061 u8 part_str[IXGBE_PBANUM_LENGTH];
6697 unsigned int indices = num_possible_cpus(); 7062 unsigned int indices = num_possible_cpus();
6698#ifdef IXGBE_FCOE 7063#ifdef IXGBE_FCOE
6699 u16 device_caps; 7064 u16 device_caps;
6700#endif 7065#endif
6701 u32 part_num, eec; 7066 u32 eec;
6702 7067
6703 /* Catch broken hardware that put the wrong VF device ID in 7068 /* Catch broken hardware that put the wrong VF device ID in
6704 * the PCIe SR-IOV capability. 7069 * the PCIe SR-IOV capability.
@@ -6761,8 +7126,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6761 7126
6762 SET_NETDEV_DEV(netdev, &pdev->dev); 7127 SET_NETDEV_DEV(netdev, &pdev->dev);
6763 7128
6764 pci_set_drvdata(pdev, netdev);
6765 adapter = netdev_priv(netdev); 7129 adapter = netdev_priv(netdev);
7130 pci_set_drvdata(pdev, adapter);
6766 7131
6767 adapter->netdev = netdev; 7132 adapter->netdev = netdev;
6768 adapter->pdev = pdev; 7133 adapter->pdev = pdev;
@@ -6785,7 +7150,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6785 netdev->netdev_ops = &ixgbe_netdev_ops; 7150 netdev->netdev_ops = &ixgbe_netdev_ops;
6786 ixgbe_set_ethtool_ops(netdev); 7151 ixgbe_set_ethtool_ops(netdev);
6787 netdev->watchdog_timeo = 5 * HZ; 7152 netdev->watchdog_timeo = 5 * HZ;
6788 strcpy(netdev->name, pci_name(pdev)); 7153 strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
6789 7154
6790 adapter->bd_number = cards_found; 7155 adapter->bd_number = cards_found;
6791 7156
@@ -6835,8 +7200,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6835 goto err_sw_init; 7200 goto err_sw_init;
6836 7201
6837 /* Make it possible the adapter to be woken up via WOL */ 7202 /* Make it possible the adapter to be woken up via WOL */
6838 if (adapter->hw.mac.type == ixgbe_mac_82599EB) 7203 switch (adapter->hw.mac.type) {
7204 case ixgbe_mac_82599EB:
7205 case ixgbe_mac_X540:
6839 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); 7206 IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
7207 break;
7208 default:
7209 break;
7210 }
6840 7211
6841 /* 7212 /*
6842 * If there is a fan on this device and it has failed log the 7213 * If there is a fan on this device and it has failed log the
@@ -6944,8 +7315,11 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6944 goto err_eeprom; 7315 goto err_eeprom;
6945 } 7316 }
6946 7317
6947 /* power down the optics */ 7318 /* power down the optics for multispeed fiber and 82599 SFP+ fiber */
6948 if (hw->phy.multispeed_fiber) 7319 if (hw->mac.ops.disable_tx_laser &&
7320 ((hw->phy.multispeed_fiber) ||
7321 ((hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) &&
7322 (hw->mac.type == ixgbe_mac_82599EB))))
6949 hw->mac.ops.disable_tx_laser(hw); 7323 hw->mac.ops.disable_tx_laser(hw);
6950 7324
6951 init_timer(&adapter->watchdog_timer); 7325 init_timer(&adapter->watchdog_timer);
@@ -6960,6 +7334,18 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6960 goto err_sw_init; 7334 goto err_sw_init;
6961 7335
6962 switch (pdev->device) { 7336 switch (pdev->device) {
7337 case IXGBE_DEV_ID_82599_SFP:
7338 /* Only this subdevice supports WOL */
7339 if (pdev->subsystem_device == IXGBE_SUBDEV_ID_82599_SFP)
7340 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7341 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7342 break;
7343 case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
7344 /* All except this subdevice support WOL */
7345 if (pdev->subsystem_device != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ)
7346 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
7347 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
7348 break;
6963 case IXGBE_DEV_ID_82599_KX4: 7349 case IXGBE_DEV_ID_82599_KX4:
6964 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX | 7350 adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
6965 IXGBE_WUFC_MC | IXGBE_WUFC_BC); 7351 IXGBE_WUFC_MC | IXGBE_WUFC_BC);
@@ -6983,16 +7369,17 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
6983 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" : 7369 hw->bus.width == ixgbe_bus_width_pcie_x1 ? "Width x1" :
6984 "Unknown"), 7370 "Unknown"),
6985 netdev->dev_addr); 7371 netdev->dev_addr);
6986 ixgbe_read_pba_num_generic(hw, &part_num); 7372
7373 err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
7374 if (err)
7375 strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
6987 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) 7376 if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
6988 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, " 7377 e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
6989 "PBA No: %06x-%03x\n",
6990 hw->mac.type, hw->phy.type, hw->phy.sfp_type, 7378 hw->mac.type, hw->phy.type, hw->phy.sfp_type,
6991 (part_num >> 8), (part_num & 0xff)); 7379 part_str);
6992 else 7380 else
6993 e_dev_info("MAC: %d, PHY: %d, PBA No: %06x-%03x\n", 7381 e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n",
6994 hw->mac.type, hw->phy.type, 7382 hw->mac.type, hw->phy.type, part_str);
6995 (part_num >> 8), (part_num & 0xff));
6996 7383
6997 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) { 7384 if (hw->bus.width <= ixgbe_bus_width_pcie_x4) {
6998 e_dev_warn("PCI-Express bandwidth available for this card is " 7385 e_dev_warn("PCI-Express bandwidth available for this card is "
@@ -7085,17 +7472,19 @@ err_dma:
7085 **/ 7472 **/
7086static void __devexit ixgbe_remove(struct pci_dev *pdev) 7473static void __devexit ixgbe_remove(struct pci_dev *pdev)
7087{ 7474{
7088 struct net_device *netdev = pci_get_drvdata(pdev); 7475 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7089 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7476 struct net_device *netdev = adapter->netdev;
7090 7477
7091 set_bit(__IXGBE_DOWN, &adapter->state); 7478 set_bit(__IXGBE_DOWN, &adapter->state);
7092 /* clear the module not found bit to make sure the worker won't 7479
7093 * reschedule 7480 /*
7481 * The timers may be rescheduled, so explicitly prevent them
7482 * from being rescheduled.
7094 */ 7483 */
7095 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state); 7484 clear_bit(__IXGBE_SFP_MODULE_NOT_FOUND, &adapter->state);
7096 del_timer_sync(&adapter->watchdog_timer); 7485 del_timer_sync(&adapter->watchdog_timer);
7097
7098 del_timer_sync(&adapter->sfp_timer); 7486 del_timer_sync(&adapter->sfp_timer);
7487
7099 cancel_work_sync(&adapter->watchdog_task); 7488 cancel_work_sync(&adapter->watchdog_task);
7100 cancel_work_sync(&adapter->sfp_task); 7489 cancel_work_sync(&adapter->sfp_task);
7101 cancel_work_sync(&adapter->multispeed_fiber_task); 7490 cancel_work_sync(&adapter->multispeed_fiber_task);
@@ -7103,7 +7492,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7103 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE || 7492 if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
7104 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) 7493 adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
7105 cancel_work_sync(&adapter->fdir_reinit_task); 7494 cancel_work_sync(&adapter->fdir_reinit_task);
7106 flush_scheduled_work(); 7495 if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE)
7496 cancel_work_sync(&adapter->check_overtemp_task);
7107 7497
7108#ifdef CONFIG_IXGBE_DCA 7498#ifdef CONFIG_IXGBE_DCA
7109 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { 7499 if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
@@ -7156,8 +7546,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
7156static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, 7546static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7157 pci_channel_state_t state) 7547 pci_channel_state_t state)
7158{ 7548{
7159 struct net_device *netdev = pci_get_drvdata(pdev); 7549 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7160 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7550 struct net_device *netdev = adapter->netdev;
7161 7551
7162 netif_device_detach(netdev); 7552 netif_device_detach(netdev);
7163 7553
@@ -7180,8 +7570,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
7180 */ 7570 */
7181static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) 7571static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7182{ 7572{
7183 struct net_device *netdev = pci_get_drvdata(pdev); 7573 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7184 struct ixgbe_adapter *adapter = netdev_priv(netdev);
7185 pci_ers_result_t result; 7574 pci_ers_result_t result;
7186 int err; 7575 int err;
7187 7576
@@ -7219,8 +7608,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
7219 */ 7608 */
7220static void ixgbe_io_resume(struct pci_dev *pdev) 7609static void ixgbe_io_resume(struct pci_dev *pdev)
7221{ 7610{
7222 struct net_device *netdev = pci_get_drvdata(pdev); 7611 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
7223 struct ixgbe_adapter *adapter = netdev_priv(netdev); 7612 struct net_device *netdev = adapter->netdev;
7224 7613
7225 if (netif_running(netdev)) { 7614 if (netif_running(netdev)) {
7226 if (ixgbe_up(adapter)) { 7615 if (ixgbe_up(adapter)) {
@@ -7285,6 +7674,7 @@ static void __exit ixgbe_exit_module(void)
7285 dca_unregister_notify(&dca_notifier); 7674 dca_unregister_notify(&dca_notifier);
7286#endif 7675#endif
7287 pci_unregister_driver(&ixgbe_driver); 7676 pci_unregister_driver(&ixgbe_driver);
7677 rcu_barrier(); /* Wait for completion of call_rcu()'s */
7288} 7678}
7289 7679
7290#ifdef CONFIG_IXGBE_DCA 7680#ifdef CONFIG_IXGBE_DCA
diff --git a/drivers/net/ixgbe/ixgbe_mbx.c b/drivers/net/ixgbe/ixgbe_mbx.c
index 471f0f2cdb98..ea82c5a1cd3e 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.c
+++ b/drivers/net/ixgbe/ixgbe_mbx.c
@@ -319,8 +319,16 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
319 u32 vflre = 0; 319 u32 vflre = 0;
320 s32 ret_val = IXGBE_ERR_MBX; 320 s32 ret_val = IXGBE_ERR_MBX;
321 321
322 if (hw->mac.type == ixgbe_mac_82599EB) 322 switch (hw->mac.type) {
323 case ixgbe_mac_82599EB:
323 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); 324 vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset));
325 break;
326 case ixgbe_mac_X540:
327 vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset));
328 break;
329 default:
330 break;
331 }
324 332
325 if (vflre & (1 << vf_shift)) { 333 if (vflre & (1 << vf_shift)) {
326 ret_val = 0; 334 ret_val = 0;
@@ -439,22 +447,26 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw)
439{ 447{
440 struct ixgbe_mbx_info *mbx = &hw->mbx; 448 struct ixgbe_mbx_info *mbx = &hw->mbx;
441 449
442 if (hw->mac.type != ixgbe_mac_82599EB) 450 switch (hw->mac.type) {
443 return; 451 case ixgbe_mac_82599EB:
444 452 case ixgbe_mac_X540:
445 mbx->timeout = 0; 453 mbx->timeout = 0;
446 mbx->usec_delay = 0; 454 mbx->usec_delay = 0;
447 455
448 mbx->size = IXGBE_VFMAILBOX_SIZE; 456 mbx->size = IXGBE_VFMAILBOX_SIZE;
449 457
450 mbx->stats.msgs_tx = 0; 458 mbx->stats.msgs_tx = 0;
451 mbx->stats.msgs_rx = 0; 459 mbx->stats.msgs_rx = 0;
452 mbx->stats.reqs = 0; 460 mbx->stats.reqs = 0;
453 mbx->stats.acks = 0; 461 mbx->stats.acks = 0;
454 mbx->stats.rsts = 0; 462 mbx->stats.rsts = 0;
463 break;
464 default:
465 break;
466 }
455} 467}
456 468
457struct ixgbe_mbx_operations mbx_ops_82599 = { 469struct ixgbe_mbx_operations mbx_ops_generic = {
458 .read = ixgbe_read_mbx_pf, 470 .read = ixgbe_read_mbx_pf,
459 .write = ixgbe_write_mbx_pf, 471 .write = ixgbe_write_mbx_pf,
460 .read_posted = ixgbe_read_posted_mbx, 472 .read_posted = ixgbe_read_posted_mbx,
diff --git a/drivers/net/ixgbe/ixgbe_mbx.h b/drivers/net/ixgbe/ixgbe_mbx.h
index 7e0d08ff5b53..3df9b1590218 100644
--- a/drivers/net/ixgbe/ixgbe_mbx.h
+++ b/drivers/net/ixgbe/ixgbe_mbx.h
@@ -88,6 +88,6 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16);
88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); 88s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16);
89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); 89void ixgbe_init_mbx_params_pf(struct ixgbe_hw *);
90 90
91extern struct ixgbe_mbx_operations mbx_ops_82599; 91extern struct ixgbe_mbx_operations mbx_ops_generic;
92 92
93#endif /* _IXGBE_MBX_H_ */ 93#endif /* _IXGBE_MBX_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_phy.c b/drivers/net/ixgbe/ixgbe_phy.c
index 6c0d42e33f21..8f7123e8fc0a 100644
--- a/drivers/net/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ixgbe/ixgbe_phy.c
@@ -115,6 +115,9 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id)
115 case TN1010_PHY_ID: 115 case TN1010_PHY_ID:
116 phy_type = ixgbe_phy_tn; 116 phy_type = ixgbe_phy_tn;
117 break; 117 break;
118 case X540_PHY_ID:
119 phy_type = ixgbe_phy_aq;
120 break;
118 case QT2022_PHY_ID: 121 case QT2022_PHY_ID:
119 phy_type = ixgbe_phy_qt; 122 phy_type = ixgbe_phy_qt;
120 break; 123 break;
@@ -425,6 +428,39 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
425} 428}
426 429
427/** 430/**
431 * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities
432 * @hw: pointer to hardware structure
433 * @speed: pointer to link speed
434 * @autoneg: boolean auto-negotiation value
435 *
436 * Determines the link capabilities by reading the PHY speed ability register.
437 */
438s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
439 ixgbe_link_speed *speed,
440 bool *autoneg)
441{
442 s32 status = IXGBE_ERR_LINK_SETUP;
443 u16 speed_ability;
444
445 *speed = 0;
446 *autoneg = true;
447
448 status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD,
449 &speed_ability);
450
451 if (status == 0) {
452 if (speed_ability & MDIO_SPEED_10G)
453 *speed |= IXGBE_LINK_SPEED_10GB_FULL;
454 if (speed_ability & MDIO_PMA_SPEED_1000)
455 *speed |= IXGBE_LINK_SPEED_1GB_FULL;
456 if (speed_ability & MDIO_PMA_SPEED_100)
457 *speed |= IXGBE_LINK_SPEED_100_FULL;
458 }
459
460 return status;
461}
462
463/**
428 * ixgbe_reset_phy_nl - Performs a PHY reset 464 * ixgbe_reset_phy_nl - Performs a PHY reset
429 * @hw: pointer to hardware structure 465 * @hw: pointer to hardware structure
430 **/ 466 **/
@@ -1378,6 +1414,22 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
1378} 1414}
1379 1415
1380/** 1416/**
1417 * ixgbe_get_phy_firmware_version_generic - Gets the PHY Firmware Version
1418 * @hw: pointer to hardware structure
1419 * @firmware_version: pointer to the PHY Firmware Version
1420**/
1421s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
1422 u16 *firmware_version)
1423{
1424 s32 status = 0;
1425
1426 status = hw->phy.ops.read_reg(hw, AQ_FW_REV, MDIO_MMD_VEND1,
1427 firmware_version);
1428
1429 return status;
1430}
1431
1432/**
1381 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred. 1433 * ixgbe_tn_check_overtemp - Checks if an overtemp occurred.
1382 * @hw: pointer to hardware structure 1434 * @hw: pointer to hardware structure
1383 * 1435 *
diff --git a/drivers/net/ixgbe/ixgbe_phy.h b/drivers/net/ixgbe/ixgbe_phy.h
index fb3898f12fc5..e2c6b7eac641 100644
--- a/drivers/net/ixgbe/ixgbe_phy.h
+++ b/drivers/net/ixgbe/ixgbe_phy.h
@@ -96,6 +96,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw,
96 ixgbe_link_speed speed, 96 ixgbe_link_speed speed,
97 bool autoneg, 97 bool autoneg,
98 bool autoneg_wait_to_complete); 98 bool autoneg_wait_to_complete);
99s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
100 ixgbe_link_speed *speed,
101 bool *autoneg);
99 102
100/* PHY specific */ 103/* PHY specific */
101s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, 104s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
@@ -103,6 +106,8 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw,
103 bool *link_up); 106 bool *link_up);
104s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, 107s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
105 u16 *firmware_version); 108 u16 *firmware_version);
109s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
110 u16 *firmware_version);
106 111
107s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); 112s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw);
108s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); 113s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw);
diff --git a/drivers/net/ixgbe/ixgbe_sriov.c b/drivers/net/ixgbe/ixgbe_sriov.c
index 5428153af8f3..47b15738b009 100644
--- a/drivers/net/ixgbe/ixgbe_sriov.c
+++ b/drivers/net/ixgbe/ixgbe_sriov.c
@@ -68,7 +68,7 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter,
68 * addresses 68 * addresses
69 */ 69 */
70 for (i = 0; i < entries; i++) { 70 for (i = 0; i < entries; i++) {
71 vfinfo->vf_mc_hashes[i] = hash_list[i];; 71 vfinfo->vf_mc_hashes[i] = hash_list[i];
72 } 72 }
73 73
74 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { 74 for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) {
@@ -178,8 +178,7 @@ static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter,
178int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) 178int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask)
179{ 179{
180 unsigned char vf_mac_addr[6]; 180 unsigned char vf_mac_addr[6];
181 struct net_device *netdev = pci_get_drvdata(pdev); 181 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
182 struct ixgbe_adapter *adapter = netdev_priv(netdev);
183 unsigned int vfn = (event_mask & 0x3f); 182 unsigned int vfn = (event_mask & 0x3f);
184 183
185 bool enable = ((event_mask & 0x10000000U) != 0); 184 bool enable = ((event_mask & 0x10000000U) != 0);
@@ -216,6 +215,11 @@ static inline void ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf)
216 reg |= (reg | (1 << vf_shift)); 215 reg |= (reg | (1 << vf_shift));
217 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg); 216 IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), reg);
218 217
218 /* Enable counting of spoofed packets in the SSVPC register */
219 reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset));
220 reg |= (1 << vf_shift);
221 IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg);
222
219 ixgbe_vf_reset_event(adapter, vf); 223 ixgbe_vf_reset_event(adapter, vf);
220} 224}
221 225
@@ -228,6 +232,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
228 int entries; 232 int entries;
229 u16 *hash_list; 233 u16 *hash_list;
230 int add, vid; 234 int add, vid;
235 u8 *new_mac;
231 236
232 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf); 237 retval = ixgbe_read_mbx(hw, msgbuf, mbx_size, vf);
233 238
@@ -245,15 +250,22 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
245 250
246 if (msgbuf[0] == IXGBE_VF_RESET) { 251 if (msgbuf[0] == IXGBE_VF_RESET) {
247 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; 252 unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses;
248 u8 *addr = (u8 *)(&msgbuf[1]); 253 new_mac = (u8 *)(&msgbuf[1]);
249 e_info(probe, "VF Reset msg received from vf %d\n", vf); 254 e_info(probe, "VF Reset msg received from vf %d\n", vf);
250 adapter->vfinfo[vf].clear_to_send = false; 255 adapter->vfinfo[vf].clear_to_send = false;
251 ixgbe_vf_reset_msg(adapter, vf); 256 ixgbe_vf_reset_msg(adapter, vf);
252 adapter->vfinfo[vf].clear_to_send = true; 257 adapter->vfinfo[vf].clear_to_send = true;
253 258
259 if (is_valid_ether_addr(new_mac) &&
260 !adapter->vfinfo[vf].pf_set_mac)
261 ixgbe_set_vf_mac(adapter, vf, vf_mac);
262 else
263 ixgbe_set_vf_mac(adapter,
264 vf, adapter->vfinfo[vf].vf_mac_addresses);
265
254 /* reply to reset with ack and vf mac address */ 266 /* reply to reset with ack and vf mac address */
255 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK; 267 msgbuf[0] = IXGBE_VF_RESET | IXGBE_VT_MSGTYPE_ACK;
256 memcpy(addr, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS); 268 memcpy(new_mac, vf_mac, IXGBE_ETH_LENGTH_OF_ADDRESS);
257 /* 269 /*
258 * Piggyback the multicast filter type so VF can compute the 270 * Piggyback the multicast filter type so VF can compute the
259 * correct vectors 271 * correct vectors
@@ -272,14 +284,16 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
272 284
273 switch ((msgbuf[0] & 0xFFFF)) { 285 switch ((msgbuf[0] & 0xFFFF)) {
274 case IXGBE_VF_SET_MAC_ADDR: 286 case IXGBE_VF_SET_MAC_ADDR:
275 { 287 new_mac = ((u8 *)(&msgbuf[1]));
276 u8 *new_mac = ((u8 *)(&msgbuf[1])); 288 if (is_valid_ether_addr(new_mac) &&
277 if (is_valid_ether_addr(new_mac) && 289 !adapter->vfinfo[vf].pf_set_mac) {
278 !adapter->vfinfo[vf].pf_set_mac) 290 ixgbe_set_vf_mac(adapter, vf, new_mac);
279 ixgbe_set_vf_mac(adapter, vf, new_mac); 291 } else if (memcmp(adapter->vfinfo[vf].vf_mac_addresses,
280 else 292 new_mac, ETH_ALEN)) {
281 ixgbe_set_vf_mac(adapter, 293 e_warn(drv, "VF %d attempted to override "
282 vf, adapter->vfinfo[vf].vf_mac_addresses); 294 "administratively set MAC address\nReload "
295 "the VF driver to resume operations\n", vf);
296 retval = -1;
283 } 297 }
284 break; 298 break;
285 case IXGBE_VF_SET_MULTICAST: 299 case IXGBE_VF_SET_MULTICAST:
@@ -296,7 +310,15 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
296 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) 310 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK)
297 >> IXGBE_VT_MSGINFO_SHIFT; 311 >> IXGBE_VT_MSGINFO_SHIFT;
298 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); 312 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK);
299 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf); 313 if (adapter->vfinfo[vf].pf_vlan) {
314 e_warn(drv, "VF %d attempted to override "
315 "administratively set VLAN configuration\n"
316 "Reload the VF driver to resume operations\n",
317 vf);
318 retval = -1;
319 } else {
320 retval = ixgbe_set_vf_vlan(adapter, add, vid, vf);
321 }
300 break; 322 break;
301 default: 323 default:
302 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); 324 e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]);
@@ -395,6 +417,7 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
395{ 417{
396 int err = 0; 418 int err = 0;
397 struct ixgbe_adapter *adapter = netdev_priv(netdev); 419 struct ixgbe_adapter *adapter = netdev_priv(netdev);
420 struct ixgbe_hw *hw = &adapter->hw;
398 421
399 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) 422 if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7))
400 return -EINVAL; 423 return -EINVAL;
@@ -403,7 +426,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
403 if (err) 426 if (err)
404 goto out; 427 goto out;
405 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf); 428 ixgbe_set_vmvir(adapter, vlan | (qos << VLAN_PRIO_SHIFT), vf);
406 ixgbe_set_vmolr(&adapter->hw, vf, false); 429 ixgbe_set_vmolr(hw, vf, false);
430 hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf);
407 adapter->vfinfo[vf].pf_vlan = vlan; 431 adapter->vfinfo[vf].pf_vlan = vlan;
408 adapter->vfinfo[vf].pf_qos = qos; 432 adapter->vfinfo[vf].pf_qos = qos;
409 dev_info(&adapter->pdev->dev, 433 dev_info(&adapter->pdev->dev,
@@ -420,7 +444,8 @@ int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
420 err = ixgbe_set_vf_vlan(adapter, false, 444 err = ixgbe_set_vf_vlan(adapter, false,
421 adapter->vfinfo[vf].pf_vlan, vf); 445 adapter->vfinfo[vf].pf_vlan, vf);
422 ixgbe_set_vmvir(adapter, vlan, vf); 446 ixgbe_set_vmvir(adapter, vlan, vf);
423 ixgbe_set_vmolr(&adapter->hw, vf, true); 447 ixgbe_set_vmolr(hw, vf, true);
448 hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf);
424 adapter->vfinfo[vf].pf_vlan = 0; 449 adapter->vfinfo[vf].pf_vlan = 0;
425 adapter->vfinfo[vf].pf_qos = 0; 450 adapter->vfinfo[vf].pf_qos = 0;
426 } 451 }
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index d3cc6ce7c973..fd3358f54139 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -54,9 +54,14 @@
54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C 54#define IXGBE_DEV_ID_82599_T3_LOM 0x151C
55#define IXGBE_DEV_ID_82599_CX4 0x10F9 55#define IXGBE_DEV_ID_82599_CX4 0x10F9
56#define IXGBE_DEV_ID_82599_SFP 0x10FB 56#define IXGBE_DEV_ID_82599_SFP 0x10FB
57#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a
58#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529
59#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9
57#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 60#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
58#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 61#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
59#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 62#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
63#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C
64#define IXGBE_DEV_ID_X540T 0x1528
60 65
61/* General Registers */ 66/* General Registers */
62#define IXGBE_CTRL 0x00000 67#define IXGBE_CTRL 0x00000
@@ -225,6 +230,7 @@
225#define IXGBE_VT_CTL 0x051B0 230#define IXGBE_VT_CTL 0x051B0
226#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) 231#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4))
227#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) 232#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4))
233#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4))
228#define IXGBE_QDE 0x2F04 234#define IXGBE_QDE 0x2F04
229#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ 235#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */
230#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) 236#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4))
@@ -279,7 +285,8 @@
279#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) 285#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40))
280#define IXGBE_DTXCTL 0x07E00 286#define IXGBE_DTXCTL 0x07E00
281 287
282#define IXGBE_DMATXCTL 0x04A80 288#define IXGBE_DMATXCTL 0x04A80
289#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */
283#define IXGBE_PFDTXGSWC 0x08220 290#define IXGBE_PFDTXGSWC 0x08220
284#define IXGBE_DTXMXSZRQ 0x08100 291#define IXGBE_DTXMXSZRQ 0x08100
285#define IXGBE_DTXTCPFLGL 0x04A88 292#define IXGBE_DTXTCPFLGL 0x04A88
@@ -293,6 +300,13 @@
293#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ 300#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */
294 301
295#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ 302#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */
303
304/* Anti-spoofing defines */
305#define IXGBE_SPOOF_MACAS_MASK 0xFF
306#define IXGBE_SPOOF_VLANAS_MASK 0xFF00
307#define IXGBE_SPOOF_VLANAS_SHIFT 8
308#define IXGBE_PFVFSPOOF_REG_COUNT 8
309
296#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ 310#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */
297/* Tx DCA Control register : 128 of these (0-127) */ 311/* Tx DCA Control register : 128 of these (0-127) */
298#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) 312#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40))
@@ -994,8 +1008,10 @@
994/* PHY IDs*/ 1008/* PHY IDs*/
995#define TN1010_PHY_ID 0x00A19410 1009#define TN1010_PHY_ID 0x00A19410
996#define TNX_FW_REV 0xB 1010#define TNX_FW_REV 0xB
1011#define X540_PHY_ID 0x01540200
997#define QT2022_PHY_ID 0x0043A400 1012#define QT2022_PHY_ID 0x0043A400
998#define ATH_PHY_ID 0x03429050 1013#define ATH_PHY_ID 0x03429050
1014#define AQ_FW_REV 0x20
999 1015
1000/* PHY Types */ 1016/* PHY Types */
1001#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 1017#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0
@@ -1463,6 +1479,8 @@
1463#define IXGBE_ANLP1_PAUSE 0x0C00 1479#define IXGBE_ANLP1_PAUSE 0x0C00
1464#define IXGBE_ANLP1_SYM_PAUSE 0x0400 1480#define IXGBE_ANLP1_SYM_PAUSE 0x0400
1465#define IXGBE_ANLP1_ASM_PAUSE 0x0800 1481#define IXGBE_ANLP1_ASM_PAUSE 0x0800
1482#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000
1483
1466 1484
1467/* SW Semaphore Register bitmasks */ 1485/* SW Semaphore Register bitmasks */
1468#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ 1486#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */
@@ -1491,6 +1509,7 @@
1491#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ 1509#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */
1492#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ 1510#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */
1493#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ 1511#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */
1512#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */
1494#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ 1513#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */
1495/* EEPROM Addressing bits based on type (0-small, 1-large) */ 1514/* EEPROM Addressing bits based on type (0-small, 1-large) */
1496#define IXGBE_EEC_ADDR_SIZE 0x00000400 1515#define IXGBE_EEC_ADDR_SIZE 0x00000400
@@ -1500,12 +1519,18 @@
1500#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 1519#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6
1501#define IXGBE_EEPROM_OPCODE_BITS 8 1520#define IXGBE_EEPROM_OPCODE_BITS 8
1502 1521
1522/* Part Number String Length */
1523#define IXGBE_PBANUM_LENGTH 11
1524
1503/* Checksum and EEPROM pointers */ 1525/* Checksum and EEPROM pointers */
1526#define IXGBE_PBANUM_PTR_GUARD 0xFAFA
1504#define IXGBE_EEPROM_CHECKSUM 0x3F 1527#define IXGBE_EEPROM_CHECKSUM 0x3F
1505#define IXGBE_EEPROM_SUM 0xBABA 1528#define IXGBE_EEPROM_SUM 0xBABA
1506#define IXGBE_PCIE_ANALOG_PTR 0x03 1529#define IXGBE_PCIE_ANALOG_PTR 0x03
1507#define IXGBE_ATLAS0_CONFIG_PTR 0x04 1530#define IXGBE_ATLAS0_CONFIG_PTR 0x04
1531#define IXGBE_PHY_PTR 0x04
1508#define IXGBE_ATLAS1_CONFIG_PTR 0x05 1532#define IXGBE_ATLAS1_CONFIG_PTR 0x05
1533#define IXGBE_OPTION_ROM_PTR 0x05
1509#define IXGBE_PCIE_GENERAL_PTR 0x06 1534#define IXGBE_PCIE_GENERAL_PTR 0x06
1510#define IXGBE_PCIE_CONFIG0_PTR 0x07 1535#define IXGBE_PCIE_CONFIG0_PTR 0x07
1511#define IXGBE_PCIE_CONFIG1_PTR 0x08 1536#define IXGBE_PCIE_CONFIG1_PTR 0x08
@@ -1922,10 +1947,9 @@ enum ixgbe_fdir_pballoc_type {
1922#define IXGBE_FDIRM_VLANID 0x00000001 1947#define IXGBE_FDIRM_VLANID 0x00000001
1923#define IXGBE_FDIRM_VLANP 0x00000002 1948#define IXGBE_FDIRM_VLANP 0x00000002
1924#define IXGBE_FDIRM_POOL 0x00000004 1949#define IXGBE_FDIRM_POOL 0x00000004
1925#define IXGBE_FDIRM_L3P 0x00000008 1950#define IXGBE_FDIRM_L4P 0x00000008
1926#define IXGBE_FDIRM_L4P 0x00000010 1951#define IXGBE_FDIRM_FLEX 0x00000010
1927#define IXGBE_FDIRM_FLEX 0x00000020 1952#define IXGBE_FDIRM_DIPv6 0x00000020
1928#define IXGBE_FDIRM_DIPv6 0x00000040
1929 1953
1930#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF 1954#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF
1931#define IXGBE_FDIRFREE_FREE_SHIFT 0 1955#define IXGBE_FDIRFREE_FREE_SHIFT 0
@@ -1965,6 +1989,7 @@ enum ixgbe_fdir_pballoc_type {
1965#define IXGBE_FDIRCMD_LAST 0x00000800 1989#define IXGBE_FDIRCMD_LAST 0x00000800
1966#define IXGBE_FDIRCMD_COLLISION 0x00001000 1990#define IXGBE_FDIRCMD_COLLISION 0x00001000
1967#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 1991#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000
1992#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5
1968#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 1993#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16
1969#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 1994#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24
1970#define IXGBE_FDIR_INIT_DONE_POLL 10 1995#define IXGBE_FDIR_INIT_DONE_POLL 10
@@ -2113,57 +2138,95 @@ typedef u32 ixgbe_physical_layer;
2113#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 2138#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
2114#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 2139#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
2115 2140
2141/* Flow Control Macros */
2142#define PAUSE_RTT 8
2143#define PAUSE_MTU(MTU) ((MTU + 1024 - 1) / 1024)
2144
2145#define FC_HIGH_WATER(MTU) ((((PAUSE_RTT + PAUSE_MTU(MTU)) * 144) + 99) / 100 +\
2146 PAUSE_MTU(MTU))
2147#define FC_LOW_WATER(MTU) (2 * (2 * PAUSE_MTU(MTU) + PAUSE_RTT))
2148
2116/* Software ATR hash keys */ 2149/* Software ATR hash keys */
2117#define IXGBE_ATR_BUCKET_HASH_KEY 0xE214AD3D 2150#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2
2118#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x14364D17 2151#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614
2119
2120/* Software ATR input stream offsets and masks */
2121#define IXGBE_ATR_VLAN_OFFSET 0
2122#define IXGBE_ATR_SRC_IPV6_OFFSET 2
2123#define IXGBE_ATR_SRC_IPV4_OFFSET 14
2124#define IXGBE_ATR_DST_IPV6_OFFSET 18
2125#define IXGBE_ATR_DST_IPV4_OFFSET 30
2126#define IXGBE_ATR_SRC_PORT_OFFSET 34
2127#define IXGBE_ATR_DST_PORT_OFFSET 36
2128#define IXGBE_ATR_FLEX_BYTE_OFFSET 38
2129#define IXGBE_ATR_VM_POOL_OFFSET 40
2130#define IXGBE_ATR_L4TYPE_OFFSET 41
2131 2152
2153/* Software ATR input stream values and masks */
2154#define IXGBE_ATR_HASH_MASK 0x7fff
2132#define IXGBE_ATR_L4TYPE_MASK 0x3 2155#define IXGBE_ATR_L4TYPE_MASK 0x3
2133#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2134#define IXGBE_ATR_L4TYPE_UDP 0x1 2156#define IXGBE_ATR_L4TYPE_UDP 0x1
2135#define IXGBE_ATR_L4TYPE_TCP 0x2 2157#define IXGBE_ATR_L4TYPE_TCP 0x2
2136#define IXGBE_ATR_L4TYPE_SCTP 0x3 2158#define IXGBE_ATR_L4TYPE_SCTP 0x3
2137#define IXGBE_ATR_HASH_MASK 0x7fff 2159#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4
2160enum ixgbe_atr_flow_type {
2161 IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0,
2162 IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1,
2163 IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2,
2164 IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3,
2165 IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4,
2166 IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5,
2167 IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6,
2168 IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7,
2169};
2138 2170
2139/* Flow Director ATR input struct. */ 2171/* Flow Director ATR input struct. */
2140struct ixgbe_atr_input { 2172union ixgbe_atr_input {
2141 /* Byte layout in order, all values with MSB first: 2173 /*
2174 * Byte layout in order, all values with MSB first:
2142 * 2175 *
2176 * vm_pool - 1 byte
2177 * flow_type - 1 byte
2143 * vlan_id - 2 bytes 2178 * vlan_id - 2 bytes
2144 * src_ip - 16 bytes 2179 * src_ip - 16 bytes
2145 * dst_ip - 16 bytes 2180 * dst_ip - 16 bytes
2146 * src_port - 2 bytes 2181 * src_port - 2 bytes
2147 * dst_port - 2 bytes 2182 * dst_port - 2 bytes
2148 * flex_bytes - 2 bytes 2183 * flex_bytes - 2 bytes
2149 * vm_pool - 1 byte 2184 * rsvd0 - 2 bytes - space reserved must be 0.
2150 * l4type - 1 byte
2151 */ 2185 */
2152 u8 byte_stream[42]; 2186 struct {
2187 u8 vm_pool;
2188 u8 flow_type;
2189 __be16 vlan_id;
2190 __be32 dst_ip[4];
2191 __be32 src_ip[4];
2192 __be16 src_port;
2193 __be16 dst_port;
2194 __be16 flex_bytes;
2195 __be16 rsvd0;
2196 } formatted;
2197 __be32 dword_stream[11];
2198};
2199
2200/* Flow Director compressed ATR hash input struct */
2201union ixgbe_atr_hash_dword {
2202 struct {
2203 u8 vm_pool;
2204 u8 flow_type;
2205 __be16 vlan_id;
2206 } formatted;
2207 __be32 ip;
2208 struct {
2209 __be16 src;
2210 __be16 dst;
2211 } port;
2212 __be16 flex_bytes;
2213 __be32 dword;
2153}; 2214};
2154 2215
2155struct ixgbe_atr_input_masks { 2216struct ixgbe_atr_input_masks {
2156 u32 src_ip_mask; 2217 __be16 rsvd0;
2157 u32 dst_ip_mask; 2218 __be16 vlan_id_mask;
2158 u16 src_port_mask; 2219 __be32 dst_ip_mask[4];
2159 u16 dst_port_mask; 2220 __be32 src_ip_mask[4];
2160 u16 vlan_id_mask; 2221 __be16 src_port_mask;
2161 u16 data_mask; 2222 __be16 dst_port_mask;
2223 __be16 flex_mask;
2162}; 2224};
2163 2225
2164enum ixgbe_eeprom_type { 2226enum ixgbe_eeprom_type {
2165 ixgbe_eeprom_uninitialized = 0, 2227 ixgbe_eeprom_uninitialized = 0,
2166 ixgbe_eeprom_spi, 2228 ixgbe_eeprom_spi,
2229 ixgbe_flash,
2167 ixgbe_eeprom_none /* No NVM support */ 2230 ixgbe_eeprom_none /* No NVM support */
2168}; 2231};
2169 2232
@@ -2171,12 +2234,14 @@ enum ixgbe_mac_type {
2171 ixgbe_mac_unknown = 0, 2234 ixgbe_mac_unknown = 0,
2172 ixgbe_mac_82598EB, 2235 ixgbe_mac_82598EB,
2173 ixgbe_mac_82599EB, 2236 ixgbe_mac_82599EB,
2237 ixgbe_mac_X540,
2174 ixgbe_num_macs 2238 ixgbe_num_macs
2175}; 2239};
2176 2240
2177enum ixgbe_phy_type { 2241enum ixgbe_phy_type {
2178 ixgbe_phy_unknown = 0, 2242 ixgbe_phy_unknown = 0,
2179 ixgbe_phy_tn, 2243 ixgbe_phy_tn,
2244 ixgbe_phy_aq,
2180 ixgbe_phy_cu_unknown, 2245 ixgbe_phy_cu_unknown,
2181 ixgbe_phy_qt, 2246 ixgbe_phy_qt,
2182 ixgbe_phy_xaui, 2247 ixgbe_phy_xaui,
@@ -2405,6 +2470,7 @@ struct ixgbe_eeprom_operations {
2405 s32 (*write)(struct ixgbe_hw *, u16, u16); 2470 s32 (*write)(struct ixgbe_hw *, u16, u16);
2406 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); 2471 s32 (*validate_checksum)(struct ixgbe_hw *, u16 *);
2407 s32 (*update_checksum)(struct ixgbe_hw *); 2472 s32 (*update_checksum)(struct ixgbe_hw *);
2473 u16 (*calc_checksum)(struct ixgbe_hw *);
2408}; 2474};
2409 2475
2410struct ixgbe_mac_operations { 2476struct ixgbe_mac_operations {
@@ -2454,6 +2520,8 @@ struct ixgbe_mac_operations {
2454 s32 (*clear_vfta)(struct ixgbe_hw *); 2520 s32 (*clear_vfta)(struct ixgbe_hw *);
2455 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); 2521 s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool);
2456 s32 (*init_uta_tables)(struct ixgbe_hw *); 2522 s32 (*init_uta_tables)(struct ixgbe_hw *);
2523 void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int);
2524 void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int);
2457 2525
2458 /* Flow Control */ 2526 /* Flow Control */
2459 s32 (*fc_enable)(struct ixgbe_hw *, s32); 2527 s32 (*fc_enable)(struct ixgbe_hw *, s32);
@@ -2574,6 +2642,7 @@ struct ixgbe_hw {
2574 u16 subsystem_vendor_id; 2642 u16 subsystem_vendor_id;
2575 u8 revision_id; 2643 u8 revision_id;
2576 bool adapter_stopped; 2644 bool adapter_stopped;
2645 bool force_full_reset;
2577}; 2646};
2578 2647
2579struct ixgbe_info { 2648struct ixgbe_info {
@@ -2614,6 +2683,9 @@ struct ixgbe_info {
2614#define IXGBE_ERR_NO_SPACE -25 2683#define IXGBE_ERR_NO_SPACE -25
2615#define IXGBE_ERR_OVERTEMP -26 2684#define IXGBE_ERR_OVERTEMP -26
2616#define IXGBE_ERR_RAR_INDEX -27 2685#define IXGBE_ERR_RAR_INDEX -27
2686#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30
2687#define IXGBE_ERR_PBA_SECTION -31
2688#define IXGBE_ERR_INVALID_ARGUMENT -32
2617#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF 2689#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF
2618 2690
2619#endif /* _IXGBE_TYPE_H_ */ 2691#endif /* _IXGBE_TYPE_H_ */
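
The patch adds set_mac_anti_spoofing/set_vlan_anti_spoofing MAC ops and the IXGBE_PFVFSPOOF register plus IXGBE_SPOOF_* masks, but the bodies of those helpers are not part of this excerpt. A minimal sketch of what the VLAN variant plausibly does, inferred only from the register layout above (eight VFs per PFVFSPOOF register, VLAN anti-spoof enables in the byte selected by IXGBE_SPOOF_VLANAS_SHIFT); the function shown here is illustrative, not the committed implementation:

static void set_vlan_anti_spoofing_sketch(struct ixgbe_hw *hw,
					  bool enable, int vf)
{
	/* Eight VFs share one PFVFSPOOF register; pick the register and
	 * the VLAN anti-spoof bit for this VF. */
	int reg_index = vf / 8;		/* 0 .. IXGBE_PFVFSPOOF_REG_COUNT - 1 */
	int bit = (vf % 8) + IXGBE_SPOOF_VLANAS_SHIFT;
	u32 pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(reg_index));

	if (enable)
		pfvfspoof |= (1 << bit);
	else
		pfvfspoof &= ~(1 << bit);
	IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(reg_index), pfvfspoof);
}

This matches how ixgbe_ndo_set_vf_vlan above enables the check when an administrator pins a VLAN on a VF and disables it when the VLAN is cleared.
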
diff --git a/drivers/net/ixgbe/ixgbe_x540.c b/drivers/net/ixgbe/ixgbe_x540.c
new file mode 100644
index 000000000000..3a8923993ce3
--- /dev/null
+++ b/drivers/net/ixgbe/ixgbe_x540.c
@@ -0,0 +1,724 @@
1/*******************************************************************************
2
3 Intel 10 Gigabit PCI Express Linux driver
4 Copyright(c) 1999 - 2010 Intel Corporation.
5
6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License,
8 version 2, as published by the Free Software Foundation.
9
10 This program is distributed in the hope it will be useful, but WITHOUT
11 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 more details.
14
15 You should have received a copy of the GNU General Public License along with
16 this program; if not, write to the Free Software Foundation, Inc.,
17 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18
19 The full GNU General Public License is included in this distribution in
20 the file called "COPYING".
21
22 Contact Information:
23 e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
24 Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
25
26*******************************************************************************/
27
28#include <linux/pci.h>
29#include <linux/delay.h>
30#include <linux/sched.h>
31
32#include "ixgbe.h"
33#include "ixgbe_phy.h"
34//#include "ixgbe_mbx.h"
35
36#define IXGBE_X540_MAX_TX_QUEUES 128
37#define IXGBE_X540_MAX_RX_QUEUES 128
38#define IXGBE_X540_RAR_ENTRIES 128
39#define IXGBE_X540_MC_TBL_SIZE 128
40#define IXGBE_X540_VFT_TBL_SIZE 128
41
42static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw);
43static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw);
44static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
45static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask);
46static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw);
47static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw);
48
49static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw)
50{
51 return ixgbe_media_type_copper;
52}
53
54static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw)
55{
56 struct ixgbe_mac_info *mac = &hw->mac;
57
58 /* Call PHY identify routine to get the phy type */
59 ixgbe_identify_phy_generic(hw);
60
61 mac->mcft_size = IXGBE_X540_MC_TBL_SIZE;
62 mac->vft_size = IXGBE_X540_VFT_TBL_SIZE;
63 mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES;
64 mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES;
65 mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES;
66 mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw);
67
68 return 0;
69}
70
71/**
 72 * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilities
73 * @hw: pointer to hardware structure
74 * @speed: new link speed
75 * @autoneg: true if autonegotiation enabled
76 * @autoneg_wait_to_complete: true when waiting for completion is needed
77 **/
78static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw,
79 ixgbe_link_speed speed, bool autoneg,
80 bool autoneg_wait_to_complete)
81{
82 return hw->phy.ops.setup_link_speed(hw, speed, autoneg,
83 autoneg_wait_to_complete);
84}
85
86/**
87 * ixgbe_reset_hw_X540 - Perform hardware reset
88 * @hw: pointer to hardware structure
89 *
90 * Resets the hardware by resetting the transmit and receive units, masks
 91 * and clears all interrupts, performs a PHY reset, and performs a link (MAC)
92 * reset.
93 **/
94static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
95{
96 ixgbe_link_speed link_speed;
97 s32 status = 0;
98 u32 ctrl;
99 u32 ctrl_ext;
100 u32 reset_bit;
101 u32 i;
102 u32 autoc;
103 u32 autoc2;
104 bool link_up = false;
105
106 /* Call adapter stop to disable tx/rx and clear interrupts */
107 hw->mac.ops.stop_adapter(hw);
108
109 /*
 110 * Prevent the PCI-E bus from hanging by disabling PCI-E master
111 * access and verify no pending requests before reset
112 */
113 status = ixgbe_disable_pcie_master(hw);
114 if (status != 0) {
115 status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
116 hw_dbg(hw, "PCI-E Master disable polling has failed.\n");
117 }
118
119 /*
120 * Issue global reset to the MAC. Needs to be SW reset if link is up.
121 * If link reset is used when link is up, it might reset the PHY when
122 * mng is using it. If link is down or the flag to force full link
123 * reset is set, then perform link reset.
124 */
125 if (hw->force_full_reset) {
126 reset_bit = IXGBE_CTRL_LNK_RST;
127 } else {
128 hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
129 if (!link_up)
130 reset_bit = IXGBE_CTRL_LNK_RST;
131 else
132 reset_bit = IXGBE_CTRL_RST;
133 }
134
135 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
136 IXGBE_WRITE_REG(hw, IXGBE_CTRL, (ctrl | IXGBE_CTRL_RST));
137 IXGBE_WRITE_FLUSH(hw);
138
139 /* Poll for reset bit to self-clear indicating reset is complete */
140 for (i = 0; i < 10; i++) {
141 udelay(1);
142 ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL);
143 if (!(ctrl & IXGBE_CTRL_RST))
144 break;
145 }
146 if (ctrl & IXGBE_CTRL_RST) {
147 status = IXGBE_ERR_RESET_FAILED;
148 hw_dbg(hw, "Reset polling failed to complete.\n");
149 }
150
151 /* Clear PF Reset Done bit so PF/VF Mail Ops can work */
152 ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
153 ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD;
154 IXGBE_WRITE_REG(hw, IXGBE_CTRL_EXT, ctrl_ext);
155
156 msleep(50);
157
158 /* Set the Rx packet buffer size. */
159 IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(0), 384 << IXGBE_RXPBSIZE_SHIFT);
160
161 /* Store the permanent mac address */
162 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
163
164 /*
165 * Store the original AUTOC/AUTOC2 values if they have not been
166 * stored off yet. Otherwise restore the stored original
167 * values since the reset operation sets back to defaults.
168 */
169 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
170 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
171 if (hw->mac.orig_link_settings_stored == false) {
172 hw->mac.orig_autoc = autoc;
173 hw->mac.orig_autoc2 = autoc2;
174 hw->mac.orig_link_settings_stored = true;
175 } else {
176 if (autoc != hw->mac.orig_autoc)
177 IXGBE_WRITE_REG(hw, IXGBE_AUTOC, (hw->mac.orig_autoc |
178 IXGBE_AUTOC_AN_RESTART));
179
180 if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
181 (hw->mac.orig_autoc2 & IXGBE_AUTOC2_UPPER_MASK)) {
182 autoc2 &= ~IXGBE_AUTOC2_UPPER_MASK;
183 autoc2 |= (hw->mac.orig_autoc2 &
184 IXGBE_AUTOC2_UPPER_MASK);
185 IXGBE_WRITE_REG(hw, IXGBE_AUTOC2, autoc2);
186 }
187 }
188
189 /*
190 * Store MAC address from RAR0, clear receive address registers, and
191 * clear the multicast table. Also reset num_rar_entries to 128,
192 * since we modify this value when programming the SAN MAC address.
193 */
194 hw->mac.num_rar_entries = 128;
195 hw->mac.ops.init_rx_addrs(hw);
196
197 /* Store the permanent mac address */
198 hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr);
199
200 /* Store the permanent SAN mac address */
201 hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr);
202
203 /* Add the SAN MAC address to the RAR only if it's a valid address */
204 if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) {
205 hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1,
206 hw->mac.san_addr, 0, IXGBE_RAH_AV);
207
208 /* Reserve the last RAR for the SAN MAC address */
209 hw->mac.num_rar_entries--;
210 }
211
212 /* Store the alternative WWNN/WWPN prefix */
213 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
214 &hw->mac.wwpn_prefix);
215
216 return status;
217}
218
219/**
220 * ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
221 * @hw: pointer to hardware structure
222 *
223 * Determines physical layer capabilities of the current configuration.
224 **/
225static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
226{
227 u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
228 u16 ext_ability = 0;
229
230 hw->phy.ops.identify(hw);
231
232 hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
233 &ext_ability);
234 if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
235 physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
236 if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
237 physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
238 if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
239 physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
240
241 return physical_layer;
242}
243
244/**
245 * ixgbe_init_eeprom_params_X540 - Initialize EEPROM params
246 * @hw: pointer to hardware structure
247 **/
248static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
249{
250 struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
251 u32 eec;
252 u16 eeprom_size;
253
254 if (eeprom->type == ixgbe_eeprom_uninitialized) {
255 eeprom->semaphore_delay = 10;
256 eeprom->type = ixgbe_flash;
257
258 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
259 eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >>
260 IXGBE_EEC_SIZE_SHIFT);
261 eeprom->word_size = 1 << (eeprom_size +
262 IXGBE_EEPROM_WORD_SIZE_SHIFT);
263
264 hw_dbg(hw, "Eeprom params: type = %d, size = %d\n",
265 eeprom->type, eeprom->word_size);
266 }
267
268 return 0;
269}
270
271/**
272 * ixgbe_read_eerd_X540 - Read EEPROM word using EERD
273 * @hw: pointer to hardware structure
274 * @offset: offset of word in the EEPROM to read
 275 * @data: word read from the EEPROM
276 **/
277static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
278{
279 s32 status;
280
281 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0)
282 status = ixgbe_read_eerd_generic(hw, offset, data);
283 else
284 status = IXGBE_ERR_SWFW_SYNC;
285
286 ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
287 return status;
288}
289
290/**
291 * ixgbe_write_eewr_X540 - Write EEPROM word using EEWR
292 * @hw: pointer to hardware structure
293 * @offset: offset of word in the EEPROM to write
 294 * @data: word to write to the EEPROM
295 *
296 * Write a 16 bit word to the EEPROM using the EEWR register.
297 **/
298static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
299{
300 u32 eewr;
301 s32 status;
302
303 hw->eeprom.ops.init_params(hw);
304
305 if (offset >= hw->eeprom.word_size) {
306 status = IXGBE_ERR_EEPROM;
307 goto out;
308 }
309
310 eewr = (offset << IXGBE_EEPROM_RW_ADDR_SHIFT) |
311 (data << IXGBE_EEPROM_RW_REG_DATA) |
312 IXGBE_EEPROM_RW_REG_START;
313
314 if (ixgbe_acquire_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM) == 0) {
315 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
316 if (status != 0) {
317 hw_dbg(hw, "Eeprom write EEWR timed out\n");
318 goto out;
319 }
320
321 IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
322
323 status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
324 if (status != 0) {
325 hw_dbg(hw, "Eeprom write EEWR timed out\n");
326 goto out;
327 }
328 } else {
329 status = IXGBE_ERR_SWFW_SYNC;
330 }
331
332out:
333 ixgbe_release_swfw_sync_X540(hw, IXGBE_GSSR_EEP_SM);
334 return status;
335}
336
337/**
338 * ixgbe_calc_eeprom_checksum_X540 - Calculates and returns the checksum
339 * @hw: pointer to hardware structure
340 **/
341static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw)
342{
343 u16 i;
344 u16 j;
345 u16 checksum = 0;
346 u16 length = 0;
347 u16 pointer = 0;
348 u16 word = 0;
349
350 /* Include 0x0-0x3F in the checksum */
351 for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) {
352 if (hw->eeprom.ops.read(hw, i, &word) != 0) {
353 hw_dbg(hw, "EEPROM read failed\n");
354 break;
355 }
356 checksum += word;
357 }
358
359 /*
360 * Include all data from pointers 0x3, 0x6-0xE. This excludes the
361 * FW, PHY module, and PCIe Expansion/Option ROM pointers.
362 */
363 for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) {
364 if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR)
365 continue;
366
367 if (hw->eeprom.ops.read(hw, i, &pointer) != 0) {
368 hw_dbg(hw, "EEPROM read failed\n");
369 break;
370 }
371
372 /* Skip pointer section if the pointer is invalid. */
373 if (pointer == 0xFFFF || pointer == 0 ||
374 pointer >= hw->eeprom.word_size)
375 continue;
376
377 if (hw->eeprom.ops.read(hw, pointer, &length) != 0) {
378 hw_dbg(hw, "EEPROM read failed\n");
379 break;
380 }
381
382 /* Skip pointer section if length is invalid. */
383 if (length == 0xFFFF || length == 0 ||
384 (pointer + length) >= hw->eeprom.word_size)
385 continue;
386
387 for (j = pointer+1; j <= pointer+length; j++) {
388 if (hw->eeprom.ops.read(hw, j, &word) != 0) {
389 hw_dbg(hw, "EEPROM read failed\n");
390 break;
391 }
392 checksum += word;
393 }
394 }
395
396 checksum = (u16)IXGBE_EEPROM_SUM - checksum;
397
398 return checksum;
399}
400
401/**
402 * ixgbe_update_eeprom_checksum_X540 - Updates the EEPROM checksum and flash
403 * @hw: pointer to hardware structure
404 *
405 * After writing EEPROM to shadow RAM using EEWR register, software calculates
406 * checksum and updates the EEPROM and instructs the hardware to update
407 * the flash.
408 **/
409static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
410{
411 s32 status;
412
413 status = ixgbe_update_eeprom_checksum_generic(hw);
414
415 if (status)
416 status = ixgbe_update_flash_X540(hw);
417
418 return status;
419}
420
421/**
422 * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device
423 * @hw: pointer to hardware structure
424 *
425 * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy
426 * EEPROM from shadow RAM to the flash device.
427 **/
428static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
429{
430 u32 flup;
431 s32 status = IXGBE_ERR_EEPROM;
432
433 status = ixgbe_poll_flash_update_done_X540(hw);
434 if (status == IXGBE_ERR_EEPROM) {
435 hw_dbg(hw, "Flash update time out\n");
436 goto out;
437 }
438
439 flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
440 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
441
442 status = ixgbe_poll_flash_update_done_X540(hw);
443 if (status)
444 hw_dbg(hw, "Flash update complete\n");
445 else
446 hw_dbg(hw, "Flash update time out\n");
447
448 if (hw->revision_id == 0) {
449 flup = IXGBE_READ_REG(hw, IXGBE_EEC);
450
451 if (flup & IXGBE_EEC_SEC1VAL) {
452 flup |= IXGBE_EEC_FLUP;
453 IXGBE_WRITE_REG(hw, IXGBE_EEC, flup);
454 }
455
456 status = ixgbe_poll_flash_update_done_X540(hw);
457 if (status)
458 hw_dbg(hw, "Flash update complete\n");
459 else
460 hw_dbg(hw, "Flash update time out\n");
461
462 }
463out:
464 return status;
465}
466
467/**
468 * ixgbe_poll_flash_update_done_X540 - Poll flash update status
469 * @hw: pointer to hardware structure
470 *
471 * Polls the FLUDONE (bit 26) of the EEC Register to determine when the
472 * flash update is done.
473 **/
474static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
475{
476 u32 i;
477 u32 reg;
478 s32 status = IXGBE_ERR_EEPROM;
479
480 for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
481 reg = IXGBE_READ_REG(hw, IXGBE_EEC);
482 if (reg & IXGBE_EEC_FLUDONE) {
483 status = 0;
484 break;
485 }
486 udelay(5);
487 }
488 return status;
489}
490
491/**
492 * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore
493 * @hw: pointer to hardware structure
494 * @mask: Mask to specify which semaphore to acquire
495 *
 496 * Acquires the SWFW semaphore through the SW_FW_SYNC register for
497 * the specified function (CSR, PHY0, PHY1, NVM, Flash)
498 **/
499static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
500{
501 u32 swfw_sync;
502 u32 swmask = mask;
503 u32 fwmask = mask << 5;
504 u32 hwmask = 0;
505 u32 timeout = 200;
506 u32 i;
507
508 if (swmask == IXGBE_GSSR_EEP_SM)
509 hwmask = IXGBE_GSSR_FLASH_SM;
510
511 for (i = 0; i < timeout; i++) {
512 /*
513 * SW NVM semaphore bit is used for access to all
514 * SW_FW_SYNC bits (not just NVM)
515 */
516 if (ixgbe_get_swfw_sync_semaphore(hw))
517 return IXGBE_ERR_SWFW_SYNC;
518
519 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
520 if (!(swfw_sync & (fwmask | swmask | hwmask))) {
521 swfw_sync |= swmask;
522 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
523 ixgbe_release_swfw_sync_semaphore(hw);
524 break;
525 } else {
526 /*
527 * Firmware currently using resource (fwmask),
528 * hardware currently using resource (hwmask),
529 * or other software thread currently using
530 * resource (swmask)
531 */
532 ixgbe_release_swfw_sync_semaphore(hw);
533 msleep(5);
534 }
535 }
536
537 /*
538 * If the resource is not released by the FW/HW the SW can assume that
 539 * the FW/HW is malfunctioning. In that case the SW should set the
540 * SW bit(s) of the requested resource(s) while ignoring the
541 * corresponding FW/HW bits in the SW_FW_SYNC register.
542 */
543 if (i >= timeout) {
544 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
545 if (swfw_sync & (fwmask | hwmask)) {
546 if (ixgbe_get_swfw_sync_semaphore(hw))
547 return IXGBE_ERR_SWFW_SYNC;
548
549 swfw_sync |= swmask;
550 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
551 ixgbe_release_swfw_sync_semaphore(hw);
552 }
553 }
554
555 msleep(5);
556 return 0;
557}
558
559/**
560 * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore
561 * @hw: pointer to hardware structure
562 * @mask: Mask to specify which semaphore to release
563 *
 564 * Releases the SWFW semaphore through the SW_FW_SYNC register
 565 * for the specified function (CSR, PHY0, PHY1, NVM, Flash)
566 **/
567static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
568{
569 u32 swfw_sync;
570 u32 swmask = mask;
571
572 ixgbe_get_swfw_sync_semaphore(hw);
573
574 swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
575 swfw_sync &= ~swmask;
576 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync);
577
578 ixgbe_release_swfw_sync_semaphore(hw);
579 msleep(5);
580}
581
582/**
 583 * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
584 * @hw: pointer to hardware structure
585 *
586 * Sets the hardware semaphores so SW/FW can gain control of shared resources
587 **/
588static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
589{
590 s32 status = IXGBE_ERR_EEPROM;
591 u32 timeout = 2000;
592 u32 i;
593 u32 swsm;
594
595 /* Get SMBI software semaphore between device drivers first */
596 for (i = 0; i < timeout; i++) {
597 /*
598 * If the SMBI bit is 0 when we read it, then the bit will be
599 * set and we have the semaphore
600 */
601 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
602 if (!(swsm & IXGBE_SWSM_SMBI)) {
603 status = 0;
604 break;
605 }
606 udelay(50);
607 }
608
609 /* Now get the semaphore between SW/FW through the REGSMP bit */
610 if (status) {
611 for (i = 0; i < timeout; i++) {
612 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
613 if (!(swsm & IXGBE_SWFW_REGSMP))
614 break;
615
616 udelay(50);
617 }
618 } else {
619 hw_dbg(hw, "Software semaphore SMBI between device drivers "
620 "not granted.\n");
621 }
622
623 return status;
624}
625
626/**
 627 * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore
628 * @hw: pointer to hardware structure
629 *
630 * This function clears hardware semaphore bits.
631 **/
632static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw)
633{
634 u32 swsm;
635
636 /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */
637
638 swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
639 swsm &= ~IXGBE_SWSM_SMBI;
640 IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
641
642 swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
643 swsm &= ~IXGBE_SWFW_REGSMP;
644 IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm);
645
646 IXGBE_WRITE_FLUSH(hw);
647}
648
649static struct ixgbe_mac_operations mac_ops_X540 = {
650 .init_hw = &ixgbe_init_hw_generic,
651 .reset_hw = &ixgbe_reset_hw_X540,
652 .start_hw = &ixgbe_start_hw_generic,
653 .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic,
654 .get_media_type = &ixgbe_get_media_type_X540,
655 .get_supported_physical_layer =
656 &ixgbe_get_supported_physical_layer_X540,
657 .enable_rx_dma = &ixgbe_enable_rx_dma_generic,
658 .get_mac_addr = &ixgbe_get_mac_addr_generic,
659 .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic,
660 .get_device_caps = NULL,
661 .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic,
662 .stop_adapter = &ixgbe_stop_adapter_generic,
663 .get_bus_info = &ixgbe_get_bus_info_generic,
664 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
665 .read_analog_reg8 = NULL,
666 .write_analog_reg8 = NULL,
667 .setup_link = &ixgbe_setup_mac_link_X540,
668 .check_link = &ixgbe_check_mac_link_generic,
669 .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic,
670 .led_on = &ixgbe_led_on_generic,
671 .led_off = &ixgbe_led_off_generic,
672 .blink_led_start = &ixgbe_blink_led_start_generic,
673 .blink_led_stop = &ixgbe_blink_led_stop_generic,
674 .set_rar = &ixgbe_set_rar_generic,
675 .clear_rar = &ixgbe_clear_rar_generic,
676 .set_vmdq = &ixgbe_set_vmdq_generic,
677 .clear_vmdq = &ixgbe_clear_vmdq_generic,
678 .init_rx_addrs = &ixgbe_init_rx_addrs_generic,
679 .update_uc_addr_list = &ixgbe_update_uc_addr_list_generic,
680 .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic,
681 .enable_mc = &ixgbe_enable_mc_generic,
682 .disable_mc = &ixgbe_disable_mc_generic,
683 .clear_vfta = &ixgbe_clear_vfta_generic,
684 .set_vfta = &ixgbe_set_vfta_generic,
685 .fc_enable = &ixgbe_fc_enable_generic,
686 .init_uta_tables = &ixgbe_init_uta_tables_generic,
687 .setup_sfp = NULL,
688 .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing,
689 .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing,
690};
691
692static struct ixgbe_eeprom_operations eeprom_ops_X540 = {
693 .init_params = &ixgbe_init_eeprom_params_X540,
694 .read = &ixgbe_read_eerd_X540,
695 .write = &ixgbe_write_eewr_X540,
696 .calc_checksum = &ixgbe_calc_eeprom_checksum_X540,
697 .validate_checksum = &ixgbe_validate_eeprom_checksum_generic,
698 .update_checksum = &ixgbe_update_eeprom_checksum_X540,
699};
700
701static struct ixgbe_phy_operations phy_ops_X540 = {
702 .identify = &ixgbe_identify_phy_generic,
703 .identify_sfp = &ixgbe_identify_sfp_module_generic,
704 .init = NULL,
705 .reset = &ixgbe_reset_phy_generic,
706 .read_reg = &ixgbe_read_phy_reg_generic,
707 .write_reg = &ixgbe_write_phy_reg_generic,
708 .setup_link = &ixgbe_setup_phy_link_generic,
709 .setup_link_speed = &ixgbe_setup_phy_link_speed_generic,
710 .read_i2c_byte = &ixgbe_read_i2c_byte_generic,
711 .write_i2c_byte = &ixgbe_write_i2c_byte_generic,
712 .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic,
713 .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic,
714 .check_overtemp = &ixgbe_tn_check_overtemp,
715};
716
717struct ixgbe_info ixgbe_X540_info = {
718 .mac = ixgbe_mac_X540,
719 .get_invariants = &ixgbe_get_invariants_X540,
720 .mac_ops = &mac_ops_X540,
721 .eeprom_ops = &eeprom_ops_X540,
722 .phy_ops = &phy_ops_X540,
723 .mbx_ops = &mbx_ops_generic,
724};
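
The eeprom_ops table above splits checksum handling into a calc_checksum hook, while the generic validate/update callers sit outside this excerpt. A short sketch, under that assumption, of how such a hook would typically be consumed; only the ops layout and the IXGBE_EEPROM_CHECKSUM / IXGBE_ERR_EEPROM symbols come from the patch itself, and the wrapper below is hypothetical:

/* Illustrative only: compare the computed checksum against the word
 * stored at IXGBE_EEPROM_CHECKSUM (0x3F). */
static s32 example_validate_eeprom_checksum(struct ixgbe_hw *hw)
{
	u16 calculated = hw->eeprom.ops.calc_checksum(hw);
	u16 stored;

	if (hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &stored) != 0)
		return IXGBE_ERR_EEPROM;

	/* calc_checksum() returns IXGBE_EEPROM_SUM minus the sum of the
	 * covered words, so a consistent image stores exactly this value. */
	return (stored == calculated) ? 0 : IXGBE_ERR_EEPROM;
}
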
diff --git a/drivers/net/ixgbevf/Makefile b/drivers/net/ixgbevf/Makefile
index dd4e0d27e8cc..1f35d229e71a 100644
--- a/drivers/net/ixgbevf/Makefile
+++ b/drivers/net/ixgbevf/Makefile
@@ -1,7 +1,7 @@
1################################################################################ 1################################################################################
2# 2#
3# Intel 82599 Virtual Function driver 3# Intel 82599 Virtual Function driver
4# Copyright(c) 1999 - 2009 Intel Corporation. 4# Copyright(c) 1999 - 2010 Intel Corporation.
5# 5#
6# This program is free software; you can redistribute it and/or modify it 6# This program is free software; you can redistribute it and/or modify it
7# under the terms and conditions of the GNU General Public License, 7# under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/defines.h b/drivers/net/ixgbevf/defines.h
index ca2c81f49a05..de643eb2ada6 100644
--- a/drivers/net/ixgbevf/defines.h
+++ b/drivers/net/ixgbevf/defines.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -30,6 +30,7 @@
30 30
31/* Device IDs */ 31/* Device IDs */
32#define IXGBE_DEV_ID_82599_VF 0x10ED 32#define IXGBE_DEV_ID_82599_VF 0x10ED
33#define IXGBE_DEV_ID_X540_VF 0x1515
33 34
34#define IXGBE_VF_IRQ_CLEAR_MASK 7 35#define IXGBE_VF_IRQ_CLEAR_MASK 7
35#define IXGBE_VF_MAX_TX_QUEUES 1 36#define IXGBE_VF_MAX_TX_QUEUES 1
diff --git a/drivers/net/ixgbevf/ethtool.c b/drivers/net/ixgbevf/ethtool.c
index 4cc817acfb62..fa29b3c8c464 100644
--- a/drivers/net/ixgbevf/ethtool.c
+++ b/drivers/net/ixgbevf/ethtool.c
@@ -544,7 +544,7 @@ struct ixgbevf_reg_test {
544#define TABLE64_TEST_HI 6 544#define TABLE64_TEST_HI 6
545 545
546/* default VF register test */ 546/* default VF register test */
547static struct ixgbevf_reg_test reg_test_vf[] = { 547static const struct ixgbevf_reg_test reg_test_vf[] = {
548 { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 }, 548 { IXGBE_VFRDBAL(0), 2, PATTERN_TEST, 0xFFFFFF80, 0xFFFFFF80 },
549 { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, 549 { IXGBE_VFRDBAH(0), 2, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF },
550 { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF }, 550 { IXGBE_VFRDLEN(0), 2, PATTERN_TEST, 0x000FFF80, 0x000FFFFF },
@@ -557,19 +557,23 @@ static struct ixgbevf_reg_test reg_test_vf[] = {
557 { 0, 0, 0, 0 } 557 { 0, 0, 0, 0 }
558}; 558};
559 559
560static const u32 register_test_patterns[] = {
561 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF
562};
563
560#define REG_PATTERN_TEST(R, M, W) \ 564#define REG_PATTERN_TEST(R, M, W) \
561{ \ 565{ \
562 u32 pat, val, before; \ 566 u32 pat, val, before; \
563 const u32 _test[] = {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; \ 567 for (pat = 0; pat < ARRAY_SIZE(register_test_patterns); pat++) { \
564 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { \
565 before = readl(adapter->hw.hw_addr + R); \ 568 before = readl(adapter->hw.hw_addr + R); \
566 writel((_test[pat] & W), (adapter->hw.hw_addr + R)); \ 569 writel((register_test_patterns[pat] & W), \
570 (adapter->hw.hw_addr + R)); \
567 val = readl(adapter->hw.hw_addr + R); \ 571 val = readl(adapter->hw.hw_addr + R); \
568 if (val != (_test[pat] & W & M)) { \ 572 if (val != (register_test_patterns[pat] & W & M)) { \
569 hw_dbg(&adapter->hw, \ 573 hw_dbg(&adapter->hw, \
570 "pattern test reg %04X failed: got " \ 574 "pattern test reg %04X failed: got " \
571 "0x%08X expected 0x%08X\n", \ 575 "0x%08X expected 0x%08X\n", \
572 R, val, (_test[pat] & W & M)); \ 576 R, val, (register_test_patterns[pat] & W & M)); \
573 *data = R; \ 577 *data = R; \
574 writel(before, adapter->hw.hw_addr + R); \ 578 writel(before, adapter->hw.hw_addr + R); \
575 return 1; \ 579 return 1; \
@@ -596,7 +600,7 @@ static struct ixgbevf_reg_test reg_test_vf[] = {
596 600
597static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data) 601static int ixgbevf_reg_test(struct ixgbevf_adapter *adapter, u64 *data)
598{ 602{
599 struct ixgbevf_reg_test *test; 603 const struct ixgbevf_reg_test *test;
600 u32 i; 604 u32 i;
601 605
602 test = reg_test_vf; 606 test = reg_test_vf;
diff --git a/drivers/net/ixgbevf/ixgbevf.h b/drivers/net/ixgbevf/ixgbevf.h
index da4033c6efa2..a63efcb2cf1b 100644
--- a/drivers/net/ixgbevf/ixgbevf.h
+++ b/drivers/net/ixgbevf/ixgbevf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -275,9 +275,11 @@ enum ixbgevf_state_t {
275 275
276enum ixgbevf_boards { 276enum ixgbevf_boards {
277 board_82599_vf, 277 board_82599_vf,
278 board_X540_vf,
278}; 279};
279 280
280extern struct ixgbevf_info ixgbevf_vf_info; 281extern struct ixgbevf_info ixgbevf_82599_vf_info;
282extern struct ixgbevf_info ixgbevf_X540_vf_info;
281extern struct ixgbe_mac_operations ixgbevf_mbx_ops; 283extern struct ixgbe_mac_operations ixgbevf_mbx_ops;
282 284
283/* needed by ethtool.c */ 285/* needed by ethtool.c */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index dc03c9652389..464e6c9d3fc2 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -51,12 +51,14 @@ char ixgbevf_driver_name[] = "ixgbevf";
51static const char ixgbevf_driver_string[] = 51static const char ixgbevf_driver_string[] =
52 "Intel(R) 82599 Virtual Function"; 52 "Intel(R) 82599 Virtual Function";
53 53
54#define DRV_VERSION "1.0.0-k0" 54#define DRV_VERSION "1.0.19-k0"
55const char ixgbevf_driver_version[] = DRV_VERSION; 55const char ixgbevf_driver_version[] = DRV_VERSION;
56static char ixgbevf_copyright[] = "Copyright (c) 2009 Intel Corporation."; 56static char ixgbevf_copyright[] =
57 "Copyright (c) 2009 - 2010 Intel Corporation.";
57 58
58static const struct ixgbevf_info *ixgbevf_info_tbl[] = { 59static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
59 [board_82599_vf] = &ixgbevf_vf_info, 60 [board_82599_vf] = &ixgbevf_82599_vf_info,
61 [board_X540_vf] = &ixgbevf_X540_vf_info,
60}; 62};
61 63
62/* ixgbevf_pci_tbl - PCI Device ID Table 64/* ixgbevf_pci_tbl - PCI Device ID Table
@@ -70,6 +72,8 @@ static const struct ixgbevf_info *ixgbevf_info_tbl[] = {
70static struct pci_device_id ixgbevf_pci_tbl[] = { 72static struct pci_device_id ixgbevf_pci_tbl[] = {
71 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF), 73 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_VF),
72 board_82599_vf}, 74 board_82599_vf},
75 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540_VF),
76 board_X540_vf},
73 77
74 /* required last entry */ 78 /* required last entry */
75 {0, } 79 {0, }
@@ -2488,10 +2492,9 @@ int ixgbevf_setup_tx_resources(struct ixgbevf_adapter *adapter,
2488 int size; 2492 int size;
2489 2493
2490 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count; 2494 size = sizeof(struct ixgbevf_tx_buffer) * tx_ring->count;
2491 tx_ring->tx_buffer_info = vmalloc(size); 2495 tx_ring->tx_buffer_info = vzalloc(size);
2492 if (!tx_ring->tx_buffer_info) 2496 if (!tx_ring->tx_buffer_info)
2493 goto err; 2497 goto err;
2494 memset(tx_ring->tx_buffer_info, 0, size);
2495 2498
2496 /* round up to nearest 4K */ 2499 /* round up to nearest 4K */
2497 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); 2500 tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
@@ -2555,14 +2558,13 @@ int ixgbevf_setup_rx_resources(struct ixgbevf_adapter *adapter,
2555 int size; 2558 int size;
2556 2559
2557 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count; 2560 size = sizeof(struct ixgbevf_rx_buffer) * rx_ring->count;
2558 rx_ring->rx_buffer_info = vmalloc(size); 2561 rx_ring->rx_buffer_info = vzalloc(size);
2559 if (!rx_ring->rx_buffer_info) { 2562 if (!rx_ring->rx_buffer_info) {
2560 hw_dbg(&adapter->hw, 2563 hw_dbg(&adapter->hw,
2561 "Unable to vmalloc buffer memory for " 2564 "Unable to vmalloc buffer memory for "
2562 "the receive descriptor ring\n"); 2565 "the receive descriptor ring\n");
2563 goto alloc_failed; 2566 goto alloc_failed;
2564 } 2567 }
2565 memset(rx_ring->rx_buffer_info, 0, size);
2566 2568
2567 /* Round up to nearest 4K */ 2569 /* Round up to nearest 4K */
2568 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); 2570 rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
@@ -3424,10 +3426,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3424 if (hw->mac.ops.get_bus_info) 3426 if (hw->mac.ops.get_bus_info)
3425 hw->mac.ops.get_bus_info(hw); 3427 hw->mac.ops.get_bus_info(hw);
3426 3428
3427
3428 netif_carrier_off(netdev);
3429 netif_tx_stop_all_queues(netdev);
3430
3431 strcpy(netdev->name, "eth%d"); 3429 strcpy(netdev->name, "eth%d");
3432 3430
3433 err = register_netdev(netdev); 3431 err = register_netdev(netdev);
@@ -3436,6 +3434,8 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
3436 3434
3437 adapter->netdev_registered = true; 3435 adapter->netdev_registered = true;
3438 3436
3437 netif_carrier_off(netdev);
3438
3439 ixgbevf_init_last_counter_stats(adapter); 3439 ixgbevf_init_last_counter_stats(adapter);
3440 3440
3441 /* print the MAC address */ 3441 /* print the MAC address */
@@ -3487,10 +3487,9 @@ static void __devexit ixgbevf_remove(struct pci_dev *pdev)
3487 3487
3488 del_timer_sync(&adapter->watchdog_timer); 3488 del_timer_sync(&adapter->watchdog_timer);
3489 3489
3490 cancel_work_sync(&adapter->reset_task);
3490 cancel_work_sync(&adapter->watchdog_task); 3491 cancel_work_sync(&adapter->watchdog_task);
3491 3492
3492 flush_scheduled_work();
3493
3494 if (adapter->netdev_registered) { 3493 if (adapter->netdev_registered) {
3495 unregister_netdev(netdev); 3494 unregister_netdev(netdev);
3496 adapter->netdev_registered = false; 3495 adapter->netdev_registered = false;
diff --git a/drivers/net/ixgbevf/mbx.c b/drivers/net/ixgbevf/mbx.c
index 84ac486f4a65..7a8833125770 100644
--- a/drivers/net/ixgbevf/mbx.c
+++ b/drivers/net/ixgbevf/mbx.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/mbx.h b/drivers/net/ixgbevf/mbx.h
index 8c063bebee7f..b2b5bf5daa3d 100644
--- a/drivers/net/ixgbevf/mbx.h
+++ b/drivers/net/ixgbevf/mbx.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/regs.h b/drivers/net/ixgbevf/regs.h
index 12f75960aec1..fb80ca1bcc93 100644
--- a/drivers/net/ixgbevf/regs.h
+++ b/drivers/net/ixgbevf/regs.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
diff --git a/drivers/net/ixgbevf/vf.c b/drivers/net/ixgbevf/vf.c
index bfe42c1fcfaf..eecd3bf6833f 100644
--- a/drivers/net/ixgbevf/vf.c
+++ b/drivers/net/ixgbevf/vf.c
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -381,8 +381,12 @@ static struct ixgbe_mac_operations ixgbevf_mac_ops = {
381 .set_vfta = ixgbevf_set_vfta_vf, 381 .set_vfta = ixgbevf_set_vfta_vf,
382}; 382};
383 383
384struct ixgbevf_info ixgbevf_vf_info = { 384struct ixgbevf_info ixgbevf_82599_vf_info = {
385 .mac = ixgbe_mac_82599_vf, 385 .mac = ixgbe_mac_82599_vf,
386 .mac_ops = &ixgbevf_mac_ops, 386 .mac_ops = &ixgbevf_mac_ops,
387}; 387};
388 388
389struct ixgbevf_info ixgbevf_X540_vf_info = {
390 .mac = ixgbe_mac_X540_vf,
391 .mac_ops = &ixgbevf_mac_ops,
392};
diff --git a/drivers/net/ixgbevf/vf.h b/drivers/net/ixgbevf/vf.h
index 61f9dc831424..23eb114c149f 100644
--- a/drivers/net/ixgbevf/vf.h
+++ b/drivers/net/ixgbevf/vf.h
@@ -1,7 +1,7 @@
1/******************************************************************************* 1/*******************************************************************************
2 2
3 Intel 82599 Virtual Function driver 3 Intel 82599 Virtual Function driver
4 Copyright(c) 1999 - 2009 Intel Corporation. 4 Copyright(c) 1999 - 2010 Intel Corporation.
5 5
6 This program is free software; you can redistribute it and/or modify it 6 This program is free software; you can redistribute it and/or modify it
7 under the terms and conditions of the GNU General Public License, 7 under the terms and conditions of the GNU General Public License,
@@ -73,6 +73,7 @@ struct ixgbe_mac_operations {
73enum ixgbe_mac_type { 73enum ixgbe_mac_type {
74 ixgbe_mac_unknown = 0, 74 ixgbe_mac_unknown = 0,
75 ixgbe_mac_82599_vf, 75 ixgbe_mac_82599_vf,
76 ixgbe_mac_X540_vf,
76 ixgbe_num_macs 77 ixgbe_num_macs
77}; 78};
78 79
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index c57d9a43ceca..e97ebef3cf47 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -135,7 +135,7 @@ jme_reset_phy_processor(struct jme_adapter *jme)
135 135
136static void 136static void
137jme_setup_wakeup_frame(struct jme_adapter *jme, 137jme_setup_wakeup_frame(struct jme_adapter *jme,
138 u32 *mask, u32 crc, int fnr) 138 const u32 *mask, u32 crc, int fnr)
139{ 139{
140 int i; 140 int i;
141 141
@@ -163,7 +163,7 @@ jme_setup_wakeup_frame(struct jme_adapter *jme,
163static inline void 163static inline void
164jme_reset_mac_processor(struct jme_adapter *jme) 164jme_reset_mac_processor(struct jme_adapter *jme)
165{ 165{
166 u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0}; 166 static const u32 mask[WAKEUP_FRAME_MASK_DWNR] = {0, 0, 0, 0};
167 u32 crc = 0xCDCDCDCD; 167 u32 crc = 0xCDCDCDCD;
168 u32 gpreg0; 168 u32 gpreg0;
169 int i; 169 int i;
@@ -2076,12 +2076,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu)
2076 } 2076 }
2077 2077
2078 if (new_mtu > 1900) { 2078 if (new_mtu > 1900) {
2079 netdev->features &= ~(NETIF_F_HW_CSUM | 2079 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
2080 NETIF_F_TSO | 2080 NETIF_F_TSO | NETIF_F_TSO6);
2081 NETIF_F_TSO6);
2082 } else { 2081 } else {
2083 if (test_bit(JME_FLAG_TXCSUM, &jme->flags)) 2082 if (test_bit(JME_FLAG_TXCSUM, &jme->flags))
2084 netdev->features |= NETIF_F_HW_CSUM; 2083 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2085 if (test_bit(JME_FLAG_TSO, &jme->flags)) 2084 if (test_bit(JME_FLAG_TSO, &jme->flags))
2086 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6; 2085 netdev->features |= NETIF_F_TSO | NETIF_F_TSO6;
2087 } 2086 }
@@ -2514,10 +2513,12 @@ jme_set_tx_csum(struct net_device *netdev, u32 on)
2514 if (on) { 2513 if (on) {
2515 set_bit(JME_FLAG_TXCSUM, &jme->flags); 2514 set_bit(JME_FLAG_TXCSUM, &jme->flags);
2516 if (netdev->mtu <= 1900) 2515 if (netdev->mtu <= 1900)
2517 netdev->features |= NETIF_F_HW_CSUM; 2516 netdev->features |=
2517 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2518 } else { 2518 } else {
2519 clear_bit(JME_FLAG_TXCSUM, &jme->flags); 2519 clear_bit(JME_FLAG_TXCSUM, &jme->flags);
2520 netdev->features &= ~NETIF_F_HW_CSUM; 2520 netdev->features &=
2521 ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2521 } 2522 }
2522 2523
2523 return 0; 2524 return 0;
@@ -2797,7 +2798,8 @@ jme_init_one(struct pci_dev *pdev,
2797 netdev->netdev_ops = &jme_netdev_ops; 2798 netdev->netdev_ops = &jme_netdev_ops;
2798 netdev->ethtool_ops = &jme_ethtool_ops; 2799 netdev->ethtool_ops = &jme_ethtool_ops;
2799 netdev->watchdog_timeo = TX_TIMEOUT; 2800 netdev->watchdog_timeo = TX_TIMEOUT;
2800 netdev->features = NETIF_F_HW_CSUM | 2801 netdev->features = NETIF_F_IP_CSUM |
2802 NETIF_F_IPV6_CSUM |
2801 NETIF_F_SG | 2803 NETIF_F_SG |
2802 NETIF_F_TSO | 2804 NETIF_F_TSO |
2803 NETIF_F_TSO6 | 2805 NETIF_F_TSO6 |
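
[Illustrative note, not part of the patch] The jme.c hunks replace NETIF_F_HW_CSUM with NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM: NETIF_F_HW_CSUM advertises checksum insertion for any protocol, which this controller cannot do, so the narrower IPv4/IPv6 flags are the honest claim. A condensed sketch of the MTU-dependent feature handling, with JME_CSUM_FEATURES as an assumed helper macro:

#define JME_CSUM_FEATURES	(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)

static void example_adjust_features(struct net_device *netdev, int new_mtu)
{
	if (new_mtu > 1900)
		/* offloads are turned off for large MTUs, as in jme_change_mtu() */
		netdev->features &= ~(JME_CSUM_FEATURES |
				      NETIF_F_TSO | NETIF_F_TSO6);
	else
		netdev->features |= JME_CSUM_FEATURES;
}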
diff --git a/drivers/net/ks8851.c b/drivers/net/ks8851.c
index 51919fcd50c2..0fa4a9887ba2 100644
--- a/drivers/net/ks8851.c
+++ b/drivers/net/ks8851.c
@@ -1545,6 +1545,37 @@ static int ks8851_read_selftest(struct ks8851_net *ks)
1545 1545
1546/* driver bus management functions */ 1546/* driver bus management functions */
1547 1547
1548#ifdef CONFIG_PM
1549static int ks8851_suspend(struct spi_device *spi, pm_message_t state)
1550{
1551 struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
1552 struct net_device *dev = ks->netdev;
1553
1554 if (netif_running(dev)) {
1555 netif_device_detach(dev);
1556 ks8851_net_stop(dev);
1557 }
1558
1559 return 0;
1560}
1561
1562static int ks8851_resume(struct spi_device *spi)
1563{
1564 struct ks8851_net *ks = dev_get_drvdata(&spi->dev);
1565 struct net_device *dev = ks->netdev;
1566
1567 if (netif_running(dev)) {
1568 ks8851_net_open(dev);
1569 netif_device_attach(dev);
1570 }
1571
1572 return 0;
1573}
1574#else
1575#define ks8851_suspend NULL
1576#define ks8851_resume NULL
1577#endif
1578
1548static int __devinit ks8851_probe(struct spi_device *spi) 1579static int __devinit ks8851_probe(struct spi_device *spi)
1549{ 1580{
1550 struct net_device *ndev; 1581 struct net_device *ndev;
@@ -1679,6 +1710,8 @@ static struct spi_driver ks8851_driver = {
1679 }, 1710 },
1680 .probe = ks8851_probe, 1711 .probe = ks8851_probe,
1681 .remove = __devexit_p(ks8851_remove), 1712 .remove = __devexit_p(ks8851_remove),
1713 .suspend = ks8851_suspend,
1714 .resume = ks8851_resume,
1682}; 1715};
1683 1716
1684static int __init ks8851_init(void) 1717static int __init ks8851_init(void)
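
[Illustrative note, not part of the patch] The new ks8851 hooks use the legacy bus-level .suspend/.resume callbacks of spi_driver, with the CONFIG_PM=n branch defining them to NULL so the initializer stays valid. A hypothetical alternative shape, hooking dev_pm_ops into the driver model instead, is sketched below; the *_dev() names and the use of SIMPLE_DEV_PM_OPS are assumptions, not something this patch does.

static int ks8851_suspend_dev(struct device *dev)
{
	struct ks8851_net *ks = dev_get_drvdata(dev);
	struct net_device *ndev = ks->netdev;

	if (netif_running(ndev)) {
		netif_device_detach(ndev);
		ks8851_net_stop(ndev);
	}
	return 0;
}

static int ks8851_resume_dev(struct device *dev)
{
	struct ks8851_net *ks = dev_get_drvdata(dev);
	struct net_device *ndev = ks->netdev;

	if (netif_running(ndev)) {
		ks8851_net_open(ndev);
		netif_device_attach(ndev);
	}
	return 0;
}

static SIMPLE_DEV_PM_OPS(ks8851_pm_ops, ks8851_suspend_dev, ks8851_resume_dev);
/* then point ks8851_driver.driver.pm at &ks8851_pm_ops instead of .suspend/.resume */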
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 37504a398906..540a8dcbcc46 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -3570,7 +3570,7 @@ static void hw_cfg_wol(struct ksz_hw *hw, u16 frame, int set)
3570 * This routine is used to program Wake-on-LAN pattern. 3570 * This routine is used to program Wake-on-LAN pattern.
3571 */ 3571 */
3572static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size, 3572static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
3573 u8 *mask, uint frame_size, u8 *pattern) 3573 const u8 *mask, uint frame_size, const u8 *pattern)
3574{ 3574{
3575 int bits; 3575 int bits;
3576 int from; 3576 int from;
@@ -3626,9 +3626,9 @@ static void hw_set_wol_frame(struct ksz_hw *hw, int i, uint mask_size,
3626 * 3626 *
3627 * This routine is used to add ARP pattern for waking up the host. 3627 * This routine is used to add ARP pattern for waking up the host.
3628 */ 3628 */
3629static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr) 3629static void hw_add_wol_arp(struct ksz_hw *hw, const u8 *ip_addr)
3630{ 3630{
3631 u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 }; 3631 static const u8 mask[6] = { 0x3F, 0xF0, 0x3F, 0x00, 0xC0, 0x03 };
3632 u8 pattern[42] = { 3632 u8 pattern[42] = {
3633 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 3633 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
3634 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 3634 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
@@ -3651,8 +3651,8 @@ static void hw_add_wol_arp(struct ksz_hw *hw, u8 *ip_addr)
3651 */ 3651 */
3652static void hw_add_wol_bcast(struct ksz_hw *hw) 3652static void hw_add_wol_bcast(struct ksz_hw *hw)
3653{ 3653{
3654 u8 mask[] = { 0x3F }; 3654 static const u8 mask[] = { 0x3F };
3655 u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }; 3655 static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
3656 3656
3657 hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern); 3657 hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
3658} 3658}
@@ -3669,7 +3669,7 @@ static void hw_add_wol_bcast(struct ksz_hw *hw)
3669 */ 3669 */
3670static void hw_add_wol_mcast(struct ksz_hw *hw) 3670static void hw_add_wol_mcast(struct ksz_hw *hw)
3671{ 3671{
3672 u8 mask[] = { 0x3F }; 3672 static const u8 mask[] = { 0x3F };
3673 u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 }; 3673 u8 pattern[] = { 0x33, 0x33, 0xFF, 0x00, 0x00, 0x00 };
3674 3674
3675 memcpy(&pattern[3], &hw->override_addr[3], 3); 3675 memcpy(&pattern[3], &hw->override_addr[3], 3);
@@ -3687,7 +3687,7 @@ static void hw_add_wol_mcast(struct ksz_hw *hw)
3687 */ 3687 */
3688static void hw_add_wol_ucast(struct ksz_hw *hw) 3688static void hw_add_wol_ucast(struct ksz_hw *hw)
3689{ 3689{
3690 u8 mask[] = { 0x3F }; 3690 static const u8 mask[] = { 0x3F };
3691 3691
3692 hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr); 3692 hw_set_wol_frame(hw, 0, 1, mask, MAC_ADDR_LEN, hw->override_addr);
3693} 3693}
@@ -3700,7 +3700,7 @@ static void hw_add_wol_ucast(struct ksz_hw *hw)
3700 * 3700 *
3701 * This routine is used to enable Wake-on-LAN depending on driver settings. 3701 * This routine is used to enable Wake-on-LAN depending on driver settings.
3702 */ 3702 */
3703static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, u8 *net_addr) 3703static void hw_enable_wol(struct ksz_hw *hw, u32 wol_enable, const u8 *net_addr)
3704{ 3704{
3705 hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC)); 3705 hw_cfg_wol(hw, KS8841_WOL_MAGIC_ENABLE, (wol_enable & WAKE_MAGIC));
3706 hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST)); 3706 hw_cfg_wol(hw, KS8841_WOL_FRAME0_ENABLE, (wol_enable & WAKE_UCAST));
@@ -6208,7 +6208,7 @@ static int netdev_set_wol(struct net_device *dev,
6208 struct dev_info *hw_priv = priv->adapter; 6208 struct dev_info *hw_priv = priv->adapter;
6209 6209
6210 /* Need to find a way to retrieve the device IP address. */ 6210 /* Need to find a way to retrieve the device IP address. */
6211 u8 net_addr[] = { 192, 168, 1, 1 }; 6211 static const u8 net_addr[] = { 192, 168, 1, 1 };
6212 6212
6213 if (wol->wolopts & ~hw_priv->wol_support) 6213 if (wol->wolopts & ~hw_priv->wol_support)
6214 return -EINVAL; 6214 return -EINVAL;
@@ -6953,7 +6953,7 @@ static void read_other_addr(struct ksz_hw *hw)
6953#define PCI_VENDOR_ID_MICREL_KS 0x16c6 6953#define PCI_VENDOR_ID_MICREL_KS 0x16c6
6954#endif 6954#endif
6955 6955
6956static int __init pcidev_init(struct pci_dev *pdev, 6956static int __devinit pcidev_init(struct pci_dev *pdev,
6957 const struct pci_device_id *id) 6957 const struct pci_device_id *id)
6958{ 6958{
6959 struct net_device *dev; 6959 struct net_device *dev;
@@ -7241,7 +7241,7 @@ static int pcidev_suspend(struct pci_dev *pdev, pm_message_t state)
7241 struct ksz_hw *hw = &hw_priv->hw; 7241 struct ksz_hw *hw = &hw_priv->hw;
7242 7242
7243 /* Need to find a way to retrieve the device IP address. */ 7243 /* Need to find a way to retrieve the device IP address. */
7244 u8 net_addr[] = { 192, 168, 1, 1 }; 7244 static const u8 net_addr[] = { 192, 168, 1, 1 };
7245 7245
7246 for (i = 0; i < hw->dev_count; i++) { 7246 for (i = 0; i < hw->dev_count; i++) {
7247 if (info->netdev[i]) { 7247 if (info->netdev[i]) {
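
[Illustrative note, not part of the patch] The ksz884x hunks constify the Wake-on-LAN helpers and turn the fixed mask/pattern arrays into static const data, so they live in .rodata instead of being rebuilt on the stack at every call; hw_add_wol_mcast()'s pattern stays writable because it is patched with the device address via memcpy(). Condensed sketch of the resulting shape:

static void example_add_wol_bcast(struct ksz_hw *hw)
{
	/* static const: emitted once, read-only, only a pointer is passed */
	static const u8 mask[]    = { 0x3F };
	static const u8 pattern[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };

	/* hw_set_wol_frame() now takes const u8 *, so rodata is acceptable */
	hw_set_wol_frame(hw, 2, 1, mask, MAC_ADDR_LEN, pattern);
}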
diff --git a/drivers/net/lance.c b/drivers/net/lance.c
index f06296bfe293..02336edce748 100644
--- a/drivers/net/lance.c
+++ b/drivers/net/lance.c
@@ -207,7 +207,7 @@ tx_full and tbusy flags.
207#define LANCE_BUS_IF 0x16 207#define LANCE_BUS_IF 0x16
208#define LANCE_TOTAL_SIZE 0x18 208#define LANCE_TOTAL_SIZE 0x18
209 209
210#define TX_TIMEOUT 20 210#define TX_TIMEOUT (HZ/5)
211 211
212/* The LANCE Rx and Tx ring descriptors. */ 212/* The LANCE Rx and Tx ring descriptors. */
213struct lance_rx_head { 213struct lance_rx_head {
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index c27f4291b350..9e042894479b 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -161,7 +161,7 @@ enum commands {
161#define RX_SUSPEND 0x0030 161#define RX_SUSPEND 0x0030
162#define RX_ABORT 0x0040 162#define RX_ABORT 0x0040
163 163
164#define TX_TIMEOUT 5 164#define TX_TIMEOUT (HZ/20)
165 165
166 166
167struct i596_reg { 167struct i596_reg {
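
[Illustrative note, not part of the patch] TX_TIMEOUT in lance.c and lib82596.c feeds netdev->watchdog_timeo, which is measured in jiffies, so a bare constant such as 20 or 5 silently depended on the kernel's HZ setting (20 jiffies is 200 ms at HZ=100 but only 20 ms at HZ=1000). Scaling by HZ keeps the timeout constant in wall-clock terms:

#define EXAMPLE_TX_TIMEOUT	(HZ / 5)	/* 200 ms regardless of HZ */

static void example_setup(struct net_device *dev)
{
	dev->watchdog_timeo = EXAMPLE_TX_TIMEOUT;	/* jiffies */
}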
diff --git a/drivers/net/lib8390.c b/drivers/net/lib8390.c
index e7030ceb178b..da74db4a03d4 100644
--- a/drivers/net/lib8390.c
+++ b/drivers/net/lib8390.c
@@ -203,7 +203,7 @@ static void __NS8390_init(struct net_device *dev, int startp);
203static int __ei_open(struct net_device *dev) 203static int __ei_open(struct net_device *dev)
204{ 204{
205 unsigned long flags; 205 unsigned long flags;
206 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 206 struct ei_device *ei_local = netdev_priv(dev);
207 207
208 if (dev->watchdog_timeo <= 0) 208 if (dev->watchdog_timeo <= 0)
209 dev->watchdog_timeo = TX_TIMEOUT; 209 dev->watchdog_timeo = TX_TIMEOUT;
@@ -231,7 +231,7 @@ static int __ei_open(struct net_device *dev)
231 */ 231 */
232static int __ei_close(struct net_device *dev) 232static int __ei_close(struct net_device *dev)
233{ 233{
234 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 234 struct ei_device *ei_local = netdev_priv(dev);
235 unsigned long flags; 235 unsigned long flags;
236 236
237 /* 237 /*
@@ -256,7 +256,7 @@ static int __ei_close(struct net_device *dev)
256static void __ei_tx_timeout(struct net_device *dev) 256static void __ei_tx_timeout(struct net_device *dev)
257{ 257{
258 unsigned long e8390_base = dev->base_addr; 258 unsigned long e8390_base = dev->base_addr;
259 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 259 struct ei_device *ei_local = netdev_priv(dev);
260 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); 260 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
261 unsigned long flags; 261 unsigned long flags;
262 262
@@ -303,7 +303,7 @@ static netdev_tx_t __ei_start_xmit(struct sk_buff *skb,
303 struct net_device *dev) 303 struct net_device *dev)
304{ 304{
305 unsigned long e8390_base = dev->base_addr; 305 unsigned long e8390_base = dev->base_addr;
306 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 306 struct ei_device *ei_local = netdev_priv(dev);
307 int send_length = skb->len, output_page; 307 int send_length = skb->len, output_page;
308 unsigned long flags; 308 unsigned long flags;
309 char buf[ETH_ZLEN]; 309 char buf[ETH_ZLEN];
@@ -592,7 +592,7 @@ static void ei_tx_err(struct net_device *dev)
592static void ei_tx_intr(struct net_device *dev) 592static void ei_tx_intr(struct net_device *dev)
593{ 593{
594 unsigned long e8390_base = dev->base_addr; 594 unsigned long e8390_base = dev->base_addr;
595 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 595 struct ei_device *ei_local = netdev_priv(dev);
596 int status = ei_inb(e8390_base + EN0_TSR); 596 int status = ei_inb(e8390_base + EN0_TSR);
597 597
598 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */ 598 ei_outb_p(ENISR_TX, e8390_base + EN0_ISR); /* Ack intr. */
@@ -675,7 +675,7 @@ static void ei_tx_intr(struct net_device *dev)
675static void ei_receive(struct net_device *dev) 675static void ei_receive(struct net_device *dev)
676{ 676{
677 unsigned long e8390_base = dev->base_addr; 677 unsigned long e8390_base = dev->base_addr;
678 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 678 struct ei_device *ei_local = netdev_priv(dev);
679 unsigned char rxing_page, this_frame, next_frame; 679 unsigned char rxing_page, this_frame, next_frame;
680 unsigned short current_offset; 680 unsigned short current_offset;
681 int rx_pkt_count = 0; 681 int rx_pkt_count = 0;
@@ -879,7 +879,7 @@ static void ei_rx_overrun(struct net_device *dev)
879static struct net_device_stats *__ei_get_stats(struct net_device *dev) 879static struct net_device_stats *__ei_get_stats(struct net_device *dev)
880{ 880{
881 unsigned long ioaddr = dev->base_addr; 881 unsigned long ioaddr = dev->base_addr;
882 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 882 struct ei_device *ei_local = netdev_priv(dev);
883 unsigned long flags; 883 unsigned long flags;
884 884
885 /* If the card is stopped, just return the present stats. */ 885 /* If the card is stopped, just return the present stats. */
@@ -927,7 +927,7 @@ static void do_set_multicast_list(struct net_device *dev)
927{ 927{
928 unsigned long e8390_base = dev->base_addr; 928 unsigned long e8390_base = dev->base_addr;
929 int i; 929 int i;
930 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 930 struct ei_device *ei_local = netdev_priv(dev);
931 931
932 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) 932 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI)))
933 { 933 {
@@ -981,7 +981,7 @@ static void do_set_multicast_list(struct net_device *dev)
981static void __ei_set_multicast_list(struct net_device *dev) 981static void __ei_set_multicast_list(struct net_device *dev)
982{ 982{
983 unsigned long flags; 983 unsigned long flags;
984 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 984 struct ei_device *ei_local = netdev_priv(dev);
985 985
986 spin_lock_irqsave(&ei_local->page_lock, flags); 986 spin_lock_irqsave(&ei_local->page_lock, flags);
987 do_set_multicast_list(dev); 987 do_set_multicast_list(dev);
@@ -998,7 +998,7 @@ static void __ei_set_multicast_list(struct net_device *dev)
998 998
999static void ethdev_setup(struct net_device *dev) 999static void ethdev_setup(struct net_device *dev)
1000{ 1000{
1001 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1001 struct ei_device *ei_local = netdev_priv(dev);
1002 if (ei_debug > 1) 1002 if (ei_debug > 1)
1003 printk(version); 1003 printk(version);
1004 1004
@@ -1036,7 +1036,7 @@ static struct net_device *____alloc_ei_netdev(int size)
1036static void __NS8390_init(struct net_device *dev, int startp) 1036static void __NS8390_init(struct net_device *dev, int startp)
1037{ 1037{
1038 unsigned long e8390_base = dev->base_addr; 1038 unsigned long e8390_base = dev->base_addr;
1039 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1039 struct ei_device *ei_local = netdev_priv(dev);
1040 int i; 1040 int i;
1041 int endcfg = ei_local->word16 1041 int endcfg = ei_local->word16
1042 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0)) 1042 ? (0x48 | ENDCFG_WTS | (ei_local->bigendian ? ENDCFG_BOS : 0))
@@ -1099,7 +1099,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1099 int start_page) 1099 int start_page)
1100{ 1100{
1101 unsigned long e8390_base = dev->base_addr; 1101 unsigned long e8390_base = dev->base_addr;
1102 struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); 1102 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1103 1103
1104 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD); 1104 ei_outb_p(E8390_NODMA+E8390_PAGE0, e8390_base+E8390_CMD);
1105 1105
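
[Illustrative note, not part of the patch] Every lib8390.c change above drops a cast on netdev_priv(). The helper returns void *, which converts implicitly to any object pointer type in C, so the casts were pure noise:

static struct ei_device *example_priv(struct net_device *dev)
{
	return netdev_priv(dev);	/* implicit conversion from void *, no cast */
}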
diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c
index 661ed1ff4c2e..f35554d11441 100644
--- a/drivers/net/ll_temac_main.c
+++ b/drivers/net/ll_temac_main.c
@@ -692,7 +692,7 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
692 692
693 cur_p->app0 = 0; 693 cur_p->app0 = 0;
694 if (skb->ip_summed == CHECKSUM_PARTIAL) { 694 if (skb->ip_summed == CHECKSUM_PARTIAL) {
695 unsigned int csum_start_off = skb_transport_offset(skb); 695 unsigned int csum_start_off = skb_checksum_start_offset(skb);
696 unsigned int csum_index_off = csum_start_off + skb->csum_offset; 696 unsigned int csum_index_off = csum_start_off + skb->csum_offset;
697 697
698 cur_p->app0 |= 1; /* TX Checksum Enabled */ 698 cur_p->app0 |= 1; /* TX Checksum Enabled */
@@ -952,7 +952,7 @@ static const struct attribute_group temac_attr_group = {
952 .attrs = temac_device_attrs, 952 .attrs = temac_device_attrs,
953}; 953};
954 954
955static int __init 955static int __devinit
956temac_of_probe(struct platform_device *op, const struct of_device_id *match) 956temac_of_probe(struct platform_device *op, const struct of_device_id *match)
957{ 957{
958 struct device_node *np; 958 struct device_node *np;
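
[Illustrative note, not part of the patch] The ll_temac hunk switches from skb_transport_offset() to the skb_checksum_start_offset() helper when programming TX checksum offload; the helper derives the offset from skb->csum_start, so it is also correct for packets whose checksum start is not the transport header. Its effect is roughly:

/* Sketch of what the helper computes: the checksum start relative to
 * skb->data.  skb->csum_start is stored relative to skb->head, and
 * skb_headroom() is data - head. */
static inline int example_checksum_start_offset(const struct sk_buff *skb)
{
	return skb->csum_start - skb_headroom(skb);
}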
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 0fc9dc7f20db..6ed577b065df 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -38,6 +38,7 @@ struct macvlan_port {
38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; 38 struct hlist_head vlan_hash[MACVLAN_HASH_SIZE];
39 struct list_head vlans; 39 struct list_head vlans;
40 struct rcu_head rcu; 40 struct rcu_head rcu;
41 bool passthru;
41}; 42};
42 43
43#define macvlan_port_get_rcu(dev) \ 44#define macvlan_port_get_rcu(dev) \
@@ -169,6 +170,7 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
169 macvlan_broadcast(skb, port, NULL, 170 macvlan_broadcast(skb, port, NULL,
170 MACVLAN_MODE_PRIVATE | 171 MACVLAN_MODE_PRIVATE |
171 MACVLAN_MODE_VEPA | 172 MACVLAN_MODE_VEPA |
173 MACVLAN_MODE_PASSTHRU|
172 MACVLAN_MODE_BRIDGE); 174 MACVLAN_MODE_BRIDGE);
173 else if (src->mode == MACVLAN_MODE_VEPA) 175 else if (src->mode == MACVLAN_MODE_VEPA)
174 /* flood to everyone except source */ 176 /* flood to everyone except source */
@@ -185,7 +187,10 @@ static struct sk_buff *macvlan_handle_frame(struct sk_buff *skb)
185 return skb; 187 return skb;
186 } 188 }
187 189
188 vlan = macvlan_hash_lookup(port, eth->h_dest); 190 if (port->passthru)
191 vlan = list_first_entry(&port->vlans, struct macvlan_dev, list);
192 else
193 vlan = macvlan_hash_lookup(port, eth->h_dest);
189 if (vlan == NULL) 194 if (vlan == NULL)
190 return skb; 195 return skb;
191 196
@@ -243,18 +248,22 @@ xmit_world:
243netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, 248netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
244 struct net_device *dev) 249 struct net_device *dev)
245{ 250{
246 int i = skb_get_queue_mapping(skb);
247 struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
248 unsigned int len = skb->len; 251 unsigned int len = skb->len;
249 int ret; 252 int ret;
253 const struct macvlan_dev *vlan = netdev_priv(dev);
250 254
251 ret = macvlan_queue_xmit(skb, dev); 255 ret = macvlan_queue_xmit(skb, dev);
252 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { 256 if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
253 txq->tx_packets++; 257 struct macvlan_pcpu_stats *pcpu_stats;
254 txq->tx_bytes += len;
255 } else
256 txq->tx_dropped++;
257 258
259 pcpu_stats = this_cpu_ptr(vlan->pcpu_stats);
260 u64_stats_update_begin(&pcpu_stats->syncp);
261 pcpu_stats->tx_packets++;
262 pcpu_stats->tx_bytes += len;
263 u64_stats_update_end(&pcpu_stats->syncp);
264 } else {
265 this_cpu_inc(vlan->pcpu_stats->tx_dropped);
266 }
258 return ret; 267 return ret;
259} 268}
260EXPORT_SYMBOL_GPL(macvlan_start_xmit); 269EXPORT_SYMBOL_GPL(macvlan_start_xmit);
@@ -284,6 +293,11 @@ static int macvlan_open(struct net_device *dev)
284 struct net_device *lowerdev = vlan->lowerdev; 293 struct net_device *lowerdev = vlan->lowerdev;
285 int err; 294 int err;
286 295
296 if (vlan->port->passthru) {
297 dev_set_promiscuity(lowerdev, 1);
298 goto hash_add;
299 }
300
287 err = -EBUSY; 301 err = -EBUSY;
288 if (macvlan_addr_busy(vlan->port, dev->dev_addr)) 302 if (macvlan_addr_busy(vlan->port, dev->dev_addr))
289 goto out; 303 goto out;
@@ -296,6 +310,8 @@ static int macvlan_open(struct net_device *dev)
296 if (err < 0) 310 if (err < 0)
297 goto del_unicast; 311 goto del_unicast;
298 } 312 }
313
314hash_add:
299 macvlan_hash_add(vlan); 315 macvlan_hash_add(vlan);
300 return 0; 316 return 0;
301 317
@@ -310,12 +326,18 @@ static int macvlan_stop(struct net_device *dev)
310 struct macvlan_dev *vlan = netdev_priv(dev); 326 struct macvlan_dev *vlan = netdev_priv(dev);
311 struct net_device *lowerdev = vlan->lowerdev; 327 struct net_device *lowerdev = vlan->lowerdev;
312 328
329 if (vlan->port->passthru) {
330 dev_set_promiscuity(lowerdev, -1);
331 goto hash_del;
332 }
333
313 dev_mc_unsync(lowerdev, dev); 334 dev_mc_unsync(lowerdev, dev);
314 if (dev->flags & IFF_ALLMULTI) 335 if (dev->flags & IFF_ALLMULTI)
315 dev_set_allmulti(lowerdev, -1); 336 dev_set_allmulti(lowerdev, -1);
316 337
317 dev_uc_del(lowerdev, dev->dev_addr); 338 dev_uc_del(lowerdev, dev->dev_addr);
318 339
340hash_del:
319 macvlan_hash_del(vlan); 341 macvlan_hash_del(vlan);
320 return 0; 342 return 0;
321} 343}
@@ -414,14 +436,15 @@ static int macvlan_init(struct net_device *dev)
414 dev->state = (dev->state & ~MACVLAN_STATE_MASK) | 436 dev->state = (dev->state & ~MACVLAN_STATE_MASK) |
415 (lowerdev->state & MACVLAN_STATE_MASK); 437 (lowerdev->state & MACVLAN_STATE_MASK);
416 dev->features = lowerdev->features & MACVLAN_FEATURES; 438 dev->features = lowerdev->features & MACVLAN_FEATURES;
439 dev->features |= NETIF_F_LLTX;
417 dev->gso_max_size = lowerdev->gso_max_size; 440 dev->gso_max_size = lowerdev->gso_max_size;
418 dev->iflink = lowerdev->ifindex; 441 dev->iflink = lowerdev->ifindex;
419 dev->hard_header_len = lowerdev->hard_header_len; 442 dev->hard_header_len = lowerdev->hard_header_len;
420 443
421 macvlan_set_lockdep_class(dev); 444 macvlan_set_lockdep_class(dev);
422 445
423 vlan->rx_stats = alloc_percpu(struct macvlan_rx_stats); 446 vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats);
424 if (!vlan->rx_stats) 447 if (!vlan->pcpu_stats)
425 return -ENOMEM; 448 return -ENOMEM;
426 449
427 return 0; 450 return 0;
@@ -431,7 +454,7 @@ static void macvlan_uninit(struct net_device *dev)
431{ 454{
432 struct macvlan_dev *vlan = netdev_priv(dev); 455 struct macvlan_dev *vlan = netdev_priv(dev);
433 456
434 free_percpu(vlan->rx_stats); 457 free_percpu(vlan->pcpu_stats);
435} 458}
436 459
437static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, 460static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
@@ -439,33 +462,38 @@ static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev,
439{ 462{
440 struct macvlan_dev *vlan = netdev_priv(dev); 463 struct macvlan_dev *vlan = netdev_priv(dev);
441 464
442 dev_txq_stats_fold(dev, stats); 465 if (vlan->pcpu_stats) {
443 466 struct macvlan_pcpu_stats *p;
444 if (vlan->rx_stats) { 467 u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
445 struct macvlan_rx_stats *p, accum = {0}; 468 u32 rx_errors = 0, tx_dropped = 0;
446 u64 rx_packets, rx_bytes, rx_multicast;
447 unsigned int start; 469 unsigned int start;
448 int i; 470 int i;
449 471
450 for_each_possible_cpu(i) { 472 for_each_possible_cpu(i) {
451 p = per_cpu_ptr(vlan->rx_stats, i); 473 p = per_cpu_ptr(vlan->pcpu_stats, i);
452 do { 474 do {
453 start = u64_stats_fetch_begin_bh(&p->syncp); 475 start = u64_stats_fetch_begin_bh(&p->syncp);
454 rx_packets = p->rx_packets; 476 rx_packets = p->rx_packets;
455 rx_bytes = p->rx_bytes; 477 rx_bytes = p->rx_bytes;
456 rx_multicast = p->rx_multicast; 478 rx_multicast = p->rx_multicast;
479 tx_packets = p->tx_packets;
480 tx_bytes = p->tx_bytes;
457 } while (u64_stats_fetch_retry_bh(&p->syncp, start)); 481 } while (u64_stats_fetch_retry_bh(&p->syncp, start));
458 accum.rx_packets += rx_packets; 482
459 accum.rx_bytes += rx_bytes; 483 stats->rx_packets += rx_packets;
460 accum.rx_multicast += rx_multicast; 484 stats->rx_bytes += rx_bytes;
461 /* rx_errors is an ulong, updated without syncp protection */ 485 stats->multicast += rx_multicast;
462 accum.rx_errors += p->rx_errors; 486 stats->tx_packets += tx_packets;
487 stats->tx_bytes += tx_bytes;
488 /* rx_errors & tx_dropped are u32, updated
489 * without syncp protection.
490 */
491 rx_errors += p->rx_errors;
492 tx_dropped += p->tx_dropped;
463 } 493 }
464 stats->rx_packets = accum.rx_packets; 494 stats->rx_errors = rx_errors;
465 stats->rx_bytes = accum.rx_bytes; 495 stats->rx_dropped = rx_errors;
466 stats->rx_errors = accum.rx_errors; 496 stats->tx_dropped = tx_dropped;
467 stats->rx_dropped = accum.rx_errors;
468 stats->multicast = accum.rx_multicast;
469 } 497 }
470 return stats; 498 return stats;
471} 499}
@@ -549,6 +577,7 @@ static int macvlan_port_create(struct net_device *dev)
549 if (port == NULL) 577 if (port == NULL)
550 return -ENOMEM; 578 return -ENOMEM;
551 579
580 port->passthru = false;
552 port->dev = dev; 581 port->dev = dev;
553 INIT_LIST_HEAD(&port->vlans); 582 INIT_LIST_HEAD(&port->vlans);
554 for (i = 0; i < MACVLAN_HASH_SIZE; i++) 583 for (i = 0; i < MACVLAN_HASH_SIZE; i++)
@@ -593,6 +622,7 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
593 case MACVLAN_MODE_PRIVATE: 622 case MACVLAN_MODE_PRIVATE:
594 case MACVLAN_MODE_VEPA: 623 case MACVLAN_MODE_VEPA:
595 case MACVLAN_MODE_BRIDGE: 624 case MACVLAN_MODE_BRIDGE:
625 case MACVLAN_MODE_PASSTHRU:
596 break; 626 break;
597 default: 627 default:
598 return -EINVAL; 628 return -EINVAL;
@@ -601,25 +631,6 @@ static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[])
601 return 0; 631 return 0;
602} 632}
603 633
604static int macvlan_get_tx_queues(struct net *net,
605 struct nlattr *tb[],
606 unsigned int *num_tx_queues,
607 unsigned int *real_num_tx_queues)
608{
609 struct net_device *real_dev;
610
611 if (!tb[IFLA_LINK])
612 return -EINVAL;
613
614 real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
615 if (!real_dev)
616 return -ENODEV;
617
618 *num_tx_queues = real_dev->num_tx_queues;
619 *real_num_tx_queues = real_dev->real_num_tx_queues;
620 return 0;
621}
622
623int macvlan_common_newlink(struct net *src_net, struct net_device *dev, 634int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
624 struct nlattr *tb[], struct nlattr *data[], 635 struct nlattr *tb[], struct nlattr *data[],
625 int (*receive)(struct sk_buff *skb), 636 int (*receive)(struct sk_buff *skb),
@@ -661,6 +672,10 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
661 } 672 }
662 port = macvlan_port_get(lowerdev); 673 port = macvlan_port_get(lowerdev);
663 674
675 /* Only 1 macvlan device can be created in passthru mode */
676 if (port->passthru)
677 return -EINVAL;
678
664 vlan->lowerdev = lowerdev; 679 vlan->lowerdev = lowerdev;
665 vlan->dev = dev; 680 vlan->dev = dev;
666 vlan->port = port; 681 vlan->port = port;
@@ -671,6 +686,13 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev,
671 if (data && data[IFLA_MACVLAN_MODE]) 686 if (data && data[IFLA_MACVLAN_MODE])
672 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); 687 vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]);
673 688
689 if (vlan->mode == MACVLAN_MODE_PASSTHRU) {
690 if (!list_empty(&port->vlans))
691 return -EINVAL;
692 port->passthru = true;
693 memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN);
694 }
695
674 err = register_netdevice(dev); 696 err = register_netdevice(dev);
675 if (err < 0) 697 if (err < 0)
676 goto destroy_port; 698 goto destroy_port;
@@ -743,7 +765,6 @@ int macvlan_link_register(struct rtnl_link_ops *ops)
743{ 765{
744 /* common fields */ 766 /* common fields */
745 ops->priv_size = sizeof(struct macvlan_dev); 767 ops->priv_size = sizeof(struct macvlan_dev);
746 ops->get_tx_queues = macvlan_get_tx_queues;
747 ops->validate = macvlan_validate; 768 ops->validate = macvlan_validate;
748 ops->maxtype = IFLA_MACVLAN_MAX; 769 ops->maxtype = IFLA_MACVLAN_MAX;
749 ops->policy = macvlan_policy; 770 ops->policy = macvlan_policy;
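
[Illustrative note, not part of the patch] macvlan above gains a passthru mode (a single macvlan that puts the lower device into promiscuous mode and receives everything), drops the per-txq counters together with get_tx_queues, sets NETIF_F_LLTX, and moves to a combined per-CPU rx/tx stats block. On 32-bit SMP the 64-bit counters have to be read under the u64_stats seqcount so the two halves are never torn; the generic read shape used by macvlan_dev_get_stats64() is:

static u64 example_read_tx_bytes(const struct macvlan_dev *vlan, int cpu)
{
	const struct macvlan_pcpu_stats *p = per_cpu_ptr(vlan->pcpu_stats, cpu);
	unsigned int start;
	u64 val;

	do {
		start = u64_stats_fetch_begin_bh(&p->syncp);
		val = p->tx_bytes;
	} while (u64_stats_fetch_retry_bh(&p->syncp, start));

	return val;
}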
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 42567279843e..21845affea13 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -504,8 +504,7 @@ static int macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
504 504
505 if (skb->ip_summed == CHECKSUM_PARTIAL) { 505 if (skb->ip_summed == CHECKSUM_PARTIAL) {
506 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 506 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
507 vnet_hdr->csum_start = skb->csum_start - 507 vnet_hdr->csum_start = skb_checksum_start_offset(skb);
508 skb_headroom(skb);
509 vnet_hdr->csum_offset = skb->csum_offset; 508 vnet_hdr->csum_offset = skb->csum_offset;
510 } /* else everything is zero */ 509 } /* else everything is zero */
511 510
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c
index 8f4bf1f07c11..3a4277f6fac4 100644
--- a/drivers/net/mlx4/alloc.c
+++ b/drivers/net/mlx4/alloc.c
@@ -178,6 +178,7 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct,
178 } else { 178 } else {
179 int i; 179 int i;
180 180
181 buf->direct.buf = NULL;
181 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; 182 buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE;
182 buf->npages = buf->nbufs; 183 buf->npages = buf->nbufs;
183 buf->page_shift = PAGE_SHIFT; 184 buf->page_shift = PAGE_SHIFT;
@@ -229,7 +230,7 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf)
229 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, 230 dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf,
230 buf->direct.map); 231 buf->direct.map);
231 else { 232 else {
232 if (BITS_PER_LONG == 64) 233 if (BITS_PER_LONG == 64 && buf->direct.buf)
233 vunmap(buf->direct.buf); 234 vunmap(buf->direct.buf);
234 235
235 for (i = 0; i < buf->nbufs; ++i) 236 for (i = 0; i < buf->nbufs; ++i)
diff --git a/drivers/net/mlx4/en_netdev.c b/drivers/net/mlx4/en_netdev.c
index 6d6806b361e3..897f576b8b17 100644
--- a/drivers/net/mlx4/en_netdev.c
+++ b/drivers/net/mlx4/en_netdev.c
@@ -972,7 +972,8 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
972 int i; 972 int i;
973 int err; 973 int err;
974 974
975 dev = alloc_etherdev_mq(sizeof(struct mlx4_en_priv), prof->tx_ring_num); 975 dev = alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
976 prof->tx_ring_num, prof->rx_ring_num);
976 if (dev == NULL) { 977 if (dev == NULL) {
977 mlx4_err(mdev, "Net device allocation failed\n"); 978 mlx4_err(mdev, "Net device allocation failed\n");
978 return -ENOMEM; 979 return -ENOMEM;
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index 7a7e18ba278a..5de1db897835 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -289,10 +289,8 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET); 289 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_BF_REG_SZ_OFFSET);
290 dev_cap->bf_reg_size = 1 << (field & 0x1f); 290 dev_cap->bf_reg_size = 1 << (field & 0x1f);
291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET); 291 MLX4_GET(field, outbox, QUERY_DEV_CAP_LOG_MAX_BF_REGS_PER_PAGE_OFFSET);
292 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size)) { 292 if ((1 << (field & 0x3f)) > (PAGE_SIZE / dev_cap->bf_reg_size))
293 mlx4_warn(dev, "firmware bug: log2 # of blue flame regs is invalid (%d), forcing 3\n", field & 0x1f);
294 field = 3; 293 field = 3;
295 }
296 dev_cap->bf_regs_per_page = 1 << (field & 0x3f); 294 dev_cap->bf_regs_per_page = 1 << (field & 0x3f);
297 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n", 295 mlx4_dbg(dev, "BlueFlame available (reg size %d, regs/page %d)\n",
298 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page); 296 dev_cap->bf_reg_size, dev_cap->bf_regs_per_page);
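
[Illustrative note, not part of the patch] In the mlx4 hunks, alloc.c initialises buf->direct.buf to NULL in the indirect-allocation path so the "vunmap() only if mapped" check in mlx4_buf_free() is safe, and en_netdev.c now allocates the netdev with alloc_etherdev_mqs(), which sizes TX and RX queues independently instead of reusing the TX count for both. Sketch of the allocation call:

static struct net_device *example_alloc(struct mlx4_en_port_profile *prof)
{
	/* separate TX and RX queue counts, new with alloc_etherdev_mqs() */
	return alloc_etherdev_mqs(sizeof(struct mlx4_en_priv),
				  prof->tx_ring_num, prof->rx_ring_num);
}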
diff --git a/drivers/net/mv643xx_eth.c b/drivers/net/mv643xx_eth.c
index dd2b6a71c6d7..02076e16542a 100644
--- a/drivers/net/mv643xx_eth.c
+++ b/drivers/net/mv643xx_eth.c
@@ -1514,11 +1514,6 @@ static int mv643xx_eth_nway_reset(struct net_device *dev)
1514 return genphy_restart_aneg(mp->phy); 1514 return genphy_restart_aneg(mp->phy);
1515} 1515}
1516 1516
1517static u32 mv643xx_eth_get_link(struct net_device *dev)
1518{
1519 return !!netif_carrier_ok(dev);
1520}
1521
1522static int 1517static int
1523mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec) 1518mv643xx_eth_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
1524{ 1519{
@@ -1658,7 +1653,7 @@ static const struct ethtool_ops mv643xx_eth_ethtool_ops = {
1658 .set_settings = mv643xx_eth_set_settings, 1653 .set_settings = mv643xx_eth_set_settings,
1659 .get_drvinfo = mv643xx_eth_get_drvinfo, 1654 .get_drvinfo = mv643xx_eth_get_drvinfo,
1660 .nway_reset = mv643xx_eth_nway_reset, 1655 .nway_reset = mv643xx_eth_nway_reset,
1661 .get_link = mv643xx_eth_get_link, 1656 .get_link = ethtool_op_get_link,
1662 .get_coalesce = mv643xx_eth_get_coalesce, 1657 .get_coalesce = mv643xx_eth_get_coalesce,
1663 .set_coalesce = mv643xx_eth_set_coalesce, 1658 .set_coalesce = mv643xx_eth_set_coalesce,
1664 .get_ringparam = mv643xx_eth_get_ringparam, 1659 .get_ringparam = mv643xx_eth_get_ringparam,
@@ -2983,7 +2978,7 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2983 unregister_netdev(mp->dev); 2978 unregister_netdev(mp->dev);
2984 if (mp->phy != NULL) 2979 if (mp->phy != NULL)
2985 phy_detach(mp->phy); 2980 phy_detach(mp->phy);
2986 flush_scheduled_work(); 2981 cancel_work_sync(&mp->tx_timeout_task);
2987 free_netdev(mp->dev); 2982 free_netdev(mp->dev);
2988 2983
2989 platform_set_drvdata(pdev, NULL); 2984 platform_set_drvdata(pdev, NULL);
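
[Illustrative note, not part of the patch] mv643xx_eth drops its private get_link implementation because it duplicated the stock ethtool helper, and replaces flush_scheduled_work() with cancelling its own tx_timeout_task. The stock helper is essentially:

/* roughly what net/core/ethtool.c provides */
u32 example_op_get_link(struct net_device *dev)
{
	return netif_carrier_ok(dev) ? 1 : 0;
}

static const struct ethtool_ops example_ethtool_ops = {
	.get_link = ethtool_op_get_link,	/* use the core helper directly */
};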
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index 8524cc40ec57..a37fcf11ab36 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -2736,7 +2736,7 @@ again:
2736 odd_flag = 0; 2736 odd_flag = 0;
2737 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST); 2737 flags = (MXGEFW_FLAGS_NO_TSO | MXGEFW_FLAGS_FIRST);
2738 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 2738 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
2739 cksum_offset = skb_transport_offset(skb); 2739 cksum_offset = skb_checksum_start_offset(skb);
2740 pseudo_hdr_offset = cksum_offset + skb->csum_offset; 2740 pseudo_hdr_offset = cksum_offset + skb->csum_offset;
2741 /* If the headers are excessively large, then we must 2741 /* If the headers are excessively large, then we must
2742 * fall back to a software checksum */ 2742 * fall back to a software checksum */
@@ -4067,7 +4067,7 @@ static void myri10ge_remove(struct pci_dev *pdev)
4067 if (mgp == NULL) 4067 if (mgp == NULL)
4068 return; 4068 return;
4069 4069
4070 flush_scheduled_work(); 4070 cancel_work_sync(&mgp->watchdog_work);
4071 netdev = mgp->dev; 4071 netdev = mgp->dev;
4072 unregister_netdev(netdev); 4072 unregister_netdev(netdev);
4073 4073
diff --git a/drivers/net/ne-h8300.c b/drivers/net/ne-h8300.c
index e0b0ef11f110..30be8c634ebd 100644
--- a/drivers/net/ne-h8300.c
+++ b/drivers/net/ne-h8300.c
@@ -86,7 +86,7 @@ static u32 reg_offset[16];
86 86
87static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr) 87static int __init init_reg_offset(struct net_device *dev,unsigned long base_addr)
88{ 88{
89 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 89 struct ei_device *ei_local = netdev_priv(dev);
90 int i; 90 int i;
91 unsigned char bus_width; 91 unsigned char bus_width;
92 92
@@ -218,7 +218,7 @@ static int __init ne_probe1(struct net_device *dev, int ioaddr)
218 int start_page, stop_page; 218 int start_page, stop_page;
219 int reg0, ret; 219 int reg0, ret;
220 static unsigned version_printed; 220 static unsigned version_printed;
221 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 221 struct ei_device *ei_local = netdev_priv(dev);
222 unsigned char bus_width; 222 unsigned char bus_width;
223 223
224 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME)) 224 if (!request_region(ioaddr, NE_IO_EXTENT, DRV_NAME))
@@ -371,7 +371,7 @@ static int ne_close(struct net_device *dev)
371static void ne_reset_8390(struct net_device *dev) 371static void ne_reset_8390(struct net_device *dev)
372{ 372{
373 unsigned long reset_start_time = jiffies; 373 unsigned long reset_start_time = jiffies;
374 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 374 struct ei_device *ei_local = netdev_priv(dev);
375 375
376 if (ei_debug > 1) 376 if (ei_debug > 1)
377 printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies); 377 printk(KERN_DEBUG "resetting the 8390 t=%ld...", jiffies);
@@ -397,7 +397,7 @@ static void ne_reset_8390(struct net_device *dev)
397 397
398static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) 398static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page)
399{ 399{
400 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 400 struct ei_device *ei_local = netdev_priv(dev);
401 /* This *shouldn't* happen. If it does, it's the last thing you'll see */ 401 /* This *shouldn't* happen. If it does, it's the last thing you'll see */
402 402
403 if (ei_status.dmaing) 403 if (ei_status.dmaing)
@@ -437,7 +437,7 @@ static void ne_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, i
437 437
438static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) 438static void ne_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset)
439{ 439{
440 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 440 struct ei_device *ei_local = netdev_priv(dev);
441#ifdef NE_SANITY_CHECK 441#ifdef NE_SANITY_CHECK
442 int xfer_count = count; 442 int xfer_count = count;
443#endif 443#endif
@@ -507,7 +507,7 @@ static void ne_block_input(struct net_device *dev, int count, struct sk_buff *sk
507static void ne_block_output(struct net_device *dev, int count, 507static void ne_block_output(struct net_device *dev, int count,
508 const unsigned char *buf, const int start_page) 508 const unsigned char *buf, const int start_page)
509{ 509{
510 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 510 struct ei_device *ei_local = netdev_priv(dev);
511 unsigned long dma_start; 511 unsigned long dma_start;
512#ifdef NE_SANITY_CHECK 512#ifdef NE_SANITY_CHECK
513 int retries = 0; 513 int retries = 0;
diff --git a/drivers/net/netconsole.c b/drivers/net/netconsole.c
index 94255f09093d..dfb67eb2a94b 100644
--- a/drivers/net/netconsole.c
+++ b/drivers/net/netconsole.c
@@ -664,6 +664,7 @@ static int netconsole_netdev_event(struct notifier_block *this,
664 unsigned long flags; 664 unsigned long flags;
665 struct netconsole_target *nt; 665 struct netconsole_target *nt;
666 struct net_device *dev = ptr; 666 struct net_device *dev = ptr;
667 bool stopped = false;
667 668
668 if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER || 669 if (!(event == NETDEV_CHANGENAME || event == NETDEV_UNREGISTER ||
669 event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN)) 670 event == NETDEV_BONDING_DESLAVE || event == NETDEV_GOING_DOWN))
@@ -690,15 +691,16 @@ static int netconsole_netdev_event(struct notifier_block *this,
690 case NETDEV_GOING_DOWN: 691 case NETDEV_GOING_DOWN:
691 case NETDEV_BONDING_DESLAVE: 692 case NETDEV_BONDING_DESLAVE:
692 nt->enabled = 0; 693 nt->enabled = 0;
694 stopped = true;
693 break; 695 break;
694 } 696 }
695 } 697 }
696 netconsole_target_put(nt); 698 netconsole_target_put(nt);
697 } 699 }
698 spin_unlock_irqrestore(&target_list_lock, flags); 700 spin_unlock_irqrestore(&target_list_lock, flags);
699 if (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE) 701 if (stopped && (event == NETDEV_UNREGISTER || event == NETDEV_BONDING_DESLAVE))
700 printk(KERN_INFO "netconsole: network logging stopped, " 702 printk(KERN_INFO "netconsole: network logging stopped on "
701 "interface %s %s\n", dev->name, 703 "interface %s as it %s\n", dev->name,
702 event == NETDEV_UNREGISTER ? "unregistered" : "released slaves"); 704 event == NETDEV_UNREGISTER ? "unregistered" : "released slaves");
703 705
704done: 706done:
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 8e8a97839cb0..a11380544e6c 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 74 56#define _NETXEN_NIC_LINUX_SUBVERSION 75
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.74" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.75"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
@@ -1132,6 +1132,7 @@ typedef struct {
1132#define NETXEN_NIC_MSI_ENABLED 0x02 1132#define NETXEN_NIC_MSI_ENABLED 0x02
1133#define NETXEN_NIC_MSIX_ENABLED 0x04 1133#define NETXEN_NIC_MSIX_ENABLED 0x04
1134#define NETXEN_NIC_LRO_ENABLED 0x08 1134#define NETXEN_NIC_LRO_ENABLED 0x08
1135#define NETXEN_NIC_LRO_DISABLED 0x00
1135#define NETXEN_NIC_BRIDGE_ENABLED 0X10 1136#define NETXEN_NIC_BRIDGE_ENABLED 0X10
1136#define NETXEN_NIC_DIAG_ENABLED 0x20 1137#define NETXEN_NIC_DIAG_ENABLED 0x20
1137#define NETXEN_IS_MSI_FAMILY(adapter) \ 1138#define NETXEN_IS_MSI_FAMILY(adapter) \
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index b30de24f4a52..587498e140bb 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -720,7 +720,21 @@ static u32 netxen_nic_get_rx_csum(struct net_device *dev)
720static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data) 720static int netxen_nic_set_rx_csum(struct net_device *dev, u32 data)
721{ 721{
722 struct netxen_adapter *adapter = netdev_priv(dev); 722 struct netxen_adapter *adapter = netdev_priv(dev);
723 adapter->rx_csum = !!data; 723
724 if (data) {
725 adapter->rx_csum = data;
726 return 0;
727 }
728
729 if (dev->features & NETIF_F_LRO) {
730 if (netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_DISABLED))
731 return -EIO;
732
733 dev->features &= ~NETIF_F_LRO;
734 netxen_send_lro_cleanup(adapter);
735 netdev_info(dev, "disabling LRO as rx_csum is off\n");
736 }
737 adapter->rx_csum = data;
724 return 0; 738 return 0;
725} 739}
726 740
@@ -893,11 +907,19 @@ static int netxen_nic_set_flags(struct net_device *netdev, u32 data)
893 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO)) 907 if (!(adapter->capabilities & NX_FW_CAPABILITY_HW_LRO))
894 return -EINVAL; 908 return -EINVAL;
895 909
910 if (!adapter->rx_csum) {
911 netdev_info(netdev, "rx csum is off, cannot toggle LRO\n");
912 return -EINVAL;
913 }
914
915 if (!!(data & ETH_FLAG_LRO) == !!(netdev->features & NETIF_F_LRO))
916 return 0;
917
896 if (data & ETH_FLAG_LRO) { 918 if (data & ETH_FLAG_LRO) {
897 hw_lro = NETXEN_NIC_LRO_ENABLED; 919 hw_lro = NETXEN_NIC_LRO_ENABLED;
898 netdev->features |= NETIF_F_LRO; 920 netdev->features |= NETIF_F_LRO;
899 } else { 921 } else {
900 hw_lro = 0; 922 hw_lro = NETXEN_NIC_LRO_DISABLED;
901 netdev->features &= ~NETIF_F_LRO; 923 netdev->features &= ~NETIF_F_LRO;
902 } 924 }
903 925
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 37d3ebd65be8..5cef718fe35f 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -655,7 +655,7 @@ nx_p3_sre_macaddr_change(struct netxen_adapter *adapter, u8 *addr, unsigned op)
655} 655}
656 656
657static int nx_p3_nic_add_mac(struct netxen_adapter *adapter, 657static int nx_p3_nic_add_mac(struct netxen_adapter *adapter,
658 u8 *addr, struct list_head *del_list) 658 const u8 *addr, struct list_head *del_list)
659{ 659{
660 struct list_head *head; 660 struct list_head *head;
661 nx_mac_list_t *cur; 661 nx_mac_list_t *cur;
@@ -686,7 +686,9 @@ static void netxen_p3_nic_set_multi(struct net_device *netdev)
686{ 686{
687 struct netxen_adapter *adapter = netdev_priv(netdev); 687 struct netxen_adapter *adapter = netdev_priv(netdev);
688 struct netdev_hw_addr *ha; 688 struct netdev_hw_addr *ha;
689 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 689 static const u8 bcast_addr[ETH_ALEN] = {
690 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
691 };
690 u32 mode = VPORT_MISS_MODE_DROP; 692 u32 mode = VPORT_MISS_MODE_DROP;
691 LIST_HEAD(del_list); 693 LIST_HEAD(del_list);
692 struct list_head *head; 694 struct list_head *head;
@@ -807,9 +809,6 @@ int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
807 u64 word; 809 u64 word;
808 int rv = 0; 810 int rv = 0;
809 811
810 if ((adapter->flags & NETXEN_NIC_LRO_ENABLED) == enable)
811 return 0;
812
813 memset(&req, 0, sizeof(nx_nic_req_t)); 812 memset(&req, 0, sizeof(nx_nic_req_t));
814 813
815 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23); 814 req.qhdr = cpu_to_le64(NX_HOST_REQUEST << 23);
@@ -825,8 +824,6 @@ int netxen_config_hw_lro(struct netxen_adapter *adapter, int enable)
825 "configure hw lro request\n"); 824 "configure hw lro request\n");
826 } 825 }
827 826
828 adapter->flags ^= NETXEN_NIC_LRO_ENABLED;
829
830 return rv; 827 return rv;
831} 828}
832 829
@@ -869,9 +866,11 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
869 u64 word; 866 u64 word;
870 int i, rv; 867 int i, rv;
871 868
872 u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 869 static const u64 key[] = {
873 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 870 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
874 0x255b0ec26d5a56daULL }; 871 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
872 0x255b0ec26d5a56daULL
873 };
875 874
876 875
877 memset(&req, 0, sizeof(nx_nic_req_t)); 876 memset(&req, 0, sizeof(nx_nic_req_t));
@@ -895,7 +894,7 @@ int netxen_config_rss(struct netxen_adapter *adapter, int enable)
895 ((u64)(enable & 0x1) << 8) | 894 ((u64)(enable & 0x1) << 8) |
896 ((0x7ULL) << 48); 895 ((0x7ULL) << 48);
897 req.words[0] = cpu_to_le64(word); 896 req.words[0] = cpu_to_le64(word);
898 for (i = 0; i < 5; i++) 897 for (i = 0; i < ARRAY_SIZE(key); i++)
899 req.words[i+1] = cpu_to_le64(key[i]); 898 req.words[i+1] = cpu_to_le64(key[i]);
900 899
901 900
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 95fe552aa279..731077d8d962 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -214,13 +214,12 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
214 tx_ring->num_desc = adapter->num_txd; 214 tx_ring->num_desc = adapter->num_txd;
215 tx_ring->txq = netdev_get_tx_queue(netdev, 0); 215 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
216 216
217 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 217 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
218 if (cmd_buf_arr == NULL) { 218 if (cmd_buf_arr == NULL) {
219 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n", 219 dev_err(&pdev->dev, "%s: failed to allocate cmd buffer ring\n",
220 netdev->name); 220 netdev->name);
221 goto err_out; 221 goto err_out;
222 } 222 }
223 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
224 tx_ring->cmd_buf_arr = cmd_buf_arr; 223 tx_ring->cmd_buf_arr = cmd_buf_arr;
225 224
226 recv_ctx = &adapter->recv_ctx; 225 recv_ctx = &adapter->recv_ctx;
@@ -279,8 +278,7 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
279 break; 278 break;
280 279
281 } 280 }
282 rds_ring->rx_buf_arr = (struct netxen_rx_buffer *) 281 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
283 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
284 if (rds_ring->rx_buf_arr == NULL) { 282 if (rds_ring->rx_buf_arr == NULL) {
285 printk(KERN_ERR "%s: Failed to allocate " 283 printk(KERN_ERR "%s: Failed to allocate "
286 "rx buffer ring %d\n", 284 "rx buffer ring %d\n",
@@ -288,7 +286,6 @@ int netxen_alloc_sw_resources(struct netxen_adapter *adapter)
288 /* free whatever was already allocated */ 286 /* free whatever was already allocated */
289 goto err_out; 287 goto err_out;
290 } 288 }
291 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
292 INIT_LIST_HEAD(&rds_ring->free_list); 289 INIT_LIST_HEAD(&rds_ring->free_list);
293 /* 290 /*
294 * Now go through all of them, set reference handles 291 * Now go through all of them, set reference handles
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index e1d30d7f2071..33fac32e0d9f 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -38,7 +38,7 @@
38#include <linux/sysfs.h> 38#include <linux/sysfs.h>
39#include <linux/aer.h> 39#include <linux/aer.h>
40 40
41MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver"); 41MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Intelligent Ethernet Driver");
42MODULE_LICENSE("GPL"); 42MODULE_LICENSE("GPL");
43MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 43MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
44MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME); 44MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
@@ -762,8 +762,6 @@ netxen_check_options(struct netxen_adapter *adapter)
762 if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222)) 762 if (adapter->fw_version >= NETXEN_VERSION_CODE(4, 0, 222))
763 adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1); 763 adapter->capabilities = NXRD32(adapter, CRB_FW_CAPABILITIES_1);
764 764
765 adapter->flags &= ~NETXEN_NIC_LRO_ENABLED;
766
767 if (adapter->ahw.port_type == NETXEN_NIC_XGBE) { 765 if (adapter->ahw.port_type == NETXEN_NIC_XGBE) {
768 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G; 766 adapter->num_rxd = DEFAULT_RCV_DESCRIPTORS_10G;
769 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G; 767 adapter->num_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
@@ -990,7 +988,7 @@ __netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
990 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 988 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
991 netxen_config_intr_coalesce(adapter); 989 netxen_config_intr_coalesce(adapter);
992 990
993 if (adapter->capabilities & NX_FW_CAPABILITY_HW_LRO) 991 if (netdev->features & NETIF_F_LRO)
994 netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED); 992 netxen_config_hw_lro(adapter, NETXEN_NIC_LRO_ENABLED);
995 993
996 netxen_napi_enable(adapter); 994 netxen_napi_enable(adapter);
@@ -1277,6 +1275,7 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1277 int i = 0, err; 1275 int i = 0, err;
1278 int pci_func_id = PCI_FUNC(pdev->devfn); 1276 int pci_func_id = PCI_FUNC(pdev->devfn);
1279 uint8_t revision_id; 1277 uint8_t revision_id;
1278 u32 val;
1280 1279
1281 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) { 1280 if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
1282 pr_warning("%s: chip revisions between 0x%x-0x%x " 1281 pr_warning("%s: chip revisions between 0x%x-0x%x "
@@ -1352,8 +1351,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1352 break; 1351 break;
1353 } 1352 }
1354 1353
1355 if (reset_devices) { 1354 if (adapter->portnum == 0) {
1356 if (adapter->portnum == 0) { 1355 val = NXRD32(adapter, NX_CRB_DEV_REF_COUNT);
1356 if (val != 0xffffffff && val != 0) {
1357 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0); 1357 NXWR32(adapter, NX_CRB_DEV_REF_COUNT, 0);
1358 adapter->need_fw_reset = 1; 1358 adapter->need_fw_reset = 1;
1359 } 1359 }
diff --git a/drivers/net/ni52.c b/drivers/net/ni52.c
index 33618edc61f9..d973fc6c6b88 100644
--- a/drivers/net/ni52.c
+++ b/drivers/net/ni52.c
@@ -388,9 +388,9 @@ static long memend; /* e.g 0xd4000 */
388struct net_device * __init ni52_probe(int unit) 388struct net_device * __init ni52_probe(int unit)
389{ 389{
390 struct net_device *dev = alloc_etherdev(sizeof(struct priv)); 390 struct net_device *dev = alloc_etherdev(sizeof(struct priv));
391 static int ports[] = {0x300, 0x280, 0x360 , 0x320 , 0x340, 0}; 391 static const int ports[] = {0x300, 0x280, 0x360, 0x320, 0x340, 0};
392 const int *port;
392 struct priv *p; 393 struct priv *p;
393 int *port;
394 int err = 0; 394 int err = 0;
395 395
396 if (!dev) 396 if (!dev)
diff --git a/drivers/net/ni65.c b/drivers/net/ni65.c
index da228a0dd6cd..c75ae85eb918 100644
--- a/drivers/net/ni65.c
+++ b/drivers/net/ni65.c
@@ -361,8 +361,8 @@ static int dma;
361struct net_device * __init ni65_probe(int unit) 361struct net_device * __init ni65_probe(int unit)
362{ 362{
363 struct net_device *dev = alloc_etherdev(0); 363 struct net_device *dev = alloc_etherdev(0);
364 static int ports[] = {0x360,0x300,0x320,0x340, 0}; 364 static const int ports[] = { 0x360, 0x300, 0x320, 0x340, 0 };
365 int *port; 365 const int *port;
366 int err = 0; 366 int err = 0;
367 367
368 if (!dev) 368 if (!dev)
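The ni52 and ni65 hunks above apply the same cleanup: a probe-time I/O port table that is never written becomes static const, and the pointer that walks it becomes a pointer to const, so the data lands in read-only storage. A minimal sketch of the pattern outside either driver (the port values and the probe call are placeholders):

        static const int ports[] = { 0x300, 0x280, 0x320, 0 };
        const int *port;

        for (port = ports; *port; port++) {
                /* try_probe_at(*port) would go here; the zero
                 * entry terminates the table as in both drivers */
        }
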
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index 781e368329f9..2541321bad82 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -6589,7 +6589,7 @@ static u64 niu_compute_tx_flags(struct sk_buff *skb, struct ethhdr *ehdr,
6589 (ip_proto == IPPROTO_UDP ? 6589 (ip_proto == IPPROTO_UDP ?
6590 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP)); 6590 TXHDR_CSUM_UDP : TXHDR_CSUM_SCTP));
6591 6591
6592 start = skb_transport_offset(skb) - 6592 start = skb_checksum_start_offset(skb) -
6593 (pad_bytes + sizeof(struct tx_pkt_hdr)); 6593 (pad_bytes + sizeof(struct tx_pkt_hdr));
6594 stuff = start + skb->csum_offset; 6594 stuff = start + skb->csum_offset;
6595 6595
@@ -9917,7 +9917,7 @@ static int niu_suspend(struct pci_dev *pdev, pm_message_t state)
9917 if (!netif_running(dev)) 9917 if (!netif_running(dev))
9918 return 0; 9918 return 0;
9919 9919
9920 flush_scheduled_work(); 9920 flush_work_sync(&np->reset_task);
9921 niu_netif_stop(np); 9921 niu_netif_stop(np);
9922 9922
9923 del_timer_sync(&np->timer); 9923 del_timer_sync(&np->timer);
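The niu suspend hunk stops flushing every item on the global workqueue and instead waits only for the driver's own reset task, which is both faster and immune to unrelated work items. A rough sketch of the idiom, assuming a work item initialised with INIT_WORK() as niu does; my_priv and my_suspend are made-up stand-ins, and flush_work_sync() comes from <linux/workqueue.h> in kernels of this vintage:

        #include <linux/workqueue.h>

        struct my_priv {
                struct work_struct reset_task;  /* INIT_WORK(&np->reset_task, fn) at probe */
        };

        static void my_suspend(struct my_priv *np)
        {
                /* wait for this one work item only, instead of
                 * flush_scheduled_work() on the whole queue */
                flush_work_sync(&np->reset_task);
        }
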
diff --git a/drivers/net/pch_gbe/pch_gbe_ethtool.c b/drivers/net/pch_gbe/pch_gbe_ethtool.c
index c8cc32c0edc9..c8c873b31a89 100644
--- a/drivers/net/pch_gbe/pch_gbe_ethtool.c
+++ b/drivers/net/pch_gbe/pch_gbe_ethtool.c
@@ -469,18 +469,6 @@ static int pch_gbe_set_rx_csum(struct net_device *netdev, u32 data)
469} 469}
470 470
471/** 471/**
472 * pch_gbe_get_tx_csum - Report whether transmit checksums are turned on or off
473 * @netdev: Network interface device structure
474 * Returns
475 * true(1): Checksum On
476 * false(0): Checksum Off
477 */
478static u32 pch_gbe_get_tx_csum(struct net_device *netdev)
479{
480 return (netdev->features & NETIF_F_HW_CSUM) != 0;
481}
482
483/**
484 * pch_gbe_set_tx_csum - Turn transmit checksums on or off 472 * pch_gbe_set_tx_csum - Turn transmit checksums on or off
485 * @netdev: Network interface device structure 473 * @netdev: Network interface device structure
486 * @data: Checksum on[true] or off[false] 474 * @data: Checksum on[true] or off[false]
@@ -493,11 +481,7 @@ static int pch_gbe_set_tx_csum(struct net_device *netdev, u32 data)
493 struct pch_gbe_adapter *adapter = netdev_priv(netdev); 481 struct pch_gbe_adapter *adapter = netdev_priv(netdev);
494 482
495 adapter->tx_csum = data; 483 adapter->tx_csum = data;
496 if (data) 484 return ethtool_op_set_tx_ipv6_csum(netdev, data);
497 netdev->features |= NETIF_F_HW_CSUM;
498 else
499 netdev->features &= ~NETIF_F_HW_CSUM;
500 return 0;
501} 485}
502 486
503/** 487/**
@@ -572,7 +556,6 @@ static const struct ethtool_ops pch_gbe_ethtool_ops = {
572 .set_pauseparam = pch_gbe_set_pauseparam, 556 .set_pauseparam = pch_gbe_set_pauseparam,
573 .get_rx_csum = pch_gbe_get_rx_csum, 557 .get_rx_csum = pch_gbe_get_rx_csum,
574 .set_rx_csum = pch_gbe_set_rx_csum, 558 .set_rx_csum = pch_gbe_set_rx_csum,
575 .get_tx_csum = pch_gbe_get_tx_csum,
576 .set_tx_csum = pch_gbe_set_tx_csum, 559 .set_tx_csum = pch_gbe_set_tx_csum,
577 .get_strings = pch_gbe_get_strings, 560 .get_strings = pch_gbe_get_strings,
578 .get_ethtool_stats = pch_gbe_get_ethtool_stats, 561 .get_ethtool_stats = pch_gbe_get_ethtool_stats,
diff --git a/drivers/net/pch_gbe/pch_gbe_main.c b/drivers/net/pch_gbe/pch_gbe_main.c
index 03a1d280105f..d7355306a738 100644
--- a/drivers/net/pch_gbe/pch_gbe_main.c
+++ b/drivers/net/pch_gbe/pch_gbe_main.c
@@ -1523,12 +1523,11 @@ int pch_gbe_setup_tx_resources(struct pch_gbe_adapter *adapter,
1523 int desNo; 1523 int desNo;
1524 1524
1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count; 1525 size = (int)sizeof(struct pch_gbe_buffer) * tx_ring->count;
1526 tx_ring->buffer_info = vmalloc(size); 1526 tx_ring->buffer_info = vzalloc(size);
1527 if (!tx_ring->buffer_info) { 1527 if (!tx_ring->buffer_info) {
1528 pr_err("Unable to allocate memory for the buffer infomation\n"); 1528 pr_err("Unable to allocate memory for the buffer infomation\n");
1529 return -ENOMEM; 1529 return -ENOMEM;
1530 } 1530 }
1531 memset(tx_ring->buffer_info, 0, size);
1532 1531
1533 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc); 1532 tx_ring->size = tx_ring->count * (int)sizeof(struct pch_gbe_tx_desc);
1534 1533
@@ -1573,12 +1572,11 @@ int pch_gbe_setup_rx_resources(struct pch_gbe_adapter *adapter,
1573 int desNo; 1572 int desNo;
1574 1573
1575 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count; 1574 size = (int)sizeof(struct pch_gbe_buffer) * rx_ring->count;
1576 rx_ring->buffer_info = vmalloc(size); 1575 rx_ring->buffer_info = vzalloc(size);
1577 if (!rx_ring->buffer_info) { 1576 if (!rx_ring->buffer_info) {
1578 pr_err("Unable to allocate memory for the receive descriptor ring\n"); 1577 pr_err("Unable to allocate memory for the receive descriptor ring\n");
1579 return -ENOMEM; 1578 return -ENOMEM;
1580 } 1579 }
1581 memset(rx_ring->buffer_info, 0, size);
1582 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc); 1580 rx_ring->size = rx_ring->count * (int)sizeof(struct pch_gbe_rx_desc);
1583 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size, 1581 rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
1584 &rx_ring->dma, GFP_KERNEL); 1582 &rx_ring->dma, GFP_KERNEL);
@@ -2321,7 +2319,7 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2321 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD; 2319 netdev->watchdog_timeo = PCH_GBE_WATCHDOG_PERIOD;
2322 netif_napi_add(netdev, &adapter->napi, 2320 netif_napi_add(netdev, &adapter->napi,
2323 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT); 2321 pch_gbe_napi_poll, PCH_GBE_RX_WEIGHT);
2324 netdev->features = NETIF_F_HW_CSUM | NETIF_F_GRO; 2322 netdev->features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_GRO;
2325 pch_gbe_set_ethtool_ops(netdev); 2323 pch_gbe_set_ethtool_ops(netdev);
2326 2324
2327 pch_gbe_mac_reset_hw(&adapter->hw); 2325 pch_gbe_mac_reset_hw(&adapter->hw);
@@ -2360,9 +2358,9 @@ static int pch_gbe_probe(struct pci_dev *pdev,
2360 pch_gbe_check_options(adapter); 2358 pch_gbe_check_options(adapter);
2361 2359
2362 if (adapter->tx_csum) 2360 if (adapter->tx_csum)
2363 netdev->features |= NETIF_F_HW_CSUM; 2361 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
2364 else 2362 else
2365 netdev->features &= ~NETIF_F_HW_CSUM; 2363 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
2366 2364
2367 /* initialize the wol settings based on the eeprom settings */ 2365 /* initialize the wol settings based on the eeprom settings */
2368 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING; 2366 adapter->wake_up_evt = PCH_GBE_WL_INIT_SETTING;
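The two pch_gbe ring-setup hunks above (and the later pptp change) replace vmalloc() followed by memset() with vzalloc(), which returns virtually contiguous, already zeroed memory. The transformation is mechanical; a before/after sketch with buffer_info and size standing in for the driver's fields:

        #include <linux/vmalloc.h>
        #include <linux/string.h>

        /* before */
        buffer_info = vmalloc(size);
        if (!buffer_info)
                return -ENOMEM;
        memset(buffer_info, 0, size);

        /* after: one call, same semantics */
        buffer_info = vzalloc(size);
        if (!buffer_info)
                return -ENOMEM;
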
diff --git a/drivers/net/pcmcia/axnet_cs.c b/drivers/net/pcmcia/axnet_cs.c
index 8a4d19e5de06..1f42f6ac8551 100644
--- a/drivers/net/pcmcia/axnet_cs.c
+++ b/drivers/net/pcmcia/axnet_cs.c
@@ -690,6 +690,7 @@ static void block_output(struct net_device *dev, int count,
690static struct pcmcia_device_id axnet_ids[] = { 690static struct pcmcia_device_id axnet_ids[] = {
691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081), 691 PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x016c, 0x0081),
692 PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301), 692 PCMCIA_DEVICE_MANF_CARD(0x018a, 0x0301),
693 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
693 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301), 694 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0301),
694 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303), 695 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0303),
695 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309), 696 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0309),
@@ -875,7 +876,7 @@ static void do_set_multicast_list(struct net_device *dev);
875static int ax_open(struct net_device *dev) 876static int ax_open(struct net_device *dev)
876{ 877{
877 unsigned long flags; 878 unsigned long flags;
878 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 879 struct ei_device *ei_local = netdev_priv(dev);
879 880
880 /* 881 /*
881 * Grab the page lock so we own the register set, then call 882 * Grab the page lock so we own the register set, then call
@@ -926,7 +927,7 @@ static int ax_close(struct net_device *dev)
926static void axnet_tx_timeout(struct net_device *dev) 927static void axnet_tx_timeout(struct net_device *dev)
927{ 928{
928 long e8390_base = dev->base_addr; 929 long e8390_base = dev->base_addr;
929 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 930 struct ei_device *ei_local = netdev_priv(dev);
930 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev); 931 int txsr, isr, tickssofar = jiffies - dev_trans_start(dev);
931 unsigned long flags; 932 unsigned long flags;
932 933
@@ -973,7 +974,7 @@ static netdev_tx_t axnet_start_xmit(struct sk_buff *skb,
973 struct net_device *dev) 974 struct net_device *dev)
974{ 975{
975 long e8390_base = dev->base_addr; 976 long e8390_base = dev->base_addr;
976 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 977 struct ei_device *ei_local = netdev_priv(dev);
977 int length, send_length, output_page; 978 int length, send_length, output_page;
978 unsigned long flags; 979 unsigned long flags;
979 u8 packet[ETH_ZLEN]; 980 u8 packet[ETH_ZLEN];
@@ -1270,7 +1271,7 @@ static void ei_tx_err(struct net_device *dev)
1270static void ei_tx_intr(struct net_device *dev) 1271static void ei_tx_intr(struct net_device *dev)
1271{ 1272{
1272 long e8390_base = dev->base_addr; 1273 long e8390_base = dev->base_addr;
1273 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1274 struct ei_device *ei_local = netdev_priv(dev);
1274 int status = inb(e8390_base + EN0_TSR); 1275 int status = inb(e8390_base + EN0_TSR);
1275 1276
1276 /* 1277 /*
@@ -1354,7 +1355,7 @@ static void ei_tx_intr(struct net_device *dev)
1354static void ei_receive(struct net_device *dev) 1355static void ei_receive(struct net_device *dev)
1355{ 1356{
1356 long e8390_base = dev->base_addr; 1357 long e8390_base = dev->base_addr;
1357 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1358 struct ei_device *ei_local = netdev_priv(dev);
1358 unsigned char rxing_page, this_frame, next_frame; 1359 unsigned char rxing_page, this_frame, next_frame;
1359 unsigned short current_offset; 1360 unsigned short current_offset;
1360 int rx_pkt_count = 0; 1361 int rx_pkt_count = 0;
@@ -1539,7 +1540,7 @@ static void ei_rx_overrun(struct net_device *dev)
1539static struct net_device_stats *get_stats(struct net_device *dev) 1540static struct net_device_stats *get_stats(struct net_device *dev)
1540{ 1541{
1541 long ioaddr = dev->base_addr; 1542 long ioaddr = dev->base_addr;
1542 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1543 struct ei_device *ei_local = netdev_priv(dev);
1543 unsigned long flags; 1544 unsigned long flags;
1544 1545
1545 /* If the card is stopped, just return the present stats. */ 1546 /* If the card is stopped, just return the present stats. */
@@ -1588,7 +1589,7 @@ static void do_set_multicast_list(struct net_device *dev)
1588{ 1589{
1589 long e8390_base = dev->base_addr; 1590 long e8390_base = dev->base_addr;
1590 int i; 1591 int i;
1591 struct ei_device *ei_local = (struct ei_device*)netdev_priv(dev); 1592 struct ei_device *ei_local = netdev_priv(dev);
1592 1593
1593 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) { 1594 if (!(dev->flags&(IFF_PROMISC|IFF_ALLMULTI))) {
1594 memset(ei_local->mcfilter, 0, 8); 1595 memset(ei_local->mcfilter, 0, 8);
@@ -1646,7 +1647,7 @@ static void AX88190_init(struct net_device *dev, int startp)
1646{ 1647{
1647 axnet_dev_t *info = PRIV(dev); 1648 axnet_dev_t *info = PRIV(dev);
1648 long e8390_base = dev->base_addr; 1649 long e8390_base = dev->base_addr;
1649 struct ei_device *ei_local = (struct ei_device *) netdev_priv(dev); 1650 struct ei_device *ei_local = netdev_priv(dev);
1650 int i; 1651 int i;
1651 int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48; 1652 int endcfg = ei_local->word16 ? (0x48 | ENDCFG_WTS) : 0x48;
1652 1653
@@ -1712,7 +1713,7 @@ static void NS8390_trigger_send(struct net_device *dev, unsigned int length,
1712 int start_page) 1713 int start_page)
1713{ 1714{
1714 long e8390_base = dev->base_addr; 1715 long e8390_base = dev->base_addr;
1715 struct ei_device *ei_local __attribute((unused)) = (struct ei_device *) netdev_priv(dev); 1716 struct ei_device *ei_local __attribute((unused)) = netdev_priv(dev);
1716 1717
1717 if (inb_p(e8390_base) & E8390_TRANS) 1718 if (inb_p(e8390_base) & E8390_TRANS)
1718 { 1719 {
diff --git a/drivers/net/pcmcia/nmclan_cs.c b/drivers/net/pcmcia/nmclan_cs.c
index 0a2b0f9cdf33..76683d97d83b 100644
--- a/drivers/net/pcmcia/nmclan_cs.c
+++ b/drivers/net/pcmcia/nmclan_cs.c
@@ -1291,7 +1291,7 @@ updateCRC
1291 1291
1292static void updateCRC(int *CRC, int bit) 1292static void updateCRC(int *CRC, int bit)
1293{ 1293{
1294 int poly[]={ 1294 static const int poly[]={
1295 1,1,1,0, 1,1,0,1, 1295 1,1,1,0, 1,1,0,1,
1296 1,0,1,1, 1,0,0,0, 1296 1,0,1,1, 1,0,0,0,
1297 1,0,0,0, 0,0,1,1, 1297 1,0,0,0, 0,0,1,1,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index d05c44692f08..e953793a33ff 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1493,7 +1493,6 @@ static struct pcmcia_device_id pcnet_ids[] = {
1493 PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530), 1493 PCMCIA_DEVICE_MANF_CARD(0x0149, 0x4530),
1494 PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab), 1494 PCMCIA_DEVICE_MANF_CARD(0x0149, 0xc1ab),
1495 PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110), 1495 PCMCIA_DEVICE_MANF_CARD(0x0186, 0x0110),
1496 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x2328),
1497 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041), 1496 PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x8041),
1498 PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452), 1497 PCMCIA_DEVICE_MANF_CARD(0x0213, 0x2452),
1499 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300), 1498 PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0300),
@@ -1537,6 +1536,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
1537 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722), 1536 PCMCIA_DEVICE_PROD_ID12("CONTEC", "C-NET(PC)C-10L", 0x21cab552, 0xf6f90722),
1538 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2), 1537 PCMCIA_DEVICE_PROD_ID12("corega", "FEther PCC-TXF", 0x0a21501a, 0xa51564a2),
1539 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a), 1538 PCMCIA_DEVICE_PROD_ID12("corega", "Ether CF-TD", 0x0a21501a, 0x6589340a),
1539 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega Ether CF-TD LAN Card", 0x5261440f, 0x8797663b),
1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd), 1540 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-T", 0x5261440f, 0xfa9d85bd),
1541 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d), 1541 PCMCIA_DEVICE_PROD_ID12("corega K.K.", "corega EtherII PCC-TD", 0x5261440f, 0xc49bd73d),
1542 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d), 1542 PCMCIA_DEVICE_PROD_ID12("Corega K.K.", "corega EtherII PCC-TD", 0xd4fdcbd8, 0xc49bd73d),
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 7670aac0e93f..a8445c72fc13 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -47,11 +47,11 @@ void phy_print_status(struct phy_device *phydev)
47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev), 47 pr_info("PHY: %s - Link is %s", dev_name(&phydev->dev),
48 phydev->link ? "Up" : "Down"); 48 phydev->link ? "Up" : "Down");
49 if (phydev->link) 49 if (phydev->link)
50 printk(" - %d/%s", phydev->speed, 50 printk(KERN_CONT " - %d/%s", phydev->speed,
51 DUPLEX_FULL == phydev->duplex ? 51 DUPLEX_FULL == phydev->duplex ?
52 "Full" : "Half"); 52 "Full" : "Half");
53 53
54 printk("\n"); 54 printk(KERN_CONT "\n");
55} 55}
56EXPORT_SYMBOL(phy_print_status); 56EXPORT_SYMBOL(phy_print_status);
57 57
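The phy_print_status() fix marks the follow-on printk() calls as continuations of the first line. Without KERN_CONT the speed/duplex fragment and the trailing newline each start a fresh message at the default log level, so the link status can end up split across lines in the log. A standalone sketch (up, speed and full_duplex are placeholders):

        printk(KERN_INFO "PHY: link is %s", up ? "Up" : "Down");
        if (up)
                printk(KERN_CONT " - %d/%s", speed,
                       full_duplex ? "Full" : "Half");
        printk(KERN_CONT "\n");
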
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 78d70a6481bf..a1b82c9c67d2 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -32,6 +32,7 @@
32#include <linux/init.h> 32#include <linux/init.h>
33#include <linux/jiffies.h> 33#include <linux/jiffies.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <asm/unaligned.h>
35#include <asm/uaccess.h> 36#include <asm/uaccess.h>
36#include <asm/string.h> 37#include <asm/string.h>
37 38
@@ -542,7 +543,7 @@ ppp_async_encode(struct asyncppp *ap)
542 data = ap->tpkt->data; 543 data = ap->tpkt->data;
543 count = ap->tpkt->len; 544 count = ap->tpkt->len;
544 fcs = ap->tfcs; 545 fcs = ap->tfcs;
545 proto = (data[0] << 8) + data[1]; 546 proto = get_unaligned_be16(data);
546 547
547 /* 548 /*
548 * LCP packets with code values between 1 (configure-reqest) 549 * LCP packets with code values between 1 (configure-reqest)
@@ -963,7 +964,7 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
963 code = data[0]; 964 code = data[0];
964 if (code != CONFACK && code != CONFREQ) 965 if (code != CONFACK && code != CONFREQ)
965 return; 966 return;
966 dlen = (data[2] << 8) + data[3]; 967 dlen = get_unaligned_be16(data + 2);
967 if (len < dlen) 968 if (len < dlen)
968 return; /* packet got truncated or length is bogus */ 969 return; /* packet got truncated or length is bogus */
969 970
@@ -997,15 +998,14 @@ static void async_lcp_peek(struct asyncppp *ap, unsigned char *data,
997 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) { 998 while (dlen >= 2 && dlen >= data[1] && data[1] >= 2) {
998 switch (data[0]) { 999 switch (data[0]) {
999 case LCP_MRU: 1000 case LCP_MRU:
1000 val = (data[2] << 8) + data[3]; 1001 val = get_unaligned_be16(data + 2);
1001 if (inbound) 1002 if (inbound)
1002 ap->mru = val; 1003 ap->mru = val;
1003 else 1004 else
1004 ap->chan.mtu = val; 1005 ap->chan.mtu = val;
1005 break; 1006 break;
1006 case LCP_ASYNCMAP: 1007 case LCP_ASYNCMAP:
1007 val = (data[2] << 24) + (data[3] << 16) 1008 val = get_unaligned_be32(data + 2);
1008 + (data[4] << 8) + data[5];
1009 if (inbound) 1009 if (inbound)
1010 ap->raccm = val; 1010 ap->raccm = val;
1011 else 1011 else
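This ppp_async change, and the matching hunks in ppp_deflate, ppp_generic, ppp_mppe and ppp_synctty below, replace hand-rolled big-endian byte assembly with the <asm/unaligned.h> accessors. The helpers make the byte order explicit and remain safe on architectures that trap on unaligned loads. A sketch of the equivalences, with data and q standing in for packet pointers:

        #include <asm/unaligned.h>

        u16 proto = get_unaligned_be16(data);      /* (data[0] << 8) | data[1] */
        u32 accm  = get_unaligned_be32(data + 2);  /* bytes 2..5, big-endian */

        put_unaligned_be16(PPP_MP, q);             /* q[0] = PPP_MP >> 8; q[1] = PPP_MP; */
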
diff --git a/drivers/net/ppp_deflate.c b/drivers/net/ppp_deflate.c
index 695bc83e0cfd..43583309a65d 100644
--- a/drivers/net/ppp_deflate.c
+++ b/drivers/net/ppp_deflate.c
@@ -41,6 +41,7 @@
41#include <linux/ppp-comp.h> 41#include <linux/ppp-comp.h>
42 42
43#include <linux/zlib.h> 43#include <linux/zlib.h>
44#include <asm/unaligned.h>
44 45
45/* 46/*
46 * State for a Deflate (de)compressor. 47 * State for a Deflate (de)compressor.
@@ -232,11 +233,9 @@ static int z_compress(void *arg, unsigned char *rptr, unsigned char *obuf,
232 */ 233 */
233 wptr[0] = PPP_ADDRESS(rptr); 234 wptr[0] = PPP_ADDRESS(rptr);
234 wptr[1] = PPP_CONTROL(rptr); 235 wptr[1] = PPP_CONTROL(rptr);
235 wptr[2] = PPP_COMP >> 8; 236 put_unaligned_be16(PPP_COMP, wptr + 2);
236 wptr[3] = PPP_COMP;
237 wptr += PPP_HDRLEN; 237 wptr += PPP_HDRLEN;
238 wptr[0] = state->seqno >> 8; 238 put_unaligned_be16(state->seqno, wptr);
239 wptr[1] = state->seqno;
240 wptr += DEFLATE_OVHD; 239 wptr += DEFLATE_OVHD;
241 olen = PPP_HDRLEN + DEFLATE_OVHD; 240 olen = PPP_HDRLEN + DEFLATE_OVHD;
242 state->strm.next_out = wptr; 241 state->strm.next_out = wptr;
@@ -451,7 +450,7 @@ static int z_decompress(void *arg, unsigned char *ibuf, int isize,
451 } 450 }
452 451
453 /* Check the sequence number. */ 452 /* Check the sequence number. */
454 seq = (ibuf[PPP_HDRLEN] << 8) + ibuf[PPP_HDRLEN+1]; 453 seq = get_unaligned_be16(ibuf + PPP_HDRLEN);
455 if (seq != (state->seqno & 0xffff)) { 454 if (seq != (state->seqno & 0xffff)) {
456 if (state->debug) 455 if (state->debug)
457 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n", 456 printk(KERN_DEBUG "z_decompress%d: bad seq # %d, expected %d\n",
diff --git a/drivers/net/ppp_generic.c b/drivers/net/ppp_generic.c
index 39659976a1ac..c7a6c4466978 100644
--- a/drivers/net/ppp_generic.c
+++ b/drivers/net/ppp_generic.c
@@ -46,6 +46,7 @@
46#include <linux/device.h> 46#include <linux/device.h>
47#include <linux/mutex.h> 47#include <linux/mutex.h>
48#include <linux/slab.h> 48#include <linux/slab.h>
49#include <asm/unaligned.h>
49#include <net/slhc_vj.h> 50#include <net/slhc_vj.h>
50#include <asm/atomic.h> 51#include <asm/atomic.h>
51 52
@@ -210,7 +211,7 @@ struct ppp_net {
210}; 211};
211 212
212/* Get the PPP protocol number from a skb */ 213/* Get the PPP protocol number from a skb */
213#define PPP_PROTO(skb) (((skb)->data[0] << 8) + (skb)->data[1]) 214#define PPP_PROTO(skb) get_unaligned_be16((skb)->data)
214 215
215/* We limit the length of ppp->file.rq to this (arbitrary) value */ 216/* We limit the length of ppp->file.rq to this (arbitrary) value */
216#define PPP_MAX_RQLEN 32 217#define PPP_MAX_RQLEN 32
@@ -964,8 +965,7 @@ ppp_start_xmit(struct sk_buff *skb, struct net_device *dev)
964 965
965 pp = skb_push(skb, 2); 966 pp = skb_push(skb, 2);
966 proto = npindex_to_proto[npi]; 967 proto = npindex_to_proto[npi];
967 pp[0] = proto >> 8; 968 put_unaligned_be16(proto, pp);
968 pp[1] = proto;
969 969
970 netif_stop_queue(dev); 970 netif_stop_queue(dev);
971 skb_queue_tail(&ppp->file.xq, skb); 971 skb_queue_tail(&ppp->file.xq, skb);
@@ -1136,8 +1136,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1136 a four-byte PPP header on each packet */ 1136 a four-byte PPP header on each packet */
1137 *skb_push(skb, 2) = 1; 1137 *skb_push(skb, 2) = 1;
1138 if (ppp->pass_filter && 1138 if (ppp->pass_filter &&
1139 sk_run_filter(skb, ppp->pass_filter, 1139 sk_run_filter(skb, ppp->pass_filter) == 0) {
1140 ppp->pass_len) == 0) {
1141 if (ppp->debug & 1) 1140 if (ppp->debug & 1)
1142 printk(KERN_DEBUG "PPP: outbound frame not passed\n"); 1141 printk(KERN_DEBUG "PPP: outbound frame not passed\n");
1143 kfree_skb(skb); 1142 kfree_skb(skb);
@@ -1145,8 +1144,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
1145 } 1144 }
1146 /* if this packet passes the active filter, record the time */ 1145 /* if this packet passes the active filter, record the time */
1147 if (!(ppp->active_filter && 1146 if (!(ppp->active_filter &&
1148 sk_run_filter(skb, ppp->active_filter, 1147 sk_run_filter(skb, ppp->active_filter) == 0))
1149 ppp->active_len) == 0))
1150 ppp->last_xmit = jiffies; 1148 ppp->last_xmit = jiffies;
1151 skb_pull(skb, 2); 1149 skb_pull(skb, 2);
1152#else 1150#else
@@ -1285,6 +1283,11 @@ ppp_push(struct ppp *ppp)
1285} 1283}
1286 1284
1287#ifdef CONFIG_PPP_MULTILINK 1285#ifdef CONFIG_PPP_MULTILINK
1286static bool mp_protocol_compress __read_mostly = true;
1287module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
1288MODULE_PARM_DESC(mp_protocol_compress,
1289 "compress protocol id in multilink fragments");
1290
1288/* 1291/*
1289 * Divide a packet to be transmitted into fragments and 1292 * Divide a packet to be transmitted into fragments and
1290 * send them out the individual links. 1293 * send them out the individual links.
@@ -1347,10 +1350,10 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1347 if (nfree == 0 || nfree < navail / 2) 1350 if (nfree == 0 || nfree < navail / 2)
1348 return 0; /* can't take now, leave it in xmit_pending */ 1351 return 0; /* can't take now, leave it in xmit_pending */
1349 1352
1350 /* Do protocol field compression (XXX this should be optional) */ 1353 /* Do protocol field compression */
1351 p = skb->data; 1354 p = skb->data;
1352 len = skb->len; 1355 len = skb->len;
1353 if (*p == 0) { 1356 if (*p == 0 && mp_protocol_compress) {
1354 ++p; 1357 ++p;
1355 --len; 1358 --len;
1356 } 1359 }
@@ -1470,8 +1473,7 @@ static int ppp_mp_explode(struct ppp *ppp, struct sk_buff *skb)
1470 q = skb_put(frag, flen + hdrlen); 1473 q = skb_put(frag, flen + hdrlen);
1471 1474
1472 /* make the MP header */ 1475 /* make the MP header */
1473 q[0] = PPP_MP >> 8; 1476 put_unaligned_be16(PPP_MP, q);
1474 q[1] = PPP_MP;
1475 if (ppp->flags & SC_MP_XSHORTSEQ) { 1477 if (ppp->flags & SC_MP_XSHORTSEQ) {
1476 q[2] = bits + ((ppp->nxseq >> 8) & 0xf); 1478 q[2] = bits + ((ppp->nxseq >> 8) & 0xf);
1477 q[3] = ppp->nxseq; 1479 q[3] = ppp->nxseq;
@@ -1758,8 +1760,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1758 1760
1759 *skb_push(skb, 2) = 0; 1761 *skb_push(skb, 2) = 0;
1760 if (ppp->pass_filter && 1762 if (ppp->pass_filter &&
1761 sk_run_filter(skb, ppp->pass_filter, 1763 sk_run_filter(skb, ppp->pass_filter) == 0) {
1762 ppp->pass_len) == 0) {
1763 if (ppp->debug & 1) 1764 if (ppp->debug & 1)
1764 printk(KERN_DEBUG "PPP: inbound frame " 1765 printk(KERN_DEBUG "PPP: inbound frame "
1765 "not passed\n"); 1766 "not passed\n");
@@ -1767,8 +1768,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
1767 return; 1768 return;
1768 } 1769 }
1769 if (!(ppp->active_filter && 1770 if (!(ppp->active_filter &&
1770 sk_run_filter(skb, ppp->active_filter, 1771 sk_run_filter(skb, ppp->active_filter) == 0))
1771 ppp->active_len) == 0))
1772 ppp->last_recv = jiffies; 1772 ppp->last_recv = jiffies;
1773 __skb_pull(skb, 2); 1773 __skb_pull(skb, 2);
1774 } else 1774 } else
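New in the ppp_generic hunks is a module parameter that makes multilink protocol-field compression optional rather than unconditional, resolving the old "XXX this should be optional" comment. The declaration and the way the transmit path consults it:

        static bool mp_protocol_compress __read_mostly = true;
        module_param(mp_protocol_compress, bool, S_IRUGO | S_IWUSR);
        MODULE_PARM_DESC(mp_protocol_compress,
                         "compress protocol id in multilink fragments");

        /* in ppp_mp_explode(): strip the leading 0x00 of the protocol
         * field only while the parameter is left at its default */
        if (*p == 0 && mp_protocol_compress) {
                ++p;
                --len;
        }

Loading with mp_protocol_compress=0, or writing 0 to /sys/module/ppp_generic/parameters/mp_protocol_compress, keeps the full two-byte protocol field in every fragment, which can help with peers that do not accept the compressed form.
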
diff --git a/drivers/net/ppp_mppe.c b/drivers/net/ppp_mppe.c
index 6d1a1b80cc3e..9a1849a83e2a 100644
--- a/drivers/net/ppp_mppe.c
+++ b/drivers/net/ppp_mppe.c
@@ -55,6 +55,7 @@
55#include <linux/ppp_defs.h> 55#include <linux/ppp_defs.h>
56#include <linux/ppp-comp.h> 56#include <linux/ppp-comp.h>
57#include <linux/scatterlist.h> 57#include <linux/scatterlist.h>
58#include <asm/unaligned.h>
58 59
59#include "ppp_mppe.h" 60#include "ppp_mppe.h"
60 61
@@ -395,16 +396,14 @@ mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
395 */ 396 */
396 obuf[0] = PPP_ADDRESS(ibuf); 397 obuf[0] = PPP_ADDRESS(ibuf);
397 obuf[1] = PPP_CONTROL(ibuf); 398 obuf[1] = PPP_CONTROL(ibuf);
398 obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ 399 put_unaligned_be16(PPP_COMP, obuf + 2);
399 obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */
400 obuf += PPP_HDRLEN; 400 obuf += PPP_HDRLEN;
401 401
402 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; 402 state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
403 if (state->debug >= 7) 403 if (state->debug >= 7)
404 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, 404 printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
405 state->ccount); 405 state->ccount);
406 obuf[0] = state->ccount >> 8; 406 put_unaligned_be16(state->ccount, obuf);
407 obuf[1] = state->ccount & 0xff;
408 407
409 if (!state->stateful || /* stateless mode */ 408 if (!state->stateful || /* stateless mode */
410 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ 409 ((state->ccount & 0xff) == 0xff) || /* "flag" packet */
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index 4c95ec3fb8d4..4e6b72f57de8 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -45,6 +45,7 @@
45#include <linux/completion.h> 45#include <linux/completion.h>
46#include <linux/init.h> 46#include <linux/init.h>
47#include <linux/slab.h> 47#include <linux/slab.h>
48#include <asm/unaligned.h>
48#include <asm/uaccess.h> 49#include <asm/uaccess.h>
49 50
50#define PPP_VERSION "2.4.2" 51#define PPP_VERSION "2.4.2"
@@ -563,7 +564,7 @@ ppp_sync_txmunge(struct syncppp *ap, struct sk_buff *skb)
563 int islcp; 564 int islcp;
564 565
565 data = skb->data; 566 data = skb->data;
566 proto = (data[0] << 8) + data[1]; 567 proto = get_unaligned_be16(data);
567 568
568 /* LCP packets with codes between 1 (configure-request) 569 /* LCP packets with codes between 1 (configure-request)
569 * and 7 (code-reject) must be sent as though no options 570 * and 7 (code-reject) must be sent as though no options
diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c
index ccbc91326bfa..164cfad6ce79 100644
--- a/drivers/net/pptp.c
+++ b/drivers/net/pptp.c
@@ -277,7 +277,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
277 iph->tos = 0; 277 iph->tos = 0;
278 iph->daddr = rt->rt_dst; 278 iph->daddr = rt->rt_dst;
279 iph->saddr = rt->rt_src; 279 iph->saddr = rt->rt_src;
280 iph->ttl = dst_metric(&rt->dst, RTAX_HOPLIMIT); 280 iph->ttl = ip4_dst_hoplimit(&rt->dst);
281 iph->tot_len = htons(skb->len); 281 iph->tot_len = htons(skb->len);
282 282
283 skb_dst_drop(skb); 283 skb_dst_drop(skb);
@@ -673,8 +673,7 @@ static int __init pptp_init_module(void)
673 int err = 0; 673 int err = 0;
674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n"); 674 pr_info("PPTP driver version " PPTP_DRIVER_VERSION "\n");
675 675
676 callid_sock = __vmalloc((MAX_CALLID + 1) * sizeof(void *), 676 callid_sock = vzalloc((MAX_CALLID + 1) * sizeof(void *));
677 GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
678 if (!callid_sock) { 677 if (!callid_sock) {
679 pr_err("PPTP: cann't allocate memory\n"); 678 pr_err("PPTP: cann't allocate memory\n");
680 return -ENOMEM; 679 return -ENOMEM;
diff --git a/drivers/net/pxa168_eth.c b/drivers/net/pxa168_eth.c
index 18c0297743f1..1b63c8aef121 100644
--- a/drivers/net/pxa168_eth.c
+++ b/drivers/net/pxa168_eth.c
@@ -1450,16 +1450,11 @@ static void pxa168_get_drvinfo(struct net_device *dev,
1450 strncpy(info->bus_info, "N/A", 32); 1450 strncpy(info->bus_info, "N/A", 32);
1451} 1451}
1452 1452
1453static u32 pxa168_get_link(struct net_device *dev)
1454{
1455 return !!netif_carrier_ok(dev);
1456}
1457
1458static const struct ethtool_ops pxa168_ethtool_ops = { 1453static const struct ethtool_ops pxa168_ethtool_ops = {
1459 .get_settings = pxa168_get_settings, 1454 .get_settings = pxa168_get_settings,
1460 .set_settings = pxa168_set_settings, 1455 .set_settings = pxa168_set_settings,
1461 .get_drvinfo = pxa168_get_drvinfo, 1456 .get_drvinfo = pxa168_get_drvinfo,
1462 .get_link = pxa168_get_link, 1457 .get_link = ethtool_op_get_link,
1463}; 1458};
1464 1459
1465static const struct net_device_ops pxa168_eth_netdev_ops = { 1460static const struct net_device_ops pxa168_eth_netdev_ops = {
@@ -1607,7 +1602,7 @@ static int pxa168_eth_remove(struct platform_device *pdev)
1607 mdiobus_unregister(pep->smi_bus); 1602 mdiobus_unregister(pep->smi_bus);
1608 mdiobus_free(pep->smi_bus); 1603 mdiobus_free(pep->smi_bus);
1609 unregister_netdev(dev); 1604 unregister_netdev(dev);
1610 flush_scheduled_work(); 1605 cancel_work_sync(&pep->tx_timeout_task);
1611 free_netdev(dev); 1606 free_netdev(dev);
1612 platform_set_drvdata(pdev, NULL); 1607 platform_set_drvdata(pdev, NULL);
1613 return 0; 1608 return 0;
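The pxa168 ethtool hunk drops a private get_link callback in favour of the generic helper, which reports the same thing: the netif carrier state. Roughly what the core provides and how a driver wires it up (my_ethtool_ops is a stand-in name):

        #include <linux/ethtool.h>
        #include <linux/netdevice.h>

        /* net/core/ethtool.c implements, in essence:
         *   u32 ethtool_op_get_link(struct net_device *dev)
         *   {
         *           return netif_carrier_ok(dev) ? 1 : 0;
         *   }
         */

        static const struct ethtool_ops my_ethtool_ops = {
                .get_link = ethtool_op_get_link,
        };
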
diff --git a/drivers/net/qla3xxx.c b/drivers/net/qla3xxx.c
index 7496ed2c34ab..1a3584edd79c 100644
--- a/drivers/net/qla3xxx.c
+++ b/drivers/net/qla3xxx.c
@@ -2467,7 +2467,7 @@ map_error:
2467static netdev_tx_t ql3xxx_send(struct sk_buff *skb, 2467static netdev_tx_t ql3xxx_send(struct sk_buff *skb,
2468 struct net_device *ndev) 2468 struct net_device *ndev)
2469{ 2469{
2470 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 2470 struct ql3_adapter *qdev = netdev_priv(ndev);
2471 struct ql3xxx_port_registers __iomem *port_regs = 2471 struct ql3xxx_port_registers __iomem *port_regs =
2472 qdev->mem_map_registers; 2472 qdev->mem_map_registers;
2473 struct ql_tx_buf_cb *tx_cb; 2473 struct ql_tx_buf_cb *tx_cb;
@@ -3390,7 +3390,7 @@ static void ql_set_mac_info(struct ql3_adapter *qdev)
3390 3390
3391static void ql_display_dev_info(struct net_device *ndev) 3391static void ql_display_dev_info(struct net_device *ndev)
3392{ 3392{
3393 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3393 struct ql3_adapter *qdev = netdev_priv(ndev);
3394 struct pci_dev *pdev = qdev->pdev; 3394 struct pci_dev *pdev = qdev->pdev;
3395 3395
3396 netdev_info(ndev, 3396 netdev_info(ndev,
@@ -3573,7 +3573,7 @@ static int ql3xxx_open(struct net_device *ndev)
3573 3573
3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p) 3574static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3575{ 3575{
3576 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3576 struct ql3_adapter *qdev = netdev_priv(ndev);
3577 struct ql3xxx_port_registers __iomem *port_regs = 3577 struct ql3xxx_port_registers __iomem *port_regs =
3578 qdev->mem_map_registers; 3578 qdev->mem_map_registers;
3579 struct sockaddr *addr = p; 3579 struct sockaddr *addr = p;
@@ -3608,7 +3608,7 @@ static int ql3xxx_set_mac_address(struct net_device *ndev, void *p)
3608 3608
3609static void ql3xxx_tx_timeout(struct net_device *ndev) 3609static void ql3xxx_tx_timeout(struct net_device *ndev)
3610{ 3610{
3611 struct ql3_adapter *qdev = (struct ql3_adapter *)netdev_priv(ndev); 3611 struct ql3_adapter *qdev = netdev_priv(ndev);
3612 3612
3613 netdev_err(ndev, "Resetting...\n"); 3613 netdev_err(ndev, "Resetting...\n");
3614 /* 3614 /*
diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 8ecc170c9b74..44e316fd67b8 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#ifndef _QLCNIC_H_ 8#ifndef _QLCNIC_H_
@@ -51,8 +34,8 @@
51 34
52#define _QLCNIC_LINUX_MAJOR 5 35#define _QLCNIC_LINUX_MAJOR 5
53#define _QLCNIC_LINUX_MINOR 0 36#define _QLCNIC_LINUX_MINOR 0
54#define _QLCNIC_LINUX_SUBVERSION 11 37#define _QLCNIC_LINUX_SUBVERSION 15
55#define QLCNIC_LINUX_VERSIONID "5.0.11" 38#define QLCNIC_LINUX_VERSIONID "5.0.15"
56#define QLCNIC_DRV_IDC_VER 0x01 39#define QLCNIC_DRV_IDC_VER 0x01
57#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\ 40#define QLCNIC_DRIVER_VERSION ((_QLCNIC_LINUX_MAJOR << 16) |\
58 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION)) 41 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
@@ -306,6 +289,26 @@ struct uni_data_desc{
306 u32 reserved[5]; 289 u32 reserved[5];
307}; 290};
308 291
292/* Flash Defines and Structures */
293#define QLCNIC_FLT_LOCATION 0x3F1000
294#define QLCNIC_FW_IMAGE_REGION 0x74
295struct qlcnic_flt_header {
296 u16 version;
297 u16 len;
298 u16 checksum;
299 u16 reserved;
300};
301
302struct qlcnic_flt_entry {
303 u8 region;
304 u8 reserved0;
305 u8 attrib;
306 u8 reserved1;
307 u32 size;
308 u32 start_addr;
309 u32 end_add;
310};
311
309/* Magic number to let user know flash is programmed */ 312/* Magic number to let user know flash is programmed */
310#define QLCNIC_BDINFO_MAGIC 0x12345678 313#define QLCNIC_BDINFO_MAGIC 0x12345678
311 314
@@ -798,7 +801,6 @@ struct qlcnic_nic_intr_coalesce {
798#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16 801#define QLCNIC_H2C_OPCODE_GET_NET_STATS 16
799#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17 802#define QLCNIC_H2C_OPCODE_PROXY_UPDATE_P2V 17
800#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18 803#define QLCNIC_H2C_OPCODE_CONFIG_IPADDR 18
801#define QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK 19
802#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20 804#define QLCNIC_H2C_OPCODE_PROXY_STOP_DONE 20
803#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21 805#define QLCNIC_H2C_OPCODE_GET_LINKEVENT 21
804#define QLCNIC_C2C_OPCODE 22 806#define QLCNIC_C2C_OPCODE 22
@@ -923,6 +925,7 @@ struct qlcnic_ipaddr {
923#define QLCNIC_MACSPOOF 0x200 925#define QLCNIC_MACSPOOF 0x200
924#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400 926#define QLCNIC_MAC_OVERRIDE_DISABLED 0x400
925#define QLCNIC_PROMISC_DISABLED 0x800 927#define QLCNIC_PROMISC_DISABLED 0x800
928#define QLCNIC_NEED_FLR 0x1000
926#define QLCNIC_IS_MSI_FAMILY(adapter) \ 929#define QLCNIC_IS_MSI_FAMILY(adapter) \
927 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED)) 930 ((adapter)->flags & (QLCNIC_MSI_ENABLED | QLCNIC_MSIX_ENABLED))
928 931
@@ -942,6 +945,7 @@ struct qlcnic_ipaddr {
942 945
943#define QLCNIC_INTERRUPT_TEST 1 946#define QLCNIC_INTERRUPT_TEST 1
944#define QLCNIC_LOOPBACK_TEST 2 947#define QLCNIC_LOOPBACK_TEST 2
948#define QLCNIC_LED_TEST 3
945 949
946#define QLCNIC_FILTER_AGE 80 950#define QLCNIC_FILTER_AGE 80
947#define QLCNIC_READD_AGE 20 951#define QLCNIC_READD_AGE 20
@@ -1126,8 +1130,7 @@ struct qlcnic_eswitch {
1126/* Return codes for Error handling */ 1130/* Return codes for Error handling */
1127#define QL_STATUS_INVALID_PARAM -1 1131#define QL_STATUS_INVALID_PARAM -1
1128 1132
1129#define MAX_BW 100 1133#define MAX_BW 100 /* % of link speed */
1130#define MIN_BW 1
1131#define MAX_VLAN_ID 4095 1134#define MAX_VLAN_ID 4095
1132#define MIN_VLAN_ID 2 1135#define MIN_VLAN_ID 2
1133#define MAX_TX_QUEUES 1 1136#define MAX_TX_QUEUES 1
@@ -1135,7 +1138,7 @@ struct qlcnic_eswitch {
1135#define DEFAULT_MAC_LEARN 1 1138#define DEFAULT_MAC_LEARN 1
1136 1139
1137#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID) 1140#define IS_VALID_VLAN(vlan) (vlan >= MIN_VLAN_ID && vlan < MAX_VLAN_ID)
1138#define IS_VALID_BW(bw) (bw >= MIN_BW && bw <= MAX_BW) 1141#define IS_VALID_BW(bw) (bw <= MAX_BW)
1139#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES) 1142#define IS_VALID_TX_QUEUES(que) (que > 0 && que <= MAX_TX_QUEUES)
1140#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES) 1143#define IS_VALID_RX_QUEUES(que) (que > 0 && que <= MAX_RX_QUEUES)
1141 1144
@@ -1314,21 +1317,15 @@ int qlcnic_config_bridged_mode(struct qlcnic_adapter *adapter, u32 enable);
1314int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter); 1317int qlcnic_send_lro_cleanup(struct qlcnic_adapter *adapter);
1315void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter, 1318void qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
1316 struct qlcnic_host_tx_ring *tx_ring); 1319 struct qlcnic_host_tx_ring *tx_ring);
1317void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter);
1318int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter);
1319void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *); 1320void qlcnic_fetch_mac(struct qlcnic_adapter *, u32, u32, u8, u8 *);
1320 1321
1321/* Functions from qlcnic_main.c */ 1322/* Functions from qlcnic_main.c */
1322int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter);
1323void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter);
1324int qlcnic_reset_context(struct qlcnic_adapter *); 1323int qlcnic_reset_context(struct qlcnic_adapter *);
1325u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter, 1324u32 qlcnic_issue_cmd(struct qlcnic_adapter *adapter,
1326 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd); 1325 u32 pci_fn, u32 version, u32 arg1, u32 arg2, u32 arg3, u32 cmd);
1327void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings); 1326void qlcnic_diag_free_res(struct net_device *netdev, int max_sds_rings);
1328int qlcnic_diag_alloc_res(struct net_device *netdev, int test); 1327int qlcnic_diag_alloc_res(struct net_device *netdev, int test);
1329int qlcnic_check_loopback_buff(unsigned char *data);
1330netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev); 1328netdev_tx_t qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
1331void qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring);
1332 1329
1333/* Management functions */ 1330/* Management functions */
1334int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*); 1331int qlcnic_get_mac_address(struct qlcnic_adapter *, u8*);
@@ -1377,6 +1374,8 @@ static const struct qlcnic_brdinfo qlcnic_boards[] = {
1377 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"}, 1374 "3200 Series Single Port 10Gb Intelligent Ethernet Adapter"},
1378 {0x1077, 0x8020, 0x103c, 0x3733, 1375 {0x1077, 0x8020, 0x103c, 0x3733,
1379 "NC523SFP 10Gb 2-port Server Adapter"}, 1376 "NC523SFP 10Gb 2-port Server Adapter"},
1377 {0x1077, 0x8020, 0x103c, 0x3346,
1378 "CN1000Q Dual Port Converged Network Adapter"},
1380 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"}, 1379 {0x1077, 0x8020, 0x0, 0x0, "cLOM8214 1/10GbE Controller"},
1381}; 1380};
1382 1381
diff --git a/drivers/net/qlcnic/qlcnic_ctx.c b/drivers/net/qlcnic/qlcnic_ctx.c
index 1cdc05dade6b..27631f23b3fd 100644
--- a/drivers/net/qlcnic/qlcnic_ctx.c
+++ b/drivers/net/qlcnic/qlcnic_ctx.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include "qlcnic.h" 8#include "qlcnic.h"
@@ -480,6 +463,11 @@ int qlcnic_fw_create_ctx(struct qlcnic_adapter *adapter)
480{ 463{
481 int err; 464 int err;
482 465
466 if (adapter->flags & QLCNIC_NEED_FLR) {
467 pci_reset_function(adapter->pdev);
468 adapter->flags &= ~QLCNIC_NEED_FLR;
469 }
470
483 err = qlcnic_fw_cmd_create_rx_ctx(adapter); 471 err = qlcnic_fw_cmd_create_rx_ctx(adapter);
484 if (err) 472 if (err)
485 return err; 473 return err;
diff --git a/drivers/net/qlcnic/qlcnic_ethtool.c b/drivers/net/qlcnic/qlcnic_ethtool.c
index ec21d24015c4..4c14510e2a87 100644
--- a/drivers/net/qlcnic/qlcnic_ethtool.c
+++ b/drivers/net/qlcnic/qlcnic_ethtool.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/types.h> 8#include <linux/types.h>
@@ -101,8 +84,7 @@ static const char qlcnic_device_gstrings_stats[][ETH_GSTRING_LEN] = {
101static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = { 84static const char qlcnic_gstrings_test[][ETH_GSTRING_LEN] = {
102 "Register_Test_on_offline", 85 "Register_Test_on_offline",
103 "Link_Test_on_offline", 86 "Link_Test_on_offline",
104 "Interrupt_Test_offline", 87 "Interrupt_Test_offline"
105 "Loopback_Test_offline"
106}; 88};
107 89
108#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test) 90#define QLCNIC_TEST_LEN ARRAY_SIZE(qlcnic_gstrings_test)
@@ -643,104 +625,6 @@ static int qlcnic_get_sset_count(struct net_device *dev, int sset)
643 } 625 }
644} 626}
645 627
646#define QLC_ILB_PKT_SIZE 64
647#define QLC_NUM_ILB_PKT 16
648#define QLC_ILB_MAX_RCV_LOOP 10
649
650static void qlcnic_create_loopback_buff(unsigned char *data)
651{
652 unsigned char random_data[] = {0xa8, 0x06, 0x45, 0x00};
653 memset(data, 0x4e, QLC_ILB_PKT_SIZE);
654 memset(data, 0xff, 12);
655 memcpy(data + 12, random_data, sizeof(random_data));
656}
657
658int qlcnic_check_loopback_buff(unsigned char *data)
659{
660 unsigned char buff[QLC_ILB_PKT_SIZE];
661 qlcnic_create_loopback_buff(buff);
662 return memcmp(data, buff, QLC_ILB_PKT_SIZE);
663}
664
665static int qlcnic_do_ilb_test(struct qlcnic_adapter *adapter)
666{
667 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
668 struct qlcnic_host_sds_ring *sds_ring = &recv_ctx->sds_rings[0];
669 struct sk_buff *skb;
670 int i, loop, cnt = 0;
671
672 for (i = 0; i < QLC_NUM_ILB_PKT; i++) {
673 skb = dev_alloc_skb(QLC_ILB_PKT_SIZE);
674 qlcnic_create_loopback_buff(skb->data);
675 skb_put(skb, QLC_ILB_PKT_SIZE);
676
677 adapter->diag_cnt = 0;
678 qlcnic_xmit_frame(skb, adapter->netdev);
679
680 loop = 0;
681 do {
682 msleep(1);
683 qlcnic_process_rcv_ring_diag(sds_ring);
684 } while (loop++ < QLC_ILB_MAX_RCV_LOOP &&
685 !adapter->diag_cnt);
686
687 dev_kfree_skb_any(skb);
688
689 if (!adapter->diag_cnt)
690 dev_warn(&adapter->pdev->dev, "ILB Test: %dth packet"
691 " not recevied\n", i + 1);
692 else
693 cnt++;
694 }
695 if (cnt != i) {
696 dev_warn(&adapter->pdev->dev, "ILB Test failed\n");
697 return -1;
698 }
699 return 0;
700}
701
702static int qlcnic_loopback_test(struct net_device *netdev)
703{
704 struct qlcnic_adapter *adapter = netdev_priv(netdev);
705 int max_sds_rings = adapter->max_sds_rings;
706 int ret;
707
708 if (adapter->op_mode == QLCNIC_NON_PRIV_FUNC) {
709 dev_warn(&adapter->pdev->dev, "Loopback test not supported"
710 "for non privilege function\n");
711 return 0;
712 }
713
714 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
715 return -EIO;
716
717 if (qlcnic_request_quiscent_mode(adapter)) {
718 clear_bit(__QLCNIC_RESETTING, &adapter->state);
719 return -EIO;
720 }
721
722 ret = qlcnic_diag_alloc_res(netdev, QLCNIC_LOOPBACK_TEST);
723 if (ret)
724 goto clear_it;
725
726 ret = qlcnic_set_ilb_mode(adapter);
727 if (ret)
728 goto done;
729
730 ret = qlcnic_do_ilb_test(adapter);
731
732 qlcnic_clear_ilb_mode(adapter);
733
734done:
735 qlcnic_diag_free_res(netdev, max_sds_rings);
736
737clear_it:
738 qlcnic_clear_quiscent_mode(adapter);
739 adapter->max_sds_rings = max_sds_rings;
740 clear_bit(__QLCNIC_RESETTING, &adapter->state);
741 return ret;
742}
743
744static int qlcnic_irq_test(struct net_device *netdev) 628static int qlcnic_irq_test(struct net_device *netdev)
745{ 629{
746 struct qlcnic_adapter *adapter = netdev_priv(netdev); 630 struct qlcnic_adapter *adapter = netdev_priv(netdev);
@@ -788,14 +672,11 @@ qlcnic_diag_test(struct net_device *dev, struct ethtool_test *eth_test,
788 if (data[1]) 672 if (data[1])
789 eth_test->flags |= ETH_TEST_FL_FAILED; 673 eth_test->flags |= ETH_TEST_FL_FAILED;
790 674
791 if (eth_test->flags == ETH_TEST_FL_OFFLINE) { 675 if (eth_test->flags & ETH_TEST_FL_OFFLINE) {
792 data[2] = qlcnic_irq_test(dev); 676 data[2] = qlcnic_irq_test(dev);
793 if (data[2]) 677 if (data[2])
794 eth_test->flags |= ETH_TEST_FL_FAILED; 678 eth_test->flags |= ETH_TEST_FL_FAILED;
795 679
796 data[3] = qlcnic_loopback_test(dev);
797 if (data[3])
798 eth_test->flags |= ETH_TEST_FL_FAILED;
799 680
800 } 681 }
801} 682}
@@ -925,9 +806,10 @@ static int qlcnic_set_rx_csum(struct net_device *dev, u32 data)
925 806
926 dev->features &= ~NETIF_F_LRO; 807 dev->features &= ~NETIF_F_LRO;
927 qlcnic_send_lro_cleanup(adapter); 808 qlcnic_send_lro_cleanup(adapter);
809 dev_info(&adapter->pdev->dev,
810 "disabling LRO as rx_csum is off\n");
928 } 811 }
929 adapter->rx_csum = !!data; 812 adapter->rx_csum = !!data;
930 dev_info(&adapter->pdev->dev, "disabling LRO as rx_csum is off\n");
931 return 0; 813 return 0;
932} 814}
933 815
@@ -952,16 +834,27 @@ static int qlcnic_set_tso(struct net_device *dev, u32 data)
952static int qlcnic_blink_led(struct net_device *dev, u32 val) 834static int qlcnic_blink_led(struct net_device *dev, u32 val)
953{ 835{
954 struct qlcnic_adapter *adapter = netdev_priv(dev); 836 struct qlcnic_adapter *adapter = netdev_priv(dev);
837 int max_sds_rings = adapter->max_sds_rings;
838 int dev_down = 0;
955 int ret; 839 int ret;
956 840
957 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) 841 if (!test_bit(__QLCNIC_DEV_UP, &adapter->state)) {
958 return -EIO; 842 dev_down = 1;
843 if (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
844 return -EIO;
845
846 ret = qlcnic_diag_alloc_res(dev, QLCNIC_LED_TEST);
847 if (ret) {
848 clear_bit(__QLCNIC_RESETTING, &adapter->state);
849 return ret;
850 }
851 }
959 852
960 ret = adapter->nic_ops->config_led(adapter, 1, 0xf); 853 ret = adapter->nic_ops->config_led(adapter, 1, 0xf);
961 if (ret) { 854 if (ret) {
962 dev_err(&adapter->pdev->dev, 855 dev_err(&adapter->pdev->dev,
963 "Failed to set LED blink state.\n"); 856 "Failed to set LED blink state.\n");
964 return ret; 857 goto done;
965 } 858 }
966 859
967 msleep_interruptible(val * 1000); 860 msleep_interruptible(val * 1000);
@@ -970,10 +863,16 @@ static int qlcnic_blink_led(struct net_device *dev, u32 val)
970 if (ret) { 863 if (ret) {
971 dev_err(&adapter->pdev->dev, 864 dev_err(&adapter->pdev->dev,
972 "Failed to reset LED blink state.\n"); 865 "Failed to reset LED blink state.\n");
973 return ret; 866 goto done;
974 } 867 }
975 868
976 return 0; 869done:
870 if (dev_down) {
871 qlcnic_diag_free_res(dev, max_sds_rings);
872 clear_bit(__QLCNIC_RESETTING, &adapter->state);
873 }
874 return ret;
875
977} 876}
978 877
979static void 878static void
diff --git a/drivers/net/qlcnic/qlcnic_hdr.h b/drivers/net/qlcnic/qlcnic_hdr.h
index 4290b80cde1a..726ef555b6bc 100644
--- a/drivers/net/qlcnic/qlcnic_hdr.h
+++ b/drivers/net/qlcnic/qlcnic_hdr.h
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#ifndef __QLCNIC_HDR_H_ 8#ifndef __QLCNIC_HDR_H_
@@ -638,7 +621,7 @@ enum {
638#define PCIX_INT_MASK (0x10104) 621#define PCIX_INT_MASK (0x10104)
639 622
640#define PCIX_OCM_WINDOW (0x10800) 623#define PCIX_OCM_WINDOW (0x10800)
641#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func)) 624#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x4 * (func))
642 625
643#define PCIX_TARGET_STATUS (0x10118) 626#define PCIX_TARGET_STATUS (0x10118)
644#define PCIX_TARGET_STATUS_F1 (0x10160) 627#define PCIX_TARGET_STATUS_F1 (0x10160)
@@ -722,7 +705,7 @@ enum {
722#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */ 705#define QLCNIC_DEV_NPAR_OPER 1 /* NPAR Operational */
723#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */ 706#define QLCNIC_DEV_NPAR_OPER_TIMEO 30 /* Operational time out */
724 707
725#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) &= (1 << (FN * 4))) 708#define QLC_DEV_CHECK_ACTIVE(VAL, FN) ((VAL) & (1 << (FN * 4)))
726#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4))) 709#define QLC_DEV_SET_REF_CNT(VAL, FN) ((VAL) |= (1 << (FN * 4)))
727#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4))) 710#define QLC_DEV_CLR_REF_CNT(VAL, FN) ((VAL) &= ~(1 << (FN * 4)))
728#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4))) 711#define QLC_DEV_SET_RST_RDY(VAL, FN) ((VAL) |= (1 << (FN * 4)))
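Among the qlcnic_hdr.h changes, QLC_DEV_CHECK_ACTIVE goes from an assignment ('&=') to a pure test ('&'). The old form silently clobbered its argument whenever the macro was used as a predicate; a minimal illustration of the difference:

        #define CHECK_ACTIVE_OLD(val, fn)  ((val) &= (1 << ((fn) * 4)))  /* modifies val */
        #define CHECK_ACTIVE_NEW(val, fn)  ((val) &  (1 << ((fn) * 4)))  /* read-only test */

        static void demo(void)
        {
                u32 ref = 0x11;                  /* functions 0 and 1 marked active */

                if (CHECK_ACTIVE_OLD(ref, 1))    /* true, but ref is now 0x10 */
                        ;
                if (CHECK_ACTIVE_NEW(ref, 0))    /* false: function 0's bit was lost above */
                        ;
        }
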
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index 7a47a2a7ee27..616940f0a8d0 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include "qlcnic.h" 8#include "qlcnic.h"
@@ -398,7 +381,7 @@ qlcnic_sre_macaddr_change(struct qlcnic_adapter *adapter, u8 *addr,
398 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1); 381 return qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
399} 382}
400 383
401static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, u8 *addr) 384static int qlcnic_nic_add_mac(struct qlcnic_adapter *adapter, const u8 *addr)
402{ 385{
403 struct list_head *head; 386 struct list_head *head;
404 struct qlcnic_mac_list_s *cur; 387 struct qlcnic_mac_list_s *cur;
@@ -432,7 +415,9 @@ void qlcnic_set_multi(struct net_device *netdev)
432{ 415{
433 struct qlcnic_adapter *adapter = netdev_priv(netdev); 416 struct qlcnic_adapter *adapter = netdev_priv(netdev);
434 struct netdev_hw_addr *ha; 417 struct netdev_hw_addr *ha;
435 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }; 418 static const u8 bcast_addr[ETH_ALEN] = {
419 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
420 };
436 u32 mode = VPORT_MISS_MODE_DROP; 421 u32 mode = VPORT_MISS_MODE_DROP;
437 422
438 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state)) 423 if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
@@ -638,10 +623,11 @@ int qlcnic_config_rss(struct qlcnic_adapter *adapter, int enable)
638 u64 word; 623 u64 word;
639 int i, rv; 624 int i, rv;
640 625
641 const u64 key[] = { 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL, 626 static const u64 key[] = {
642 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL, 627 0xbeac01fa6a42b73bULL, 0x8030f20c77cb2da3ULL,
643 0x255b0ec26d5a56daULL }; 628 0xae7b30b4d0ca2bcbULL, 0x43a38fb04167253dULL,
644 629 0x255b0ec26d5a56daULL
630 };
645 631
646 memset(&req, 0, sizeof(struct qlcnic_nic_req)); 632 memset(&req, 0, sizeof(struct qlcnic_nic_req));
647 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23); 633 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
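The two hunks above (the broadcast address and the RSS hash key) apply the same pattern: data that is only ever read is declared static const, so the compiler emits one shared copy in .rodata instead of rebuilding the array on the stack at every call. A small stand-alone sketch of the idea (illustrative values, not the driver's key):

#include <stdint.h>
#include <stdio.h>

static uint64_t sum_stack_copy(void)
{
	/* Non-static: the initialiser is copied onto the stack on each call. */
	const uint64_t key[] = { 0x1111, 0x2222, 0x3333 };

	return key[0] + key[1] + key[2];
}

static uint64_t sum_rodata(void)
{
	/* static const: a single read-only copy, no per-call setup code. */
	static const uint64_t key[] = { 0x1111, 0x2222, 0x3333 };

	return key[0] + key[1] + key[2];
}

int main(void)
{
	printf("%llu %llu\n", (unsigned long long)sum_stack_copy(),
	       (unsigned long long)sum_rodata());
	return 0;
}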
@@ -1234,56 +1220,3 @@ int qlcnic_config_led(struct qlcnic_adapter *adapter, u32 state, u32 rate)
1234 1220
1235 return rv; 1221 return rv;
1236} 1222}
1237
1238static int qlcnic_set_fw_loopback(struct qlcnic_adapter *adapter, u32 flag)
1239{
1240 struct qlcnic_nic_req req;
1241 int rv;
1242 u64 word;
1243
1244 memset(&req, 0, sizeof(struct qlcnic_nic_req));
1245 req.qhdr = cpu_to_le64(QLCNIC_HOST_REQUEST << 23);
1246
1247 word = QLCNIC_H2C_OPCODE_CONFIG_LOOPBACK |
1248 ((u64)adapter->portnum << 16);
1249 req.req_hdr = cpu_to_le64(word);
1250 req.words[0] = cpu_to_le64(flag);
1251
1252 rv = qlcnic_send_cmd_descs(adapter, (struct cmd_desc_type0 *)&req, 1);
1253 if (rv)
1254 dev_err(&adapter->pdev->dev,
1255 "%sting loopback mode failed.\n",
1256 flag ? "Set" : "Reset");
1257 return rv;
1258}
1259
1260int qlcnic_set_ilb_mode(struct qlcnic_adapter *adapter)
1261{
1262 if (qlcnic_set_fw_loopback(adapter, 1))
1263 return -EIO;
1264
1265 if (qlcnic_nic_set_promisc(adapter,
1266 VPORT_MISS_MODE_ACCEPT_ALL)) {
1267 qlcnic_set_fw_loopback(adapter, 0);
1268 return -EIO;
1269 }
1270
1271 msleep(1000);
1272 return 0;
1273}
1274
1275void qlcnic_clear_ilb_mode(struct qlcnic_adapter *adapter)
1276{
1277 int mode = VPORT_MISS_MODE_DROP;
1278 struct net_device *netdev = adapter->netdev;
1279
1280 qlcnic_set_fw_loopback(adapter, 0);
1281
1282 if (netdev->flags & IFF_PROMISC)
1283 mode = VPORT_MISS_MODE_ACCEPT_ALL;
1284 else if (netdev->flags & IFF_ALLMULTI)
1285 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1286
1287 qlcnic_nic_set_promisc(adapter, mode);
1288 msleep(1000);
1289}
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 0d180c6e41fe..a7f1d5b7e811 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/netdevice.h> 8#include <linux/netdevice.h>
@@ -236,12 +219,11 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
236 tx_ring->num_desc = adapter->num_txd; 219 tx_ring->num_desc = adapter->num_txd;
237 tx_ring->txq = netdev_get_tx_queue(netdev, 0); 220 tx_ring->txq = netdev_get_tx_queue(netdev, 0);
238 221
239 cmd_buf_arr = vmalloc(TX_BUFF_RINGSIZE(tx_ring)); 222 cmd_buf_arr = vzalloc(TX_BUFF_RINGSIZE(tx_ring));
240 if (cmd_buf_arr == NULL) { 223 if (cmd_buf_arr == NULL) {
241 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n"); 224 dev_err(&netdev->dev, "failed to allocate cmd buffer ring\n");
242 goto err_out; 225 goto err_out;
243 } 226 }
244 memset(cmd_buf_arr, 0, TX_BUFF_RINGSIZE(tx_ring));
245 tx_ring->cmd_buf_arr = cmd_buf_arr; 227 tx_ring->cmd_buf_arr = cmd_buf_arr;
246 228
247 recv_ctx = &adapter->recv_ctx; 229 recv_ctx = &adapter->recv_ctx;
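This hunk and the next replace a vmalloc() followed by an explicit memset() with vzalloc(), which allocates and zeroes in a single call. A hedged kernel-style sketch of the equivalence (function names invented for illustration):

#include <linux/vmalloc.h>
#include <linux/string.h>

/* Before: allocate, then zero by hand. */
static void *ring_alloc_old(unsigned long size)
{
	void *buf = vmalloc(size);

	if (!buf)
		return NULL;
	memset(buf, 0, size);
	return buf;
}

/* After: vzalloc() is vmalloc() plus the zeroing, so the memset disappears. */
static void *ring_alloc_new(unsigned long size)
{
	return vzalloc(size);
}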
@@ -275,14 +257,12 @@ int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
275 rds_ring->dma_size + NET_IP_ALIGN; 257 rds_ring->dma_size + NET_IP_ALIGN;
276 break; 258 break;
277 } 259 }
278 rds_ring->rx_buf_arr = (struct qlcnic_rx_buffer *) 260 rds_ring->rx_buf_arr = vzalloc(RCV_BUFF_RINGSIZE(rds_ring));
279 vmalloc(RCV_BUFF_RINGSIZE(rds_ring));
280 if (rds_ring->rx_buf_arr == NULL) { 261 if (rds_ring->rx_buf_arr == NULL) {
281 dev_err(&netdev->dev, "Failed to allocate " 262 dev_err(&netdev->dev, "Failed to allocate "
282 "rx buffer ring %d\n", ring); 263 "rx buffer ring %d\n", ring);
283 goto err_out; 264 goto err_out;
284 } 265 }
285 memset(rds_ring->rx_buf_arr, 0, RCV_BUFF_RINGSIZE(rds_ring));
286 INIT_LIST_HEAD(&rds_ring->free_list); 266 INIT_LIST_HEAD(&rds_ring->free_list);
287 /* 267 /*
288 * Now go through all of them, set reference handles 268 * Now go through all of them, set reference handles
@@ -647,12 +627,73 @@ qlcnic_setup_idc_param(struct qlcnic_adapter *adapter) {
647 return 0; 627 return 0;
648} 628}
649 629
630static int qlcnic_get_flt_entry(struct qlcnic_adapter *adapter, u8 region,
631 struct qlcnic_flt_entry *region_entry)
632{
633 struct qlcnic_flt_header flt_hdr;
634 struct qlcnic_flt_entry *flt_entry;
635 int i = 0, ret;
636 u32 entry_size;
637
638 memset(region_entry, 0, sizeof(struct qlcnic_flt_entry));
639 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION,
640 (u8 *)&flt_hdr,
641 sizeof(struct qlcnic_flt_header));
642 if (ret) {
643 dev_warn(&adapter->pdev->dev,
644 "error reading flash layout header\n");
645 return -EIO;
646 }
647
648 entry_size = flt_hdr.len - sizeof(struct qlcnic_flt_header);
649 flt_entry = (struct qlcnic_flt_entry *)vzalloc(entry_size);
650 if (flt_entry == NULL) {
651 dev_warn(&adapter->pdev->dev, "error allocating memory\n");
652 return -EIO;
653 }
654
655 ret = qlcnic_rom_fast_read_words(adapter, QLCNIC_FLT_LOCATION +
656 sizeof(struct qlcnic_flt_header),
657 (u8 *)flt_entry, entry_size);
658 if (ret) {
659 dev_warn(&adapter->pdev->dev,
660 "error reading flash layout entries\n");
661 goto err_out;
662 }
663
664 while (i < (entry_size/sizeof(struct qlcnic_flt_entry))) {
665 if (flt_entry[i].region == region)
666 break;
667 i++;
668 }
669 if (i >= (entry_size/sizeof(struct qlcnic_flt_entry))) {
670 dev_warn(&adapter->pdev->dev,
671 "region=%x not found in %d regions\n", region, i);
672 ret = -EIO;
673 goto err_out;
674 }
675 memcpy(region_entry, &flt_entry[i], sizeof(struct qlcnic_flt_entry));
676
677err_out:
678 vfree(flt_entry);
679 return ret;
680}
681
650int 682int
651qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter) 683qlcnic_check_flash_fw_ver(struct qlcnic_adapter *adapter)
652{ 684{
685 struct qlcnic_flt_entry fw_entry;
653 u32 ver = -1, min_ver; 686 u32 ver = -1, min_ver;
687 int ret;
654 688
655 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET, (int *)&ver); 689 ret = qlcnic_get_flt_entry(adapter, QLCNIC_FW_IMAGE_REGION, &fw_entry);
690 if (!ret)
691 /* 0-4:-signature, 4-8:-fw version */
692 qlcnic_rom_fast_read(adapter, fw_entry.start_addr + 4,
693 (int *)&ver);
694 else
695 qlcnic_rom_fast_read(adapter, QLCNIC_FW_VERSION_OFFSET,
696 (int *)&ver);
656 697
657 ver = QLCNIC_DECODE_VERSION(ver); 698 ver = QLCNIC_DECODE_VERSION(ver);
658 min_ver = QLCNIC_MIN_FW_VERSION; 699 min_ver = QLCNIC_MIN_FW_VERSION;
@@ -1693,99 +1734,6 @@ qlcnic_post_rx_buffers_nodb(struct qlcnic_adapter *adapter,
1693 spin_unlock(&rds_ring->lock); 1734 spin_unlock(&rds_ring->lock);
1694} 1735}
1695 1736
1696static void dump_skb(struct sk_buff *skb)
1697{
1698 int i;
1699 unsigned char *data = skb->data;
1700
1701 for (i = 0; i < skb->len; i++) {
1702 printk("%02x ", data[i]);
1703 if ((i & 0x0f) == 8)
1704 printk("\n");
1705 }
1706}
1707
1708static struct qlcnic_rx_buffer *
1709qlcnic_process_rcv_diag(struct qlcnic_adapter *adapter,
1710 struct qlcnic_host_sds_ring *sds_ring,
1711 int ring, u64 sts_data0)
1712{
1713 struct qlcnic_recv_context *recv_ctx = &adapter->recv_ctx;
1714 struct qlcnic_rx_buffer *buffer;
1715 struct sk_buff *skb;
1716 struct qlcnic_host_rds_ring *rds_ring;
1717 int index, length, cksum, pkt_offset;
1718
1719 if (unlikely(ring >= adapter->max_rds_rings))
1720 return NULL;
1721
1722 rds_ring = &recv_ctx->rds_rings[ring];
1723
1724 index = qlcnic_get_sts_refhandle(sts_data0);
1725 if (unlikely(index >= rds_ring->num_desc))
1726 return NULL;
1727
1728 buffer = &rds_ring->rx_buf_arr[index];
1729
1730 length = qlcnic_get_sts_totallength(sts_data0);
1731 cksum = qlcnic_get_sts_status(sts_data0);
1732 pkt_offset = qlcnic_get_sts_pkt_offset(sts_data0);
1733
1734 skb = qlcnic_process_rxbuf(adapter, rds_ring, index, cksum);
1735 if (!skb)
1736 return buffer;
1737
1738 if (length > rds_ring->skb_size)
1739 skb_put(skb, rds_ring->skb_size);
1740 else
1741 skb_put(skb, length);
1742
1743 if (pkt_offset)
1744 skb_pull(skb, pkt_offset);
1745
1746 if (!qlcnic_check_loopback_buff(skb->data))
1747 adapter->diag_cnt++;
1748 else
1749 dump_skb(skb);
1750
1751 dev_kfree_skb_any(skb);
1752 adapter->stats.rx_pkts++;
1753 adapter->stats.rxbytes += length;
1754
1755 return buffer;
1756}
1757
1758void
1759qlcnic_process_rcv_ring_diag(struct qlcnic_host_sds_ring *sds_ring)
1760{
1761 struct qlcnic_adapter *adapter = sds_ring->adapter;
1762 struct status_desc *desc;
1763 struct qlcnic_rx_buffer *rxbuf;
1764 u64 sts_data0;
1765
1766 int opcode, ring, desc_cnt;
1767 u32 consumer = sds_ring->consumer;
1768
1769 desc = &sds_ring->desc_head[consumer];
1770 sts_data0 = le64_to_cpu(desc->status_desc_data[0]);
1771
1772 if (!(sts_data0 & STATUS_OWNER_HOST))
1773 return;
1774
1775 desc_cnt = qlcnic_get_sts_desc_cnt(sts_data0);
1776 opcode = qlcnic_get_sts_opcode(sts_data0);
1777
1778 ring = qlcnic_get_sts_type(sts_data0);
1779 rxbuf = qlcnic_process_rcv_diag(adapter, sds_ring,
1780 ring, sts_data0);
1781
1782 desc->status_desc_data[0] = cpu_to_le64(STATUS_OWNER_PHANTOM);
1783 consumer = get_next_index(consumer, sds_ring->num_desc);
1784
1785 sds_ring->consumer = consumer;
1786 writel(consumer, sds_ring->crb_sts_consumer);
1787}
1788
1789void 1737void
1790qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2, 1738qlcnic_fetch_mac(struct qlcnic_adapter *adapter, u32 off1, u32 off2,
1791 u8 alt_mac, u8 *mac) 1739 u8 alt_mac, u8 *mac)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index a3dcd04be22f..37c04b4fade3 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1,25 +1,8 @@
1/* 1/*
2 * Copyright (C) 2009 - QLogic Corporation. 2 * QLogic qlcnic NIC Driver
3 * All rights reserved. 3 * Copyright (c) 2009-2010 QLogic Corporation
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
18 * MA 02111-1307, USA.
19 *
20 * The full GNU General Public License is included in this distribution
21 * in the file called "COPYING".
22 * 4 *
5 * See LICENSE.qlcnic for copyright and licensing details.
23 */ 6 */
24 7
25#include <linux/slab.h> 8#include <linux/slab.h>
@@ -48,15 +31,15 @@ static const char qlcnic_driver_string[] = "QLogic 1/10 GbE "
48 31
49static struct workqueue_struct *qlcnic_wq; 32static struct workqueue_struct *qlcnic_wq;
50static int qlcnic_mac_learn; 33static int qlcnic_mac_learn;
51module_param(qlcnic_mac_learn, int, 0644); 34module_param(qlcnic_mac_learn, int, 0444);
52MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)"); 35MODULE_PARM_DESC(qlcnic_mac_learn, "Mac Filter (0=disabled, 1=enabled)");
53 36
54static int use_msi = 1; 37static int use_msi = 1;
55module_param(use_msi, int, 0644); 38module_param(use_msi, int, 0444);
56MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled"); 39MODULE_PARM_DESC(use_msi, "MSI interrupt (0=disabled, 1=enabled");
57 40
58static int use_msi_x = 1; 41static int use_msi_x = 1;
59module_param(use_msi_x, int, 0644); 42module_param(use_msi_x, int, 0444);
60MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled"); 43MODULE_PARM_DESC(use_msi_x, "MSI-X interrupt (0=disabled, 1=enabled");
61 44
62static int auto_fw_reset = AUTO_FW_RESET_ENABLED; 45static int auto_fw_reset = AUTO_FW_RESET_ENABLED;
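The permission argument of module_param() sets the file mode under /sys/module/<module>/parameters/; dropping it from 0644 to 0444 in this and the following hunk makes the parameters read-only after load, presumably because the driver only samples them during probe and a later sysfs write would silently have no effect. A hedged minimal example of the pattern (hypothetical module, not this driver):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int use_feature = 1;
/* 0444: visible in sysfs but not writable, because the value is only
 * consulted once, at module initialisation. */
module_param(use_feature, int, 0444);
MODULE_PARM_DESC(use_feature, "Feature toggle (0=disabled, 1=enabled)");

static int __init demo_init(void)
{
	pr_info("demo: use_feature=%d\n", use_feature);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");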
@@ -64,11 +47,11 @@ module_param(auto_fw_reset, int, 0644);
64MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled"); 47MODULE_PARM_DESC(auto_fw_reset, "Auto firmware reset (0=disabled, 1=enabled");
65 48
66static int load_fw_file; 49static int load_fw_file;
67module_param(load_fw_file, int, 0644); 50module_param(load_fw_file, int, 0444);
68MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file"); 51MODULE_PARM_DESC(load_fw_file, "Load firmware from (0=flash, 1=file");
69 52
70static int qlcnic_config_npars; 53static int qlcnic_config_npars;
71module_param(qlcnic_config_npars, int, 0644); 54module_param(qlcnic_config_npars, int, 0444);
72MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled"); 55MODULE_PARM_DESC(qlcnic_config_npars, "Configure NPARs (0=disabled, 1=enabled");
73 56
74static int __devinit qlcnic_probe(struct pci_dev *pdev, 57static int __devinit qlcnic_probe(struct pci_dev *pdev,
@@ -1546,6 +1529,8 @@ qlcnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1546 if (err) 1529 if (err)
1547 goto err_out_iounmap; 1530 goto err_out_iounmap;
1548 1531
1532 adapter->flags |= QLCNIC_NEED_FLR;
1533
1549 err = adapter->nic_ops->start_firmware(adapter); 1534 err = adapter->nic_ops->start_firmware(adapter);
1550 if (err) { 1535 if (err) {
1551 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n"); 1536 dev_err(&pdev->dev, "Loading fw failed.Please Reboot\n");
@@ -2854,61 +2839,6 @@ qlcnic_set_npar_non_operational(struct qlcnic_adapter *adapter)
2854 qlcnic_api_unlock(adapter); 2839 qlcnic_api_unlock(adapter);
2855} 2840}
2856 2841
2857/* Caller should held RESETTING bit.
2858 * This should be call in sync with qlcnic_request_quiscent_mode.
2859 */
2860void qlcnic_clear_quiscent_mode(struct qlcnic_adapter *adapter)
2861{
2862 qlcnic_clr_drv_state(adapter);
2863 qlcnic_api_lock(adapter);
2864 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_READY);
2865 qlcnic_api_unlock(adapter);
2866}
2867
2868/* Caller should held RESETTING bit.
2869 */
2870int qlcnic_request_quiscent_mode(struct qlcnic_adapter *adapter)
2871{
2872 u8 timeo = adapter->dev_init_timeo / 2;
2873 u32 state;
2874
2875 if (qlcnic_api_lock(adapter))
2876 return -EIO;
2877
2878 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2879 if (state != QLCNIC_DEV_READY)
2880 return -EIO;
2881
2882 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE, QLCNIC_DEV_NEED_QUISCENT);
2883 qlcnic_api_unlock(adapter);
2884 QLCDB(adapter, DRV, "NEED QUISCENT state set\n");
2885 qlcnic_idc_debug_info(adapter, 0);
2886
2887 qlcnic_set_drv_state(adapter, QLCNIC_DEV_NEED_QUISCENT);
2888
2889 do {
2890 msleep(2000);
2891 state = QLCRD32(adapter, QLCNIC_CRB_DEV_STATE);
2892 if (state == QLCNIC_DEV_QUISCENT)
2893 return 0;
2894 if (!qlcnic_check_drv_state(adapter)) {
2895 if (qlcnic_api_lock(adapter))
2896 return -EIO;
2897 QLCWR32(adapter, QLCNIC_CRB_DEV_STATE,
2898 QLCNIC_DEV_QUISCENT);
2899 qlcnic_api_unlock(adapter);
2900 QLCDB(adapter, DRV, "QUISCENT mode set\n");
2901 return 0;
2902 }
2903 } while (--timeo);
2904
2905 dev_err(&adapter->pdev->dev, "Failed to quiesce device, DRV_STATE=%08x"
2906 " DRV_ACTIVE=%08x\n", QLCRD32(adapter, QLCNIC_CRB_DRV_STATE),
2907 QLCRD32(adapter, QLCNIC_CRB_DRV_ACTIVE));
2908 qlcnic_clear_quiscent_mode(adapter);
2909 return -EIO;
2910}
2911
2912/*Transit to RESET state from READY state only */ 2842/*Transit to RESET state from READY state only */
2913static void 2843static void
2914qlcnic_dev_request_reset(struct qlcnic_adapter *adapter) 2844qlcnic_dev_request_reset(struct qlcnic_adapter *adapter)
@@ -3587,9 +3517,12 @@ validate_esw_config(struct qlcnic_adapter *adapter,
3587 case QLCNIC_PORT_DEFAULTS: 3517 case QLCNIC_PORT_DEFAULTS:
3588 if (QLC_DEV_GET_DRV(op_mode, pci_func) != 3518 if (QLC_DEV_GET_DRV(op_mode, pci_func) !=
3589 QLCNIC_NON_PRIV_FUNC) { 3519 QLCNIC_NON_PRIV_FUNC) {
3590 esw_cfg[i].mac_anti_spoof = 0; 3520 if (esw_cfg[i].mac_anti_spoof != 0)
3591 esw_cfg[i].mac_override = 1; 3521 return QL_STATUS_INVALID_PARAM;
3592 esw_cfg[i].promisc_mode = 1; 3522 if (esw_cfg[i].mac_override != 1)
3523 return QL_STATUS_INVALID_PARAM;
3524 if (esw_cfg[i].promisc_mode != 1)
3525 return QL_STATUS_INVALID_PARAM;
3593 } 3526 }
3594 break; 3527 break;
3595 case QLCNIC_ADD_VLAN: 3528 case QLCNIC_ADD_VLAN:
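The validate_esw_config() hunk above changes the non-privileged-function path from silently overwriting the caller's switch settings to rejecting them with QL_STATUS_INVALID_PARAM, so a setting the function is not allowed to change fails visibly instead of being quietly 'corrected'. A tiny stand-alone sketch of the two styles (structure and field names invented for illustration):

#include <errno.h>

struct esw_cfg {
	int mac_anti_spoof;
	int mac_override;
	int promisc_mode;
};

/* Old style: quietly force the only values a restricted caller may use. */
static int apply_cfg_silent(struct esw_cfg *c)
{
	c->mac_anti_spoof = 0;
	c->mac_override = 1;
	c->promisc_mode = 1;
	return 0;
}

/* New style: refuse anything a restricted caller is not allowed to request. */
static int apply_cfg_strict(const struct esw_cfg *c)
{
	if (c->mac_anti_spoof != 0 || c->mac_override != 1 ||
	    c->promisc_mode != 1)
		return -EINVAL;
	return 0;
}

The strict form is friendlier to management tools, which otherwise believe a setting took effect when it did not.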
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index 9787dff90d3f..4757c59a07a2 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00.25.00.00-01" 19#define DRV_VERSION "v1.00.00.27.00.00-01"
20 20
21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */ 21#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
22 22
@@ -2222,6 +2222,7 @@ int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
2222int ql_unpause_mpi_risc(struct ql_adapter *qdev); 2222int ql_unpause_mpi_risc(struct ql_adapter *qdev);
2223int ql_pause_mpi_risc(struct ql_adapter *qdev); 2223int ql_pause_mpi_risc(struct ql_adapter *qdev);
2224int ql_hard_reset_mpi_risc(struct ql_adapter *qdev); 2224int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
2225int ql_soft_reset_mpi_risc(struct ql_adapter *qdev);
2225int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf, 2226int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
2226 u32 ram_addr, int word_count); 2227 u32 ram_addr, int word_count);
2227int ql_core_dump(struct ql_adapter *qdev, 2228int ql_core_dump(struct ql_adapter *qdev,
@@ -2237,6 +2238,7 @@ int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
2237int ql_mb_get_port_cfg(struct ql_adapter *qdev); 2238int ql_mb_get_port_cfg(struct ql_adapter *qdev);
2238int ql_mb_set_port_cfg(struct ql_adapter *qdev); 2239int ql_mb_set_port_cfg(struct ql_adapter *qdev);
2239int ql_wait_fifo_empty(struct ql_adapter *qdev); 2240int ql_wait_fifo_empty(struct ql_adapter *qdev);
2241void ql_get_dump(struct ql_adapter *qdev, void *buff);
2240void ql_gen_reg_dump(struct ql_adapter *qdev, 2242void ql_gen_reg_dump(struct ql_adapter *qdev,
2241 struct ql_reg_dump *mpi_coredump); 2243 struct ql_reg_dump *mpi_coredump);
2242netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev); 2244netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index 4747492935ef..fca804f36d61 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1317,9 +1317,28 @@ void ql_gen_reg_dump(struct ql_adapter *qdev,
1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]); 1317 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
1318 if (status) 1318 if (status)
1319 return; 1319 return;
1320}
1321
1322void ql_get_dump(struct ql_adapter *qdev, void *buff)
1323{
1324 /*
1325 * If the dump has already been taken and is stored
1326 * in our internal buffer and if force dump is set then
1327 * just start the spool to dump it to the log file
1328 * and also, take a snapshot of the general regs to
 1329 * the user's buffer or else take complete dump
1330 * to the user's buffer if force is not set.
1331 */
1320 1332
1321 if (test_bit(QL_FRC_COREDUMP, &qdev->flags)) 1333 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags)) {
1334 if (!ql_core_dump(qdev, buff))
1335 ql_soft_reset_mpi_risc(qdev);
1336 else
1337 netif_err(qdev, drv, qdev->ndev, "coredump failed!\n");
1338 } else {
1339 ql_gen_reg_dump(qdev, buff);
1322 ql_get_core_dump(qdev); 1340 ql_get_core_dump(qdev);
1341 }
1323} 1342}
1324 1343
1325/* Coredump to messages log file using separate worker thread */ 1344/* Coredump to messages log file using separate worker thread */
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 4892d64f4e05..8149cc9de4ca 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -375,7 +375,10 @@ static void ql_get_drvinfo(struct net_device *ndev,
375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32); 375 strncpy(drvinfo->bus_info, pci_name(qdev->pdev), 32);
376 drvinfo->n_stats = 0; 376 drvinfo->n_stats = 0;
377 drvinfo->testinfo_len = 0; 377 drvinfo->testinfo_len = 0;
378 drvinfo->regdump_len = 0; 378 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
379 drvinfo->regdump_len = sizeof(struct ql_mpi_coredump);
380 else
381 drvinfo->regdump_len = sizeof(struct ql_reg_dump);
379 drvinfo->eedump_len = 0; 382 drvinfo->eedump_len = 0;
380} 383}
381 384
@@ -547,7 +550,12 @@ static void ql_self_test(struct net_device *ndev,
547 550
548static int ql_get_regs_len(struct net_device *ndev) 551static int ql_get_regs_len(struct net_device *ndev)
549{ 552{
550 return sizeof(struct ql_reg_dump); 553 struct ql_adapter *qdev = netdev_priv(ndev);
554
555 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
556 return sizeof(struct ql_mpi_coredump);
557 else
558 return sizeof(struct ql_reg_dump);
551} 559}
552 560
553static void ql_get_regs(struct net_device *ndev, 561static void ql_get_regs(struct net_device *ndev,
@@ -555,7 +563,12 @@ static void ql_get_regs(struct net_device *ndev,
555{ 563{
556 struct ql_adapter *qdev = netdev_priv(ndev); 564 struct ql_adapter *qdev = netdev_priv(ndev);
557 565
558 ql_gen_reg_dump(qdev, p); 566 ql_get_dump(qdev, p);
567 qdev->core_is_dumped = 0;
568 if (!test_bit(QL_FRC_COREDUMP, &qdev->flags))
569 regs->len = sizeof(struct ql_mpi_coredump);
570 else
571 regs->len = sizeof(struct ql_reg_dump);
559} 572}
560 573
561static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 574static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
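The ethtool core sizes the register-dump buffer from get_regs_len() and then hands it to get_regs(), so the two callbacks changed above must agree on the length for every state of QL_FRC_COREDUMP, which is why both now test the same flag. A hedged sketch of that pairing for a hypothetical driver (struct my_priv and the dump sizes are made up):

#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/string.h>

struct my_regs_small { u32 reg[16]; };
struct my_regs_full  { u32 reg[256]; };

struct my_priv {
	bool full_dump;			/* analogous to the QL_FRC_COREDUMP test */
};

static int my_get_regs_len(struct net_device *ndev)
{
	struct my_priv *priv = netdev_priv(ndev);

	/* Whatever is reported here is exactly what get_regs() must fill. */
	return priv->full_dump ? sizeof(struct my_regs_full)
			       : sizeof(struct my_regs_small);
}

static void my_get_regs(struct net_device *ndev, struct ethtool_regs *regs,
			void *buf)
{
	regs->len = my_get_regs_len(ndev);
	memset(buf, 0, regs->len);	/* a real driver copies registers here */
}

static const struct ethtool_ops my_ethtool_ops = {
	.get_regs_len	= my_get_regs_len,
	.get_regs	= my_get_regs,
};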
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index 2555b1d34f34..49bfa5813068 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -3548,12 +3548,13 @@ err_irq:
3548 3548
3549static int ql_start_rss(struct ql_adapter *qdev) 3549static int ql_start_rss(struct ql_adapter *qdev)
3550{ 3550{
3551 u8 init_hash_seed[] = {0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 3551 static const u8 init_hash_seed[] = {
3552 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 3552 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3553 0xb0, 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 3553 0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3554 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 3554 0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3555 0x30, 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 3555 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3556 0xbe, 0xac, 0x01, 0xfa}; 3556 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3557 };
3557 struct ricb *ricb = &qdev->ricb; 3558 struct ricb *ricb = &qdev->ricb;
3558 int status = 0; 3559 int status = 0;
3559 int i; 3560 int i;
@@ -3844,7 +3845,7 @@ static int ql_adapter_reset(struct ql_adapter *qdev)
3844 3845
3845static void ql_display_dev_info(struct net_device *ndev) 3846static void ql_display_dev_info(struct net_device *ndev)
3846{ 3847{
3847 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 3848 struct ql_adapter *qdev = netdev_priv(ndev);
3848 3849
3849 netif_info(qdev, probe, qdev->ndev, 3850 netif_info(qdev, probe, qdev->ndev,
3850 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, " 3851 "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
@@ -4264,7 +4265,7 @@ static struct net_device_stats *qlge_get_stats(struct net_device
4264 4265
4265static void qlge_set_multicast_list(struct net_device *ndev) 4266static void qlge_set_multicast_list(struct net_device *ndev)
4266{ 4267{
4267 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4268 struct ql_adapter *qdev = netdev_priv(ndev);
4268 struct netdev_hw_addr *ha; 4269 struct netdev_hw_addr *ha;
4269 int i, status; 4270 int i, status;
4270 4271
@@ -4354,7 +4355,7 @@ exit:
4354 4355
4355static int qlge_set_mac_address(struct net_device *ndev, void *p) 4356static int qlge_set_mac_address(struct net_device *ndev, void *p)
4356{ 4357{
4357 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4358 struct ql_adapter *qdev = netdev_priv(ndev);
4358 struct sockaddr *addr = p; 4359 struct sockaddr *addr = p;
4359 int status; 4360 int status;
4360 4361
@@ -4377,7 +4378,7 @@ static int qlge_set_mac_address(struct net_device *ndev, void *p)
4377 4378
4378static void qlge_tx_timeout(struct net_device *ndev) 4379static void qlge_tx_timeout(struct net_device *ndev)
4379{ 4380{
4380 struct ql_adapter *qdev = (struct ql_adapter *)netdev_priv(ndev); 4381 struct ql_adapter *qdev = netdev_priv(ndev);
4381 ql_queue_asic_error(qdev); 4382 ql_queue_asic_error(qdev);
4382} 4383}
4383 4384
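The recurring cleanup in qlge_main.c above is purely cosmetic: netdev_priv() returns void *, which C converts implicitly to any object pointer type, so the explicit (struct ql_adapter *) casts add nothing and are dropped. A minimal illustration of the idiom (hypothetical private struct):

#include <linux/netdevice.h>

struct demo_priv {
	int msg_enable;
};

static void demo_show(struct net_device *ndev)
{
	/* No cast needed: the void * from netdev_priv() converts implicitly. */
	struct demo_priv *priv = netdev_priv(ndev);

	netdev_info(ndev, "msg_enable=%d\n", priv->msg_enable);
}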
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index a2e919bcb3c6..ff2bf8a4e247 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -87,7 +87,7 @@ exit:
87 return status; 87 return status;
88} 88}
89 89
90static int ql_soft_reset_mpi_risc(struct ql_adapter *qdev) 90int ql_soft_reset_mpi_risc(struct ql_adapter *qdev)
91{ 91{
92 int status; 92 int status;
93 status = ql_write_mpi_reg(qdev, 0x00001010, 1); 93 status = ql_write_mpi_reg(qdev, 0x00001010, 1);
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 0b014c894686..27e6f6d43cac 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -1153,6 +1153,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1153 lp->mii_bus = mdiobus_alloc(); 1153 lp->mii_bus = mdiobus_alloc();
1154 if (!lp->mii_bus) { 1154 if (!lp->mii_bus) {
1155 dev_err(&pdev->dev, "mdiobus_alloc() failed\n"); 1155 dev_err(&pdev->dev, "mdiobus_alloc() failed\n");
1156 err = -ENOMEM;
1156 goto err_out_unmap; 1157 goto err_out_unmap;
1157 } 1158 }
1158 1159
@@ -1165,6 +1166,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1165 lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); 1166 lp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
1166 if (!lp->mii_bus->irq) { 1167 if (!lp->mii_bus->irq) {
1167 dev_err(&pdev->dev, "mii_bus irq allocation failed\n"); 1168 dev_err(&pdev->dev, "mii_bus irq allocation failed\n");
1169 err = -ENOMEM;
1168 goto err_out_mdio; 1170 goto err_out_mdio;
1169 } 1171 }
1170 1172
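Both r6040 hunks fix the same class of bug: on the failure path, err still held the value left by an earlier, successful step, so a failed allocation could make probe return 0 and leave a half-initialised device behind. Setting the error code immediately before the goto is the usual fix; a hedged stand-alone sketch of the pattern (helpers are illustrative, not the driver's):

#include <errno.h>
#include <stdlib.h>

static int setup_registers(void)
{
	return 0;			/* stands in for earlier, successful steps */
}

static int demo_probe(int **ring_out)
{
	int *ring;
	int err;

	err = setup_registers();
	if (err)
		return err;

	ring = malloc(64 * sizeof(*ring));
	if (!ring) {
		err = -ENOMEM;		/* without this assignment the stale
					 * err == 0 would be returned below */
		goto err_out;
	}

	*ring_out = ring;
	return 0;

err_out:
	return err;
}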
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 53b13deade95..bb8645ab247c 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -24,6 +24,7 @@
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/dma-mapping.h> 25#include <linux/dma-mapping.h>
26#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
27#include <linux/firmware.h>
27 28
28#include <asm/system.h> 29#include <asm/system.h>
29#include <asm/io.h> 30#include <asm/io.h>
@@ -33,6 +34,9 @@
33#define MODULENAME "r8169" 34#define MODULENAME "r8169"
34#define PFX MODULENAME ": " 35#define PFX MODULENAME ": "
35 36
37#define FIRMWARE_8168D_1 "rtl_nic/rtl8168d-1.fw"
38#define FIRMWARE_8168D_2 "rtl_nic/rtl8168d-2.fw"
39
36#ifdef RTL8169_DEBUG 40#ifdef RTL8169_DEBUG
37#define assert(expr) \ 41#define assert(expr) \
38 if (!(expr)) { \ 42 if (!(expr)) { \
@@ -63,7 +67,6 @@ static const int multicast_filter_limit = 32;
63#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */ 67#define RX_FIFO_THRESH 7 /* 7 means NO threshold, Rx buffer level before first PCI xfer. */
64#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 68#define RX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
65#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */ 69#define TX_DMA_BURST 6 /* Maximum PCI burst, '6' is 1024 */
66#define EarlyTxThld 0x3F /* 0x3F means NO early transmit */
67#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */ 70#define SafeMtu 0x1c20 /* ... actually life sucks beyond ~7k */
68#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */ 71#define InterFrameGap 0x03 /* 3 means InterFrameGap = the shortest one */
69 72
@@ -118,7 +121,8 @@ enum mac_version {
118 RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP 121 RTL_GIGA_MAC_VER_24 = 0x18, // 8168CP
119 RTL_GIGA_MAC_VER_25 = 0x19, // 8168D 122 RTL_GIGA_MAC_VER_25 = 0x19, // 8168D
120 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D 123 RTL_GIGA_MAC_VER_26 = 0x1a, // 8168D
121 RTL_GIGA_MAC_VER_27 = 0x1b // 8168DP 124 RTL_GIGA_MAC_VER_27 = 0x1b, // 8168DP
125 RTL_GIGA_MAC_VER_28 = 0x1c, // 8168DP
122}; 126};
123 127
124#define _R(NAME,MAC,MASK) \ 128#define _R(NAME,MAC,MASK) \
@@ -155,7 +159,8 @@ static const struct {
155 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E 159 _R("RTL8168cp/8111cp", RTL_GIGA_MAC_VER_24, 0xff7e1880), // PCI-E
156 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E 160 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_25, 0xff7e1880), // PCI-E
157 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E 161 _R("RTL8168d/8111d", RTL_GIGA_MAC_VER_26, 0xff7e1880), // PCI-E
158 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880) // PCI-E 162 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_27, 0xff7e1880), // PCI-E
163 _R("RTL8168dp/8111dp", RTL_GIGA_MAC_VER_28, 0xff7e1880) // PCI-E
159}; 164};
160#undef _R 165#undef _R
161 166
@@ -227,7 +232,14 @@ enum rtl_registers {
227 IntrMitigate = 0xe2, 232 IntrMitigate = 0xe2,
228 RxDescAddrLow = 0xe4, 233 RxDescAddrLow = 0xe4,
229 RxDescAddrHigh = 0xe8, 234 RxDescAddrHigh = 0xe8,
230 EarlyTxThres = 0xec, 235 EarlyTxThres = 0xec, /* 8169. Unit of 32 bytes. */
236
237#define NoEarlyTx 0x3f /* Max value : no early transmit. */
238
239 MaxTxPacketSize = 0xec, /* 8101/8168. Unit of 128 bytes. */
240
241#define TxPacketMax (8064 >> 7)
242
231 FuncEvent = 0xf0, 243 FuncEvent = 0xf0,
232 FuncEventMask = 0xf4, 244 FuncEventMask = 0xf4,
233 FuncPresetState = 0xf8, 245 FuncPresetState = 0xf8,
@@ -248,7 +260,7 @@ enum rtl8168_8101_registers {
248#define CSIAR_BYTE_ENABLE 0x0f 260#define CSIAR_BYTE_ENABLE 0x0f
249#define CSIAR_BYTE_ENABLE_SHIFT 12 261#define CSIAR_BYTE_ENABLE_SHIFT 12
250#define CSIAR_ADDR_MASK 0x0fff 262#define CSIAR_ADDR_MASK 0x0fff
251 263 PMCH = 0x6f,
252 EPHYAR = 0x80, 264 EPHYAR = 0x80,
253#define EPHYAR_FLAG 0x80000000 265#define EPHYAR_FLAG 0x80000000
254#define EPHYAR_WRITE_CMD 0x80000000 266#define EPHYAR_WRITE_CMD 0x80000000
@@ -267,6 +279,33 @@ enum rtl8168_8101_registers {
267#define EFUSEAR_DATA_MASK 0xff 279#define EFUSEAR_DATA_MASK 0xff
268}; 280};
269 281
282enum rtl8168_registers {
283 ERIDR = 0x70,
284 ERIAR = 0x74,
285#define ERIAR_FLAG 0x80000000
286#define ERIAR_WRITE_CMD 0x80000000
287#define ERIAR_READ_CMD 0x00000000
288#define ERIAR_ADDR_BYTE_ALIGN 4
289#define ERIAR_EXGMAC 0
290#define ERIAR_MSIX 1
291#define ERIAR_ASF 2
292#define ERIAR_TYPE_SHIFT 16
293#define ERIAR_BYTEEN 0x0f
294#define ERIAR_BYTEEN_SHIFT 12
295 EPHY_RXER_NUM = 0x7c,
296 OCPDR = 0xb0, /* OCP GPHY access */
297#define OCPDR_WRITE_CMD 0x80000000
298#define OCPDR_READ_CMD 0x00000000
299#define OCPDR_REG_MASK 0x7f
300#define OCPDR_GPHY_REG_SHIFT 16
301#define OCPDR_DATA_MASK 0xffff
302 OCPAR = 0xb4,
303#define OCPAR_FLAG 0x80000000
304#define OCPAR_GPHY_WRITE_CMD 0x8000f060
305#define OCPAR_GPHY_READ_CMD 0x0000f060
306 RDSAR1 = 0xd0 /* 8168c only. Undocumented on 8168dp */
307};
308
270enum rtl_register_content { 309enum rtl_register_content {
271 /* InterruptStatusBits */ 310 /* InterruptStatusBits */
272 SYSErr = 0x8000, 311 SYSErr = 0x8000,
@@ -490,11 +529,22 @@ struct rtl8169_private {
490#ifdef CONFIG_R8169_VLAN 529#ifdef CONFIG_R8169_VLAN
491 struct vlan_group *vlgrp; 530 struct vlan_group *vlgrp;
492#endif 531#endif
532
533 struct mdio_ops {
534 void (*write)(void __iomem *, int, int);
535 int (*read)(void __iomem *, int);
536 } mdio_ops;
537
538 struct pll_power_ops {
539 void (*down)(struct rtl8169_private *);
540 void (*up)(struct rtl8169_private *);
541 } pll_power_ops;
542
493 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex); 543 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
494 int (*get_settings)(struct net_device *, struct ethtool_cmd *); 544 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
495 void (*phy_reset_enable)(void __iomem *); 545 void (*phy_reset_enable)(struct rtl8169_private *tp);
496 void (*hw_start)(struct net_device *); 546 void (*hw_start)(struct net_device *);
497 unsigned int (*phy_reset_pending)(void __iomem *); 547 unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
498 unsigned int (*link_ok)(void __iomem *); 548 unsigned int (*link_ok)(void __iomem *);
499 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd); 549 int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
500 int pcie_cap; 550 int pcie_cap;
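The new mdio_ops and pll_power_ops members follow the kernel's usual 'ops structure' pattern: each chip family supplies its own accessor implementations, selected once at init time (in code outside this hunk), and the rest of the driver simply calls through the pointers instead of branching on mac_version at every PHY access. A hedged stand-alone sketch of the mechanism (all names invented for illustration):

#include <stdio.h>

struct chip;

struct mdio_ops {
	void (*write)(struct chip *c, int reg, int val);
	int  (*read)(struct chip *c, int reg);
};

struct chip {
	int regs[32];
	struct mdio_ops mdio;
};

/* One implementation per chip family... */
static void plain_write(struct chip *c, int reg, int val) { c->regs[reg] = val; }
static int  plain_read(struct chip *c, int reg)           { return c->regs[reg]; }

/* ...while common code only ever goes through the ops pointers. */
static int read_phy_id(struct chip *c)
{
	c->mdio.write(c, 0x1f, 0);
	return c->mdio.read(c, 2);
}

int main(void)
{
	struct chip c = { .mdio = { .write = plain_write, .read = plain_read } };

	c.regs[2] = 0x1c1;
	printf("phy id: %#x\n", read_phy_id(&c));
	return 0;
}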
@@ -514,6 +564,8 @@ module_param_named(debug, debug.msg_enable, int, 0);
514MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)"); 564MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
515MODULE_LICENSE("GPL"); 565MODULE_LICENSE("GPL");
516MODULE_VERSION(RTL8169_VERSION); 566MODULE_VERSION(RTL8169_VERSION);
567MODULE_FIRMWARE(FIRMWARE_8168D_1);
568MODULE_FIRMWARE(FIRMWARE_8168D_2);
517 569
518static int rtl8169_open(struct net_device *dev); 570static int rtl8169_open(struct net_device *dev);
519static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb, 571static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
@@ -535,7 +587,82 @@ static int rtl8169_poll(struct napi_struct *napi, int budget);
535static const unsigned int rtl8169_rx_config = 587static const unsigned int rtl8169_rx_config =
536 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift); 588 (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift);
537 589
538static void mdio_write(void __iomem *ioaddr, int reg_addr, int value) 590static u32 ocp_read(struct rtl8169_private *tp, u8 mask, u16 reg)
591{
592 void __iomem *ioaddr = tp->mmio_addr;
593 int i;
594
595 RTL_W32(OCPAR, ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
596 for (i = 0; i < 20; i++) {
597 udelay(100);
598 if (RTL_R32(OCPAR) & OCPAR_FLAG)
599 break;
600 }
601 return RTL_R32(OCPDR);
602}
603
604static void ocp_write(struct rtl8169_private *tp, u8 mask, u16 reg, u32 data)
605{
606 void __iomem *ioaddr = tp->mmio_addr;
607 int i;
608
609 RTL_W32(OCPDR, data);
610 RTL_W32(OCPAR, OCPAR_FLAG | ((u32)mask & 0x0f) << 12 | (reg & 0x0fff));
611 for (i = 0; i < 20; i++) {
612 udelay(100);
613 if ((RTL_R32(OCPAR) & OCPAR_FLAG) == 0)
614 break;
615 }
616}
617
618static void rtl8168_oob_notify(void __iomem *ioaddr, u8 cmd)
619{
620 int i;
621
622 RTL_W8(ERIDR, cmd);
623 RTL_W32(ERIAR, 0x800010e8);
624 msleep(2);
625 for (i = 0; i < 5; i++) {
626 udelay(100);
627 if (!(RTL_R32(ERIDR) & ERIAR_FLAG))
628 break;
629 }
630
631 ocp_write(ioaddr, 0x1, 0x30, 0x00000001);
632}
633
634#define OOB_CMD_RESET 0x00
635#define OOB_CMD_DRIVER_START 0x05
636#define OOB_CMD_DRIVER_STOP 0x06
637
638static void rtl8168_driver_start(struct rtl8169_private *tp)
639{
640 int i;
641
642 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_START);
643
644 for (i = 0; i < 10; i++) {
645 msleep(10);
646 if (ocp_read(tp, 0x0f, 0x0010) & 0x00000800)
647 break;
648 }
649}
650
651static void rtl8168_driver_stop(struct rtl8169_private *tp)
652{
653 int i;
654
655 rtl8168_oob_notify(tp, OOB_CMD_DRIVER_STOP);
656
657 for (i = 0; i < 10; i++) {
658 msleep(10);
659 if ((ocp_read(tp, 0x0f, 0x0010) & 0x00000800) == 0)
660 break;
661 }
662}
663
664
665static void r8169_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
539{ 666{
540 int i; 667 int i;
541 668
@@ -557,7 +684,7 @@ static void mdio_write(void __iomem *ioaddr, int reg_addr, int value)
557 udelay(20); 684 udelay(20);
558} 685}
559 686
560static int mdio_read(void __iomem *ioaddr, int reg_addr) 687static int r8169_mdio_read(void __iomem *ioaddr, int reg_addr)
561{ 688{
562 int i, value = -1; 689 int i, value = -1;
563 690
@@ -583,34 +710,117 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
583 return value; 710 return value;
584} 711}
585 712
586static void mdio_patch(void __iomem *ioaddr, int reg_addr, int value) 713static void r8168dp_1_mdio_access(void __iomem *ioaddr, int reg_addr, u32 data)
714{
715 int i;
716
717 RTL_W32(OCPDR, data |
718 ((reg_addr & OCPDR_REG_MASK) << OCPDR_GPHY_REG_SHIFT));
719 RTL_W32(OCPAR, OCPAR_GPHY_WRITE_CMD);
720 RTL_W32(EPHY_RXER_NUM, 0);
721
722 for (i = 0; i < 100; i++) {
723 mdelay(1);
724 if (!(RTL_R32(OCPAR) & OCPAR_FLAG))
725 break;
726 }
727}
728
729static void r8168dp_1_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
730{
731 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_WRITE_CMD |
732 (value & OCPDR_DATA_MASK));
733}
734
735static int r8168dp_1_mdio_read(void __iomem *ioaddr, int reg_addr)
736{
737 int i;
738
739 r8168dp_1_mdio_access(ioaddr, reg_addr, OCPDR_READ_CMD);
740
741 mdelay(1);
742 RTL_W32(OCPAR, OCPAR_GPHY_READ_CMD);
743 RTL_W32(EPHY_RXER_NUM, 0);
744
745 for (i = 0; i < 100; i++) {
746 mdelay(1);
747 if (RTL_R32(OCPAR) & OCPAR_FLAG)
748 break;
749 }
750
751 return RTL_R32(OCPDR) & OCPDR_DATA_MASK;
752}
753
754#define R8168DP_1_MDIO_ACCESS_BIT 0x00020000
755
756static void r8168dp_2_mdio_start(void __iomem *ioaddr)
757{
758 RTL_W32(0xd0, RTL_R32(0xd0) & ~R8168DP_1_MDIO_ACCESS_BIT);
759}
760
761static void r8168dp_2_mdio_stop(void __iomem *ioaddr)
762{
763 RTL_W32(0xd0, RTL_R32(0xd0) | R8168DP_1_MDIO_ACCESS_BIT);
764}
765
766static void r8168dp_2_mdio_write(void __iomem *ioaddr, int reg_addr, int value)
767{
768 r8168dp_2_mdio_start(ioaddr);
769
770 r8169_mdio_write(ioaddr, reg_addr, value);
771
772 r8168dp_2_mdio_stop(ioaddr);
773}
774
775static int r8168dp_2_mdio_read(void __iomem *ioaddr, int reg_addr)
587{ 776{
588 mdio_write(ioaddr, reg_addr, mdio_read(ioaddr, reg_addr) | value); 777 int value;
778
779 r8168dp_2_mdio_start(ioaddr);
780
781 value = r8169_mdio_read(ioaddr, reg_addr);
782
783 r8168dp_2_mdio_stop(ioaddr);
784
785 return value;
589} 786}
590 787
591static void mdio_plus_minus(void __iomem *ioaddr, int reg_addr, int p, int m) 788static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
789{
790 tp->mdio_ops.write(tp->mmio_addr, location, val);
791}
792
793static int rtl_readphy(struct rtl8169_private *tp, int location)
794{
795 return tp->mdio_ops.read(tp->mmio_addr, location);
796}
797
798static void rtl_patchphy(struct rtl8169_private *tp, int reg_addr, int value)
799{
800 rtl_writephy(tp, reg_addr, rtl_readphy(tp, reg_addr) | value);
801}
802
803static void rtl_w1w0_phy(struct rtl8169_private *tp, int reg_addr, int p, int m)
592{ 804{
593 int val; 805 int val;
594 806
595 val = mdio_read(ioaddr, reg_addr); 807 val = rtl_readphy(tp, reg_addr);
596 mdio_write(ioaddr, reg_addr, (val | p) & ~m); 808 rtl_writephy(tp, reg_addr, (val | p) & ~m);
597} 809}
598 810
599static void rtl_mdio_write(struct net_device *dev, int phy_id, int location, 811static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
600 int val) 812 int val)
601{ 813{
602 struct rtl8169_private *tp = netdev_priv(dev); 814 struct rtl8169_private *tp = netdev_priv(dev);
603 void __iomem *ioaddr = tp->mmio_addr;
604 815
605 mdio_write(ioaddr, location, val); 816 rtl_writephy(tp, location, val);
606} 817}
607 818
608static int rtl_mdio_read(struct net_device *dev, int phy_id, int location) 819static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
609{ 820{
610 struct rtl8169_private *tp = netdev_priv(dev); 821 struct rtl8169_private *tp = netdev_priv(dev);
611 void __iomem *ioaddr = tp->mmio_addr;
612 822
613 return mdio_read(ioaddr, location); 823 return rtl_readphy(tp, location);
614} 824}
615 825
616static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value) 826static void rtl_ephy_write(void __iomem *ioaddr, int reg_addr, int value)
@@ -711,14 +921,16 @@ static void rtl8169_asic_down(void __iomem *ioaddr)
711 RTL_R16(CPlusCmd); 921 RTL_R16(CPlusCmd);
712} 922}
713 923
714static unsigned int rtl8169_tbi_reset_pending(void __iomem *ioaddr) 924static unsigned int rtl8169_tbi_reset_pending(struct rtl8169_private *tp)
715{ 925{
926 void __iomem *ioaddr = tp->mmio_addr;
927
716 return RTL_R32(TBICSR) & TBIReset; 928 return RTL_R32(TBICSR) & TBIReset;
717} 929}
718 930
719static unsigned int rtl8169_xmii_reset_pending(void __iomem *ioaddr) 931static unsigned int rtl8169_xmii_reset_pending(struct rtl8169_private *tp)
720{ 932{
721 return mdio_read(ioaddr, MII_BMCR) & BMCR_RESET; 933 return rtl_readphy(tp, MII_BMCR) & BMCR_RESET;
722} 934}
723 935
724static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr) 936static unsigned int rtl8169_tbi_link_ok(void __iomem *ioaddr)
@@ -731,17 +943,19 @@ static unsigned int rtl8169_xmii_link_ok(void __iomem *ioaddr)
731 return RTL_R8(PHYstatus) & LinkStatus; 943 return RTL_R8(PHYstatus) & LinkStatus;
732} 944}
733 945
734static void rtl8169_tbi_reset_enable(void __iomem *ioaddr) 946static void rtl8169_tbi_reset_enable(struct rtl8169_private *tp)
735{ 947{
948 void __iomem *ioaddr = tp->mmio_addr;
949
736 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset); 950 RTL_W32(TBICSR, RTL_R32(TBICSR) | TBIReset);
737} 951}
738 952
739static void rtl8169_xmii_reset_enable(void __iomem *ioaddr) 953static void rtl8169_xmii_reset_enable(struct rtl8169_private *tp)
740{ 954{
741 unsigned int val; 955 unsigned int val;
742 956
743 val = mdio_read(ioaddr, MII_BMCR) | BMCR_RESET; 957 val = rtl_readphy(tp, MII_BMCR) | BMCR_RESET;
744 mdio_write(ioaddr, MII_BMCR, val & 0xffff); 958 rtl_writephy(tp, MII_BMCR, val & 0xffff);
745} 959}
746 960
747static void __rtl8169_check_link_status(struct net_device *dev, 961static void __rtl8169_check_link_status(struct net_device *dev,
@@ -905,18 +1119,17 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
905 u8 autoneg, u16 speed, u8 duplex) 1119 u8 autoneg, u16 speed, u8 duplex)
906{ 1120{
907 struct rtl8169_private *tp = netdev_priv(dev); 1121 struct rtl8169_private *tp = netdev_priv(dev);
908 void __iomem *ioaddr = tp->mmio_addr;
909 int giga_ctrl, bmcr; 1122 int giga_ctrl, bmcr;
910 1123
911 if (autoneg == AUTONEG_ENABLE) { 1124 if (autoneg == AUTONEG_ENABLE) {
912 int auto_nego; 1125 int auto_nego;
913 1126
914 auto_nego = mdio_read(ioaddr, MII_ADVERTISE); 1127 auto_nego = rtl_readphy(tp, MII_ADVERTISE);
915 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL | 1128 auto_nego |= (ADVERTISE_10HALF | ADVERTISE_10FULL |
916 ADVERTISE_100HALF | ADVERTISE_100FULL); 1129 ADVERTISE_100HALF | ADVERTISE_100FULL);
917 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM; 1130 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
918 1131
919 giga_ctrl = mdio_read(ioaddr, MII_CTRL1000); 1132 giga_ctrl = rtl_readphy(tp, MII_CTRL1000);
920 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF); 1133 giga_ctrl &= ~(ADVERTISE_1000FULL | ADVERTISE_1000HALF);
921 1134
922 /* The 8100e/8101e/8102e do Fast Ethernet only. */ 1135 /* The 8100e/8101e/8102e do Fast Ethernet only. */
@@ -944,12 +1157,12 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
944 * Vendor specific (0x1f) and reserved (0x0e) MII 1157 * Vendor specific (0x1f) and reserved (0x0e) MII
945 * registers. 1158 * registers.
946 */ 1159 */
947 mdio_write(ioaddr, 0x1f, 0x0000); 1160 rtl_writephy(tp, 0x1f, 0x0000);
948 mdio_write(ioaddr, 0x0e, 0x0000); 1161 rtl_writephy(tp, 0x0e, 0x0000);
949 } 1162 }
950 1163
951 mdio_write(ioaddr, MII_ADVERTISE, auto_nego); 1164 rtl_writephy(tp, MII_ADVERTISE, auto_nego);
952 mdio_write(ioaddr, MII_CTRL1000, giga_ctrl); 1165 rtl_writephy(tp, MII_CTRL1000, giga_ctrl);
953 } else { 1166 } else {
954 giga_ctrl = 0; 1167 giga_ctrl = 0;
955 1168
@@ -963,21 +1176,21 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
963 if (duplex == DUPLEX_FULL) 1176 if (duplex == DUPLEX_FULL)
964 bmcr |= BMCR_FULLDPLX; 1177 bmcr |= BMCR_FULLDPLX;
965 1178
966 mdio_write(ioaddr, 0x1f, 0x0000); 1179 rtl_writephy(tp, 0x1f, 0x0000);
967 } 1180 }
968 1181
969 tp->phy_1000_ctrl_reg = giga_ctrl; 1182 tp->phy_1000_ctrl_reg = giga_ctrl;
970 1183
971 mdio_write(ioaddr, MII_BMCR, bmcr); 1184 rtl_writephy(tp, MII_BMCR, bmcr);
972 1185
973 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) || 1186 if ((tp->mac_version == RTL_GIGA_MAC_VER_02) ||
974 (tp->mac_version == RTL_GIGA_MAC_VER_03)) { 1187 (tp->mac_version == RTL_GIGA_MAC_VER_03)) {
975 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) { 1188 if ((speed == SPEED_100) && (autoneg != AUTONEG_ENABLE)) {
976 mdio_write(ioaddr, 0x17, 0x2138); 1189 rtl_writephy(tp, 0x17, 0x2138);
977 mdio_write(ioaddr, 0x0e, 0x0260); 1190 rtl_writephy(tp, 0x0e, 0x0260);
978 } else { 1191 } else {
979 mdio_write(ioaddr, 0x17, 0x2108); 1192 rtl_writephy(tp, 0x17, 0x2108);
980 mdio_write(ioaddr, 0x0e, 0x0000); 1193 rtl_writephy(tp, 0x0e, 0x0000);
981 } 1194 }
982 } 1195 }
983 1196
@@ -1319,9 +1532,12 @@ static void rtl8169_get_mac_version(struct rtl8169_private *tp,
1319 /* 8168D family. */ 1532 /* 8168D family. */
1320 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 }, 1533 { 0x7cf00000, 0x28300000, RTL_GIGA_MAC_VER_26 },
1321 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 }, 1534 { 0x7cf00000, 0x28100000, RTL_GIGA_MAC_VER_25 },
1322 { 0x7c800000, 0x28800000, RTL_GIGA_MAC_VER_27 },
1323 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 }, 1535 { 0x7c800000, 0x28000000, RTL_GIGA_MAC_VER_26 },
1324 1536
1537 /* 8168DP family. */
1538 { 0x7cf00000, 0x28800000, RTL_GIGA_MAC_VER_27 },
1539 { 0x7cf00000, 0x28a00000, RTL_GIGA_MAC_VER_28 },
1540
1325 /* 8168C family. */ 1541 /* 8168C family. */
1326 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 }, 1542 { 0x7cf00000, 0x3cb00000, RTL_GIGA_MAC_VER_24 },
1327 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 }, 1543 { 0x7cf00000, 0x3c900000, RTL_GIGA_MAC_VER_23 },
@@ -1385,15 +1601,172 @@ struct phy_reg {
1385 u16 val; 1601 u16 val;
1386}; 1602};
1387 1603
1388static void rtl_phy_write(void __iomem *ioaddr, const struct phy_reg *regs, int len) 1604static void rtl_writephy_batch(struct rtl8169_private *tp,
1605 const struct phy_reg *regs, int len)
1389{ 1606{
1390 while (len-- > 0) { 1607 while (len-- > 0) {
1391 mdio_write(ioaddr, regs->reg, regs->val); 1608 rtl_writephy(tp, regs->reg, regs->val);
1392 regs++; 1609 regs++;
1393 } 1610 }
1394} 1611}
1395 1612
1396static void rtl8169s_hw_phy_config(void __iomem *ioaddr) 1613#define PHY_READ 0x00000000
1614#define PHY_DATA_OR 0x10000000
1615#define PHY_DATA_AND 0x20000000
1616#define PHY_BJMPN 0x30000000
1617#define PHY_READ_EFUSE 0x40000000
1618#define PHY_READ_MAC_BYTE 0x50000000
1619#define PHY_WRITE_MAC_BYTE 0x60000000
1620#define PHY_CLEAR_READCOUNT 0x70000000
1621#define PHY_WRITE 0x80000000
1622#define PHY_READCOUNT_EQ_SKIP 0x90000000
1623#define PHY_COMP_EQ_SKIPN 0xa0000000
1624#define PHY_COMP_NEQ_SKIPN 0xb0000000
1625#define PHY_WRITE_PREVIOUS 0xc0000000
1626#define PHY_SKIPN 0xd0000000
1627#define PHY_DELAY_MS 0xe0000000
1628#define PHY_WRITE_ERI_WORD 0xf0000000
1629
1630static void
1631rtl_phy_write_fw(struct rtl8169_private *tp, const struct firmware *fw)
1632{
1633 __le32 *phytable = (__le32 *)fw->data;
1634 struct net_device *dev = tp->dev;
1635 size_t index, fw_size = fw->size / sizeof(*phytable);
1636 u32 predata, count;
1637
1638 if (fw->size % sizeof(*phytable)) {
1639 netif_err(tp, probe, dev, "odd sized firmware %zd\n", fw->size);
1640 return;
1641 }
1642
1643 for (index = 0; index < fw_size; index++) {
1644 u32 action = le32_to_cpu(phytable[index]);
1645 u32 regno = (action & 0x0fff0000) >> 16;
1646
1647 switch(action & 0xf0000000) {
1648 case PHY_READ:
1649 case PHY_DATA_OR:
1650 case PHY_DATA_AND:
1651 case PHY_READ_EFUSE:
1652 case PHY_CLEAR_READCOUNT:
1653 case PHY_WRITE:
1654 case PHY_WRITE_PREVIOUS:
1655 case PHY_DELAY_MS:
1656 break;
1657
1658 case PHY_BJMPN:
1659 if (regno > index) {
1660 netif_err(tp, probe, tp->dev,
1661 "Out of range of firmware\n");
1662 return;
1663 }
1664 break;
1665 case PHY_READCOUNT_EQ_SKIP:
1666 if (index + 2 >= fw_size) {
1667 netif_err(tp, probe, tp->dev,
1668 "Out of range of firmware\n");
1669 return;
1670 }
1671 break;
1672 case PHY_COMP_EQ_SKIPN:
1673 case PHY_COMP_NEQ_SKIPN:
1674 case PHY_SKIPN:
1675 if (index + 1 + regno >= fw_size) {
1676 netif_err(tp, probe, tp->dev,
1677 "Out of range of firmware\n");
1678 return;
1679 }
1680 break;
1681
1682 case PHY_READ_MAC_BYTE:
1683 case PHY_WRITE_MAC_BYTE:
1684 case PHY_WRITE_ERI_WORD:
1685 default:
1686 netif_err(tp, probe, tp->dev,
1687 "Invalid action 0x%08x\n", action);
1688 return;
1689 }
1690 }
1691
1692 predata = 0;
1693 count = 0;
1694
1695 for (index = 0; index < fw_size; ) {
1696 u32 action = le32_to_cpu(phytable[index]);
1697 u32 data = action & 0x0000ffff;
1698 u32 regno = (action & 0x0fff0000) >> 16;
1699
1700 if (!action)
1701 break;
1702
1703 switch(action & 0xf0000000) {
1704 case PHY_READ:
1705 predata = rtl_readphy(tp, regno);
1706 count++;
1707 index++;
1708 break;
1709 case PHY_DATA_OR:
1710 predata |= data;
1711 index++;
1712 break;
1713 case PHY_DATA_AND:
1714 predata &= data;
1715 index++;
1716 break;
1717 case PHY_BJMPN:
1718 index -= regno;
1719 break;
1720 case PHY_READ_EFUSE:
1721 predata = rtl8168d_efuse_read(tp->mmio_addr, regno);
1722 index++;
1723 break;
1724 case PHY_CLEAR_READCOUNT:
1725 count = 0;
1726 index++;
1727 break;
1728 case PHY_WRITE:
1729 rtl_writephy(tp, regno, data);
1730 index++;
1731 break;
1732 case PHY_READCOUNT_EQ_SKIP:
1733 if (count == data)
1734 index += 2;
1735 else
1736 index += 1;
1737 break;
1738 case PHY_COMP_EQ_SKIPN:
1739 if (predata == data)
1740 index += regno;
1741 index++;
1742 break;
1743 case PHY_COMP_NEQ_SKIPN:
1744 if (predata != data)
1745 index += regno;
1746 index++;
1747 break;
1748 case PHY_WRITE_PREVIOUS:
1749 rtl_writephy(tp, regno, predata);
1750 index++;
1751 break;
1752 case PHY_SKIPN:
1753 index += regno + 1;
1754 break;
1755 case PHY_DELAY_MS:
1756 mdelay(data);
1757 index++;
1758 break;
1759
1760 case PHY_READ_MAC_BYTE:
1761 case PHY_WRITE_MAC_BYTE:
1762 case PHY_WRITE_ERI_WORD:
1763 default:
1764 BUG();
1765 }
1766 }
1767}
1768
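rtl_phy_write_fw() above interprets the firmware blob as an array of little-endian 32-bit actions (opcode in the top nibble, register number in bits 16-27, data in the low 16 bits), validating jump and skip targets in a first pass before executing them in a second. The blob itself would normally be obtained with request_firmware() against the FIRMWARE_8168D_* names declared earlier; a hedged sketch of that surrounding call (function name and error handling are illustrative, not copied from the driver):

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/device.h>
#include <linux/firmware.h>

static int demo_load_phy_fw(struct device *dev, const char *name)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, name, dev);
	if (rc < 0) {
		dev_err(dev, "unable to load firmware %s (%d)\n", name, rc);
		return rc;
	}

	/* The interpreter consumes whole 32-bit words; reject odd sizes up
	 * front, as the first pass of rtl_phy_write_fw() does. */
	if (fw->size % sizeof(__le32)) {
		dev_err(dev, "odd sized firmware %zd\n", fw->size);
		rc = -EINVAL;
	}
	/* ...otherwise hand fw->data to the action interpreter here... */

	release_firmware(fw);
	return rc;
}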
1769static void rtl8169s_hw_phy_config(struct rtl8169_private *tp)
1397{ 1770{
1398 static const struct phy_reg phy_reg_init[] = { 1771 static const struct phy_reg phy_reg_init[] = {
1399 { 0x1f, 0x0001 }, 1772 { 0x1f, 0x0001 },
@@ -1457,10 +1830,10 @@ static void rtl8169s_hw_phy_config(void __iomem *ioaddr)
1457 { 0x00, 0x9200 } 1830 { 0x00, 0x9200 }
1458 }; 1831 };
1459 1832
1460 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1833 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1461} 1834}
1462 1835
1463static void rtl8169sb_hw_phy_config(void __iomem *ioaddr) 1836static void rtl8169sb_hw_phy_config(struct rtl8169_private *tp)
1464{ 1837{
1465 static const struct phy_reg phy_reg_init[] = { 1838 static const struct phy_reg phy_reg_init[] = {
1466 { 0x1f, 0x0002 }, 1839 { 0x1f, 0x0002 },
@@ -1468,11 +1841,10 @@ static void rtl8169sb_hw_phy_config(void __iomem *ioaddr)
1468 { 0x1f, 0x0000 } 1841 { 0x1f, 0x0000 }
1469 }; 1842 };
1470 1843
1471 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1844 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1472} 1845}
1473 1846
1474static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp, 1847static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp)
1475 void __iomem *ioaddr)
1476{ 1848{
1477 struct pci_dev *pdev = tp->pci_dev; 1849 struct pci_dev *pdev = tp->pci_dev;
1478 u16 vendor_id, device_id; 1850 u16 vendor_id, device_id;
@@ -1483,13 +1855,12 @@ static void rtl8169scd_hw_phy_config_quirk(struct rtl8169_private *tp,
1483 if ((vendor_id != PCI_VENDOR_ID_GIGABYTE) || (device_id != 0xe000)) 1855 if ((vendor_id != PCI_VENDOR_ID_GIGABYTE) || (device_id != 0xe000))
1484 return; 1856 return;
1485 1857
1486 mdio_write(ioaddr, 0x1f, 0x0001); 1858 rtl_writephy(tp, 0x1f, 0x0001);
1487 mdio_write(ioaddr, 0x10, 0xf01b); 1859 rtl_writephy(tp, 0x10, 0xf01b);
1488 mdio_write(ioaddr, 0x1f, 0x0000); 1860 rtl_writephy(tp, 0x1f, 0x0000);
1489} 1861}
1490 1862
1491static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp, 1863static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp)
1492 void __iomem *ioaddr)
1493{ 1864{
1494 static const struct phy_reg phy_reg_init[] = { 1865 static const struct phy_reg phy_reg_init[] = {
1495 { 0x1f, 0x0001 }, 1866 { 0x1f, 0x0001 },
@@ -1531,12 +1902,12 @@ static void rtl8169scd_hw_phy_config(struct rtl8169_private *tp,
1531 { 0x1f, 0x0000 } 1902 { 0x1f, 0x0000 }
1532 }; 1903 };
1533 1904
1534 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1905 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1535 1906
1536 rtl8169scd_hw_phy_config_quirk(tp, ioaddr); 1907 rtl8169scd_hw_phy_config_quirk(tp);
1537} 1908}
1538 1909
1539static void rtl8169sce_hw_phy_config(void __iomem *ioaddr) 1910static void rtl8169sce_hw_phy_config(struct rtl8169_private *tp)
1540{ 1911{
1541 static const struct phy_reg phy_reg_init[] = { 1912 static const struct phy_reg phy_reg_init[] = {
1542 { 0x1f, 0x0001 }, 1913 { 0x1f, 0x0001 },
@@ -1586,23 +1957,23 @@ static void rtl8169sce_hw_phy_config(void __iomem *ioaddr)
1586 { 0x1f, 0x0000 } 1957 { 0x1f, 0x0000 }
1587 }; 1958 };
1588 1959
1589 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1960 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1590} 1961}
1591 1962
1592static void rtl8168bb_hw_phy_config(void __iomem *ioaddr) 1963static void rtl8168bb_hw_phy_config(struct rtl8169_private *tp)
1593{ 1964{
1594 static const struct phy_reg phy_reg_init[] = { 1965 static const struct phy_reg phy_reg_init[] = {
1595 { 0x10, 0xf41b }, 1966 { 0x10, 0xf41b },
1596 { 0x1f, 0x0000 } 1967 { 0x1f, 0x0000 }
1597 }; 1968 };
1598 1969
1599 mdio_write(ioaddr, 0x1f, 0x0001); 1970 rtl_writephy(tp, 0x1f, 0x0001);
1600 mdio_patch(ioaddr, 0x16, 1 << 0); 1971 rtl_patchphy(tp, 0x16, 1 << 0);
1601 1972
1602 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1973 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1603} 1974}
1604 1975
1605static void rtl8168bef_hw_phy_config(void __iomem *ioaddr) 1976static void rtl8168bef_hw_phy_config(struct rtl8169_private *tp)
1606{ 1977{
1607 static const struct phy_reg phy_reg_init[] = { 1978 static const struct phy_reg phy_reg_init[] = {
1608 { 0x1f, 0x0001 }, 1979 { 0x1f, 0x0001 },
@@ -1610,10 +1981,10 @@ static void rtl8168bef_hw_phy_config(void __iomem *ioaddr)
1610 { 0x1f, 0x0000 } 1981 { 0x1f, 0x0000 }
1611 }; 1982 };
1612 1983
1613 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1984 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1614} 1985}
1615 1986
1616static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr) 1987static void rtl8168cp_1_hw_phy_config(struct rtl8169_private *tp)
1617{ 1988{
1618 static const struct phy_reg phy_reg_init[] = { 1989 static const struct phy_reg phy_reg_init[] = {
1619 { 0x1f, 0x0000 }, 1990 { 0x1f, 0x0000 },
@@ -1623,10 +1994,10 @@ static void rtl8168cp_1_hw_phy_config(void __iomem *ioaddr)
1623 { 0x1f, 0x0000 } 1994 { 0x1f, 0x0000 }
1624 }; 1995 };
1625 1996
1626 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 1997 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1627} 1998}
1628 1999
1629static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr) 2000static void rtl8168cp_2_hw_phy_config(struct rtl8169_private *tp)
1630{ 2001{
1631 static const struct phy_reg phy_reg_init[] = { 2002 static const struct phy_reg phy_reg_init[] = {
1632 { 0x1f, 0x0001 }, 2003 { 0x1f, 0x0001 },
@@ -1634,14 +2005,14 @@ static void rtl8168cp_2_hw_phy_config(void __iomem *ioaddr)
1634 { 0x1f, 0x0000 } 2005 { 0x1f, 0x0000 }
1635 }; 2006 };
1636 2007
1637 mdio_write(ioaddr, 0x1f, 0x0000); 2008 rtl_writephy(tp, 0x1f, 0x0000);
1638 mdio_patch(ioaddr, 0x14, 1 << 5); 2009 rtl_patchphy(tp, 0x14, 1 << 5);
1639 mdio_patch(ioaddr, 0x0d, 1 << 5); 2010 rtl_patchphy(tp, 0x0d, 1 << 5);
1640 2011
1641 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2012 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1642} 2013}
1643 2014
1644static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr) 2015static void rtl8168c_1_hw_phy_config(struct rtl8169_private *tp)
1645{ 2016{
1646 static const struct phy_reg phy_reg_init[] = { 2017 static const struct phy_reg phy_reg_init[] = {
1647 { 0x1f, 0x0001 }, 2018 { 0x1f, 0x0001 },
@@ -1663,14 +2034,14 @@ static void rtl8168c_1_hw_phy_config(void __iomem *ioaddr)
1663 { 0x09, 0x0000 } 2034 { 0x09, 0x0000 }
1664 }; 2035 };
1665 2036
1666 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2037 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1667 2038
1668 mdio_patch(ioaddr, 0x14, 1 << 5); 2039 rtl_patchphy(tp, 0x14, 1 << 5);
1669 mdio_patch(ioaddr, 0x0d, 1 << 5); 2040 rtl_patchphy(tp, 0x0d, 1 << 5);
1670 mdio_write(ioaddr, 0x1f, 0x0000); 2041 rtl_writephy(tp, 0x1f, 0x0000);
1671} 2042}
1672 2043
1673static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr) 2044static void rtl8168c_2_hw_phy_config(struct rtl8169_private *tp)
1674{ 2045{
1675 static const struct phy_reg phy_reg_init[] = { 2046 static const struct phy_reg phy_reg_init[] = {
1676 { 0x1f, 0x0001 }, 2047 { 0x1f, 0x0001 },
@@ -1690,15 +2061,15 @@ static void rtl8168c_2_hw_phy_config(void __iomem *ioaddr)
1690 { 0x1f, 0x0000 } 2061 { 0x1f, 0x0000 }
1691 }; 2062 };
1692 2063
1693 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2064 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1694 2065
1695 mdio_patch(ioaddr, 0x16, 1 << 0); 2066 rtl_patchphy(tp, 0x16, 1 << 0);
1696 mdio_patch(ioaddr, 0x14, 1 << 5); 2067 rtl_patchphy(tp, 0x14, 1 << 5);
1697 mdio_patch(ioaddr, 0x0d, 1 << 5); 2068 rtl_patchphy(tp, 0x0d, 1 << 5);
1698 mdio_write(ioaddr, 0x1f, 0x0000); 2069 rtl_writephy(tp, 0x1f, 0x0000);
1699} 2070}
1700 2071
1701static void rtl8168c_3_hw_phy_config(void __iomem *ioaddr) 2072static void rtl8168c_3_hw_phy_config(struct rtl8169_private *tp)
1702{ 2073{
1703 static const struct phy_reg phy_reg_init[] = { 2074 static const struct phy_reg phy_reg_init[] = {
1704 { 0x1f, 0x0001 }, 2075 { 0x1f, 0x0001 },
@@ -1712,22 +2083,23 @@ static void rtl8168c_3_hw_phy_config(void __iomem *ioaddr)
1712 { 0x1f, 0x0000 } 2083 { 0x1f, 0x0000 }
1713 }; 2084 };
1714 2085
1715 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2086 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
1716 2087
1717 mdio_patch(ioaddr, 0x16, 1 << 0); 2088 rtl_patchphy(tp, 0x16, 1 << 0);
1718 mdio_patch(ioaddr, 0x14, 1 << 5); 2089 rtl_patchphy(tp, 0x14, 1 << 5);
1719 mdio_patch(ioaddr, 0x0d, 1 << 5); 2090 rtl_patchphy(tp, 0x0d, 1 << 5);
1720 mdio_write(ioaddr, 0x1f, 0x0000); 2091 rtl_writephy(tp, 0x1f, 0x0000);
1721} 2092}
1722 2093
1723static void rtl8168c_4_hw_phy_config(void __iomem *ioaddr) 2094static void rtl8168c_4_hw_phy_config(struct rtl8169_private *tp)
1724{ 2095{
1725 rtl8168c_3_hw_phy_config(ioaddr); 2096 rtl8168c_3_hw_phy_config(tp);
1726} 2097}
1727 2098
1728static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr) 2099static void rtl8168d_1_hw_phy_config(struct rtl8169_private *tp)
1729{ 2100{
1730 static const struct phy_reg phy_reg_init_0[] = { 2101 static const struct phy_reg phy_reg_init_0[] = {
2102 /* Channel Estimation */
1731 { 0x1f, 0x0001 }, 2103 { 0x1f, 0x0001 },
1732 { 0x06, 0x4064 }, 2104 { 0x06, 0x4064 },
1733 { 0x07, 0x2863 }, 2105 { 0x07, 0x2863 },
@@ -1744,378 +2116,40 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
1744 { 0x12, 0xf49f }, 2116 { 0x12, 0xf49f },
1745 { 0x13, 0x070b }, 2117 { 0x13, 0x070b },
1746 { 0x1a, 0x05ad }, 2118 { 0x1a, 0x05ad },
1747 { 0x14, 0x94c0 } 2119 { 0x14, 0x94c0 },
1748 }; 2120
1749 static const struct phy_reg phy_reg_init_1[] = { 2121 /*
2122 * Tx Error Issue
2123 * enhance line driver power
2124 */
1750 { 0x1f, 0x0002 }, 2125 { 0x1f, 0x0002 },
1751 { 0x06, 0x5561 }, 2126 { 0x06, 0x5561 },
1752 { 0x1f, 0x0005 }, 2127 { 0x1f, 0x0005 },
1753 { 0x05, 0x8332 }, 2128 { 0x05, 0x8332 },
1754 { 0x06, 0x5561 } 2129 { 0x06, 0x5561 },
1755 }; 2130
1756 static const struct phy_reg phy_reg_init_2[] = { 2131 /*
1757 { 0x1f, 0x0005 }, 2132 * Can not link to 1Gbps with bad cable
1758 { 0x05, 0xffc2 }, 2133 * Decrease SNR threshold from 21.07dB to 19.04dB
1759 { 0x1f, 0x0005 }, 2134 */
1760 { 0x05, 0x8000 }, 2135 { 0x1f, 0x0001 },
1761 { 0x06, 0xf8f9 }, 2136 { 0x17, 0x0cc0 },
1762 { 0x06, 0xfaef },
1763 { 0x06, 0x59ee },
1764 { 0x06, 0xf8ea },
1765 { 0x06, 0x00ee },
1766 { 0x06, 0xf8eb },
1767 { 0x06, 0x00e0 },
1768 { 0x06, 0xf87c },
1769 { 0x06, 0xe1f8 },
1770 { 0x06, 0x7d59 },
1771 { 0x06, 0x0fef },
1772 { 0x06, 0x0139 },
1773 { 0x06, 0x029e },
1774 { 0x06, 0x06ef },
1775 { 0x06, 0x1039 },
1776 { 0x06, 0x089f },
1777 { 0x06, 0x2aee },
1778 { 0x06, 0xf8ea },
1779 { 0x06, 0x00ee },
1780 { 0x06, 0xf8eb },
1781 { 0x06, 0x01e0 },
1782 { 0x06, 0xf87c },
1783 { 0x06, 0xe1f8 },
1784 { 0x06, 0x7d58 },
1785 { 0x06, 0x409e },
1786 { 0x06, 0x0f39 },
1787 { 0x06, 0x46aa },
1788 { 0x06, 0x0bbf },
1789 { 0x06, 0x8290 },
1790 { 0x06, 0xd682 },
1791 { 0x06, 0x9802 },
1792 { 0x06, 0x014f },
1793 { 0x06, 0xae09 },
1794 { 0x06, 0xbf82 },
1795 { 0x06, 0x98d6 },
1796 { 0x06, 0x82a0 },
1797 { 0x06, 0x0201 },
1798 { 0x06, 0x4fef },
1799 { 0x06, 0x95fe },
1800 { 0x06, 0xfdfc },
1801 { 0x06, 0x05f8 },
1802 { 0x06, 0xf9fa },
1803 { 0x06, 0xeef8 },
1804 { 0x06, 0xea00 },
1805 { 0x06, 0xeef8 },
1806 { 0x06, 0xeb00 },
1807 { 0x06, 0xe2f8 },
1808 { 0x06, 0x7ce3 },
1809 { 0x06, 0xf87d },
1810 { 0x06, 0xa511 },
1811 { 0x06, 0x1112 },
1812 { 0x06, 0xd240 },
1813 { 0x06, 0xd644 },
1814 { 0x06, 0x4402 },
1815 { 0x06, 0x8217 },
1816 { 0x06, 0xd2a0 },
1817 { 0x06, 0xd6aa },
1818 { 0x06, 0xaa02 },
1819 { 0x06, 0x8217 },
1820 { 0x06, 0xae0f },
1821 { 0x06, 0xa544 },
1822 { 0x06, 0x4402 },
1823 { 0x06, 0xae4d },
1824 { 0x06, 0xa5aa },
1825 { 0x06, 0xaa02 },
1826 { 0x06, 0xae47 },
1827 { 0x06, 0xaf82 },
1828 { 0x06, 0x13ee },
1829 { 0x06, 0x834e },
1830 { 0x06, 0x00ee },
1831 { 0x06, 0x834d },
1832 { 0x06, 0x0fee },
1833 { 0x06, 0x834c },
1834 { 0x06, 0x0fee },
1835 { 0x06, 0x834f },
1836 { 0x06, 0x00ee },
1837 { 0x06, 0x8351 },
1838 { 0x06, 0x00ee },
1839 { 0x06, 0x834a },
1840 { 0x06, 0xffee },
1841 { 0x06, 0x834b },
1842 { 0x06, 0xffe0 },
1843 { 0x06, 0x8330 },
1844 { 0x06, 0xe183 },
1845 { 0x06, 0x3158 },
1846 { 0x06, 0xfee4 },
1847 { 0x06, 0xf88a },
1848 { 0x06, 0xe5f8 },
1849 { 0x06, 0x8be0 },
1850 { 0x06, 0x8332 },
1851 { 0x06, 0xe183 },
1852 { 0x06, 0x3359 },
1853 { 0x06, 0x0fe2 },
1854 { 0x06, 0x834d },
1855 { 0x06, 0x0c24 },
1856 { 0x06, 0x5af0 },
1857 { 0x06, 0x1e12 },
1858 { 0x06, 0xe4f8 },
1859 { 0x06, 0x8ce5 },
1860 { 0x06, 0xf88d },
1861 { 0x06, 0xaf82 },
1862 { 0x06, 0x13e0 },
1863 { 0x06, 0x834f },
1864 { 0x06, 0x10e4 },
1865 { 0x06, 0x834f },
1866 { 0x06, 0xe083 },
1867 { 0x06, 0x4e78 },
1868 { 0x06, 0x009f },
1869 { 0x06, 0x0ae0 },
1870 { 0x06, 0x834f },
1871 { 0x06, 0xa010 },
1872 { 0x06, 0xa5ee },
1873 { 0x06, 0x834e },
1874 { 0x06, 0x01e0 },
1875 { 0x06, 0x834e },
1876 { 0x06, 0x7805 },
1877 { 0x06, 0x9e9a },
1878 { 0x06, 0xe083 },
1879 { 0x06, 0x4e78 },
1880 { 0x06, 0x049e },
1881 { 0x06, 0x10e0 },
1882 { 0x06, 0x834e },
1883 { 0x06, 0x7803 },
1884 { 0x06, 0x9e0f },
1885 { 0x06, 0xe083 },
1886 { 0x06, 0x4e78 },
1887 { 0x06, 0x019e },
1888 { 0x06, 0x05ae },
1889 { 0x06, 0x0caf },
1890 { 0x06, 0x81f8 },
1891 { 0x06, 0xaf81 },
1892 { 0x06, 0xa3af },
1893 { 0x06, 0x81dc },
1894 { 0x06, 0xaf82 },
1895 { 0x06, 0x13ee },
1896 { 0x06, 0x8348 },
1897 { 0x06, 0x00ee },
1898 { 0x06, 0x8349 },
1899 { 0x06, 0x00e0 },
1900 { 0x06, 0x8351 },
1901 { 0x06, 0x10e4 },
1902 { 0x06, 0x8351 },
1903 { 0x06, 0x5801 },
1904 { 0x06, 0x9fea },
1905 { 0x06, 0xd000 },
1906 { 0x06, 0xd180 },
1907 { 0x06, 0x1f66 },
1908 { 0x06, 0xe2f8 },
1909 { 0x06, 0xeae3 },
1910 { 0x06, 0xf8eb },
1911 { 0x06, 0x5af8 },
1912 { 0x06, 0x1e20 },
1913 { 0x06, 0xe6f8 },
1914 { 0x06, 0xeae5 },
1915 { 0x06, 0xf8eb },
1916 { 0x06, 0xd302 },
1917 { 0x06, 0xb3fe },
1918 { 0x06, 0xe2f8 },
1919 { 0x06, 0x7cef },
1920 { 0x06, 0x325b },
1921 { 0x06, 0x80e3 },
1922 { 0x06, 0xf87d },
1923 { 0x06, 0x9e03 },
1924 { 0x06, 0x7dff },
1925 { 0x06, 0xff0d },
1926 { 0x06, 0x581c },
1927 { 0x06, 0x551a },
1928 { 0x06, 0x6511 },
1929 { 0x06, 0xa190 },
1930 { 0x06, 0xd3e2 },
1931 { 0x06, 0x8348 },
1932 { 0x06, 0xe383 },
1933 { 0x06, 0x491b },
1934 { 0x06, 0x56ab },
1935 { 0x06, 0x08ef },
1936 { 0x06, 0x56e6 },
1937 { 0x06, 0x8348 },
1938 { 0x06, 0xe783 },
1939 { 0x06, 0x4910 },
1940 { 0x06, 0xd180 },
1941 { 0x06, 0x1f66 },
1942 { 0x06, 0xa004 },
1943 { 0x06, 0xb9e2 },
1944 { 0x06, 0x8348 },
1945 { 0x06, 0xe383 },
1946 { 0x06, 0x49ef },
1947 { 0x06, 0x65e2 },
1948 { 0x06, 0x834a },
1949 { 0x06, 0xe383 },
1950 { 0x06, 0x4b1b },
1951 { 0x06, 0x56aa },
1952 { 0x06, 0x0eef },
1953 { 0x06, 0x56e6 },
1954 { 0x06, 0x834a },
1955 { 0x06, 0xe783 },
1956 { 0x06, 0x4be2 },
1957 { 0x06, 0x834d },
1958 { 0x06, 0xe683 },
1959 { 0x06, 0x4ce0 },
1960 { 0x06, 0x834d },
1961 { 0x06, 0xa000 },
1962 { 0x06, 0x0caf },
1963 { 0x06, 0x81dc },
1964 { 0x06, 0xe083 },
1965 { 0x06, 0x4d10 },
1966 { 0x06, 0xe483 },
1967 { 0x06, 0x4dae },
1968 { 0x06, 0x0480 },
1969 { 0x06, 0xe483 },
1970 { 0x06, 0x4de0 },
1971 { 0x06, 0x834e },
1972 { 0x06, 0x7803 },
1973 { 0x06, 0x9e0b },
1974 { 0x06, 0xe083 },
1975 { 0x06, 0x4e78 },
1976 { 0x06, 0x049e },
1977 { 0x06, 0x04ee },
1978 { 0x06, 0x834e },
1979 { 0x06, 0x02e0 },
1980 { 0x06, 0x8332 },
1981 { 0x06, 0xe183 },
1982 { 0x06, 0x3359 },
1983 { 0x06, 0x0fe2 },
1984 { 0x06, 0x834d },
1985 { 0x06, 0x0c24 },
1986 { 0x06, 0x5af0 },
1987 { 0x06, 0x1e12 },
1988 { 0x06, 0xe4f8 },
1989 { 0x06, 0x8ce5 },
1990 { 0x06, 0xf88d },
1991 { 0x06, 0xe083 },
1992 { 0x06, 0x30e1 },
1993 { 0x06, 0x8331 },
1994 { 0x06, 0x6801 },
1995 { 0x06, 0xe4f8 },
1996 { 0x06, 0x8ae5 },
1997 { 0x06, 0xf88b },
1998 { 0x06, 0xae37 },
1999 { 0x06, 0xee83 },
2000 { 0x06, 0x4e03 },
2001 { 0x06, 0xe083 },
2002 { 0x06, 0x4ce1 },
2003 { 0x06, 0x834d },
2004 { 0x06, 0x1b01 },
2005 { 0x06, 0x9e04 },
2006 { 0x06, 0xaaa1 },
2007 { 0x06, 0xaea8 },
2008 { 0x06, 0xee83 },
2009 { 0x06, 0x4e04 },
2010 { 0x06, 0xee83 },
2011 { 0x06, 0x4f00 },
2012 { 0x06, 0xaeab },
2013 { 0x06, 0xe083 },
2014 { 0x06, 0x4f78 },
2015 { 0x06, 0x039f },
2016 { 0x06, 0x14ee },
2017 { 0x06, 0x834e },
2018 { 0x06, 0x05d2 },
2019 { 0x06, 0x40d6 },
2020 { 0x06, 0x5554 },
2021 { 0x06, 0x0282 },
2022 { 0x06, 0x17d2 },
2023 { 0x06, 0xa0d6 },
2024 { 0x06, 0xba00 },
2025 { 0x06, 0x0282 },
2026 { 0x06, 0x17fe },
2027 { 0x06, 0xfdfc },
2028 { 0x06, 0x05f8 },
2029 { 0x06, 0xe0f8 },
2030 { 0x06, 0x60e1 },
2031 { 0x06, 0xf861 },
2032 { 0x06, 0x6802 },
2033 { 0x06, 0xe4f8 },
2034 { 0x06, 0x60e5 },
2035 { 0x06, 0xf861 },
2036 { 0x06, 0xe0f8 },
2037 { 0x06, 0x48e1 },
2038 { 0x06, 0xf849 },
2039 { 0x06, 0x580f },
2040 { 0x06, 0x1e02 },
2041 { 0x06, 0xe4f8 },
2042 { 0x06, 0x48e5 },
2043 { 0x06, 0xf849 },
2044 { 0x06, 0xd000 },
2045 { 0x06, 0x0282 },
2046 { 0x06, 0x5bbf },
2047 { 0x06, 0x8350 },
2048 { 0x06, 0xef46 },
2049 { 0x06, 0xdc19 },
2050 { 0x06, 0xddd0 },
2051 { 0x06, 0x0102 },
2052 { 0x06, 0x825b },
2053 { 0x06, 0x0282 },
2054 { 0x06, 0x77e0 },
2055 { 0x06, 0xf860 },
2056 { 0x06, 0xe1f8 },
2057 { 0x06, 0x6158 },
2058 { 0x06, 0xfde4 },
2059 { 0x06, 0xf860 },
2060 { 0x06, 0xe5f8 },
2061 { 0x06, 0x61fc },
2062 { 0x06, 0x04f9 },
2063 { 0x06, 0xfafb },
2064 { 0x06, 0xc6bf },
2065 { 0x06, 0xf840 },
2066 { 0x06, 0xbe83 },
2067 { 0x06, 0x50a0 },
2068 { 0x06, 0x0101 },
2069 { 0x06, 0x071b },
2070 { 0x06, 0x89cf },
2071 { 0x06, 0xd208 },
2072 { 0x06, 0xebdb },
2073 { 0x06, 0x19b2 },
2074 { 0x06, 0xfbff },
2075 { 0x06, 0xfefd },
2076 { 0x06, 0x04f8 },
2077 { 0x06, 0xe0f8 },
2078 { 0x06, 0x48e1 },
2079 { 0x06, 0xf849 },
2080 { 0x06, 0x6808 },
2081 { 0x06, 0xe4f8 },
2082 { 0x06, 0x48e5 },
2083 { 0x06, 0xf849 },
2084 { 0x06, 0x58f7 },
2085 { 0x06, 0xe4f8 },
2086 { 0x06, 0x48e5 },
2087 { 0x06, 0xf849 },
2088 { 0x06, 0xfc04 },
2089 { 0x06, 0x4d20 },
2090 { 0x06, 0x0002 },
2091 { 0x06, 0x4e22 },
2092 { 0x06, 0x0002 },
2093 { 0x06, 0x4ddf },
2094 { 0x06, 0xff01 },
2095 { 0x06, 0x4edd },
2096 { 0x06, 0xff01 },
2097 { 0x05, 0x83d4 },
2098 { 0x06, 0x8000 },
2099 { 0x05, 0x83d8 },
2100 { 0x06, 0x8051 },
2101 { 0x02, 0x6010 },
2102 { 0x03, 0xdc00 },
2103 { 0x05, 0xfff6 },
2104 { 0x06, 0x00fc },
2105 { 0x1f, 0x0000 },
2106 2137
2107 { 0x1f, 0x0000 }, 2138 { 0x1f, 0x0000 },
2108 { 0x0d, 0xf880 }, 2139 { 0x0d, 0xf880 }
2109 { 0x1f, 0x0000 }
2110 }; 2140 };
2141 void __iomem *ioaddr = tp->mmio_addr;
2142 const struct firmware *fw;
2111 2143
2112 rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2144 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2113
2114 mdio_write(ioaddr, 0x1f, 0x0002);
2115 mdio_plus_minus(ioaddr, 0x0b, 0x0010, 0x00ef);
2116 mdio_plus_minus(ioaddr, 0x0c, 0xa200, 0x5d00);
2117 2145
2118 rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); 2146 /*
2147 * Rx Error Issue
2148 * Fine Tune Switching regulator parameter
2149 */
2150 rtl_writephy(tp, 0x1f, 0x0002);
2151 rtl_w1w0_phy(tp, 0x0b, 0x0010, 0x00ef);
2152 rtl_w1w0_phy(tp, 0x0c, 0xa200, 0x5d00);
2119 2153
2120 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { 2154 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
2121 static const struct phy_reg phy_reg_init[] = { 2155 static const struct phy_reg phy_reg_init[] = {
@@ -2128,9 +2162,9 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
2128 }; 2162 };
2129 int val; 2163 int val;
2130 2164
2131 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2165 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2132 2166
2133 val = mdio_read(ioaddr, 0x0d); 2167 val = rtl_readphy(tp, 0x0d);
2134 2168
2135 if ((val & 0x00ff) != 0x006c) { 2169 if ((val & 0x00ff) != 0x006c) {
2136 static const u32 set[] = { 2170 static const u32 set[] = {
@@ -2139,11 +2173,11 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
2139 }; 2173 };
2140 int i; 2174 int i;
2141 2175
2142 mdio_write(ioaddr, 0x1f, 0x0002); 2176 rtl_writephy(tp, 0x1f, 0x0002);
2143 2177
2144 val &= 0xff00; 2178 val &= 0xff00;
2145 for (i = 0; i < ARRAY_SIZE(set); i++) 2179 for (i = 0; i < ARRAY_SIZE(set); i++)
2146 mdio_write(ioaddr, 0x0d, val | set[i]); 2180 rtl_writephy(tp, 0x0d, val | set[i]);
2147 } 2181 }
2148 } else { 2182 } else {
2149 static const struct phy_reg phy_reg_init[] = { 2183 static const struct phy_reg phy_reg_init[] = {
@@ -2154,23 +2188,36 @@ static void rtl8168d_1_hw_phy_config(void __iomem *ioaddr)
2154 { 0x06, 0x6662 } 2188 { 0x06, 0x6662 }
2155 }; 2189 };
2156 2190
2157 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2191 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2158 } 2192 }
2159 2193
2160 mdio_write(ioaddr, 0x1f, 0x0002); 2194 /* RSET couple improve */
2161 mdio_patch(ioaddr, 0x0d, 0x0300); 2195 rtl_writephy(tp, 0x1f, 0x0002);
2162 mdio_patch(ioaddr, 0x0f, 0x0010); 2196 rtl_patchphy(tp, 0x0d, 0x0300);
2163 2197 rtl_patchphy(tp, 0x0f, 0x0010);
2164 mdio_write(ioaddr, 0x1f, 0x0002); 2198
2165 mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600); 2199 /* Fine tune PLL performance */
2166 mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000); 2200 rtl_writephy(tp, 0x1f, 0x0002);
2201 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2202 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2203
2204 rtl_writephy(tp, 0x1f, 0x0005);
2205 rtl_writephy(tp, 0x05, 0x001b);
2206 if (rtl_readphy(tp, 0x06) == 0xbf00 &&
2207 request_firmware(&fw, FIRMWARE_8168D_1, &tp->pci_dev->dev) == 0) {
2208 rtl_phy_write_fw(tp, fw);
2209 release_firmware(fw);
2210 } else {
2211 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2212 }
2167 2213
2168 rtl_phy_write(ioaddr, phy_reg_init_2, ARRAY_SIZE(phy_reg_init_2)); 2214 rtl_writephy(tp, 0x1f, 0x0000);
2169} 2215}
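
The large hand-coded phy_reg_init_2 table is gone: rtl8168d_1_hw_phy_config() now probes PHY register 0x06 for the expected signature and, on a match, pulls the equivalent patch from a firmware blob via request_firmware(), warning and continuing if the blob is unavailable. The FIRMWARE_8168D_* names, their MODULE_FIRMWARE() declarations and rtl_phy_write_fw() all live outside this hunk; the declarations presumably look like the following (the file names are an assumption based on the rtl_nic/ firmware naming convention):

/* Assumed to be added near the top of r8169.c, outside this hunk. */
#define FIRMWARE_8168D_1	"rtl_nic/rtl8168d-1.fw"
#define FIRMWARE_8168D_2	"rtl_nic/rtl8168d-2.fw"

MODULE_FIRMWARE(FIRMWARE_8168D_1);
MODULE_FIRMWARE(FIRMWARE_8168D_2);

Declaring the names with MODULE_FIRMWARE() lets packaging tools ship the blobs alongside the module; rtl_phy_write_fw() is expected to replay the blob's register writes through rtl_writephy().
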
2170 2216
2171static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr) 2217static void rtl8168d_2_hw_phy_config(struct rtl8169_private *tp)
2172{ 2218{
2173 static const struct phy_reg phy_reg_init_0[] = { 2219 static const struct phy_reg phy_reg_init_0[] = {
2220 /* Channel Estimation */
2174 { 0x1f, 0x0001 }, 2221 { 0x1f, 0x0001 },
2175 { 0x06, 0x4064 }, 2222 { 0x06, 0x4064 },
2176 { 0x07, 0x2863 }, 2223 { 0x07, 0x2863 },
@@ -2189,326 +2236,30 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2189 { 0x1a, 0x05ad }, 2236 { 0x1a, 0x05ad },
2190 { 0x14, 0x94c0 }, 2237 { 0x14, 0x94c0 },
2191 2238
2239 /*
2240 * Tx Error Issue
2241 * enhance line driver power
2242 */
2192 { 0x1f, 0x0002 }, 2243 { 0x1f, 0x0002 },
2193 { 0x06, 0x5561 }, 2244 { 0x06, 0x5561 },
2194 { 0x1f, 0x0005 }, 2245 { 0x1f, 0x0005 },
2195 { 0x05, 0x8332 }, 2246 { 0x05, 0x8332 },
2196 { 0x06, 0x5561 } 2247 { 0x06, 0x5561 },
2197 }; 2248
2198 static const struct phy_reg phy_reg_init_1[] = { 2249 /*
2199 { 0x1f, 0x0005 }, 2250 * Can not link to 1Gbps with bad cable
2200 { 0x05, 0xffc2 }, 2251 * Decrease SNR threshold from 21.07dB to 19.04dB
2201 { 0x1f, 0x0005 }, 2252 */
2202 { 0x05, 0x8000 }, 2253 { 0x1f, 0x0001 },
2203 { 0x06, 0xf8f9 }, 2254 { 0x17, 0x0cc0 },
2204 { 0x06, 0xfaee },
2205 { 0x06, 0xf8ea },
2206 { 0x06, 0x00ee },
2207 { 0x06, 0xf8eb },
2208 { 0x06, 0x00e2 },
2209 { 0x06, 0xf87c },
2210 { 0x06, 0xe3f8 },
2211 { 0x06, 0x7da5 },
2212 { 0x06, 0x1111 },
2213 { 0x06, 0x12d2 },
2214 { 0x06, 0x40d6 },
2215 { 0x06, 0x4444 },
2216 { 0x06, 0x0281 },
2217 { 0x06, 0xc6d2 },
2218 { 0x06, 0xa0d6 },
2219 { 0x06, 0xaaaa },
2220 { 0x06, 0x0281 },
2221 { 0x06, 0xc6ae },
2222 { 0x06, 0x0fa5 },
2223 { 0x06, 0x4444 },
2224 { 0x06, 0x02ae },
2225 { 0x06, 0x4da5 },
2226 { 0x06, 0xaaaa },
2227 { 0x06, 0x02ae },
2228 { 0x06, 0x47af },
2229 { 0x06, 0x81c2 },
2230 { 0x06, 0xee83 },
2231 { 0x06, 0x4e00 },
2232 { 0x06, 0xee83 },
2233 { 0x06, 0x4d0f },
2234 { 0x06, 0xee83 },
2235 { 0x06, 0x4c0f },
2236 { 0x06, 0xee83 },
2237 { 0x06, 0x4f00 },
2238 { 0x06, 0xee83 },
2239 { 0x06, 0x5100 },
2240 { 0x06, 0xee83 },
2241 { 0x06, 0x4aff },
2242 { 0x06, 0xee83 },
2243 { 0x06, 0x4bff },
2244 { 0x06, 0xe083 },
2245 { 0x06, 0x30e1 },
2246 { 0x06, 0x8331 },
2247 { 0x06, 0x58fe },
2248 { 0x06, 0xe4f8 },
2249 { 0x06, 0x8ae5 },
2250 { 0x06, 0xf88b },
2251 { 0x06, 0xe083 },
2252 { 0x06, 0x32e1 },
2253 { 0x06, 0x8333 },
2254 { 0x06, 0x590f },
2255 { 0x06, 0xe283 },
2256 { 0x06, 0x4d0c },
2257 { 0x06, 0x245a },
2258 { 0x06, 0xf01e },
2259 { 0x06, 0x12e4 },
2260 { 0x06, 0xf88c },
2261 { 0x06, 0xe5f8 },
2262 { 0x06, 0x8daf },
2263 { 0x06, 0x81c2 },
2264 { 0x06, 0xe083 },
2265 { 0x06, 0x4f10 },
2266 { 0x06, 0xe483 },
2267 { 0x06, 0x4fe0 },
2268 { 0x06, 0x834e },
2269 { 0x06, 0x7800 },
2270 { 0x06, 0x9f0a },
2271 { 0x06, 0xe083 },
2272 { 0x06, 0x4fa0 },
2273 { 0x06, 0x10a5 },
2274 { 0x06, 0xee83 },
2275 { 0x06, 0x4e01 },
2276 { 0x06, 0xe083 },
2277 { 0x06, 0x4e78 },
2278 { 0x06, 0x059e },
2279 { 0x06, 0x9ae0 },
2280 { 0x06, 0x834e },
2281 { 0x06, 0x7804 },
2282 { 0x06, 0x9e10 },
2283 { 0x06, 0xe083 },
2284 { 0x06, 0x4e78 },
2285 { 0x06, 0x039e },
2286 { 0x06, 0x0fe0 },
2287 { 0x06, 0x834e },
2288 { 0x06, 0x7801 },
2289 { 0x06, 0x9e05 },
2290 { 0x06, 0xae0c },
2291 { 0x06, 0xaf81 },
2292 { 0x06, 0xa7af },
2293 { 0x06, 0x8152 },
2294 { 0x06, 0xaf81 },
2295 { 0x06, 0x8baf },
2296 { 0x06, 0x81c2 },
2297 { 0x06, 0xee83 },
2298 { 0x06, 0x4800 },
2299 { 0x06, 0xee83 },
2300 { 0x06, 0x4900 },
2301 { 0x06, 0xe083 },
2302 { 0x06, 0x5110 },
2303 { 0x06, 0xe483 },
2304 { 0x06, 0x5158 },
2305 { 0x06, 0x019f },
2306 { 0x06, 0xead0 },
2307 { 0x06, 0x00d1 },
2308 { 0x06, 0x801f },
2309 { 0x06, 0x66e2 },
2310 { 0x06, 0xf8ea },
2311 { 0x06, 0xe3f8 },
2312 { 0x06, 0xeb5a },
2313 { 0x06, 0xf81e },
2314 { 0x06, 0x20e6 },
2315 { 0x06, 0xf8ea },
2316 { 0x06, 0xe5f8 },
2317 { 0x06, 0xebd3 },
2318 { 0x06, 0x02b3 },
2319 { 0x06, 0xfee2 },
2320 { 0x06, 0xf87c },
2321 { 0x06, 0xef32 },
2322 { 0x06, 0x5b80 },
2323 { 0x06, 0xe3f8 },
2324 { 0x06, 0x7d9e },
2325 { 0x06, 0x037d },
2326 { 0x06, 0xffff },
2327 { 0x06, 0x0d58 },
2328 { 0x06, 0x1c55 },
2329 { 0x06, 0x1a65 },
2330 { 0x06, 0x11a1 },
2331 { 0x06, 0x90d3 },
2332 { 0x06, 0xe283 },
2333 { 0x06, 0x48e3 },
2334 { 0x06, 0x8349 },
2335 { 0x06, 0x1b56 },
2336 { 0x06, 0xab08 },
2337 { 0x06, 0xef56 },
2338 { 0x06, 0xe683 },
2339 { 0x06, 0x48e7 },
2340 { 0x06, 0x8349 },
2341 { 0x06, 0x10d1 },
2342 { 0x06, 0x801f },
2343 { 0x06, 0x66a0 },
2344 { 0x06, 0x04b9 },
2345 { 0x06, 0xe283 },
2346 { 0x06, 0x48e3 },
2347 { 0x06, 0x8349 },
2348 { 0x06, 0xef65 },
2349 { 0x06, 0xe283 },
2350 { 0x06, 0x4ae3 },
2351 { 0x06, 0x834b },
2352 { 0x06, 0x1b56 },
2353 { 0x06, 0xaa0e },
2354 { 0x06, 0xef56 },
2355 { 0x06, 0xe683 },
2356 { 0x06, 0x4ae7 },
2357 { 0x06, 0x834b },
2358 { 0x06, 0xe283 },
2359 { 0x06, 0x4de6 },
2360 { 0x06, 0x834c },
2361 { 0x06, 0xe083 },
2362 { 0x06, 0x4da0 },
2363 { 0x06, 0x000c },
2364 { 0x06, 0xaf81 },
2365 { 0x06, 0x8be0 },
2366 { 0x06, 0x834d },
2367 { 0x06, 0x10e4 },
2368 { 0x06, 0x834d },
2369 { 0x06, 0xae04 },
2370 { 0x06, 0x80e4 },
2371 { 0x06, 0x834d },
2372 { 0x06, 0xe083 },
2373 { 0x06, 0x4e78 },
2374 { 0x06, 0x039e },
2375 { 0x06, 0x0be0 },
2376 { 0x06, 0x834e },
2377 { 0x06, 0x7804 },
2378 { 0x06, 0x9e04 },
2379 { 0x06, 0xee83 },
2380 { 0x06, 0x4e02 },
2381 { 0x06, 0xe083 },
2382 { 0x06, 0x32e1 },
2383 { 0x06, 0x8333 },
2384 { 0x06, 0x590f },
2385 { 0x06, 0xe283 },
2386 { 0x06, 0x4d0c },
2387 { 0x06, 0x245a },
2388 { 0x06, 0xf01e },
2389 { 0x06, 0x12e4 },
2390 { 0x06, 0xf88c },
2391 { 0x06, 0xe5f8 },
2392 { 0x06, 0x8de0 },
2393 { 0x06, 0x8330 },
2394 { 0x06, 0xe183 },
2395 { 0x06, 0x3168 },
2396 { 0x06, 0x01e4 },
2397 { 0x06, 0xf88a },
2398 { 0x06, 0xe5f8 },
2399 { 0x06, 0x8bae },
2400 { 0x06, 0x37ee },
2401 { 0x06, 0x834e },
2402 { 0x06, 0x03e0 },
2403 { 0x06, 0x834c },
2404 { 0x06, 0xe183 },
2405 { 0x06, 0x4d1b },
2406 { 0x06, 0x019e },
2407 { 0x06, 0x04aa },
2408 { 0x06, 0xa1ae },
2409 { 0x06, 0xa8ee },
2410 { 0x06, 0x834e },
2411 { 0x06, 0x04ee },
2412 { 0x06, 0x834f },
2413 { 0x06, 0x00ae },
2414 { 0x06, 0xabe0 },
2415 { 0x06, 0x834f },
2416 { 0x06, 0x7803 },
2417 { 0x06, 0x9f14 },
2418 { 0x06, 0xee83 },
2419 { 0x06, 0x4e05 },
2420 { 0x06, 0xd240 },
2421 { 0x06, 0xd655 },
2422 { 0x06, 0x5402 },
2423 { 0x06, 0x81c6 },
2424 { 0x06, 0xd2a0 },
2425 { 0x06, 0xd6ba },
2426 { 0x06, 0x0002 },
2427 { 0x06, 0x81c6 },
2428 { 0x06, 0xfefd },
2429 { 0x06, 0xfc05 },
2430 { 0x06, 0xf8e0 },
2431 { 0x06, 0xf860 },
2432 { 0x06, 0xe1f8 },
2433 { 0x06, 0x6168 },
2434 { 0x06, 0x02e4 },
2435 { 0x06, 0xf860 },
2436 { 0x06, 0xe5f8 },
2437 { 0x06, 0x61e0 },
2438 { 0x06, 0xf848 },
2439 { 0x06, 0xe1f8 },
2440 { 0x06, 0x4958 },
2441 { 0x06, 0x0f1e },
2442 { 0x06, 0x02e4 },
2443 { 0x06, 0xf848 },
2444 { 0x06, 0xe5f8 },
2445 { 0x06, 0x49d0 },
2446 { 0x06, 0x0002 },
2447 { 0x06, 0x820a },
2448 { 0x06, 0xbf83 },
2449 { 0x06, 0x50ef },
2450 { 0x06, 0x46dc },
2451 { 0x06, 0x19dd },
2452 { 0x06, 0xd001 },
2453 { 0x06, 0x0282 },
2454 { 0x06, 0x0a02 },
2455 { 0x06, 0x8226 },
2456 { 0x06, 0xe0f8 },
2457 { 0x06, 0x60e1 },
2458 { 0x06, 0xf861 },
2459 { 0x06, 0x58fd },
2460 { 0x06, 0xe4f8 },
2461 { 0x06, 0x60e5 },
2462 { 0x06, 0xf861 },
2463 { 0x06, 0xfc04 },
2464 { 0x06, 0xf9fa },
2465 { 0x06, 0xfbc6 },
2466 { 0x06, 0xbff8 },
2467 { 0x06, 0x40be },
2468 { 0x06, 0x8350 },
2469 { 0x06, 0xa001 },
2470 { 0x06, 0x0107 },
2471 { 0x06, 0x1b89 },
2472 { 0x06, 0xcfd2 },
2473 { 0x06, 0x08eb },
2474 { 0x06, 0xdb19 },
2475 { 0x06, 0xb2fb },
2476 { 0x06, 0xfffe },
2477 { 0x06, 0xfd04 },
2478 { 0x06, 0xf8e0 },
2479 { 0x06, 0xf848 },
2480 { 0x06, 0xe1f8 },
2481 { 0x06, 0x4968 },
2482 { 0x06, 0x08e4 },
2483 { 0x06, 0xf848 },
2484 { 0x06, 0xe5f8 },
2485 { 0x06, 0x4958 },
2486 { 0x06, 0xf7e4 },
2487 { 0x06, 0xf848 },
2488 { 0x06, 0xe5f8 },
2489 { 0x06, 0x49fc },
2490 { 0x06, 0x044d },
2491 { 0x06, 0x2000 },
2492 { 0x06, 0x024e },
2493 { 0x06, 0x2200 },
2494 { 0x06, 0x024d },
2495 { 0x06, 0xdfff },
2496 { 0x06, 0x014e },
2497 { 0x06, 0xddff },
2498 { 0x06, 0x0100 },
2499 { 0x05, 0x83d8 },
2500 { 0x06, 0x8000 },
2501 { 0x03, 0xdc00 },
2502 { 0x05, 0xfff6 },
2503 { 0x06, 0x00fc },
2504 { 0x1f, 0x0000 },
2505 2255
2506 { 0x1f, 0x0000 }, 2256 { 0x1f, 0x0000 },
2507 { 0x0d, 0xf880 }, 2257 { 0x0d, 0xf880 }
2508 { 0x1f, 0x0000 }
2509 }; 2258 };
2259 void __iomem *ioaddr = tp->mmio_addr;
2260 const struct firmware *fw;
2510 2261
2511 rtl_phy_write(ioaddr, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0)); 2262 rtl_writephy_batch(tp, phy_reg_init_0, ARRAY_SIZE(phy_reg_init_0));
2512 2263
2513 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) { 2264 if (rtl8168d_efuse_read(ioaddr, 0x01) == 0xb1) {
2514 static const struct phy_reg phy_reg_init[] = { 2265 static const struct phy_reg phy_reg_init[] = {
@@ -2522,21 +2273,21 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2522 }; 2273 };
2523 int val; 2274 int val;
2524 2275
2525 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2276 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2526 2277
2527 val = mdio_read(ioaddr, 0x0d); 2278 val = rtl_readphy(tp, 0x0d);
2528 if ((val & 0x00ff) != 0x006c) { 2279 if ((val & 0x00ff) != 0x006c) {
2529 u32 set[] = { 2280 static const u32 set[] = {
2530 0x0065, 0x0066, 0x0067, 0x0068, 2281 0x0065, 0x0066, 0x0067, 0x0068,
2531 0x0069, 0x006a, 0x006b, 0x006c 2282 0x0069, 0x006a, 0x006b, 0x006c
2532 }; 2283 };
2533 int i; 2284 int i;
2534 2285
2535 mdio_write(ioaddr, 0x1f, 0x0002); 2286 rtl_writephy(tp, 0x1f, 0x0002);
2536 2287
2537 val &= 0xff00; 2288 val &= 0xff00;
2538 for (i = 0; i < ARRAY_SIZE(set); i++) 2289 for (i = 0; i < ARRAY_SIZE(set); i++)
2539 mdio_write(ioaddr, 0x0d, val | set[i]); 2290 rtl_writephy(tp, 0x0d, val | set[i]);
2540 } 2291 }
2541 } else { 2292 } else {
2542 static const struct phy_reg phy_reg_init[] = { 2293 static const struct phy_reg phy_reg_init[] = {
@@ -2547,23 +2298,32 @@ static void rtl8168d_2_hw_phy_config(void __iomem *ioaddr)
2547 { 0x06, 0x2642 } 2298 { 0x06, 0x2642 }
2548 }; 2299 };
2549 2300
2550 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2301 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2551 } 2302 }
2552 2303
2553 mdio_write(ioaddr, 0x1f, 0x0002); 2304 /* Fine tune PLL performance */
2554 mdio_plus_minus(ioaddr, 0x02, 0x0100, 0x0600); 2305 rtl_writephy(tp, 0x1f, 0x0002);
2555 mdio_plus_minus(ioaddr, 0x03, 0x0000, 0xe000); 2306 rtl_w1w0_phy(tp, 0x02, 0x0100, 0x0600);
2556 2307 rtl_w1w0_phy(tp, 0x03, 0x0000, 0xe000);
2557 mdio_write(ioaddr, 0x1f, 0x0001); 2308
2558 mdio_write(ioaddr, 0x17, 0x0cc0); 2309 /* Switching regulator Slew rate */
2559 2310 rtl_writephy(tp, 0x1f, 0x0002);
2560 mdio_write(ioaddr, 0x1f, 0x0002); 2311 rtl_patchphy(tp, 0x0f, 0x0017);
2561 mdio_patch(ioaddr, 0x0f, 0x0017); 2312
2313 rtl_writephy(tp, 0x1f, 0x0005);
2314 rtl_writephy(tp, 0x05, 0x001b);
2315 if (rtl_readphy(tp, 0x06) == 0xb300 &&
2316 request_firmware(&fw, FIRMWARE_8168D_2, &tp->pci_dev->dev) == 0) {
2317 rtl_phy_write_fw(tp, fw);
2318 release_firmware(fw);
2319 } else {
2320 netif_warn(tp, probe, tp->dev, "unable to apply firmware patch\n");
2321 }
2562 2322
2563 rtl_phy_write(ioaddr, phy_reg_init_1, ARRAY_SIZE(phy_reg_init_1)); 2323 rtl_writephy(tp, 0x1f, 0x0000);
2564} 2324}
2565 2325
2566static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr) 2326static void rtl8168d_3_hw_phy_config(struct rtl8169_private *tp)
2567{ 2327{
2568 static const struct phy_reg phy_reg_init[] = { 2328 static const struct phy_reg phy_reg_init[] = {
2569 { 0x1f, 0x0002 }, 2329 { 0x1f, 0x0002 },
@@ -2621,10 +2381,26 @@ static void rtl8168d_3_hw_phy_config(void __iomem *ioaddr)
2621 { 0x1f, 0x0000 } 2381 { 0x1f, 0x0000 }
2622 }; 2382 };
2623 2383
2624 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2384 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2625} 2385}
2626 2386
2627static void rtl8102e_hw_phy_config(void __iomem *ioaddr) 2387static void rtl8168d_4_hw_phy_config(struct rtl8169_private *tp)
2388{
2389 static const struct phy_reg phy_reg_init[] = {
2390 { 0x1f, 0x0001 },
2391 { 0x17, 0x0cc0 },
2392
2393 { 0x1f, 0x0007 },
2394 { 0x1e, 0x002d },
2395 { 0x18, 0x0040 },
2396 { 0x1f, 0x0000 }
2397 };
2398
2399 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2400 rtl_patchphy(tp, 0x0d, 1 << 5);
2401}
2402
2403static void rtl8102e_hw_phy_config(struct rtl8169_private *tp)
2628{ 2404{
2629 static const struct phy_reg phy_reg_init[] = { 2405 static const struct phy_reg phy_reg_init[] = {
2630 { 0x1f, 0x0003 }, 2406 { 0x1f, 0x0003 },
@@ -2633,18 +2409,17 @@ static void rtl8102e_hw_phy_config(void __iomem *ioaddr)
2633 { 0x1f, 0x0000 } 2409 { 0x1f, 0x0000 }
2634 }; 2410 };
2635 2411
2636 mdio_write(ioaddr, 0x1f, 0x0000); 2412 rtl_writephy(tp, 0x1f, 0x0000);
2637 mdio_patch(ioaddr, 0x11, 1 << 12); 2413 rtl_patchphy(tp, 0x11, 1 << 12);
2638 mdio_patch(ioaddr, 0x19, 1 << 13); 2414 rtl_patchphy(tp, 0x19, 1 << 13);
2639 mdio_patch(ioaddr, 0x10, 1 << 15); 2415 rtl_patchphy(tp, 0x10, 1 << 15);
2640 2416
2641 rtl_phy_write(ioaddr, phy_reg_init, ARRAY_SIZE(phy_reg_init)); 2417 rtl_writephy_batch(tp, phy_reg_init, ARRAY_SIZE(phy_reg_init));
2642} 2418}
2643 2419
2644static void rtl_hw_phy_config(struct net_device *dev) 2420static void rtl_hw_phy_config(struct net_device *dev)
2645{ 2421{
2646 struct rtl8169_private *tp = netdev_priv(dev); 2422 struct rtl8169_private *tp = netdev_priv(dev);
2647 void __iomem *ioaddr = tp->mmio_addr;
2648 2423
2649 rtl8169_print_mac_version(tp); 2424 rtl8169_print_mac_version(tp);
2650 2425
@@ -2653,58 +2428,61 @@ static void rtl_hw_phy_config(struct net_device *dev)
2653 break; 2428 break;
2654 case RTL_GIGA_MAC_VER_02: 2429 case RTL_GIGA_MAC_VER_02:
2655 case RTL_GIGA_MAC_VER_03: 2430 case RTL_GIGA_MAC_VER_03:
2656 rtl8169s_hw_phy_config(ioaddr); 2431 rtl8169s_hw_phy_config(tp);
2657 break; 2432 break;
2658 case RTL_GIGA_MAC_VER_04: 2433 case RTL_GIGA_MAC_VER_04:
2659 rtl8169sb_hw_phy_config(ioaddr); 2434 rtl8169sb_hw_phy_config(tp);
2660 break; 2435 break;
2661 case RTL_GIGA_MAC_VER_05: 2436 case RTL_GIGA_MAC_VER_05:
2662 rtl8169scd_hw_phy_config(tp, ioaddr); 2437 rtl8169scd_hw_phy_config(tp);
2663 break; 2438 break;
2664 case RTL_GIGA_MAC_VER_06: 2439 case RTL_GIGA_MAC_VER_06:
2665 rtl8169sce_hw_phy_config(ioaddr); 2440 rtl8169sce_hw_phy_config(tp);
2666 break; 2441 break;
2667 case RTL_GIGA_MAC_VER_07: 2442 case RTL_GIGA_MAC_VER_07:
2668 case RTL_GIGA_MAC_VER_08: 2443 case RTL_GIGA_MAC_VER_08:
2669 case RTL_GIGA_MAC_VER_09: 2444 case RTL_GIGA_MAC_VER_09:
2670 rtl8102e_hw_phy_config(ioaddr); 2445 rtl8102e_hw_phy_config(tp);
2671 break; 2446 break;
2672 case RTL_GIGA_MAC_VER_11: 2447 case RTL_GIGA_MAC_VER_11:
2673 rtl8168bb_hw_phy_config(ioaddr); 2448 rtl8168bb_hw_phy_config(tp);
2674 break; 2449 break;
2675 case RTL_GIGA_MAC_VER_12: 2450 case RTL_GIGA_MAC_VER_12:
2676 rtl8168bef_hw_phy_config(ioaddr); 2451 rtl8168bef_hw_phy_config(tp);
2677 break; 2452 break;
2678 case RTL_GIGA_MAC_VER_17: 2453 case RTL_GIGA_MAC_VER_17:
2679 rtl8168bef_hw_phy_config(ioaddr); 2454 rtl8168bef_hw_phy_config(tp);
2680 break; 2455 break;
2681 case RTL_GIGA_MAC_VER_18: 2456 case RTL_GIGA_MAC_VER_18:
2682 rtl8168cp_1_hw_phy_config(ioaddr); 2457 rtl8168cp_1_hw_phy_config(tp);
2683 break; 2458 break;
2684 case RTL_GIGA_MAC_VER_19: 2459 case RTL_GIGA_MAC_VER_19:
2685 rtl8168c_1_hw_phy_config(ioaddr); 2460 rtl8168c_1_hw_phy_config(tp);
2686 break; 2461 break;
2687 case RTL_GIGA_MAC_VER_20: 2462 case RTL_GIGA_MAC_VER_20:
2688 rtl8168c_2_hw_phy_config(ioaddr); 2463 rtl8168c_2_hw_phy_config(tp);
2689 break; 2464 break;
2690 case RTL_GIGA_MAC_VER_21: 2465 case RTL_GIGA_MAC_VER_21:
2691 rtl8168c_3_hw_phy_config(ioaddr); 2466 rtl8168c_3_hw_phy_config(tp);
2692 break; 2467 break;
2693 case RTL_GIGA_MAC_VER_22: 2468 case RTL_GIGA_MAC_VER_22:
2694 rtl8168c_4_hw_phy_config(ioaddr); 2469 rtl8168c_4_hw_phy_config(tp);
2695 break; 2470 break;
2696 case RTL_GIGA_MAC_VER_23: 2471 case RTL_GIGA_MAC_VER_23:
2697 case RTL_GIGA_MAC_VER_24: 2472 case RTL_GIGA_MAC_VER_24:
2698 rtl8168cp_2_hw_phy_config(ioaddr); 2473 rtl8168cp_2_hw_phy_config(tp);
2699 break; 2474 break;
2700 case RTL_GIGA_MAC_VER_25: 2475 case RTL_GIGA_MAC_VER_25:
2701 rtl8168d_1_hw_phy_config(ioaddr); 2476 rtl8168d_1_hw_phy_config(tp);
2702 break; 2477 break;
2703 case RTL_GIGA_MAC_VER_26: 2478 case RTL_GIGA_MAC_VER_26:
2704 rtl8168d_2_hw_phy_config(ioaddr); 2479 rtl8168d_2_hw_phy_config(tp);
2705 break; 2480 break;
2706 case RTL_GIGA_MAC_VER_27: 2481 case RTL_GIGA_MAC_VER_27:
2707 rtl8168d_3_hw_phy_config(ioaddr); 2482 rtl8168d_3_hw_phy_config(tp);
2483 break;
2484 case RTL_GIGA_MAC_VER_28:
2485 rtl8168d_4_hw_phy_config(tp);
2708 break; 2486 break;
2709 2487
2710 default: 2488 default:
@@ -2727,7 +2505,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
2727 2505
2728 spin_lock_irq(&tp->lock); 2506 spin_lock_irq(&tp->lock);
2729 2507
2730 if (tp->phy_reset_pending(ioaddr)) { 2508 if (tp->phy_reset_pending(tp)) {
2731 /* 2509 /*
2732 * A busy loop could burn quite a few cycles on nowadays CPU. 2510 * A busy loop could burn quite a few cycles on nowadays CPU.
2733 * Let's delay the execution of the timer for a few ticks. 2511 * Let's delay the execution of the timer for a few ticks.
@@ -2741,7 +2519,7 @@ static void rtl8169_phy_timer(unsigned long __opaque)
2741 2519
2742 netif_warn(tp, link, dev, "PHY reset until link up\n"); 2520 netif_warn(tp, link, dev, "PHY reset until link up\n");
2743 2521
2744 tp->phy_reset_enable(ioaddr); 2522 tp->phy_reset_enable(tp);
2745 2523
2746out_mod_timer: 2524out_mod_timer:
2747 mod_timer(timer, jiffies + timeout); 2525 mod_timer(timer, jiffies + timeout);
@@ -2801,12 +2579,11 @@ static void rtl8169_release_board(struct pci_dev *pdev, struct net_device *dev,
2801static void rtl8169_phy_reset(struct net_device *dev, 2579static void rtl8169_phy_reset(struct net_device *dev,
2802 struct rtl8169_private *tp) 2580 struct rtl8169_private *tp)
2803{ 2581{
2804 void __iomem *ioaddr = tp->mmio_addr;
2805 unsigned int i; 2582 unsigned int i;
2806 2583
2807 tp->phy_reset_enable(ioaddr); 2584 tp->phy_reset_enable(tp);
2808 for (i = 0; i < 100; i++) { 2585 for (i = 0; i < 100; i++) {
2809 if (!tp->phy_reset_pending(ioaddr)) 2586 if (!tp->phy_reset_pending(tp))
2810 return; 2587 return;
2811 msleep(1); 2588 msleep(1);
2812 } 2589 }
@@ -2833,7 +2610,7 @@ static void rtl8169_init_phy(struct net_device *dev, struct rtl8169_private *tp)
2833 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n"); 2610 dprintk("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
2834 RTL_W8(0x82, 0x01); 2611 RTL_W8(0x82, 0x01);
2835 dprintk("Set PHY Reg 0x0bh = 0x00h\n"); 2612 dprintk("Set PHY Reg 0x0bh = 0x00h\n");
2836 mdio_write(ioaddr, 0x0b, 0x0000); //w 0x0b 15 0 0 2613 rtl_writephy(tp, 0x0b, 0x0000); //w 0x0b 15 0 0
2837 } 2614 }
2838 2615
2839 rtl8169_phy_reset(dev, tp); 2616 rtl8169_phy_reset(dev, tp);
@@ -2903,11 +2680,11 @@ static int rtl_xmii_ioctl(struct rtl8169_private *tp, struct mii_ioctl_data *dat
2903 return 0; 2680 return 0;
2904 2681
2905 case SIOCGMIIREG: 2682 case SIOCGMIIREG:
2906 data->val_out = mdio_read(tp->mmio_addr, data->reg_num & 0x1f); 2683 data->val_out = rtl_readphy(tp, data->reg_num & 0x1f);
2907 return 0; 2684 return 0;
2908 2685
2909 case SIOCSMIIREG: 2686 case SIOCSMIIREG:
2910 mdio_write(tp->mmio_addr, data->reg_num & 0x1f, data->val_in); 2687 rtl_writephy(tp, data->reg_num & 0x1f, data->val_in);
2911 return 0; 2688 return 0;
2912 } 2689 }
2913 return -EOPNOTSUPP; 2690 return -EOPNOTSUPP;
@@ -3007,6 +2784,173 @@ static const struct net_device_ops rtl8169_netdev_ops = {
3007 2784
3008}; 2785};
3009 2786
2787static void __devinit rtl_init_mdio_ops(struct rtl8169_private *tp)
2788{
2789 struct mdio_ops *ops = &tp->mdio_ops;
2790
2791 switch (tp->mac_version) {
2792 case RTL_GIGA_MAC_VER_27:
2793 ops->write = r8168dp_1_mdio_write;
2794 ops->read = r8168dp_1_mdio_read;
2795 break;
2796 case RTL_GIGA_MAC_VER_28:
2797 ops->write = r8168dp_2_mdio_write;
2798 ops->read = r8168dp_2_mdio_read;
2799 break;
2800 default:
2801 ops->write = r8169_mdio_write;
2802 ops->read = r8169_mdio_read;
2803 break;
2804 }
2805}
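
rtl_init_mdio_ops() is the hook that makes the tp-based PHY accessors work: it selects per-chip MDIO routines once at probe time, so rtl_writephy()/rtl_readphy() no longer hard-code the classic 8169 register dance. A sketch of the indirection it assumes (struct mdio_ops is expected to be embedded in struct rtl8169_private as tp->mdio_ops):

struct mdio_ops {
	void (*write)(void __iomem *ioaddr, int reg_addr, int value);
	int (*read)(void __iomem *ioaddr, int reg_addr);
};

static void rtl_writephy(struct rtl8169_private *tp, int location, u32 val)
{
	/* Dispatch through the accessor chosen in rtl_init_mdio_ops(). */
	tp->mdio_ops.write(tp->mmio_addr, location, val);
}

static int rtl_readphy(struct rtl8169_private *tp, int location)
{
	return tp->mdio_ops.read(tp->mmio_addr, location);
}

The r8168dp_*_mdio_{read,write} variants selected for VER_27/VER_28 presumably wrap the 8168DP-specific access mechanism rather than the plain register path used by r8169_mdio_*.
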
2806
2807static void r810x_phy_power_down(struct rtl8169_private *tp)
2808{
2809 rtl_writephy(tp, 0x1f, 0x0000);
2810 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
2811}
2812
2813static void r810x_phy_power_up(struct rtl8169_private *tp)
2814{
2815 rtl_writephy(tp, 0x1f, 0x0000);
2816 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
2817}
2818
2819static void r810x_pll_power_down(struct rtl8169_private *tp)
2820{
2821 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
2822 rtl_writephy(tp, 0x1f, 0x0000);
2823 rtl_writephy(tp, MII_BMCR, 0x0000);
2824 return;
2825 }
2826
2827 r810x_phy_power_down(tp);
2828}
2829
2830static void r810x_pll_power_up(struct rtl8169_private *tp)
2831{
2832 r810x_phy_power_up(tp);
2833}
2834
2835static void r8168_phy_power_up(struct rtl8169_private *tp)
2836{
2837 rtl_writephy(tp, 0x1f, 0x0000);
2838 rtl_writephy(tp, 0x0e, 0x0000);
2839 rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE);
2840}
2841
2842static void r8168_phy_power_down(struct rtl8169_private *tp)
2843{
2844 rtl_writephy(tp, 0x1f, 0x0000);
2845 rtl_writephy(tp, 0x0e, 0x0200);
2846 rtl_writephy(tp, MII_BMCR, BMCR_PDOWN);
2847}
2848
2849static void r8168_pll_power_down(struct rtl8169_private *tp)
2850{
2851 void __iomem *ioaddr = tp->mmio_addr;
2852
2853 if (tp->mac_version == RTL_GIGA_MAC_VER_27)
2854 return;
2855
2856 if (((tp->mac_version == RTL_GIGA_MAC_VER_23) ||
2857 (tp->mac_version == RTL_GIGA_MAC_VER_24)) &&
2858 (RTL_R16(CPlusCmd) & ASF)) {
2859 return;
2860 }
2861
2862 if (__rtl8169_get_wol(tp) & WAKE_ANY) {
2863 rtl_writephy(tp, 0x1f, 0x0000);
2864 rtl_writephy(tp, MII_BMCR, 0x0000);
2865
2866 RTL_W32(RxConfig, RTL_R32(RxConfig) |
2867 AcceptBroadcast | AcceptMulticast | AcceptMyPhys);
2868 return;
2869 }
2870
2871 r8168_phy_power_down(tp);
2872
2873 switch (tp->mac_version) {
2874 case RTL_GIGA_MAC_VER_25:
2875 case RTL_GIGA_MAC_VER_26:
2876 RTL_W8(PMCH, RTL_R8(PMCH) & ~0x80);
2877 break;
2878 }
2879}
2880
2881static void r8168_pll_power_up(struct rtl8169_private *tp)
2882{
2883 void __iomem *ioaddr = tp->mmio_addr;
2884
2885 if (tp->mac_version == RTL_GIGA_MAC_VER_27)
2886 return;
2887
2888 switch (tp->mac_version) {
2889 case RTL_GIGA_MAC_VER_25:
2890 case RTL_GIGA_MAC_VER_26:
2891 RTL_W8(PMCH, RTL_R8(PMCH) | 0x80);
2892 break;
2893 }
2894
2895 r8168_phy_power_up(tp);
2896}
2897
2898static void rtl_pll_power_op(struct rtl8169_private *tp,
2899 void (*op)(struct rtl8169_private *))
2900{
2901 if (op)
2902 op(tp);
2903}
2904
2905static void rtl_pll_power_down(struct rtl8169_private *tp)
2906{
2907 rtl_pll_power_op(tp, tp->pll_power_ops.down);
2908}
2909
2910static void rtl_pll_power_up(struct rtl8169_private *tp)
2911{
2912 rtl_pll_power_op(tp, tp->pll_power_ops.up);
2913}
2914
2915static void __devinit rtl_init_pll_power_ops(struct rtl8169_private *tp)
2916{
2917 struct pll_power_ops *ops = &tp->pll_power_ops;
2918
2919 switch (tp->mac_version) {
2920 case RTL_GIGA_MAC_VER_07:
2921 case RTL_GIGA_MAC_VER_08:
2922 case RTL_GIGA_MAC_VER_09:
2923 case RTL_GIGA_MAC_VER_10:
2924 case RTL_GIGA_MAC_VER_16:
2925 ops->down = r810x_pll_power_down;
2926 ops->up = r810x_pll_power_up;
2927 break;
2928
2929 case RTL_GIGA_MAC_VER_11:
2930 case RTL_GIGA_MAC_VER_12:
2931 case RTL_GIGA_MAC_VER_17:
2932 case RTL_GIGA_MAC_VER_18:
2933 case RTL_GIGA_MAC_VER_19:
2934 case RTL_GIGA_MAC_VER_20:
2935 case RTL_GIGA_MAC_VER_21:
2936 case RTL_GIGA_MAC_VER_22:
2937 case RTL_GIGA_MAC_VER_23:
2938 case RTL_GIGA_MAC_VER_24:
2939 case RTL_GIGA_MAC_VER_25:
2940 case RTL_GIGA_MAC_VER_26:
2941 case RTL_GIGA_MAC_VER_27:
2942 case RTL_GIGA_MAC_VER_28:
2943 ops->down = r8168_pll_power_down;
2944 ops->up = r8168_pll_power_up;
2945 break;
2946
2947 default:
2948 ops->down = NULL;
2949 ops->up = NULL;
2950 break;
2951 }
2952}
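
rtl_init_pll_power_ops() follows the same pattern for PLL power management: per-family down/up callbacks are chosen once at probe, and rtl_pll_power_down()/rtl_pll_power_up() above tolerate a NULL entry so chips without a sequence are simply a no-op. The struct is assumed to look like:

struct pll_power_ops {
	void (*down)(struct rtl8169_private *);
	void (*up)(struct rtl8169_private *);
};

With that in place, the shutdown and suspend paths can call rtl_pll_power_down() unconditionally (see the rtl8169_down() and rtl8169_net_suspend() hunks below) instead of repeating per-chip checks at every call site.
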
2953
3010static int __devinit 2954static int __devinit
3011rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) 2955rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3012{ 2956{
@@ -3125,6 +3069,9 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3125 /* Identify chip attached to board */ 3069 /* Identify chip attached to board */
3126 rtl8169_get_mac_version(tp, ioaddr); 3070 rtl8169_get_mac_version(tp, ioaddr);
3127 3071
3072 rtl_init_mdio_ops(tp);
3073 rtl_init_pll_power_ops(tp);
3074
3128 /* Use appropriate default if unknown */ 3075 /* Use appropriate default if unknown */
3129 if (tp->mac_version == RTL_GIGA_MAC_NONE) { 3076 if (tp->mac_version == RTL_GIGA_MAC_NONE) {
3130 netif_notice(tp, probe, dev, 3077 netif_notice(tp, probe, dev,
@@ -3215,14 +3162,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
3215 dev->base_addr, dev->dev_addr, 3162 dev->base_addr, dev->dev_addr,
3216 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq); 3163 (u32)(RTL_R32(TxConfig) & 0x9cf0f8ff), dev->irq);
3217 3164
3218 rtl8169_init_phy(dev, tp); 3165 if ((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
3219 3166 (tp->mac_version == RTL_GIGA_MAC_VER_28)) {
3220 /* 3167 rtl8168_driver_start(tp);
3221 * Pretend we are using VLANs; This bypasses a nasty bug where 3168 }
3222 * Interrupts stop flowing on high load on 8110SCd controllers.
3223 */
3224 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3225 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3226 3169
3227 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL); 3170 device_set_wakeup_enable(&pdev->dev, tp->features & RTL_FEATURE_WOL);
3228 3171
@@ -3250,7 +3193,12 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3250 struct net_device *dev = pci_get_drvdata(pdev); 3193 struct net_device *dev = pci_get_drvdata(pdev);
3251 struct rtl8169_private *tp = netdev_priv(dev); 3194 struct rtl8169_private *tp = netdev_priv(dev);
3252 3195
3253 flush_scheduled_work(); 3196 if ((tp->mac_version == RTL_GIGA_MAC_VER_27) ||
3197 (tp->mac_version == RTL_GIGA_MAC_VER_28)) {
3198 rtl8168_driver_stop(tp);
3199 }
3200
3201 cancel_delayed_work_sync(&tp->task);
3254 3202
3255 unregister_netdev(dev); 3203 unregister_netdev(dev);
3256 3204
@@ -3268,6 +3216,7 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
3268static int rtl8169_open(struct net_device *dev) 3216static int rtl8169_open(struct net_device *dev)
3269{ 3217{
3270 struct rtl8169_private *tp = netdev_priv(dev); 3218 struct rtl8169_private *tp = netdev_priv(dev);
3219 void __iomem *ioaddr = tp->mmio_addr;
3271 struct pci_dev *pdev = tp->pci_dev; 3220 struct pci_dev *pdev = tp->pci_dev;
3272 int retval = -ENOMEM; 3221 int retval = -ENOMEM;
3273 3222
@@ -3303,6 +3252,17 @@ static int rtl8169_open(struct net_device *dev)
3303 3252
3304 napi_enable(&tp->napi); 3253 napi_enable(&tp->napi);
3305 3254
3255 rtl8169_init_phy(dev, tp);
3256
3257 /*
3258 * Pretend we are using VLANs; This bypasses a nasty bug where
3259 * Interrupts stop flowing on high load on 8110SCd controllers.
3260 */
3261 if (tp->mac_version == RTL_GIGA_MAC_VER_05)
3262 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) | RxVlan);
3263
3264 rtl_pll_power_up(tp);
3265
3306 rtl_hw_start(dev); 3266 rtl_hw_start(dev);
3307 3267
3308 rtl8169_request_timer(dev); 3268 rtl8169_request_timer(dev);
@@ -3310,7 +3270,7 @@ static int rtl8169_open(struct net_device *dev)
3310 tp->saved_wolopts = 0; 3270 tp->saved_wolopts = 0;
3311 pm_runtime_put_noidle(&pdev->dev); 3271 pm_runtime_put_noidle(&pdev->dev);
3312 3272
3313 rtl8169_check_link_status(dev, tp, tp->mmio_addr); 3273 rtl8169_check_link_status(dev, tp, ioaddr);
3314out: 3274out:
3315 return retval; 3275 return retval;
3316 3276
@@ -3329,11 +3289,19 @@ err_pm_runtime_put:
3329 goto out; 3289 goto out;
3330} 3290}
3331 3291
3332static void rtl8169_hw_reset(void __iomem *ioaddr) 3292static void rtl8169_hw_reset(struct rtl8169_private *tp)
3333{ 3293{
3294 void __iomem *ioaddr = tp->mmio_addr;
3295
3334 /* Disable interrupts */ 3296 /* Disable interrupts */
3335 rtl8169_irq_mask_and_ack(ioaddr); 3297 rtl8169_irq_mask_and_ack(ioaddr);
3336 3298
3299 if (tp->mac_version == RTL_GIGA_MAC_VER_28) {
3300 while (RTL_R8(TxPoll) & NPQ)
3301 udelay(20);
3302
3303 }
3304
3337 /* Reset the chipset */ 3305 /* Reset the chipset */
3338 RTL_W8(ChipCmd, CmdReset); 3306 RTL_W8(ChipCmd, CmdReset);
3339 3307
@@ -3447,7 +3415,7 @@ static void rtl_hw_start_8169(struct net_device *dev)
3447 (tp->mac_version == RTL_GIGA_MAC_VER_04)) 3415 (tp->mac_version == RTL_GIGA_MAC_VER_04))
3448 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb); 3416 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
3449 3417
3450 RTL_W8(EarlyTxThres, EarlyTxThld); 3418 RTL_W8(EarlyTxThres, NoEarlyTx);
3451 3419
3452 rtl_set_rx_max_size(ioaddr, rx_buf_sz); 3420 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3453 3421
@@ -3517,12 +3485,22 @@ static void rtl_tx_performance_tweak(struct pci_dev *pdev, u16 force)
3517 } 3485 }
3518} 3486}
3519 3487
3520static void rtl_csi_access_enable(void __iomem *ioaddr) 3488static void rtl_csi_access_enable(void __iomem *ioaddr, u32 bits)
3521{ 3489{
3522 u32 csi; 3490 u32 csi;
3523 3491
3524 csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff; 3492 csi = rtl_csi_read(ioaddr, 0x070c) & 0x00ffffff;
3525 rtl_csi_write(ioaddr, 0x070c, csi | 0x27000000); 3493 rtl_csi_write(ioaddr, 0x070c, csi | bits);
3494}
3495
3496static void rtl_csi_access_enable_1(void __iomem *ioaddr)
3497{
3498 rtl_csi_access_enable(ioaddr, 0x17000000);
3499}
3500
3501static void rtl_csi_access_enable_2(void __iomem *ioaddr)
3502{
3503 rtl_csi_access_enable(ioaddr, 0x27000000);
3526} 3504}
3527 3505
3528struct ephy_info { 3506struct ephy_info {
@@ -3557,6 +3535,21 @@ static void rtl_disable_clock_request(struct pci_dev *pdev)
3557 } 3535 }
3558} 3536}
3559 3537
3538static void rtl_enable_clock_request(struct pci_dev *pdev)
3539{
3540 struct net_device *dev = pci_get_drvdata(pdev);
3541 struct rtl8169_private *tp = netdev_priv(dev);
3542 int cap = tp->pcie_cap;
3543
3544 if (cap) {
3545 u16 ctl;
3546
3547 pci_read_config_word(pdev, cap + PCI_EXP_LNKCTL, &ctl);
3548 ctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3549 pci_write_config_word(pdev, cap + PCI_EXP_LNKCTL, ctl);
3550 }
3551}
3552
3560#define R8168_CPCMD_QUIRK_MASK (\ 3553#define R8168_CPCMD_QUIRK_MASK (\
3561 EnableBist | \ 3554 EnableBist | \
3562 Mac_dbgo_oe | \ 3555 Mac_dbgo_oe | \
@@ -3582,7 +3575,7 @@ static void rtl_hw_start_8168bef(void __iomem *ioaddr, struct pci_dev *pdev)
3582{ 3575{
3583 rtl_hw_start_8168bb(ioaddr, pdev); 3576 rtl_hw_start_8168bb(ioaddr, pdev);
3584 3577
3585 RTL_W8(EarlyTxThres, EarlyTxThld); 3578 RTL_W8(MaxTxPacketSize, TxPacketMax);
3586 3579
3587 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0)); 3580 RTL_W8(Config4, RTL_R8(Config4) & ~(1 << 0));
3588} 3581}
@@ -3610,7 +3603,7 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
3610 { 0x07, 0, 0x2000 } 3603 { 0x07, 0, 0x2000 }
3611 }; 3604 };
3612 3605
3613 rtl_csi_access_enable(ioaddr); 3606 rtl_csi_access_enable_2(ioaddr);
3614 3607
3615 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp)); 3608 rtl_ephy_init(ioaddr, e_info_8168cp, ARRAY_SIZE(e_info_8168cp));
3616 3609
@@ -3619,7 +3612,7 @@ static void rtl_hw_start_8168cp_1(void __iomem *ioaddr, struct pci_dev *pdev)
3619 3612
3620static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev) 3613static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
3621{ 3614{
3622 rtl_csi_access_enable(ioaddr); 3615 rtl_csi_access_enable_2(ioaddr);
3623 3616
3624 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 3617 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3625 3618
@@ -3630,14 +3623,14 @@ static void rtl_hw_start_8168cp_2(void __iomem *ioaddr, struct pci_dev *pdev)
3630 3623
3631static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev) 3624static void rtl_hw_start_8168cp_3(void __iomem *ioaddr, struct pci_dev *pdev)
3632{ 3625{
3633 rtl_csi_access_enable(ioaddr); 3626 rtl_csi_access_enable_2(ioaddr);
3634 3627
3635 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en); 3628 RTL_W8(Config3, RTL_R8(Config3) & ~Beacon_en);
3636 3629
3637 /* Magic. */ 3630 /* Magic. */
3638 RTL_W8(DBG_REG, 0x20); 3631 RTL_W8(DBG_REG, 0x20);
3639 3632
3640 RTL_W8(EarlyTxThres, EarlyTxThld); 3633 RTL_W8(MaxTxPacketSize, TxPacketMax);
3641 3634
3642 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 3635 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3643 3636
@@ -3652,7 +3645,7 @@ static void rtl_hw_start_8168c_1(void __iomem *ioaddr, struct pci_dev *pdev)
3652 { 0x06, 0x0080, 0x0000 } 3645 { 0x06, 0x0080, 0x0000 }
3653 }; 3646 };
3654 3647
3655 rtl_csi_access_enable(ioaddr); 3648 rtl_csi_access_enable_2(ioaddr);
3656 3649
3657 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2); 3650 RTL_W8(DBG_REG, 0x06 | FIX_NAK_1 | FIX_NAK_2);
3658 3651
@@ -3668,7 +3661,7 @@ static void rtl_hw_start_8168c_2(void __iomem *ioaddr, struct pci_dev *pdev)
3668 { 0x03, 0x0400, 0x0220 } 3661 { 0x03, 0x0400, 0x0220 }
3669 }; 3662 };
3670 3663
3671 rtl_csi_access_enable(ioaddr); 3664 rtl_csi_access_enable_2(ioaddr);
3672 3665
3673 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2)); 3666 rtl_ephy_init(ioaddr, e_info_8168c_2, ARRAY_SIZE(e_info_8168c_2));
3674 3667
@@ -3682,24 +3675,50 @@ static void rtl_hw_start_8168c_3(void __iomem *ioaddr, struct pci_dev *pdev)
3682 3675
3683static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev) 3676static void rtl_hw_start_8168c_4(void __iomem *ioaddr, struct pci_dev *pdev)
3684{ 3677{
3685 rtl_csi_access_enable(ioaddr); 3678 rtl_csi_access_enable_2(ioaddr);
3686 3679
3687 __rtl_hw_start_8168cp(ioaddr, pdev); 3680 __rtl_hw_start_8168cp(ioaddr, pdev);
3688} 3681}
3689 3682
3690static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev) 3683static void rtl_hw_start_8168d(void __iomem *ioaddr, struct pci_dev *pdev)
3691{ 3684{
3692 rtl_csi_access_enable(ioaddr); 3685 rtl_csi_access_enable_2(ioaddr);
3693 3686
3694 rtl_disable_clock_request(pdev); 3687 rtl_disable_clock_request(pdev);
3695 3688
3696 RTL_W8(EarlyTxThres, EarlyTxThld); 3689 RTL_W8(MaxTxPacketSize, TxPacketMax);
3697 3690
3698 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 3691 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3699 3692
3700 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK); 3693 RTL_W16(CPlusCmd, RTL_R16(CPlusCmd) & ~R8168_CPCMD_QUIRK_MASK);
3701} 3694}
3702 3695
3696static void rtl_hw_start_8168d_4(void __iomem *ioaddr, struct pci_dev *pdev)
3697{
3698 static const struct ephy_info e_info_8168d_4[] = {
3699 { 0x0b, ~0, 0x48 },
3700 { 0x19, 0x20, 0x50 },
3701 { 0x0c, ~0, 0x20 }
3702 };
3703 int i;
3704
3705 rtl_csi_access_enable_1(ioaddr);
3706
3707 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3708
3709 RTL_W8(MaxTxPacketSize, TxPacketMax);
3710
3711 for (i = 0; i < ARRAY_SIZE(e_info_8168d_4); i++) {
3712 const struct ephy_info *e = e_info_8168d_4 + i;
3713 u16 w;
3714
3715 w = rtl_ephy_read(ioaddr, e->offset);
3716 rtl_ephy_write(ioaddr, 0x03, (w & e->mask) | e->bits);
3717 }
3718
3719 rtl_enable_clock_request(pdev);
3720}
3721
3703static void rtl_hw_start_8168(struct net_device *dev) 3722static void rtl_hw_start_8168(struct net_device *dev)
3704{ 3723{
3705 struct rtl8169_private *tp = netdev_priv(dev); 3724 struct rtl8169_private *tp = netdev_priv(dev);
@@ -3708,7 +3727,7 @@ static void rtl_hw_start_8168(struct net_device *dev)
3708 3727
3709 RTL_W8(Cfg9346, Cfg9346_Unlock); 3728 RTL_W8(Cfg9346, Cfg9346_Unlock);
3710 3729
3711 RTL_W8(EarlyTxThres, EarlyTxThld); 3730 RTL_W8(MaxTxPacketSize, TxPacketMax);
3712 3731
3713 rtl_set_rx_max_size(ioaddr, rx_buf_sz); 3732 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3714 3733
@@ -3777,6 +3796,10 @@ static void rtl_hw_start_8168(struct net_device *dev)
3777 rtl_hw_start_8168d(ioaddr, pdev); 3796 rtl_hw_start_8168d(ioaddr, pdev);
3778 break; 3797 break;
3779 3798
3799 case RTL_GIGA_MAC_VER_28:
3800 rtl_hw_start_8168d_4(ioaddr, pdev);
3801 break;
3802
3780 default: 3803 default:
3781 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n", 3804 printk(KERN_ERR PFX "%s: unknown chipset (mac_version = %d).\n",
3782 dev->name, tp->mac_version); 3805 dev->name, tp->mac_version);
@@ -3818,7 +3841,7 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3818 }; 3841 };
3819 u8 cfg1; 3842 u8 cfg1;
3820 3843
3821 rtl_csi_access_enable(ioaddr); 3844 rtl_csi_access_enable_2(ioaddr);
3822 3845
3823 RTL_W8(DBG_REG, FIX_NAK_1); 3846 RTL_W8(DBG_REG, FIX_NAK_1);
3824 3847
@@ -3839,7 +3862,7 @@ static void rtl_hw_start_8102e_1(void __iomem *ioaddr, struct pci_dev *pdev)
3839 3862
3840static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev) 3863static void rtl_hw_start_8102e_2(void __iomem *ioaddr, struct pci_dev *pdev)
3841{ 3864{
3842 rtl_csi_access_enable(ioaddr); 3865 rtl_csi_access_enable_2(ioaddr);
3843 3866
3844 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT); 3867 rtl_tx_performance_tweak(pdev, 0x5 << MAX_READ_REQUEST_SHIFT);
3845 3868
@@ -3888,7 +3911,7 @@ static void rtl_hw_start_8101(struct net_device *dev)
3888 3911
3889 RTL_W8(Cfg9346, Cfg9346_Unlock); 3912 RTL_W8(Cfg9346, Cfg9346_Unlock);
3890 3913
3891 RTL_W8(EarlyTxThres, EarlyTxThld); 3914 RTL_W8(MaxTxPacketSize, TxPacketMax);
3892 3915
3893 rtl_set_rx_max_size(ioaddr, rx_buf_sz); 3916 rtl_set_rx_max_size(ioaddr, rx_buf_sz);
3894 3917
@@ -4189,7 +4212,7 @@ static void rtl8169_tx_timeout(struct net_device *dev)
4189{ 4212{
4190 struct rtl8169_private *tp = netdev_priv(dev); 4213 struct rtl8169_private *tp = netdev_priv(dev);
4191 4214
4192 rtl8169_hw_reset(tp->mmio_addr); 4215 rtl8169_hw_reset(tp);
4193 4216
4194 /* Let's wait a bit while any (async) irq lands on */ 4217 /* Let's wait a bit while any (async) irq lands on */
4195 rtl8169_schedule_work(dev, rtl8169_reset_task); 4218 rtl8169_schedule_work(dev, rtl8169_reset_task);
@@ -4347,7 +4370,6 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
4347{ 4370{
4348 struct rtl8169_private *tp = netdev_priv(dev); 4371 struct rtl8169_private *tp = netdev_priv(dev);
4349 struct pci_dev *pdev = tp->pci_dev; 4372 struct pci_dev *pdev = tp->pci_dev;
4350 void __iomem *ioaddr = tp->mmio_addr;
4351 u16 pci_status, pci_cmd; 4373 u16 pci_status, pci_cmd;
4352 4374
4353 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 4375 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
@@ -4378,13 +4400,15 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
4378 4400
4379 /* The infamous DAC f*ckup only happens at boot time */ 4401 /* The infamous DAC f*ckup only happens at boot time */
4380 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) { 4402 if ((tp->cp_cmd & PCIDAC) && !tp->dirty_rx && !tp->cur_rx) {
4403 void __iomem *ioaddr = tp->mmio_addr;
4404
4381 netif_info(tp, intr, dev, "disabling PCI DAC\n"); 4405 netif_info(tp, intr, dev, "disabling PCI DAC\n");
4382 tp->cp_cmd &= ~PCIDAC; 4406 tp->cp_cmd &= ~PCIDAC;
4383 RTL_W16(CPlusCmd, tp->cp_cmd); 4407 RTL_W16(CPlusCmd, tp->cp_cmd);
4384 dev->features &= ~NETIF_F_HIGHDMA; 4408 dev->features &= ~NETIF_F_HIGHDMA;
4385 } 4409 }
4386 4410
4387 rtl8169_hw_reset(ioaddr); 4411 rtl8169_hw_reset(tp);
4388 4412
4389 rtl8169_schedule_work(dev, rtl8169_reinit_task); 4413 rtl8169_schedule_work(dev, rtl8169_reinit_task);
4390} 4414}
@@ -4711,6 +4735,8 @@ static void rtl8169_down(struct net_device *dev)
4711 rtl8169_tx_clear(tp); 4735 rtl8169_tx_clear(tp);
4712 4736
4713 rtl8169_rx_clear(tp); 4737 rtl8169_rx_clear(tp);
4738
4739 rtl_pll_power_down(tp);
4714} 4740}
4715 4741
4716static int rtl8169_close(struct net_device *dev) 4742static int rtl8169_close(struct net_device *dev)
@@ -4815,9 +4841,13 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
4815 4841
4816static void rtl8169_net_suspend(struct net_device *dev) 4842static void rtl8169_net_suspend(struct net_device *dev)
4817{ 4843{
4844 struct rtl8169_private *tp = netdev_priv(dev);
4845
4818 if (!netif_running(dev)) 4846 if (!netif_running(dev))
4819 return; 4847 return;
4820 4848
4849 rtl_pll_power_down(tp);
4850
4821 netif_device_detach(dev); 4851 netif_device_detach(dev);
4822 netif_stop_queue(dev); 4852 netif_stop_queue(dev);
4823} 4853}
@@ -4836,7 +4866,12 @@ static int rtl8169_suspend(struct device *device)
4836 4866
4837static void __rtl8169_resume(struct net_device *dev) 4867static void __rtl8169_resume(struct net_device *dev)
4838{ 4868{
4869 struct rtl8169_private *tp = netdev_priv(dev);
4870
4839 netif_device_attach(dev); 4871 netif_device_attach(dev);
4872
4873 rtl_pll_power_up(tp);
4874
4840 rtl8169_schedule_work(dev, rtl8169_reset_task); 4875 rtl8169_schedule_work(dev, rtl8169_reset_task);
4841} 4876}
4842 4877
diff --git a/drivers/net/s2io.c b/drivers/net/s2io.c
index ecc25aab896a..39c17cecb8b9 100644
--- a/drivers/net/s2io.c
+++ b/drivers/net/s2io.c
@@ -88,14 +88,14 @@
88#include "s2io.h" 88#include "s2io.h"
89#include "s2io-regs.h" 89#include "s2io-regs.h"
90 90
91#define DRV_VERSION "2.0.26.27" 91#define DRV_VERSION "2.0.26.28"
92 92
93/* S2io Driver name & version. */ 93/* S2io Driver name & version. */
94static char s2io_driver_name[] = "Neterion"; 94static const char s2io_driver_name[] = "Neterion";
95static char s2io_driver_version[] = DRV_VERSION; 95static const char s2io_driver_version[] = DRV_VERSION;
96 96
97static int rxd_size[2] = {32, 48}; 97static const int rxd_size[2] = {32, 48};
98static int rxd_count[2] = {127, 85}; 98static const int rxd_count[2] = {127, 85};
99 99
100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp) 100static inline int RXD_IS_UP2DT(struct RxD_t *rxdp)
101{ 101{
@@ -3598,10 +3598,12 @@ static int s2io_set_swapper(struct s2io_nic *sp)
3598 val64 = readq(&bar0->pif_rd_swapper_fb); 3598 val64 = readq(&bar0->pif_rd_swapper_fb);
3599 if (val64 != 0x0123456789ABCDEFULL) { 3599 if (val64 != 0x0123456789ABCDEFULL) {
3600 int i = 0; 3600 int i = 0;
3601 u64 value[] = { 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */ 3601 static const u64 value[] = {
3602 0x8100008181000081ULL, /* FE=1, SE=0 */ 3602 0xC30000C3C30000C3ULL, /* FE=1, SE=1 */
3603 0x4200004242000042ULL, /* FE=0, SE=1 */ 3603 0x8100008181000081ULL, /* FE=1, SE=0 */
3604 0}; /* FE=0, SE=0 */ 3604 0x4200004242000042ULL, /* FE=0, SE=1 */
3605 0 /* FE=0, SE=0 */
3606 };
3605 3607
3606 while (i < 4) { 3608 while (i < 4) {
3607 writeq(value[i], &bar0->swapper_ctrl); 3609 writeq(value[i], &bar0->swapper_ctrl);
@@ -3627,10 +3629,12 @@ static int s2io_set_swapper(struct s2io_nic *sp)
3627 3629
3628 if (val64 != valt) { 3630 if (val64 != valt) {
3629 int i = 0; 3631 int i = 0;
3630 u64 value[] = { 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */ 3632 static const u64 value[] = {
3631 0x0081810000818100ULL, /* FE=1, SE=0 */ 3633 0x00C3C30000C3C300ULL, /* FE=1, SE=1 */
3632 0x0042420000424200ULL, /* FE=0, SE=1 */ 3634 0x0081810000818100ULL, /* FE=1, SE=0 */
3633 0}; /* FE=0, SE=0 */ 3635 0x0042420000424200ULL, /* FE=0, SE=1 */
3636 0 /* FE=0, SE=0 */
3637 };
3634 3638
3635 while (i < 4) { 3639 while (i < 4) {
3636 writeq((value[i] | valr), &bar0->swapper_ctrl); 3640 writeq((value[i] | valr), &bar0->swapper_ctrl);
@@ -5568,30 +5572,27 @@ static void s2io_ethtool_gringparam(struct net_device *dev,
5568 struct s2io_nic *sp = netdev_priv(dev); 5572 struct s2io_nic *sp = netdev_priv(dev);
5569 int i, tx_desc_count = 0, rx_desc_count = 0; 5573 int i, tx_desc_count = 0, rx_desc_count = 0;
5570 5574
5571 if (sp->rxd_mode == RXD_MODE_1) 5575 if (sp->rxd_mode == RXD_MODE_1) {
5572 ering->rx_max_pending = MAX_RX_DESC_1; 5576 ering->rx_max_pending = MAX_RX_DESC_1;
5573 else if (sp->rxd_mode == RXD_MODE_3B) 5577 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5578 } else {
5574 ering->rx_max_pending = MAX_RX_DESC_2; 5579 ering->rx_max_pending = MAX_RX_DESC_2;
5580 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5581 }
5575 5582
5583 ering->rx_mini_max_pending = 0;
5576 ering->tx_max_pending = MAX_TX_DESC; 5584 ering->tx_max_pending = MAX_TX_DESC;
5577 for (i = 0 ; i < sp->config.tx_fifo_num ; i++)
5578 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5579 5585
5580 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds); 5586 for (i = 0; i < sp->config.rx_ring_num; i++)
5581 ering->tx_pending = tx_desc_count;
5582 rx_desc_count = 0;
5583 for (i = 0 ; i < sp->config.rx_ring_num ; i++)
5584 rx_desc_count += sp->config.rx_cfg[i].num_rxd; 5587 rx_desc_count += sp->config.rx_cfg[i].num_rxd;
5585
5586 ering->rx_pending = rx_desc_count; 5588 ering->rx_pending = rx_desc_count;
5587
5588 ering->rx_mini_max_pending = 0;
5589 ering->rx_mini_pending = 0;
5590 if (sp->rxd_mode == RXD_MODE_1)
5591 ering->rx_jumbo_max_pending = MAX_RX_DESC_1;
5592 else if (sp->rxd_mode == RXD_MODE_3B)
5593 ering->rx_jumbo_max_pending = MAX_RX_DESC_2;
5594 ering->rx_jumbo_pending = rx_desc_count; 5589 ering->rx_jumbo_pending = rx_desc_count;
5590 ering->rx_mini_pending = 0;
5591
5592 for (i = 0; i < sp->config.tx_fifo_num; i++)
5593 tx_desc_count += sp->config.tx_cfg[i].fifo_len;
5594 ering->tx_pending = tx_desc_count;
5595 DBG_PRINT(INFO_DBG, "max txds: %d\n", sp->config.max_txds);
5595} 5596}
5596 5597
5597/** 5598/**
@@ -7692,6 +7693,8 @@ static void s2io_init_pci(struct s2io_nic *sp)
7692static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type, 7693static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7693 u8 *dev_multiq) 7694 u8 *dev_multiq)
7694{ 7695{
7696 int i;
7697
7695 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) { 7698 if ((tx_fifo_num > MAX_TX_FIFOS) || (tx_fifo_num < 1)) {
7696 DBG_PRINT(ERR_DBG, "Requested number of tx fifos " 7699 DBG_PRINT(ERR_DBG, "Requested number of tx fifos "
7697 "(%d) not supported\n", tx_fifo_num); 7700 "(%d) not supported\n", tx_fifo_num);
@@ -7750,6 +7753,15 @@ static int s2io_verify_parm(struct pci_dev *pdev, u8 *dev_intr_type,
7750 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n"); 7753 DBG_PRINT(ERR_DBG, "Defaulting to 1-buffer mode\n");
7751 rx_ring_mode = 1; 7754 rx_ring_mode = 1;
7752 } 7755 }
7756
7757 for (i = 0; i < MAX_RX_RINGS; i++)
7758 if (rx_ring_sz[i] > MAX_RX_BLOCKS_PER_RING) {
7759 DBG_PRINT(ERR_DBG, "Requested rx ring size not "
7760 "supported\nDefaulting to %d\n",
7761 MAX_RX_BLOCKS_PER_RING);
7762 rx_ring_sz[i] = MAX_RX_BLOCKS_PER_RING;
7763 }
7764
7753 return SUCCESS; 7765 return SUCCESS;
7754} 7766}
7755 7767
@@ -8321,8 +8333,7 @@ mem_alloc_failed:
8321 8333
8322static void __devexit s2io_rem_nic(struct pci_dev *pdev) 8334static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8323{ 8335{
8324 struct net_device *dev = 8336 struct net_device *dev = pci_get_drvdata(pdev);
8325 (struct net_device *)pci_get_drvdata(pdev);
8326 struct s2io_nic *sp; 8337 struct s2io_nic *sp;
8327 8338
8328 if (dev == NULL) { 8339 if (dev == NULL) {
@@ -8330,9 +8341,11 @@ static void __devexit s2io_rem_nic(struct pci_dev *pdev)
8330 return; 8341 return;
8331 } 8342 }
8332 8343
8333 flush_scheduled_work();
8334
8335 sp = netdev_priv(dev); 8344 sp = netdev_priv(dev);
8345
8346 cancel_work_sync(&sp->rst_timer_task);
8347 cancel_work_sync(&sp->set_link_task);
8348
8336 unregister_netdev(dev); 8349 unregister_netdev(dev);
8337 8350
8338 free_shared_mem(sp); 8351 free_shared_mem(sp);
diff --git a/drivers/net/s2io.h b/drivers/net/s2io.h
index 00b8614efe48..7d160306b651 100644
--- a/drivers/net/s2io.h
+++ b/drivers/net/s2io.h
@@ -355,13 +355,12 @@ struct stat_block {
355#define FIFO_OTHER_MAX_NUM 1 355#define FIFO_OTHER_MAX_NUM 1
356 356
357 357
358#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 127 ) 358#define MAX_RX_DESC_1 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 128)
359#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 ) 359#define MAX_RX_DESC_2 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 86)
360#define MAX_RX_DESC_3 (MAX_RX_RINGS * MAX_RX_BLOCKS_PER_RING * 85 )
361#define MAX_TX_DESC (MAX_AVAILABLE_TXDS) 360#define MAX_TX_DESC (MAX_AVAILABLE_TXDS)
362 361
363/* FIFO mappings for all possible number of fifos configured */ 362/* FIFO mappings for all possible number of fifos configured */
364static int fifo_map[][MAX_TX_FIFOS] = { 363static const int fifo_map[][MAX_TX_FIFOS] = {
365 {0, 0, 0, 0, 0, 0, 0, 0}, 364 {0, 0, 0, 0, 0, 0, 0, 0},
366 {0, 0, 0, 0, 1, 1, 1, 1}, 365 {0, 0, 0, 0, 1, 1, 1, 1},
367 {0, 0, 0, 1, 1, 1, 2, 2}, 366 {0, 0, 0, 1, 1, 1, 2, 2},
@@ -372,7 +371,7 @@ static int fifo_map[][MAX_TX_FIFOS] = {
372 {0, 1, 2, 3, 4, 5, 6, 7}, 371 {0, 1, 2, 3, 4, 5, 6, 7},
373}; 372};
374 373
375static u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7}; 374static const u16 fifo_selector[MAX_TX_FIFOS] = {0, 1, 3, 3, 7, 7, 7, 7};
376 375
377/* Maintains Per FIFO related information. */ 376/* Maintains Per FIFO related information. */
378struct tx_fifo_config { 377struct tx_fifo_config {
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 417adf372828..76290a8c3c14 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -1449,7 +1449,8 @@ static int __devinit sc92031_probe(struct pci_dev *pdev,
1449 dev->irq = pdev->irq; 1449 dev->irq = pdev->irq;
1450 1450
1451 /* faked with skb_copy_and_csum_dev */ 1451 /* faked with skb_copy_and_csum_dev */
1452 dev->features = NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA; 1452 dev->features = NETIF_F_SG | NETIF_F_HIGHDMA |
1453 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1453 1454
1454 dev->netdev_ops = &sc92031_netdev_ops; 1455 dev->netdev_ops = &sc92031_netdev_ops;
1455 dev->watchdog_timeo = TX_TIMEOUT; 1456 dev->watchdog_timeo = TX_TIMEOUT;
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index fb83cdd94643..711449c6e675 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -23,7 +23,6 @@
23#include <linux/gfp.h> 23#include <linux/gfp.h>
24#include "net_driver.h" 24#include "net_driver.h"
25#include "efx.h" 25#include "efx.h"
26#include "mdio_10g.h"
27#include "nic.h" 26#include "nic.h"
28 27
29#include "mcdi.h" 28#include "mcdi.h"
@@ -462,9 +461,6 @@ efx_alloc_channel(struct efx_nic *efx, int i, struct efx_channel *old_channel)
462 } 461 }
463 } 462 }
464 463
465 spin_lock_init(&channel->tx_stop_lock);
466 atomic_set(&channel->tx_stop_count, 1);
467
468 rx_queue = &channel->rx_queue; 464 rx_queue = &channel->rx_queue;
469 rx_queue->efx = efx; 465 rx_queue->efx = efx;
470 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill, 466 setup_timer(&rx_queue->slow_fill, efx_rx_slow_fill,
@@ -921,6 +917,7 @@ static void efx_mac_work(struct work_struct *data)
921 917
922static int efx_probe_port(struct efx_nic *efx) 918static int efx_probe_port(struct efx_nic *efx)
923{ 919{
920 unsigned char *perm_addr;
924 int rc; 921 int rc;
925 922
926 netif_dbg(efx, probe, efx->net_dev, "create port\n"); 923 netif_dbg(efx, probe, efx->net_dev, "create port\n");
@@ -934,11 +931,12 @@ static int efx_probe_port(struct efx_nic *efx)
934 return rc; 931 return rc;
935 932
936 /* Sanity check MAC address */ 933 /* Sanity check MAC address */
937 if (is_valid_ether_addr(efx->mac_address)) { 934 perm_addr = efx->net_dev->perm_addr;
938 memcpy(efx->net_dev->dev_addr, efx->mac_address, ETH_ALEN); 935 if (is_valid_ether_addr(perm_addr)) {
936 memcpy(efx->net_dev->dev_addr, perm_addr, ETH_ALEN);
939 } else { 937 } else {
940 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n", 938 netif_err(efx, probe, efx->net_dev, "invalid MAC address %pM\n",
941 efx->mac_address); 939 perm_addr);
942 if (!allow_bad_hwaddr) { 940 if (!allow_bad_hwaddr) {
943 rc = -EINVAL; 941 rc = -EINVAL;
944 goto err; 942 goto err;
@@ -1405,11 +1403,11 @@ static void efx_start_all(struct efx_nic *efx)
1405 * restart the transmit interface early so the watchdog timer stops */ 1403 * restart the transmit interface early so the watchdog timer stops */
1406 efx_start_port(efx); 1404 efx_start_port(efx);
1407 1405
1408 efx_for_each_channel(channel, efx) { 1406 if (efx_dev_registered(efx))
1409 if (efx_dev_registered(efx)) 1407 netif_tx_wake_all_queues(efx->net_dev);
1410 efx_wake_queue(channel); 1408
1409 efx_for_each_channel(channel, efx)
1411 efx_start_channel(channel); 1410 efx_start_channel(channel);
1412 }
1413 1411
1414 if (efx->legacy_irq) 1412 if (efx->legacy_irq)
1415 efx->legacy_irq_enabled = true; 1413 efx->legacy_irq_enabled = true;
@@ -1497,9 +1495,7 @@ static void efx_stop_all(struct efx_nic *efx)
1497 /* Stop the kernel transmit interface late, so the watchdog 1495 /* Stop the kernel transmit interface late, so the watchdog
1498 * timer isn't ticking over the flush */ 1496 * timer isn't ticking over the flush */
1499 if (efx_dev_registered(efx)) { 1497 if (efx_dev_registered(efx)) {
1500 struct efx_channel *channel; 1498 netif_tx_stop_all_queues(efx->net_dev);
1501 efx_for_each_channel(channel, efx)
1502 efx_stop_queue(channel);
1503 netif_tx_lock_bh(efx->net_dev); 1499 netif_tx_lock_bh(efx->net_dev);
1504 netif_tx_unlock_bh(efx->net_dev); 1500 netif_tx_unlock_bh(efx->net_dev);
1505 } 1501 }
@@ -1895,6 +1891,7 @@ static DEVICE_ATTR(phy_type, 0644, show_phy_type, NULL);
1895static int efx_register_netdev(struct efx_nic *efx) 1891static int efx_register_netdev(struct efx_nic *efx)
1896{ 1892{
1897 struct net_device *net_dev = efx->net_dev; 1893 struct net_device *net_dev = efx->net_dev;
1894 struct efx_channel *channel;
1898 int rc; 1895 int rc;
1899 1896
1900 net_dev->watchdog_timeo = 5 * HZ; 1897 net_dev->watchdog_timeo = 5 * HZ;
@@ -1917,6 +1914,14 @@ static int efx_register_netdev(struct efx_nic *efx)
1917 if (rc) 1914 if (rc)
1918 goto fail_locked; 1915 goto fail_locked;
1919 1916
1917 efx_for_each_channel(channel, efx) {
1918 struct efx_tx_queue *tx_queue;
1919 efx_for_each_channel_tx_queue(tx_queue, channel) {
1920 tx_queue->core_txq = netdev_get_tx_queue(
1921 efx->net_dev, tx_queue->queue / EFX_TXQ_TYPES);
1922 }
1923 }
1924
1920 /* Always start with carrier off; PHY events will detect the link */ 1925 /* Always start with carrier off; PHY events will detect the link */
1921 netif_carrier_off(efx->net_dev); 1926 netif_carrier_off(efx->net_dev);
1922 1927
@@ -1980,7 +1985,6 @@ void efx_reset_down(struct efx_nic *efx, enum reset_type method)
1980 1985
1981 efx_stop_all(efx); 1986 efx_stop_all(efx);
1982 mutex_lock(&efx->mac_lock); 1987 mutex_lock(&efx->mac_lock);
1983 mutex_lock(&efx->spi_lock);
1984 1988
1985 efx_fini_channels(efx); 1989 efx_fini_channels(efx);
1986 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE) 1990 if (efx->port_initialized && method != RESET_TYPE_INVISIBLE)
@@ -2022,7 +2026,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2022 efx_init_channels(efx); 2026 efx_init_channels(efx);
2023 efx_restore_filters(efx); 2027 efx_restore_filters(efx);
2024 2028
2025 mutex_unlock(&efx->spi_lock);
2026 mutex_unlock(&efx->mac_lock); 2029 mutex_unlock(&efx->mac_lock);
2027 2030
2028 efx_start_all(efx); 2031 efx_start_all(efx);
@@ -2032,7 +2035,6 @@ int efx_reset_up(struct efx_nic *efx, enum reset_type method, bool ok)
2032fail: 2035fail:
2033 efx->port_initialized = false; 2036 efx->port_initialized = false;
2034 2037
2035 mutex_unlock(&efx->spi_lock);
2036 mutex_unlock(&efx->mac_lock); 2038 mutex_unlock(&efx->mac_lock);
2037 2039
2038 return rc; 2040 return rc;
@@ -2220,8 +2222,6 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
2220 /* Initialise common structures */ 2222 /* Initialise common structures */
2221 memset(efx, 0, sizeof(*efx)); 2223 memset(efx, 0, sizeof(*efx));
2222 spin_lock_init(&efx->biu_lock); 2224 spin_lock_init(&efx->biu_lock);
2223 mutex_init(&efx->mdio_lock);
2224 mutex_init(&efx->spi_lock);
2225#ifdef CONFIG_SFC_MTD 2225#ifdef CONFIG_SFC_MTD
2226 INIT_LIST_HEAD(&efx->mtd_list); 2226 INIT_LIST_HEAD(&efx->mtd_list);
2227#endif 2227#endif
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index 10a1bf40da96..d43a7e5212b1 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -36,8 +36,6 @@ efx_hard_start_xmit(struct sk_buff *skb, struct net_device *net_dev);
36extern netdev_tx_t 36extern netdev_tx_t
37efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb); 37efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb);
38extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index); 38extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
39extern void efx_stop_queue(struct efx_channel *channel);
40extern void efx_wake_queue(struct efx_channel *channel);
41 39
42/* RX */ 40/* RX */
43extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue); 41extern int efx_probe_rx_queue(struct efx_rx_queue *rx_queue);
@@ -74,9 +72,8 @@ extern int efx_filter_insert_filter(struct efx_nic *efx,
74 bool replace); 72 bool replace);
75extern int efx_filter_remove_filter(struct efx_nic *efx, 73extern int efx_filter_remove_filter(struct efx_nic *efx,
76 struct efx_filter_spec *spec); 74 struct efx_filter_spec *spec);
77extern void efx_filter_table_clear(struct efx_nic *efx, 75extern void efx_filter_clear_rx(struct efx_nic *efx,
78 enum efx_filter_table_id table_id, 76 enum efx_filter_priority priority);
79 enum efx_filter_priority priority);
80 77
81/* Channels */ 78/* Channels */
82extern void efx_process_channel_now(struct efx_channel *channel); 79extern void efx_process_channel_now(struct efx_channel *channel);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index edb9d16b8b47..0e8bb19ed60d 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -11,14 +11,13 @@
11#include <linux/netdevice.h> 11#include <linux/netdevice.h>
12#include <linux/ethtool.h> 12#include <linux/ethtool.h>
13#include <linux/rtnetlink.h> 13#include <linux/rtnetlink.h>
14#include <linux/in.h>
14#include "net_driver.h" 15#include "net_driver.h"
15#include "workarounds.h" 16#include "workarounds.h"
16#include "selftest.h" 17#include "selftest.h"
17#include "efx.h" 18#include "efx.h"
18#include "filter.h" 19#include "filter.h"
19#include "nic.h" 20#include "nic.h"
20#include "spi.h"
21#include "mdio_10g.h"
22 21
23struct ethtool_string { 22struct ethtool_string {
24 char name[ETH_GSTRING_LEN]; 23 char name[ETH_GSTRING_LEN];
@@ -560,12 +559,8 @@ static int efx_ethtool_set_flags(struct net_device *net_dev, u32 data)
560 if (rc) 559 if (rc)
561 return rc; 560 return rc;
562 561
563 if (!(data & ETH_FLAG_NTUPLE)) { 562 if (!(data & ETH_FLAG_NTUPLE))
564 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, 563 efx_filter_clear_rx(efx, EFX_FILTER_PRI_MANUAL);
565 EFX_FILTER_PRI_MANUAL);
566 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC,
567 EFX_FILTER_PRI_MANUAL);
568 }
569 564
570 return 0; 565 return 0;
571} 566}
@@ -584,6 +579,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
584 goto fail1; 579 goto fail1;
585 } 580 }
586 581
582 netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
583 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
584
587 /* We need rx buffers and interrupts. */ 585 /* We need rx buffers and interrupts. */
588 already_up = (efx->net_dev->flags & IFF_UP); 586 already_up = (efx->net_dev->flags & IFF_UP);
589 if (!already_up) { 587 if (!already_up) {
@@ -602,9 +600,9 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
602 if (!already_up) 600 if (!already_up)
603 dev_close(efx->net_dev); 601 dev_close(efx->net_dev);
604 602
605 netif_dbg(efx, drv, efx->net_dev, "%s %sline self-tests\n", 603 netif_info(efx, drv, efx->net_dev, "%s %sline self-tests\n",
606 rc == 0 ? "passed" : "failed", 604 rc == 0 ? "passed" : "failed",
607 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on"); 605 (test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
608 606
609 fail2: 607 fail2:
610 fail1: 608 fail1:
@@ -622,68 +620,6 @@ static int efx_ethtool_nway_reset(struct net_device *net_dev)
622 return mdio45_nway_restart(&efx->mdio); 620 return mdio45_nway_restart(&efx->mdio);
623} 621}
624 622
625static u32 efx_ethtool_get_link(struct net_device *net_dev)
626{
627 struct efx_nic *efx = netdev_priv(net_dev);
628
629 return efx->link_state.up;
630}
631
632static int efx_ethtool_get_eeprom_len(struct net_device *net_dev)
633{
634 struct efx_nic *efx = netdev_priv(net_dev);
635 struct efx_spi_device *spi = efx->spi_eeprom;
636
637 if (!spi)
638 return 0;
639 return min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
640 min(spi->size, EFX_EEPROM_BOOTCONFIG_START);
641}
642
643static int efx_ethtool_get_eeprom(struct net_device *net_dev,
644 struct ethtool_eeprom *eeprom, u8 *buf)
645{
646 struct efx_nic *efx = netdev_priv(net_dev);
647 struct efx_spi_device *spi = efx->spi_eeprom;
648 size_t len;
649 int rc;
650
651 rc = mutex_lock_interruptible(&efx->spi_lock);
652 if (rc)
653 return rc;
654 rc = falcon_spi_read(efx, spi,
655 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
656 eeprom->len, &len, buf);
657 mutex_unlock(&efx->spi_lock);
658
659 eeprom->magic = EFX_ETHTOOL_EEPROM_MAGIC;
660 eeprom->len = len;
661 return rc;
662}
663
664static int efx_ethtool_set_eeprom(struct net_device *net_dev,
665 struct ethtool_eeprom *eeprom, u8 *buf)
666{
667 struct efx_nic *efx = netdev_priv(net_dev);
668 struct efx_spi_device *spi = efx->spi_eeprom;
669 size_t len;
670 int rc;
671
672 if (eeprom->magic != EFX_ETHTOOL_EEPROM_MAGIC)
673 return -EINVAL;
674
675 rc = mutex_lock_interruptible(&efx->spi_lock);
676 if (rc)
677 return rc;
678 rc = falcon_spi_write(efx, spi,
679 eeprom->offset + EFX_EEPROM_BOOTCONFIG_START,
680 eeprom->len, &len, buf);
681 mutex_unlock(&efx->spi_lock);
682
683 eeprom->len = len;
684 return rc;
685}
686
687static int efx_ethtool_get_coalesce(struct net_device *net_dev, 623static int efx_ethtool_get_coalesce(struct net_device *net_dev,
688 struct ethtool_coalesce *coalesce) 624 struct ethtool_coalesce *coalesce)
689{ 625{
@@ -978,6 +914,7 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
978 struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec; 914 struct ethhdr *mac_entry = &ntuple->fs.h_u.ether_spec;
979 struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec; 915 struct ethhdr *mac_mask = &ntuple->fs.m_u.ether_spec;
980 struct efx_filter_spec filter; 916 struct efx_filter_spec filter;
917 int rc;
981 918
982 /* Range-check action */ 919 /* Range-check action */
983 if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR || 920 if (ntuple->fs.action < ETHTOOL_RXNTUPLE_ACTION_CLEAR ||
@@ -987,9 +924,16 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
987 if (~ntuple->fs.data_mask) 924 if (~ntuple->fs.data_mask)
988 return -EINVAL; 925 return -EINVAL;
989 926
927 efx_filter_init_rx(&filter, EFX_FILTER_PRI_MANUAL, 0,
928 (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP) ?
929 0xfff : ntuple->fs.action);
930
990 switch (ntuple->fs.flow_type) { 931 switch (ntuple->fs.flow_type) {
991 case TCP_V4_FLOW: 932 case TCP_V4_FLOW:
992 case UDP_V4_FLOW: 933 case UDP_V4_FLOW: {
934 u8 proto = (ntuple->fs.flow_type == TCP_V4_FLOW ?
935 IPPROTO_TCP : IPPROTO_UDP);
936
993 /* Must match all of destination, */ 937 /* Must match all of destination, */
994 if (ip_mask->ip4dst | ip_mask->pdst) 938 if (ip_mask->ip4dst | ip_mask->pdst)
995 return -EINVAL; 939 return -EINVAL;
@@ -1001,7 +945,22 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
1001 /* and nothing else */ 945 /* and nothing else */
1002 if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask) 946 if ((u8)~ip_mask->tos | (u16)~ntuple->fs.vlan_tag_mask)
1003 return -EINVAL; 947 return -EINVAL;
948
949 if (!ip_mask->ip4src)
950 rc = efx_filter_set_ipv4_full(&filter, proto,
951 ip_entry->ip4dst,
952 ip_entry->pdst,
953 ip_entry->ip4src,
954 ip_entry->psrc);
955 else
956 rc = efx_filter_set_ipv4_local(&filter, proto,
957 ip_entry->ip4dst,
958 ip_entry->pdst);
959 if (rc)
960 return rc;
1004 break; 961 break;
962 }
963
1005 case ETHER_FLOW: 964 case ETHER_FLOW:
1006 /* Must match all of destination, */ 965 /* Must match all of destination, */
1007 if (!is_zero_ether_addr(mac_mask->h_dest)) 966 if (!is_zero_ether_addr(mac_mask->h_dest))
@@ -1014,58 +973,24 @@ static int efx_ethtool_set_rx_ntuple(struct net_device *net_dev,
1014 if (!is_broadcast_ether_addr(mac_mask->h_source) || 973 if (!is_broadcast_ether_addr(mac_mask->h_source) ||
1015 mac_mask->h_proto != htons(0xffff)) 974 mac_mask->h_proto != htons(0xffff))
1016 return -EINVAL; 975 return -EINVAL;
976
977 rc = efx_filter_set_eth_local(
978 &filter,
979 (ntuple->fs.vlan_tag_mask == 0xf000) ?
980 ntuple->fs.vlan_tag : EFX_FILTER_VID_UNSPEC,
981 mac_entry->h_dest);
982 if (rc)
983 return rc;
1017 break; 984 break;
985
1018 default: 986 default:
1019 return -EINVAL; 987 return -EINVAL;
1020 } 988 }
1021 989
1022 filter.priority = EFX_FILTER_PRI_MANUAL; 990 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR)
1023 filter.flags = 0;
1024
1025 switch (ntuple->fs.flow_type) {
1026 case TCP_V4_FLOW:
1027 if (!ip_mask->ip4src)
1028 efx_filter_set_rx_tcp_full(&filter,
1029 htonl(ip_entry->ip4src),
1030 htons(ip_entry->psrc),
1031 htonl(ip_entry->ip4dst),
1032 htons(ip_entry->pdst));
1033 else
1034 efx_filter_set_rx_tcp_wild(&filter,
1035 htonl(ip_entry->ip4dst),
1036 htons(ip_entry->pdst));
1037 break;
1038 case UDP_V4_FLOW:
1039 if (!ip_mask->ip4src)
1040 efx_filter_set_rx_udp_full(&filter,
1041 htonl(ip_entry->ip4src),
1042 htons(ip_entry->psrc),
1043 htonl(ip_entry->ip4dst),
1044 htons(ip_entry->pdst));
1045 else
1046 efx_filter_set_rx_udp_wild(&filter,
1047 htonl(ip_entry->ip4dst),
1048 htons(ip_entry->pdst));
1049 break;
1050 case ETHER_FLOW:
1051 if (ntuple->fs.vlan_tag_mask == 0xf000)
1052 efx_filter_set_rx_mac_full(&filter,
1053 ntuple->fs.vlan_tag & 0xfff,
1054 mac_entry->h_dest);
1055 else
1056 efx_filter_set_rx_mac_wild(&filter, mac_entry->h_dest);
1057 break;
1058 }
1059
1060 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_CLEAR) {
1061 return efx_filter_remove_filter(efx, &filter); 991 return efx_filter_remove_filter(efx, &filter);
1062 } else { 992 else
1063 if (ntuple->fs.action == ETHTOOL_RXNTUPLE_ACTION_DROP)
1064 filter.dmaq_id = 0xfff;
1065 else
1066 filter.dmaq_id = ntuple->fs.action;
1067 return efx_filter_insert_filter(efx, &filter, true); 993 return efx_filter_insert_filter(efx, &filter, true);
1068 }
1069} 994}
1070 995
1071static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev, 996static int efx_ethtool_get_rxfh_indir(struct net_device *net_dev,
@@ -1115,10 +1040,7 @@ const struct ethtool_ops efx_ethtool_ops = {
1115 .get_msglevel = efx_ethtool_get_msglevel, 1040 .get_msglevel = efx_ethtool_get_msglevel,
1116 .set_msglevel = efx_ethtool_set_msglevel, 1041 .set_msglevel = efx_ethtool_set_msglevel,
1117 .nway_reset = efx_ethtool_nway_reset, 1042 .nway_reset = efx_ethtool_nway_reset,
1118 .get_link = efx_ethtool_get_link, 1043 .get_link = ethtool_op_get_link,
1119 .get_eeprom_len = efx_ethtool_get_eeprom_len,
1120 .get_eeprom = efx_ethtool_get_eeprom,
1121 .set_eeprom = efx_ethtool_set_eeprom,
1122 .get_coalesce = efx_ethtool_get_coalesce, 1044 .get_coalesce = efx_ethtool_get_coalesce,
1123 .set_coalesce = efx_ethtool_set_coalesce, 1045 .set_coalesce = efx_ethtool_set_coalesce,
1124 .get_ringparam = efx_ethtool_get_ringparam, 1046 .get_ringparam = efx_ethtool_get_ringparam,
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index 267019bb2b15..70e4f7dcce81 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -24,7 +24,6 @@
24#include "nic.h" 24#include "nic.h"
25#include "regs.h" 25#include "regs.h"
26#include "io.h" 26#include "io.h"
27#include "mdio_10g.h"
28#include "phy.h" 27#include "phy.h"
29#include "workarounds.h" 28#include "workarounds.h"
30 29
@@ -255,7 +254,6 @@ int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi,
255 /* Input validation */ 254 /* Input validation */
256 if (len > FALCON_SPI_MAX_LEN) 255 if (len > FALCON_SPI_MAX_LEN)
257 return -EINVAL; 256 return -EINVAL;
258 BUG_ON(!mutex_is_locked(&efx->spi_lock));
259 257
260 /* Check that previous command is not still running */ 258 /* Check that previous command is not still running */
261 rc = falcon_spi_poll(efx); 259 rc = falcon_spi_poll(efx);
@@ -719,6 +717,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
719 int prtad, int devad, u16 addr, u16 value) 717 int prtad, int devad, u16 addr, u16 value)
720{ 718{
721 struct efx_nic *efx = netdev_priv(net_dev); 719 struct efx_nic *efx = netdev_priv(net_dev);
720 struct falcon_nic_data *nic_data = efx->nic_data;
722 efx_oword_t reg; 721 efx_oword_t reg;
723 int rc; 722 int rc;
724 723
@@ -726,7 +725,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
726 "writing MDIO %d register %d.%d with 0x%04x\n", 725 "writing MDIO %d register %d.%d with 0x%04x\n",
727 prtad, devad, addr, value); 726 prtad, devad, addr, value);
728 727
729 mutex_lock(&efx->mdio_lock); 728 mutex_lock(&nic_data->mdio_lock);
730 729
731 /* Check MDIO not currently being accessed */ 730 /* Check MDIO not currently being accessed */
732 rc = falcon_gmii_wait(efx); 731 rc = falcon_gmii_wait(efx);
@@ -762,7 +761,7 @@ static int falcon_mdio_write(struct net_device *net_dev,
762 } 761 }
763 762
764out: 763out:
765 mutex_unlock(&efx->mdio_lock); 764 mutex_unlock(&nic_data->mdio_lock);
766 return rc; 765 return rc;
767} 766}
768 767
@@ -771,10 +770,11 @@ static int falcon_mdio_read(struct net_device *net_dev,
771 int prtad, int devad, u16 addr) 770 int prtad, int devad, u16 addr)
772{ 771{
773 struct efx_nic *efx = netdev_priv(net_dev); 772 struct efx_nic *efx = netdev_priv(net_dev);
773 struct falcon_nic_data *nic_data = efx->nic_data;
774 efx_oword_t reg; 774 efx_oword_t reg;
775 int rc; 775 int rc;
776 776
777 mutex_lock(&efx->mdio_lock); 777 mutex_lock(&nic_data->mdio_lock);
778 778
779 /* Check MDIO not currently being accessed */ 779 /* Check MDIO not currently being accessed */
780 rc = falcon_gmii_wait(efx); 780 rc = falcon_gmii_wait(efx);
@@ -813,7 +813,7 @@ static int falcon_mdio_read(struct net_device *net_dev,
813 } 813 }
814 814
815out: 815out:
816 mutex_unlock(&efx->mdio_lock); 816 mutex_unlock(&nic_data->mdio_lock);
817 return rc; 817 return rc;
818} 818}
819 819
@@ -841,6 +841,7 @@ static int falcon_probe_port(struct efx_nic *efx)
841 } 841 }
842 842
843 /* Fill out MDIO structure and loopback modes */ 843 /* Fill out MDIO structure and loopback modes */
844 mutex_init(&nic_data->mdio_lock);
844 efx->mdio.mdio_read = falcon_mdio_read; 845 efx->mdio.mdio_read = falcon_mdio_read;
845 efx->mdio.mdio_write = falcon_mdio_write; 846 efx->mdio.mdio_write = falcon_mdio_write;
846 rc = efx->phy_op->probe(efx); 847 rc = efx->phy_op->probe(efx);
@@ -880,6 +881,41 @@ static void falcon_remove_port(struct efx_nic *efx)
880 efx_nic_free_buffer(efx, &efx->stats_buffer); 881 efx_nic_free_buffer(efx, &efx->stats_buffer);
881} 882}
882 883
884/* Global events are basically PHY events */
885static bool
886falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
887{
888 struct efx_nic *efx = channel->efx;
889 struct falcon_nic_data *nic_data = efx->nic_data;
890
891 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
892 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
893 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR))
894 /* Ignored */
895 return true;
896
897 if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) &&
898 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
899 nic_data->xmac_poll_required = true;
900 return true;
901 }
902
903 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
904 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
905 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
906 netif_err(efx, rx_err, efx->net_dev,
907 "channel %d seen global RX_RESET event. Resetting.\n",
908 channel->channel);
909
910 atomic_inc(&efx->rx_reset);
911 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
912 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
913 return true;
914 }
915
916 return false;
917}
918
883/************************************************************************** 919/**************************************************************************
884 * 920 *
885 * Falcon test code 921 * Falcon test code
@@ -889,6 +925,7 @@ static void falcon_remove_port(struct efx_nic *efx)
889static int 925static int
890falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) 926falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
891{ 927{
928 struct falcon_nic_data *nic_data = efx->nic_data;
892 struct falcon_nvconfig *nvconfig; 929 struct falcon_nvconfig *nvconfig;
893 struct efx_spi_device *spi; 930 struct efx_spi_device *spi;
894 void *region; 931 void *region;
@@ -896,8 +933,11 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
896 __le16 *word, *limit; 933 __le16 *word, *limit;
897 u32 csum; 934 u32 csum;
898 935
899 spi = efx->spi_flash ? efx->spi_flash : efx->spi_eeprom; 936 if (efx_spi_present(&nic_data->spi_flash))
900 if (!spi) 937 spi = &nic_data->spi_flash;
938 else if (efx_spi_present(&nic_data->spi_eeprom))
939 spi = &nic_data->spi_eeprom;
940 else
901 return -EINVAL; 941 return -EINVAL;
902 942
903 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); 943 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
@@ -905,12 +945,13 @@ falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
905 return -ENOMEM; 945 return -ENOMEM;
906 nvconfig = region + FALCON_NVCONFIG_OFFSET; 946 nvconfig = region + FALCON_NVCONFIG_OFFSET;
907 947
908 mutex_lock(&efx->spi_lock); 948 mutex_lock(&nic_data->spi_lock);
909 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); 949 rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region);
910 mutex_unlock(&efx->spi_lock); 950 mutex_unlock(&nic_data->spi_lock);
911 if (rc) { 951 if (rc) {
912 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", 952 netif_err(efx, hw, efx->net_dev, "Failed to read %s\n",
913 efx->spi_flash ? "flash" : "EEPROM"); 953 efx_spi_present(&nic_data->spi_flash) ?
954 "flash" : "EEPROM");
914 rc = -EIO; 955 rc = -EIO;
915 goto out; 956 goto out;
916 } 957 }
@@ -1012,7 +1053,7 @@ static int falcon_b0_test_registers(struct efx_nic *efx)
1012 1053
1013/* Resets NIC to known state. This routine must be called in process 1054/* Resets NIC to known state. This routine must be called in process
1014 * context and is allowed to sleep. */ 1055 * context and is allowed to sleep. */
1015static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) 1056static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1016{ 1057{
1017 struct falcon_nic_data *nic_data = efx->nic_data; 1058 struct falcon_nic_data *nic_data = efx->nic_data;
1018 efx_oword_t glb_ctl_reg_ker; 1059 efx_oword_t glb_ctl_reg_ker;
@@ -1108,6 +1149,18 @@ fail5:
1108 return rc; 1149 return rc;
1109} 1150}
1110 1151
1152static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
1153{
1154 struct falcon_nic_data *nic_data = efx->nic_data;
1155 int rc;
1156
1157 mutex_lock(&nic_data->spi_lock);
1158 rc = __falcon_reset_hw(efx, method);
1159 mutex_unlock(&nic_data->spi_lock);
1160
1161 return rc;
1162}
1163
1111static void falcon_monitor(struct efx_nic *efx) 1164static void falcon_monitor(struct efx_nic *efx)
1112{ 1165{
1113 bool link_changed; 1166 bool link_changed;
@@ -1189,16 +1242,11 @@ static int falcon_reset_sram(struct efx_nic *efx)
1189 return -ETIMEDOUT; 1242 return -ETIMEDOUT;
1190} 1243}
1191 1244
1192static int falcon_spi_device_init(struct efx_nic *efx, 1245static void falcon_spi_device_init(struct efx_nic *efx,
1193 struct efx_spi_device **spi_device_ret, 1246 struct efx_spi_device *spi_device,
1194 unsigned int device_id, u32 device_type) 1247 unsigned int device_id, u32 device_type)
1195{ 1248{
1196 struct efx_spi_device *spi_device;
1197
1198 if (device_type != 0) { 1249 if (device_type != 0) {
1199 spi_device = kzalloc(sizeof(*spi_device), GFP_KERNEL);
1200 if (!spi_device)
1201 return -ENOMEM;
1202 spi_device->device_id = device_id; 1250 spi_device->device_id = device_id;
1203 spi_device->size = 1251 spi_device->size =
1204 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); 1252 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE);
@@ -1215,27 +1263,15 @@ static int falcon_spi_device_init(struct efx_nic *efx,
1215 1 << SPI_DEV_TYPE_FIELD(device_type, 1263 1 << SPI_DEV_TYPE_FIELD(device_type,
1216 SPI_DEV_TYPE_BLOCK_SIZE); 1264 SPI_DEV_TYPE_BLOCK_SIZE);
1217 } else { 1265 } else {
1218 spi_device = NULL; 1266 spi_device->size = 0;
1219 } 1267 }
1220
1221 kfree(*spi_device_ret);
1222 *spi_device_ret = spi_device;
1223 return 0;
1224}
1225
1226static void falcon_remove_spi_devices(struct efx_nic *efx)
1227{
1228 kfree(efx->spi_eeprom);
1229 efx->spi_eeprom = NULL;
1230 kfree(efx->spi_flash);
1231 efx->spi_flash = NULL;
1232} 1268}
1233 1269
1234/* Extract non-volatile configuration */ 1270/* Extract non-volatile configuration */
1235static int falcon_probe_nvconfig(struct efx_nic *efx) 1271static int falcon_probe_nvconfig(struct efx_nic *efx)
1236{ 1272{
1273 struct falcon_nic_data *nic_data = efx->nic_data;
1237 struct falcon_nvconfig *nvconfig; 1274 struct falcon_nvconfig *nvconfig;
1238 int board_rev;
1239 int rc; 1275 int rc;
1240 1276
1241 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); 1277 nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL);
@@ -1243,55 +1279,32 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1243 return -ENOMEM; 1279 return -ENOMEM;
1244 1280
1245 rc = falcon_read_nvram(efx, nvconfig); 1281 rc = falcon_read_nvram(efx, nvconfig);
1246 if (rc == -EINVAL) { 1282 if (rc)
1247 netif_err(efx, probe, efx->net_dev, 1283 goto out;
1248 "NVRAM is invalid therefore using defaults\n"); 1284
1249 efx->phy_type = PHY_TYPE_NONE; 1285 efx->phy_type = nvconfig->board_v2.port0_phy_type;
1250 efx->mdio.prtad = MDIO_PRTAD_NONE; 1286 efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr;
1251 board_rev = 0; 1287
1252 rc = 0; 1288 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1253 } else if (rc) { 1289 falcon_spi_device_init(
1254 goto fail1; 1290 efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1255 } else { 1291 le32_to_cpu(nvconfig->board_v3
1256 struct falcon_nvconfig_board_v2 *v2 = &nvconfig->board_v2; 1292 .spi_device_type[FFE_AB_SPI_DEVICE_FLASH]));
1257 struct falcon_nvconfig_board_v3 *v3 = &nvconfig->board_v3; 1293 falcon_spi_device_init(
1258 1294 efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1259 efx->phy_type = v2->port0_phy_type; 1295 le32_to_cpu(nvconfig->board_v3
1260 efx->mdio.prtad = v2->port0_phy_addr; 1296 .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM]));
1261 board_rev = le16_to_cpu(v2->board_revision);
1262
1263 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
1264 rc = falcon_spi_device_init(
1265 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
1266 le32_to_cpu(v3->spi_device_type
1267 [FFE_AB_SPI_DEVICE_FLASH]));
1268 if (rc)
1269 goto fail2;
1270 rc = falcon_spi_device_init(
1271 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
1272 le32_to_cpu(v3->spi_device_type
1273 [FFE_AB_SPI_DEVICE_EEPROM]));
1274 if (rc)
1275 goto fail2;
1276 }
1277 } 1297 }
1278 1298
1279 /* Read the MAC addresses */ 1299 /* Read the MAC addresses */
1280 memcpy(efx->mac_address, nvconfig->mac_address[0], ETH_ALEN); 1300 memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN);
1281 1301
1282 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", 1302 netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n",
1283 efx->phy_type, efx->mdio.prtad); 1303 efx->phy_type, efx->mdio.prtad);
1284 1304
1285 rc = falcon_probe_board(efx, board_rev); 1305 rc = falcon_probe_board(efx,
1286 if (rc) 1306 le16_to_cpu(nvconfig->board_v2.board_revision));
1287 goto fail2; 1307out:
1288
1289 kfree(nvconfig);
1290 return 0;
1291
1292 fail2:
1293 falcon_remove_spi_devices(efx);
1294 fail1:
1295 kfree(nvconfig); 1308 kfree(nvconfig);
1296 return rc; 1309 return rc;
1297} 1310}
@@ -1299,6 +1312,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
1299/* Probe all SPI devices on the NIC */ 1312/* Probe all SPI devices on the NIC */
1300static void falcon_probe_spi_devices(struct efx_nic *efx) 1313static void falcon_probe_spi_devices(struct efx_nic *efx)
1301{ 1314{
1315 struct falcon_nic_data *nic_data = efx->nic_data;
1302 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 1316 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
1303 int boot_dev; 1317 int boot_dev;
1304 1318
@@ -1327,12 +1341,14 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
1327 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); 1341 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
1328 } 1342 }
1329 1343
1344 mutex_init(&nic_data->spi_lock);
1345
1330 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) 1346 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
1331 falcon_spi_device_init(efx, &efx->spi_flash, 1347 falcon_spi_device_init(efx, &nic_data->spi_flash,
1332 FFE_AB_SPI_DEVICE_FLASH, 1348 FFE_AB_SPI_DEVICE_FLASH,
1333 default_flash_type); 1349 default_flash_type);
1334 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) 1350 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
1335 falcon_spi_device_init(efx, &efx->spi_eeprom, 1351 falcon_spi_device_init(efx, &nic_data->spi_eeprom,
1336 FFE_AB_SPI_DEVICE_EEPROM, 1352 FFE_AB_SPI_DEVICE_EEPROM,
1337 large_eeprom_type); 1353 large_eeprom_type);
1338} 1354}
@@ -1397,7 +1413,7 @@ static int falcon_probe_nic(struct efx_nic *efx)
1397 } 1413 }
1398 1414
1399 /* Now we can reset the NIC */ 1415 /* Now we can reset the NIC */
1400 rc = falcon_reset_hw(efx, RESET_TYPE_ALL); 1416 rc = __falcon_reset_hw(efx, RESET_TYPE_ALL);
1401 if (rc) { 1417 if (rc) {
1402 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); 1418 netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n");
1403 goto fail3; 1419 goto fail3;
@@ -1419,8 +1435,11 @@ static int falcon_probe_nic(struct efx_nic *efx)
1419 1435
1420 /* Read in the non-volatile configuration */ 1436 /* Read in the non-volatile configuration */
1421 rc = falcon_probe_nvconfig(efx); 1437 rc = falcon_probe_nvconfig(efx);
1422 if (rc) 1438 if (rc) {
1439 if (rc == -EINVAL)
1440 netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n");
1423 goto fail5; 1441 goto fail5;
1442 }
1424 1443
1425 /* Initialise I2C adapter */ 1444 /* Initialise I2C adapter */
1426 board = falcon_board(efx); 1445 board = falcon_board(efx);
@@ -1452,7 +1471,6 @@ static int falcon_probe_nic(struct efx_nic *efx)
1452 BUG_ON(i2c_del_adapter(&board->i2c_adap)); 1471 BUG_ON(i2c_del_adapter(&board->i2c_adap));
1453 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 1472 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1454 fail5: 1473 fail5:
1455 falcon_remove_spi_devices(efx);
1456 efx_nic_free_buffer(efx, &efx->irq_status); 1474 efx_nic_free_buffer(efx, &efx->irq_status);
1457 fail4: 1475 fail4:
1458 fail3: 1476 fail3:
@@ -1606,10 +1624,9 @@ static void falcon_remove_nic(struct efx_nic *efx)
1606 BUG_ON(rc); 1624 BUG_ON(rc);
1607 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); 1625 memset(&board->i2c_adap, 0, sizeof(board->i2c_adap));
1608 1626
1609 falcon_remove_spi_devices(efx);
1610 efx_nic_free_buffer(efx, &efx->irq_status); 1627 efx_nic_free_buffer(efx, &efx->irq_status);
1611 1628
1612 falcon_reset_hw(efx, RESET_TYPE_ALL); 1629 __falcon_reset_hw(efx, RESET_TYPE_ALL);
1613 1630
1614 /* Release the second function after the reset */ 1631 /* Release the second function after the reset */
1615 if (nic_data->pci_dev2) { 1632 if (nic_data->pci_dev2) {
@@ -1720,6 +1737,7 @@ struct efx_nic_type falcon_a1_nic_type = {
1720 .reset = falcon_reset_hw, 1737 .reset = falcon_reset_hw,
1721 .probe_port = falcon_probe_port, 1738 .probe_port = falcon_probe_port,
1722 .remove_port = falcon_remove_port, 1739 .remove_port = falcon_remove_port,
1740 .handle_global_event = falcon_handle_global_event,
1723 .prepare_flush = falcon_prepare_flush, 1741 .prepare_flush = falcon_prepare_flush,
1724 .update_stats = falcon_update_nic_stats, 1742 .update_stats = falcon_update_nic_stats,
1725 .start_stats = falcon_start_nic_stats, 1743 .start_stats = falcon_start_nic_stats,
@@ -1760,6 +1778,7 @@ struct efx_nic_type falcon_b0_nic_type = {
1760 .reset = falcon_reset_hw, 1778 .reset = falcon_reset_hw,
1761 .probe_port = falcon_probe_port, 1779 .probe_port = falcon_probe_port,
1762 .remove_port = falcon_remove_port, 1780 .remove_port = falcon_remove_port,
1781 .handle_global_event = falcon_handle_global_event,
1763 .prepare_flush = falcon_prepare_flush, 1782 .prepare_flush = falcon_prepare_flush,
1764 .update_stats = falcon_update_nic_stats, 1783 .update_stats = falcon_update_nic_stats,
1765 .start_stats = falcon_start_nic_stats, 1784 .start_stats = falcon_start_nic_stats,
diff --git a/drivers/net/sfc/falcon_boards.c b/drivers/net/sfc/falcon_boards.c
index cfc6a5b5a477..2dd16f0b3ced 100644
--- a/drivers/net/sfc/falcon_boards.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -13,8 +13,6 @@
13#include "phy.h" 13#include "phy.h"
14#include "efx.h" 14#include "efx.h"
15#include "nic.h" 15#include "nic.h"
16#include "regs.h"
17#include "io.h"
18#include "workarounds.h" 16#include "workarounds.h"
19 17
20/* Macros for unpacking the board revision */ 18/* Macros for unpacking the board revision */
@@ -30,17 +28,28 @@
30#define FALCON_BOARD_SFN4112F 0x52 28#define FALCON_BOARD_SFN4112F 0x52
31 29
32/* Board temperature is about 15°C above ambient when air flow is 30/* Board temperature is about 15°C above ambient when air flow is
33 * limited. */ 31 * limited. The maximum acceptable ambient temperature varies
32 * depending on the PHY specifications but the critical temperature
33 * above which we should shut down to avoid damage is 80°C. */
34#define FALCON_BOARD_TEMP_BIAS 15 34#define FALCON_BOARD_TEMP_BIAS 15
35#define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS)
35 36
36/* SFC4000 datasheet says: 'The maximum permitted junction temperature 37/* SFC4000 datasheet says: 'The maximum permitted junction temperature
37 * is 125°C; the thermal design of the environment for the SFC4000 38 * is 125°C; the thermal design of the environment for the SFC4000
38 * should aim to keep this well below 100°C.' */ 39 * should aim to keep this well below 100°C.' */
40#define FALCON_JUNC_TEMP_MIN 0
39#define FALCON_JUNC_TEMP_MAX 90 41#define FALCON_JUNC_TEMP_MAX 90
42#define FALCON_JUNC_TEMP_CRIT 125
40 43
41/***************************************************************************** 44/*****************************************************************************
42 * Support for LM87 sensor chip used on several boards 45 * Support for LM87 sensor chip used on several boards
43 */ 46 */
47#define LM87_REG_TEMP_HW_INT_LOCK 0x13
48#define LM87_REG_TEMP_HW_EXT_LOCK 0x14
49#define LM87_REG_TEMP_HW_INT 0x17
50#define LM87_REG_TEMP_HW_EXT 0x18
51#define LM87_REG_TEMP_EXT1 0x26
52#define LM87_REG_TEMP_INT 0x27
44#define LM87_REG_ALARMS1 0x41 53#define LM87_REG_ALARMS1 0x41
45#define LM87_REG_ALARMS2 0x42 54#define LM87_REG_ALARMS2 0x42
46#define LM87_IN_LIMITS(nr, _min, _max) \ 55#define LM87_IN_LIMITS(nr, _min, _max) \
@@ -57,6 +66,27 @@
57 66
58#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) 67#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
59 68
69static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values)
70{
71 while (*reg_values) {
72 u8 reg = *reg_values++;
73 u8 value = *reg_values++;
74 int rc = i2c_smbus_write_byte_data(client, reg, value);
75 if (rc)
76 return rc;
77 }
78 return 0;
79}
80
81static const u8 falcon_lm87_common_regs[] = {
82 LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT,
83 LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT,
84 LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX),
85 LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT,
86 LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT,
87 0
88};
89
60static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info, 90static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
61 const u8 *reg_values) 91 const u8 *reg_values)
62{ 92{
@@ -67,13 +97,16 @@ static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
67 if (!client) 97 if (!client)
68 return -EIO; 98 return -EIO;
69 99
70 while (*reg_values) { 100 /* Read-to-clear alarm/interrupt status */
71 u8 reg = *reg_values++; 101 i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
72 u8 value = *reg_values++; 102 i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
73 rc = i2c_smbus_write_byte_data(client, reg, value); 103
74 if (rc) 104 rc = efx_poke_lm87(client, reg_values);
75 goto err; 105 if (rc)
76 } 106 goto err;
107 rc = efx_poke_lm87(client, falcon_lm87_common_regs);
108 if (rc)
109 goto err;
77 110
78 board->hwmon_client = client; 111 board->hwmon_client = client;
79 return 0; 112 return 0;
@@ -91,36 +124,56 @@ static void efx_fini_lm87(struct efx_nic *efx)
91static int efx_check_lm87(struct efx_nic *efx, unsigned mask) 124static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
92{ 125{
93 struct i2c_client *client = falcon_board(efx)->hwmon_client; 126 struct i2c_client *client = falcon_board(efx)->hwmon_client;
94 s32 alarms1, alarms2; 127 bool temp_crit, elec_fault, is_failure;
128 u16 alarms;
129 s32 reg;
95 130
96 /* If link is up then do not monitor temperature */ 131 /* If link is up then do not monitor temperature */
97 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up) 132 if (EFX_WORKAROUND_7884(efx) && efx->link_state.up)
98 return 0; 133 return 0;
99 134
100 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); 135 reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
101 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); 136 if (reg < 0)
102 if (alarms1 < 0) 137 return reg;
103 return alarms1; 138 alarms = reg;
104 if (alarms2 < 0) 139 reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
105 return alarms2; 140 if (reg < 0)
106 alarms1 &= mask; 141 return reg;
107 alarms2 &= mask >> 8; 142 alarms |= reg << 8;
108 if (alarms1 || alarms2) { 143 alarms &= mask;
144
145 temp_crit = false;
146 if (alarms & LM87_ALARM_TEMP_INT) {
147 reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT);
148 if (reg < 0)
149 return reg;
150 if (reg > FALCON_BOARD_TEMP_CRIT)
151 temp_crit = true;
152 }
153 if (alarms & LM87_ALARM_TEMP_EXT1) {
154 reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1);
155 if (reg < 0)
156 return reg;
157 if (reg > FALCON_JUNC_TEMP_CRIT)
158 temp_crit = true;
159 }
160 elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1);
161 is_failure = temp_crit || elec_fault;
162
163 if (alarms)
109 netif_err(efx, hw, efx->net_dev, 164 netif_err(efx, hw, efx->net_dev,
110 "LM87 detected a hardware failure (status %02x:%02x)" 165 "LM87 detected a hardware %s (status %02x:%02x)"
111 "%s%s%s\n", 166 "%s%s%s%s\n",
112 alarms1, alarms2, 167 is_failure ? "failure" : "problem",
113 (alarms1 & LM87_ALARM_TEMP_INT) ? 168 alarms & 0xff, alarms >> 8,
169 (alarms & LM87_ALARM_TEMP_INT) ?
114 "; board is overheating" : "", 170 "; board is overheating" : "",
115 (alarms1 & LM87_ALARM_TEMP_EXT1) ? 171 (alarms & LM87_ALARM_TEMP_EXT1) ?
116 "; controller is overheating" : "", 172 "; controller is overheating" : "",
117 (alarms1 & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1) 173 temp_crit ? "; reached critical temperature" : "",
118 || alarms2) ? 174 elec_fault ? "; electrical fault" : "");
119 "; electrical fault" : "");
120 return -ERANGE;
121 }
122 175
123 return 0; 176 return is_failure ? -ERANGE : 0;
124} 177}
125 178
126#else /* !CONFIG_SENSORS_LM87 */ 179#else /* !CONFIG_SENSORS_LM87 */
@@ -325,7 +378,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
325 new_mode = old_mode & ~PHY_MODE_SPECIAL; 378 new_mode = old_mode & ~PHY_MODE_SPECIAL;
326 else 379 else
327 new_mode = PHY_MODE_SPECIAL; 380 new_mode = PHY_MODE_SPECIAL;
328 if (old_mode == new_mode) { 381 if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) {
329 err = 0; 382 err = 0;
330 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { 383 } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) {
331 err = -EBUSY; 384 err = -EBUSY;
@@ -362,10 +415,11 @@ static void sfe4001_fini(struct efx_nic *efx)
362 415
363static int sfe4001_check_hw(struct efx_nic *efx) 416static int sfe4001_check_hw(struct efx_nic *efx)
364{ 417{
418 struct falcon_nic_data *nic_data = efx->nic_data;
365 s32 status; 419 s32 status;
366 420
367 /* If XAUI link is up then do not monitor */ 421 /* If XAUI link is up then do not monitor */
368 if (EFX_WORKAROUND_7884(efx) && !efx->xmac_poll_required) 422 if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required)
369 return 0; 423 return 0;
370 424
371 /* Check the powered status of the PHY. Lack of power implies that 425 /* Check the powered status of the PHY. Lack of power implies that
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index b31f595ebb5b..b49e84394641 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -16,7 +16,6 @@
16#include "io.h" 16#include "io.h"
17#include "mac.h" 17#include "mac.h"
18#include "mdio_10g.h" 18#include "mdio_10g.h"
19#include "phy.h"
20#include "workarounds.h" 19#include "workarounds.h"
21 20
22/************************************************************************** 21/**************************************************************************
@@ -88,6 +87,7 @@ int falcon_reset_xaui(struct efx_nic *efx)
88 87
89static void falcon_ack_status_intr(struct efx_nic *efx) 88static void falcon_ack_status_intr(struct efx_nic *efx)
90{ 89{
90 struct falcon_nic_data *nic_data = efx->nic_data;
91 efx_oword_t reg; 91 efx_oword_t reg;
92 92
93 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx)) 93 if ((efx_nic_rev(efx) != EFX_REV_FALCON_B0) || LOOPBACK_INTERNAL(efx))
@@ -99,7 +99,7 @@ static void falcon_ack_status_intr(struct efx_nic *efx)
99 99
100 /* We can only use this interrupt to signal the negative edge of 100 /* We can only use this interrupt to signal the negative edge of
101 * xaui_align [we have to poll the positive edge]. */ 101 * xaui_align [we have to poll the positive edge]. */
102 if (efx->xmac_poll_required) 102 if (nic_data->xmac_poll_required)
103 return; 103 return;
104 104
105 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK); 105 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
@@ -277,12 +277,14 @@ static bool falcon_xmac_check_fault(struct efx_nic *efx)
277 277
278static int falcon_reconfigure_xmac(struct efx_nic *efx) 278static int falcon_reconfigure_xmac(struct efx_nic *efx)
279{ 279{
280 struct falcon_nic_data *nic_data = efx->nic_data;
281
280 falcon_reconfigure_xgxs_core(efx); 282 falcon_reconfigure_xgxs_core(efx);
281 falcon_reconfigure_xmac_core(efx); 283 falcon_reconfigure_xmac_core(efx);
282 284
283 falcon_reconfigure_mac_wrapper(efx); 285 falcon_reconfigure_mac_wrapper(efx);
284 286
285 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5); 287 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 5);
286 falcon_ack_status_intr(efx); 288 falcon_ack_status_intr(efx);
287 289
288 return 0; 290 return 0;
@@ -350,11 +352,13 @@ static void falcon_update_stats_xmac(struct efx_nic *efx)
350 352
351void falcon_poll_xmac(struct efx_nic *efx) 353void falcon_poll_xmac(struct efx_nic *efx)
352{ 354{
355 struct falcon_nic_data *nic_data = efx->nic_data;
356
353 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up || 357 if (!EFX_WORKAROUND_5147(efx) || !efx->link_state.up ||
354 !efx->xmac_poll_required) 358 !nic_data->xmac_poll_required)
355 return; 359 return;
356 360
357 efx->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1); 361 nic_data->xmac_poll_required = !falcon_xmac_link_ok_retry(efx, 1);
358 falcon_ack_status_intr(efx); 362 falcon_ack_status_intr(efx);
359} 363}
360 364
diff --git a/drivers/net/sfc/filter.c b/drivers/net/sfc/filter.c
index 52cb6082b910..d4722c41c4ce 100644
--- a/drivers/net/sfc/filter.c
+++ b/drivers/net/sfc/filter.c
@@ -7,6 +7,7 @@
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9 9
10#include <linux/in.h>
10#include "efx.h" 11#include "efx.h"
11#include "filter.h" 12#include "filter.h"
12#include "io.h" 13#include "io.h"
@@ -26,19 +27,26 @@
26 */ 27 */
27#define FILTER_CTL_SRCH_MAX 200 28#define FILTER_CTL_SRCH_MAX 200
28 29
30enum efx_filter_table_id {
31 EFX_FILTER_TABLE_RX_IP = 0,
32 EFX_FILTER_TABLE_RX_MAC,
33 EFX_FILTER_TABLE_COUNT,
34};
35
29struct efx_filter_table { 36struct efx_filter_table {
37 enum efx_filter_table_id id;
30 u32 offset; /* address of table relative to BAR */ 38 u32 offset; /* address of table relative to BAR */
31 unsigned size; /* number of entries */ 39 unsigned size; /* number of entries */
32 unsigned step; /* step between entries */ 40 unsigned step; /* step between entries */
33 unsigned used; /* number currently used */ 41 unsigned used; /* number currently used */
34 unsigned long *used_bitmap; 42 unsigned long *used_bitmap;
35 struct efx_filter_spec *spec; 43 struct efx_filter_spec *spec;
44 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
36}; 45};
37 46
38struct efx_filter_state { 47struct efx_filter_state {
39 spinlock_t lock; 48 spinlock_t lock;
40 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT]; 49 struct efx_filter_table table[EFX_FILTER_TABLE_COUNT];
41 unsigned search_depth[EFX_FILTER_TYPE_COUNT];
42}; 50};
43 51
44/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit 52/* The filter hash function is LFSR polynomial x^16 + x^3 + 1 of a 32-bit
@@ -65,68 +73,203 @@ static u16 efx_filter_increment(u32 key)
65} 73}
66 74
67static enum efx_filter_table_id 75static enum efx_filter_table_id
68efx_filter_type_table_id(enum efx_filter_type type) 76efx_filter_spec_table_id(const struct efx_filter_spec *spec)
77{
78 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_FULL >> 2));
79 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_TCP_WILD >> 2));
80 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_FULL >> 2));
81 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_UDP_WILD >> 2));
82 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_FULL >> 2));
83 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_MAC_WILD >> 2));
84 EFX_BUG_ON_PARANOID(spec->type == EFX_FILTER_UNSPEC);
85 return spec->type >> 2;
86}
87
88static struct efx_filter_table *
89efx_filter_spec_table(struct efx_filter_state *state,
90 const struct efx_filter_spec *spec)
69{ 91{
70 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_FULL >> 2)); 92 if (spec->type == EFX_FILTER_UNSPEC)
71 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_TCP_WILD >> 2)); 93 return NULL;
72 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_FULL >> 2)); 94 else
73 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_IP != (EFX_FILTER_RX_UDP_WILD >> 2)); 95 return &state->table[efx_filter_spec_table_id(spec)];
74 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_FULL >> 2));
75 BUILD_BUG_ON(EFX_FILTER_TABLE_RX_MAC != (EFX_FILTER_RX_MAC_WILD >> 2));
76 return type >> 2;
77} 96}
78 97
79static void 98static void efx_filter_table_reset_search_depth(struct efx_filter_table *table)
80efx_filter_table_reset_search_depth(struct efx_filter_state *state,
81 enum efx_filter_table_id table_id)
82{ 99{
83 memset(state->search_depth + (table_id << 2), 0, 100 memset(table->search_depth, 0, sizeof(table->search_depth));
84 sizeof(state->search_depth[0]) << 2);
85} 101}
86 102
87static void efx_filter_push_rx_limits(struct efx_nic *efx) 103static void efx_filter_push_rx_limits(struct efx_nic *efx)
88{ 104{
89 struct efx_filter_state *state = efx->filter_state; 105 struct efx_filter_state *state = efx->filter_state;
106 struct efx_filter_table *table;
90 efx_oword_t filter_ctl; 107 efx_oword_t filter_ctl;
91 108
92 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 109 efx_reado(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
93 110
111 table = &state->table[EFX_FILTER_TABLE_RX_IP];
94 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT, 112 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_FULL_SRCH_LIMIT,
95 state->search_depth[EFX_FILTER_RX_TCP_FULL] + 113 table->search_depth[EFX_FILTER_TCP_FULL] +
96 FILTER_CTL_SRCH_FUDGE_FULL); 114 FILTER_CTL_SRCH_FUDGE_FULL);
97 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT, 115 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_TCP_WILD_SRCH_LIMIT,
98 state->search_depth[EFX_FILTER_RX_TCP_WILD] + 116 table->search_depth[EFX_FILTER_TCP_WILD] +
99 FILTER_CTL_SRCH_FUDGE_WILD); 117 FILTER_CTL_SRCH_FUDGE_WILD);
100 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT, 118 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_FULL_SRCH_LIMIT,
101 state->search_depth[EFX_FILTER_RX_UDP_FULL] + 119 table->search_depth[EFX_FILTER_UDP_FULL] +
102 FILTER_CTL_SRCH_FUDGE_FULL); 120 FILTER_CTL_SRCH_FUDGE_FULL);
103 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT, 121 EFX_SET_OWORD_FIELD(filter_ctl, FRF_BZ_UDP_WILD_SRCH_LIMIT,
104 state->search_depth[EFX_FILTER_RX_UDP_WILD] + 122 table->search_depth[EFX_FILTER_UDP_WILD] +
105 FILTER_CTL_SRCH_FUDGE_WILD); 123 FILTER_CTL_SRCH_FUDGE_WILD);
106 124
107 if (state->table[EFX_FILTER_TABLE_RX_MAC].size) { 125 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
126 if (table->size) {
108 EFX_SET_OWORD_FIELD( 127 EFX_SET_OWORD_FIELD(
109 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT, 128 filter_ctl, FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT,
110 state->search_depth[EFX_FILTER_RX_MAC_FULL] + 129 table->search_depth[EFX_FILTER_MAC_FULL] +
111 FILTER_CTL_SRCH_FUDGE_FULL); 130 FILTER_CTL_SRCH_FUDGE_FULL);
112 EFX_SET_OWORD_FIELD( 131 EFX_SET_OWORD_FIELD(
113 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT, 132 filter_ctl, FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT,
114 state->search_depth[EFX_FILTER_RX_MAC_WILD] + 133 table->search_depth[EFX_FILTER_MAC_WILD] +
115 FILTER_CTL_SRCH_FUDGE_WILD); 134 FILTER_CTL_SRCH_FUDGE_WILD);
116 } 135 }
117 136
118 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL); 137 efx_writeo(efx, &filter_ctl, FR_BZ_RX_FILTER_CTL);
119} 138}
120 139
140static inline void __efx_filter_set_ipv4(struct efx_filter_spec *spec,
141 __be32 host1, __be16 port1,
142 __be32 host2, __be16 port2)
143{
144 spec->data[0] = ntohl(host1) << 16 | ntohs(port1);
145 spec->data[1] = ntohs(port2) << 16 | ntohl(host1) >> 16;
146 spec->data[2] = ntohl(host2);
147}
148
149/**
150 * efx_filter_set_ipv4_local - specify IPv4 host, transport protocol and port
151 * @spec: Specification to initialise
152 * @proto: Transport layer protocol number
153 * @host: Local host address (network byte order)
154 * @port: Local port (network byte order)
155 */
156int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
157 __be32 host, __be16 port)
158{
159 __be32 host1;
160 __be16 port1;
161
162 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
163
164 /* This cannot currently be combined with other filtering */
165 if (spec->type != EFX_FILTER_UNSPEC)
166 return -EPROTONOSUPPORT;
167
168 if (port == 0)
169 return -EINVAL;
170
171 switch (proto) {
172 case IPPROTO_TCP:
173 spec->type = EFX_FILTER_TCP_WILD;
174 break;
175 case IPPROTO_UDP:
176 spec->type = EFX_FILTER_UDP_WILD;
177 break;
178 default:
179 return -EPROTONOSUPPORT;
180 }
181
182 /* Filter is constructed in terms of source and destination,
183 * with the odd wrinkle that the ports are swapped in a UDP
184 * wildcard filter. We need to convert from local and remote
185 * (= zero for wildcard) addresses.
186 */
187 host1 = 0;
188 if (proto != IPPROTO_UDP) {
189 port1 = 0;
190 } else {
191 port1 = port;
192 port = 0;
193 }
194
195 __efx_filter_set_ipv4(spec, host1, port1, host, port);
196 return 0;
197}
198
199/**
200 * efx_filter_set_ipv4_full - specify IPv4 hosts, transport protocol and ports
201 * @spec: Specification to initialise
202 * @proto: Transport layer protocol number
203 * @host: Local host address (network byte order)
204 * @port: Local port (network byte order)
205 * @rhost: Remote host address (network byte order)
206 * @rport: Remote port (network byte order)
207 */
208int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
209 __be32 host, __be16 port,
210 __be32 rhost, __be16 rport)
211{
212 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
213
214 /* This cannot currently be combined with other filtering */
215 if (spec->type != EFX_FILTER_UNSPEC)
216 return -EPROTONOSUPPORT;
217
218 if (port == 0 || rport == 0)
219 return -EINVAL;
220
221 switch (proto) {
222 case IPPROTO_TCP:
223 spec->type = EFX_FILTER_TCP_FULL;
224 break;
225 case IPPROTO_UDP:
226 spec->type = EFX_FILTER_UDP_FULL;
227 break;
228 default:
229 return -EPROTONOSUPPORT;
230 }
231
232 __efx_filter_set_ipv4(spec, rhost, rport, host, port);
233 return 0;
234}
235
236/**
237 * efx_filter_set_eth_local - specify local Ethernet address and optional VID
238 * @spec: Specification to initialise
239 * @vid: VLAN ID to match, or %EFX_FILTER_VID_UNSPEC
240 * @addr: Local Ethernet MAC address
241 */
242int efx_filter_set_eth_local(struct efx_filter_spec *spec,
243 u16 vid, const u8 *addr)
244{
245 EFX_BUG_ON_PARANOID(!(spec->flags & EFX_FILTER_FLAG_RX));
246
247 /* This cannot currently be combined with other filtering */
248 if (spec->type != EFX_FILTER_UNSPEC)
249 return -EPROTONOSUPPORT;
250
251 if (vid == EFX_FILTER_VID_UNSPEC) {
252 spec->type = EFX_FILTER_MAC_WILD;
253 spec->data[0] = 0;
254 } else {
255 spec->type = EFX_FILTER_MAC_FULL;
256 spec->data[0] = vid;
257 }
258
259 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
260 spec->data[2] = addr[0] << 8 | addr[1];
261 return 0;
262}
263
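A minimal usage sketch, not part of this patch: how a caller might combine the new spec helpers with efx_filter_insert_filter(). Only efx_filter_init_rx(), efx_filter_set_ipv4_local() and efx_filter_insert_filter() come from the code in this diff; the priority constant, the address/port values and the error handling are illustrative assumptions.

/* Hypothetical caller: steer TCP traffic for a given local address and
 * port to RX queue 0.  Needs <linux/in.h> for IPPROTO_TCP, as filter.c
 * now includes.
 */
static int example_steer_tcp(struct efx_nic *efx, __be32 local_ip,
			     __be16 local_port)
{
	struct efx_filter_spec spec;
	int rc;

	/* Start from an unspecified RX filter spec (type EFX_FILTER_UNSPEC) */
	efx_filter_init_rx(&spec, EFX_FILTER_PRI_MANUAL /* assumed priority */,
			   0, 0);

	/* Selects EFX_FILTER_TCP_WILD and packs host/port into spec->data[] */
	rc = efx_filter_set_ipv4_local(&spec, IPPROTO_TCP, local_ip, local_port);
	if (rc)
		return rc;

	/* On success this now returns a filter ID built by
	 * efx_filter_make_id(), not a raw table index. */
	rc = efx_filter_insert_filter(efx, &spec, false);
	return rc < 0 ? rc : 0;
}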
121/* Build a filter entry and return its n-tuple key. */ 264/* Build a filter entry and return its n-tuple key. */
122static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec) 265static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
123{ 266{
124 u32 data3; 267 u32 data3;
125 268
126 switch (efx_filter_type_table_id(spec->type)) { 269 switch (efx_filter_spec_table_id(spec)) {
127 case EFX_FILTER_TABLE_RX_IP: { 270 case EFX_FILTER_TABLE_RX_IP: {
128 bool is_udp = (spec->type == EFX_FILTER_RX_UDP_FULL || 271 bool is_udp = (spec->type == EFX_FILTER_UDP_FULL ||
129 spec->type == EFX_FILTER_RX_UDP_WILD); 272 spec->type == EFX_FILTER_UDP_WILD);
130 EFX_POPULATE_OWORD_7( 273 EFX_POPULATE_OWORD_7(
131 *filter, 274 *filter,
132 FRF_BZ_RSS_EN, 275 FRF_BZ_RSS_EN,
@@ -143,7 +286,7 @@ static u32 efx_filter_build(efx_oword_t *filter, struct efx_filter_spec *spec)
143 } 286 }
144 287
145 case EFX_FILTER_TABLE_RX_MAC: { 288 case EFX_FILTER_TABLE_RX_MAC: {
146 bool is_wild = spec->type == EFX_FILTER_RX_MAC_WILD; 289 bool is_wild = spec->type == EFX_FILTER_MAC_WILD;
147 EFX_POPULATE_OWORD_8( 290 EFX_POPULATE_OWORD_8(
148 *filter, 291 *filter,
149 FRF_CZ_RMFT_RSS_EN, 292 FRF_CZ_RMFT_RSS_EN,
@@ -206,6 +349,14 @@ found:
206 return filter_idx; 349 return filter_idx;
207} 350}
208 351
352/* Construct/deconstruct external filter IDs */
353
354static inline int
355efx_filter_make_id(enum efx_filter_table_id table_id, unsigned index)
356{
357 return table_id << 16 | index;
358}
359
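The ID handed back to callers packs the table in bits 16 and up and the entry index in the low 16 bits. The inverse helpers are not part of this hunk; a sketch of what they would look like:

/* Hypothetical decomposition of an ID built by efx_filter_make_id() */
static inline enum efx_filter_table_id efx_filter_id_table_id(u32 id)
{
	return id >> 16;
}

static inline unsigned int efx_filter_id_index(u32 id)
{
	return id & 0xffff;
}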
209/** 360/**
210 * efx_filter_insert_filter - add or replace a filter 361 * efx_filter_insert_filter - add or replace a filter
211 * @efx: NIC in which to insert the filter 362 * @efx: NIC in which to insert the filter
@@ -213,30 +364,28 @@ found:
213 * @replace: Flag for whether the specified filter may replace a filter 364 * @replace: Flag for whether the specified filter may replace a filter
214 * with an identical match expression and equal or lower priority 365 * with an identical match expression and equal or lower priority
215 * 366 *
216 * On success, return the filter index within its table. 367 * On success, return the filter ID.
217 * On failure, return a negative error code. 368 * On failure, return a negative error code.
218 */ 369 */
219int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec, 370int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
220 bool replace) 371 bool replace)
221{ 372{
222 struct efx_filter_state *state = efx->filter_state; 373 struct efx_filter_state *state = efx->filter_state;
223 enum efx_filter_table_id table_id = 374 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
224 efx_filter_type_table_id(spec->type);
225 struct efx_filter_table *table = &state->table[table_id];
226 struct efx_filter_spec *saved_spec; 375 struct efx_filter_spec *saved_spec;
227 efx_oword_t filter; 376 efx_oword_t filter;
228 int filter_idx, depth; 377 int filter_idx, depth;
229 u32 key; 378 u32 key;
230 int rc; 379 int rc;
231 380
232 if (table->size == 0) 381 if (!table || table->size == 0)
233 return -EINVAL; 382 return -EINVAL;
234 383
235 key = efx_filter_build(&filter, spec); 384 key = efx_filter_build(&filter, spec);
236 385
237 netif_vdbg(efx, hw, efx->net_dev, 386 netif_vdbg(efx, hw, efx->net_dev,
238 "%s: type %d search_depth=%d", __func__, spec->type, 387 "%s: type %d search_depth=%d", __func__, spec->type,
239 state->search_depth[spec->type]); 388 table->search_depth[spec->type]);
240 389
241 spin_lock_bh(&state->lock); 390 spin_lock_bh(&state->lock);
242 391
@@ -263,8 +412,8 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
263 } 412 }
264 *saved_spec = *spec; 413 *saved_spec = *spec;
265 414
266 if (state->search_depth[spec->type] < depth) { 415 if (table->search_depth[spec->type] < depth) {
267 state->search_depth[spec->type] = depth; 416 table->search_depth[spec->type] = depth;
268 efx_filter_push_rx_limits(efx); 417 efx_filter_push_rx_limits(efx);
269 } 418 }
270 419
@@ -273,6 +422,7 @@ int efx_filter_insert_filter(struct efx_nic *efx, struct efx_filter_spec *spec,
273 netif_vdbg(efx, hw, efx->net_dev, 422 netif_vdbg(efx, hw, efx->net_dev,
274 "%s: filter type %d index %d rxq %u set", 423 "%s: filter type %d index %d rxq %u set",
275 __func__, spec->type, filter_idx, spec->dmaq_id); 424 __func__, spec->type, filter_idx, spec->dmaq_id);
425 rc = efx_filter_make_id(table->id, filter_idx);
276 426
277out: 427out:
278 spin_unlock_bh(&state->lock); 428 spin_unlock_bh(&state->lock);
@@ -306,15 +456,16 @@ static void efx_filter_table_clear_entry(struct efx_nic *efx,
306int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec) 456int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
307{ 457{
308 struct efx_filter_state *state = efx->filter_state; 458 struct efx_filter_state *state = efx->filter_state;
309 enum efx_filter_table_id table_id = 459 struct efx_filter_table *table = efx_filter_spec_table(state, spec);
310 efx_filter_type_table_id(spec->type);
311 struct efx_filter_table *table = &state->table[table_id];
312 struct efx_filter_spec *saved_spec; 460 struct efx_filter_spec *saved_spec;
313 efx_oword_t filter; 461 efx_oword_t filter;
314 int filter_idx, depth; 462 int filter_idx, depth;
315 u32 key; 463 u32 key;
316 int rc; 464 int rc;
317 465
466 if (!table)
467 return -EINVAL;
468
318 key = efx_filter_build(&filter, spec); 469 key = efx_filter_build(&filter, spec);
319 470
320 spin_lock_bh(&state->lock); 471 spin_lock_bh(&state->lock);
@@ -332,7 +483,7 @@ int efx_filter_remove_filter(struct efx_nic *efx, struct efx_filter_spec *spec)
332 483
333 efx_filter_table_clear_entry(efx, table, filter_idx); 484 efx_filter_table_clear_entry(efx, table, filter_idx);
334 if (table->used == 0) 485 if (table->used == 0)
335 efx_filter_table_reset_search_depth(state, table_id); 486 efx_filter_table_reset_search_depth(table);
336 rc = 0; 487 rc = 0;
337 488
338out: 489out:
@@ -340,15 +491,9 @@ out:
340 return rc; 491 return rc;
341} 492}
342 493
343/** 494static void efx_filter_table_clear(struct efx_nic *efx,
344 * efx_filter_table_clear - remove filters from a table by priority 495 enum efx_filter_table_id table_id,
345 * @efx: NIC from which to remove the filters 496 enum efx_filter_priority priority)
346 * @table_id: Table from which to remove the filters
347 * @priority: Maximum priority to remove
348 */
349void efx_filter_table_clear(struct efx_nic *efx,
350 enum efx_filter_table_id table_id,
351 enum efx_filter_priority priority)
352{ 497{
353 struct efx_filter_state *state = efx->filter_state; 498 struct efx_filter_state *state = efx->filter_state;
354 struct efx_filter_table *table = &state->table[table_id]; 499 struct efx_filter_table *table = &state->table[table_id];
@@ -360,11 +505,22 @@ void efx_filter_table_clear(struct efx_nic *efx,
360 if (table->spec[filter_idx].priority <= priority) 505 if (table->spec[filter_idx].priority <= priority)
361 efx_filter_table_clear_entry(efx, table, filter_idx); 506 efx_filter_table_clear_entry(efx, table, filter_idx);
362 if (table->used == 0) 507 if (table->used == 0)
363 efx_filter_table_reset_search_depth(state, table_id); 508 efx_filter_table_reset_search_depth(table);
364 509
365 spin_unlock_bh(&state->lock); 510 spin_unlock_bh(&state->lock);
366} 511}
367 512
513/**
514 * efx_filter_clear_rx - remove RX filters by priority
515 * @efx: NIC from which to remove the filters
516 * @priority: Maximum priority to remove
517 */
518void efx_filter_clear_rx(struct efx_nic *efx, enum efx_filter_priority priority)
519{
520 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_IP, priority);
521 efx_filter_table_clear(efx, EFX_FILTER_TABLE_RX_MAC, priority);
522}
523
 368 /* Restore filter state after reset */ 524
369void efx_restore_filters(struct efx_nic *efx) 525void efx_restore_filters(struct efx_nic *efx)
370{ 526{
@@ -407,6 +563,7 @@ int efx_probe_filters(struct efx_nic *efx)
407 563
408 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { 564 if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) {
409 table = &state->table[EFX_FILTER_TABLE_RX_IP]; 565 table = &state->table[EFX_FILTER_TABLE_RX_IP];
566 table->id = EFX_FILTER_TABLE_RX_IP;
410 table->offset = FR_BZ_RX_FILTER_TBL0; 567 table->offset = FR_BZ_RX_FILTER_TBL0;
411 table->size = FR_BZ_RX_FILTER_TBL0_ROWS; 568 table->size = FR_BZ_RX_FILTER_TBL0_ROWS;
412 table->step = FR_BZ_RX_FILTER_TBL0_STEP; 569 table->step = FR_BZ_RX_FILTER_TBL0_STEP;
@@ -414,6 +571,7 @@ int efx_probe_filters(struct efx_nic *efx)
414 571
415 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) { 572 if (efx_nic_rev(efx) >= EFX_REV_SIENA_A0) {
416 table = &state->table[EFX_FILTER_TABLE_RX_MAC]; 573 table = &state->table[EFX_FILTER_TABLE_RX_MAC];
574 table->id = EFX_FILTER_TABLE_RX_MAC;
417 table->offset = FR_CZ_RX_MAC_FILTER_TBL0; 575 table->offset = FR_CZ_RX_MAC_FILTER_TBL0;
418 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS; 576 table->size = FR_CZ_RX_MAC_FILTER_TBL0_ROWS;
419 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP; 577 table->step = FR_CZ_RX_MAC_FILTER_TBL0_STEP;
@@ -428,10 +586,9 @@ int efx_probe_filters(struct efx_nic *efx)
428 GFP_KERNEL); 586 GFP_KERNEL);
429 if (!table->used_bitmap) 587 if (!table->used_bitmap)
430 goto fail; 588 goto fail;
431 table->spec = vmalloc(table->size * sizeof(*table->spec)); 589 table->spec = vzalloc(table->size * sizeof(*table->spec));
432 if (!table->spec) 590 if (!table->spec)
433 goto fail; 591 goto fail;
434 memset(table->spec, 0, table->size * sizeof(*table->spec));
435 } 592 }
436 593
437 return 0; 594 return 0;
diff --git a/drivers/net/sfc/filter.h b/drivers/net/sfc/filter.h
index a53319ded79c..872f2132a496 100644
--- a/drivers/net/sfc/filter.h
+++ b/drivers/net/sfc/filter.h
@@ -12,31 +12,27 @@
12 12
13#include <linux/types.h> 13#include <linux/types.h>
14 14
15enum efx_filter_table_id {
16 EFX_FILTER_TABLE_RX_IP = 0,
17 EFX_FILTER_TABLE_RX_MAC,
18 EFX_FILTER_TABLE_COUNT,
19};
20
21/** 15/**
22 * enum efx_filter_type - type of hardware filter 16 * enum efx_filter_type - type of hardware filter
23 * @EFX_FILTER_RX_TCP_FULL: RX, matching TCP/IPv4 4-tuple 17 * @EFX_FILTER_TCP_FULL: Matching TCP/IPv4 4-tuple
24 * @EFX_FILTER_RX_TCP_WILD: RX, matching TCP/IPv4 destination (host, port) 18 * @EFX_FILTER_TCP_WILD: Matching TCP/IPv4 destination (host, port)
25 * @EFX_FILTER_RX_UDP_FULL: RX, matching UDP/IPv4 4-tuple 19 * @EFX_FILTER_UDP_FULL: Matching UDP/IPv4 4-tuple
26 * @EFX_FILTER_RX_UDP_WILD: RX, matching UDP/IPv4 destination (host, port) 20 * @EFX_FILTER_UDP_WILD: Matching UDP/IPv4 destination (host, port)
27 * @EFX_FILTER_RX_MAC_FULL: RX, matching Ethernet destination MAC address, VID 21 * @EFX_FILTER_MAC_FULL: Matching Ethernet destination MAC address, VID
28 * @EFX_FILTER_RX_MAC_WILD: RX, matching Ethernet destination MAC address 22 * @EFX_FILTER_MAC_WILD: Matching Ethernet destination MAC address
23 * @EFX_FILTER_UNSPEC: Match type is unspecified
29 * 24 *
30 * Falcon NICs only support the RX TCP/IPv4 and UDP/IPv4 filter types. 25 * Falcon NICs only support the TCP/IPv4 and UDP/IPv4 filter types.
31 */ 26 */
32enum efx_filter_type { 27enum efx_filter_type {
33 EFX_FILTER_RX_TCP_FULL = 0, 28 EFX_FILTER_TCP_FULL = 0,
34 EFX_FILTER_RX_TCP_WILD, 29 EFX_FILTER_TCP_WILD,
35 EFX_FILTER_RX_UDP_FULL, 30 EFX_FILTER_UDP_FULL,
36 EFX_FILTER_RX_UDP_WILD, 31 EFX_FILTER_UDP_WILD,
37 EFX_FILTER_RX_MAC_FULL = 4, 32 EFX_FILTER_MAC_FULL = 4,
38 EFX_FILTER_RX_MAC_WILD, 33 EFX_FILTER_MAC_WILD,
39 EFX_FILTER_TYPE_COUNT, 34 EFX_FILTER_TYPE_COUNT, /* number of specific types */
35 EFX_FILTER_UNSPEC = 0xf,
40}; 36};
41 37
42/** 38/**
@@ -63,13 +59,13 @@ enum efx_filter_priority {
63 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override 59 * @EFX_FILTER_FLAG_RX_OVERRIDE_IP: Enables a MAC filter to override
64 * any IP filter that matches the same packet. By default, IP 60 * any IP filter that matches the same packet. By default, IP
65 * filters take precedence. 61 * filters take precedence.
66 * 62 * @EFX_FILTER_FLAG_RX: Filter is for RX
67 * Currently, no flags are defined for TX filters.
68 */ 63 */
69enum efx_filter_flags { 64enum efx_filter_flags {
70 EFX_FILTER_FLAG_RX_RSS = 0x01, 65 EFX_FILTER_FLAG_RX_RSS = 0x01,
71 EFX_FILTER_FLAG_RX_SCATTER = 0x02, 66 EFX_FILTER_FLAG_RX_SCATTER = 0x02,
72 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04, 67 EFX_FILTER_FLAG_RX_OVERRIDE_IP = 0x04,
68 EFX_FILTER_FLAG_RX = 0x08,
73}; 69};
74 70
75/** 71/**
@@ -91,99 +87,26 @@ struct efx_filter_spec {
91 u32 data[3]; 87 u32 data[3];
92}; 88};
93 89
94/** 90static inline void efx_filter_init_rx(struct efx_filter_spec *spec,
95 * efx_filter_set_rx_tcp_full - specify RX filter with TCP/IPv4 full match 91 enum efx_filter_priority priority,
96 * @spec: Specification to initialise 92 enum efx_filter_flags flags,
97 * @shost: Source host address (host byte order) 93 unsigned rxq_id)
98 * @sport: Source port (host byte order)
99 * @dhost: Destination host address (host byte order)
100 * @dport: Destination port (host byte order)
101 */
102static inline void
103efx_filter_set_rx_tcp_full(struct efx_filter_spec *spec,
104 u32 shost, u16 sport, u32 dhost, u16 dport)
105{
106 spec->type = EFX_FILTER_RX_TCP_FULL;
107 spec->data[0] = sport | shost << 16;
108 spec->data[1] = dport << 16 | shost >> 16;
109 spec->data[2] = dhost;
110}
111
112/**
113 * efx_filter_set_rx_tcp_wild - specify RX filter with TCP/IPv4 wildcard match
114 * @spec: Specification to initialise
115 * @dhost: Destination host address (host byte order)
116 * @dport: Destination port (host byte order)
117 */
118static inline void
119efx_filter_set_rx_tcp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
120{
121 spec->type = EFX_FILTER_RX_TCP_WILD;
122 spec->data[0] = 0;
123 spec->data[1] = dport << 16;
124 spec->data[2] = dhost;
125}
126
127/**
128 * efx_filter_set_rx_udp_full - specify RX filter with UDP/IPv4 full match
129 * @spec: Specification to initialise
130 * @shost: Source host address (host byte order)
131 * @sport: Source port (host byte order)
132 * @dhost: Destination host address (host byte order)
133 * @dport: Destination port (host byte order)
134 */
135static inline void
136efx_filter_set_rx_udp_full(struct efx_filter_spec *spec,
137 u32 shost, u16 sport, u32 dhost, u16 dport)
138{
139 spec->type = EFX_FILTER_RX_UDP_FULL;
140 spec->data[0] = sport | shost << 16;
141 spec->data[1] = dport << 16 | shost >> 16;
142 spec->data[2] = dhost;
143}
144
145/**
146 * efx_filter_set_rx_udp_wild - specify RX filter with UDP/IPv4 wildcard match
147 * @spec: Specification to initialise
148 * @dhost: Destination host address (host byte order)
149 * @dport: Destination port (host byte order)
150 */
151static inline void
152efx_filter_set_rx_udp_wild(struct efx_filter_spec *spec, u32 dhost, u16 dport)
153{ 94{
154 spec->type = EFX_FILTER_RX_UDP_WILD; 95 spec->type = EFX_FILTER_UNSPEC;
155 spec->data[0] = dport; 96 spec->priority = priority;
156 spec->data[1] = 0; 97 spec->flags = EFX_FILTER_FLAG_RX | flags;
157 spec->data[2] = dhost; 98 spec->dmaq_id = rxq_id;
158} 99}
159 100
160/** 101extern int efx_filter_set_ipv4_local(struct efx_filter_spec *spec, u8 proto,
161 * efx_filter_set_rx_mac_full - specify RX filter with MAC full match 102 __be32 host, __be16 port);
162 * @spec: Specification to initialise 103extern int efx_filter_set_ipv4_full(struct efx_filter_spec *spec, u8 proto,
163 * @vid: VLAN ID 104 __be32 host, __be16 port,
164 * @addr: Destination MAC address 105 __be32 rhost, __be16 rport);
165 */ 106extern int efx_filter_set_eth_local(struct efx_filter_spec *spec,
166static inline void efx_filter_set_rx_mac_full(struct efx_filter_spec *spec, 107 u16 vid, const u8 *addr);
167 u16 vid, const u8 *addr) 108enum {
168{ 109 EFX_FILTER_VID_UNSPEC = 0xffff,
169 spec->type = EFX_FILTER_RX_MAC_FULL; 110};
170 spec->data[0] = vid;
171 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
172 spec->data[2] = addr[0] << 8 | addr[1];
173}
174
175/**
176 * efx_filter_set_rx_mac_full - specify RX filter with MAC wildcard match
177 * @spec: Specification to initialise
178 * @addr: Destination MAC address
179 */
180static inline void efx_filter_set_rx_mac_wild(struct efx_filter_spec *spec,
181 const u8 *addr)
182{
183 spec->type = EFX_FILTER_RX_MAC_WILD;
184 spec->data[0] = 0;
185 spec->data[1] = addr[2] << 24 | addr[3] << 16 | addr[4] << 8 | addr[5];
186 spec->data[2] = addr[0] << 8 | addr[1];
187}
188 111
189#endif /* EFX_FILTER_H */ 112#endif /* EFX_FILTER_H */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
index 85a99fe87437..6da4ae20a039 100644
--- a/drivers/net/sfc/io.h
+++ b/drivers/net/sfc/io.h
@@ -22,28 +22,39 @@
22 * 22 *
23 * Notes on locking strategy: 23 * Notes on locking strategy:
24 * 24 *
25 * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes 25 * Most CSRs are 128-bit (oword) and therefore cannot be read or
26 * which necessitates locking. 26 * written atomically. Access from the host is buffered by the Bus
27 * Under normal operation few writes to NIC registers are made and these 27 * Interface Unit (BIU). Whenever the host reads from the lowest
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special 28 * address of such a register, or from the address of a different such
29 * cased to allow 4-byte (hence lockless) accesses. 29 * register, the BIU latches the register's value. Subsequent reads
30 * from higher addresses of the same register will read the latched
31 * value. Whenever the host writes part of such a register, the BIU
32 * collects the written value and does not write to the underlying
33 * register until all 4 dwords have been written. A similar buffering
34 * scheme applies to host access to the NIC's 64-bit SRAM.
30 * 35 *
31 * It *is* safe to write to these 4-byte registers in the middle of an 36 * Access to different CSRs and 64-bit SRAM words must be serialised,
32 * access to an 8-byte or 16-byte register. We therefore use a 37 * since interleaved access can result in lost writes or lost
33 * spinlock to protect accesses to the larger registers, but no locks 38 * information from read-to-clear fields. We use efx_nic::biu_lock
34 * for the 4-byte registers. 39 * for this. (We could use separate locks for read and write, but
40 * this is not normally a performance bottleneck.)
35 * 41 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2 42 * The DMA descriptor pointers (RX_DESC_UPD and TX_DESC_UPD) are
37 * due to the way the 16byte registers are "collected" in the BIU. 43 * 128-bit but are special-cased in the BIU to avoid the need for
44 * locking in the host:
38 * 45 *
39 * We also lock when carrying out reads, to ensure consistency of the 46 * - They are write-only.
40 * data (made possible since the BIU reads all 128 bits into a cache). 47 * - The semantics of writing to these registers are such that
41 * Reads are very rare, so this isn't a significant performance 48 * replacing the low 96 bits with zero does not affect functionality.
42 * impact. (Most data transferred from NIC to host is DMAed directly 49 * - If the host writes to the last dword address of such a register
43 * into host memory). 50 * (i.e. the high 32 bits) the underlying register will always be
44 * 51 * written. If the collector does not hold values for the low 96
45 * I/O BAR access uses locks for both reads and writes (but is only provided 52 * bits of the register, they will be written as zero. Writing to
46 * for testing purposes). 53 * the last qword does not have this effect and must not be done.
54 * - If the host writes to the address of any other part of such a
55 * register while the collector already holds values for some other
56 * register, the write is discarded and the collector maintains its
57 * current state.
47 */ 58 */
48 59
49#if BITS_PER_LONG == 64 60#if BITS_PER_LONG == 64
@@ -72,7 +83,7 @@ static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
72 return (__force __le32)__raw_readl(efx->membase + reg); 83 return (__force __le32)__raw_readl(efx->membase + reg);
73} 84}
74 85
75/* Writes to a normal 16-byte Efx register, locking as appropriate. */ 86/* Write a normal 128-bit CSR, locking as appropriate. */
76static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value, 87static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
77 unsigned int reg) 88 unsigned int reg)
78{ 89{
@@ -85,21 +96,18 @@ static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
85 spin_lock_irqsave(&efx->biu_lock, flags); 96 spin_lock_irqsave(&efx->biu_lock, flags);
86#ifdef EFX_USE_QWORD_IO 97#ifdef EFX_USE_QWORD_IO
87 _efx_writeq(efx, value->u64[0], reg + 0); 98 _efx_writeq(efx, value->u64[0], reg + 0);
88 wmb();
89 _efx_writeq(efx, value->u64[1], reg + 8); 99 _efx_writeq(efx, value->u64[1], reg + 8);
90#else 100#else
91 _efx_writed(efx, value->u32[0], reg + 0); 101 _efx_writed(efx, value->u32[0], reg + 0);
92 _efx_writed(efx, value->u32[1], reg + 4); 102 _efx_writed(efx, value->u32[1], reg + 4);
93 _efx_writed(efx, value->u32[2], reg + 8); 103 _efx_writed(efx, value->u32[2], reg + 8);
94 wmb();
95 _efx_writed(efx, value->u32[3], reg + 12); 104 _efx_writed(efx, value->u32[3], reg + 12);
96#endif 105#endif
97 mmiowb(); 106 mmiowb();
98 spin_unlock_irqrestore(&efx->biu_lock, flags); 107 spin_unlock_irqrestore(&efx->biu_lock, flags);
99} 108}
100 109
101/* Write an 8-byte NIC SRAM entry through the supplied mapping, 110/* Write 64-bit SRAM through the supplied mapping, locking as appropriate. */
102 * locking as appropriate. */
103static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase, 111static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
104 efx_qword_t *value, unsigned int index) 112 efx_qword_t *value, unsigned int index)
105{ 113{
@@ -115,36 +123,25 @@ static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
115 __raw_writeq((__force u64)value->u64[0], membase + addr); 123 __raw_writeq((__force u64)value->u64[0], membase + addr);
116#else 124#else
117 __raw_writel((__force u32)value->u32[0], membase + addr); 125 __raw_writel((__force u32)value->u32[0], membase + addr);
118 wmb();
119 __raw_writel((__force u32)value->u32[1], membase + addr + 4); 126 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
120#endif 127#endif
121 mmiowb(); 128 mmiowb();
122 spin_unlock_irqrestore(&efx->biu_lock, flags); 129 spin_unlock_irqrestore(&efx->biu_lock, flags);
123} 130}
124 131
125/* Write dword to NIC register that allows partial writes 132/* Write a 32-bit CSR or the last dword of a special 128-bit CSR */
126 *
127 * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
128 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
129 * for lockless writes.
130 */
131static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value, 133static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
132 unsigned int reg) 134 unsigned int reg)
133{ 135{
134 netif_vdbg(efx, hw, efx->net_dev, 136 netif_vdbg(efx, hw, efx->net_dev,
135 "writing partial register %x with "EFX_DWORD_FMT"\n", 137 "writing register %x with "EFX_DWORD_FMT"\n",
136 reg, EFX_DWORD_VAL(*value)); 138 reg, EFX_DWORD_VAL(*value));
137 139
138 /* No lock required */ 140 /* No lock required */
139 _efx_writed(efx, value->u32[0], reg); 141 _efx_writed(efx, value->u32[0], reg);
140} 142}
141 143
142/* Read from a NIC register 144/* Read a 128-bit CSR, locking as appropriate. */
143 *
144 * This reads an entire 16-byte register in one go, locking as
145 * appropriate. It is essential to read the first dword first, as this
146 * prompts the NIC to load the current value into the shadow register.
147 */
148static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value, 145static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
149 unsigned int reg) 146 unsigned int reg)
150{ 147{
@@ -152,7 +149,6 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
152 149
153 spin_lock_irqsave(&efx->biu_lock, flags); 150 spin_lock_irqsave(&efx->biu_lock, flags);
154 value->u32[0] = _efx_readd(efx, reg + 0); 151 value->u32[0] = _efx_readd(efx, reg + 0);
155 rmb();
156 value->u32[1] = _efx_readd(efx, reg + 4); 152 value->u32[1] = _efx_readd(efx, reg + 4);
157 value->u32[2] = _efx_readd(efx, reg + 8); 153 value->u32[2] = _efx_readd(efx, reg + 8);
158 value->u32[3] = _efx_readd(efx, reg + 12); 154 value->u32[3] = _efx_readd(efx, reg + 12);
@@ -163,8 +159,7 @@ static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
163 EFX_OWORD_VAL(*value)); 159 EFX_OWORD_VAL(*value));
164} 160}
165 161
166/* Read an 8-byte SRAM entry through supplied mapping, 162/* Read 64-bit SRAM through the supplied mapping, locking as appropriate. */
167 * locking as appropriate. */
168static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase, 163static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
169 efx_qword_t *value, unsigned int index) 164 efx_qword_t *value, unsigned int index)
170{ 165{
@@ -176,7 +171,6 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
176 value->u64[0] = (__force __le64)__raw_readq(membase + addr); 171 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
177#else 172#else
178 value->u32[0] = (__force __le32)__raw_readl(membase + addr); 173 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
179 rmb();
180 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4); 174 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
181#endif 175#endif
182 spin_unlock_irqrestore(&efx->biu_lock, flags); 176 spin_unlock_irqrestore(&efx->biu_lock, flags);
@@ -186,7 +180,7 @@ static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
186 addr, EFX_QWORD_VAL(*value)); 180 addr, EFX_QWORD_VAL(*value));
187} 181}
188 182
189/* Read dword from register that allows partial writes (sic) */ 183/* Read a 32-bit CSR or SRAM */
190static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value, 184static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
191 unsigned int reg) 185 unsigned int reg)
192{ 186{
@@ -196,28 +190,28 @@ static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
196 reg, EFX_DWORD_VAL(*value)); 190 reg, EFX_DWORD_VAL(*value));
197} 191}
198 192
199/* Write to a register forming part of a table */ 193/* Write a 128-bit CSR forming part of a table */
200static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value, 194static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
201 unsigned int reg, unsigned int index) 195 unsigned int reg, unsigned int index)
202{ 196{
203 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t)); 197 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
204} 198}
205 199
206/* Read to a register forming part of a table */ 200/* Read a 128-bit CSR forming part of a table */
207static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value, 201static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
208 unsigned int reg, unsigned int index) 202 unsigned int reg, unsigned int index)
209{ 203{
210 efx_reado(efx, value, reg + index * sizeof(efx_oword_t)); 204 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
211} 205}
212 206
213/* Write to a dword register forming part of a table */ 207/* Write a 32-bit CSR forming part of a table, or 32-bit SRAM */
214static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value, 208static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
215 unsigned int reg, unsigned int index) 209 unsigned int reg, unsigned int index)
216{ 210{
217 efx_writed(efx, value, reg + index * sizeof(efx_oword_t)); 211 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
218} 212}
219 213
220/* Read from a dword register forming part of a table */ 214/* Read a 32-bit CSR forming part of a table, or 32-bit SRAM */
221static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value, 215static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
222 unsigned int reg, unsigned int index) 216 unsigned int reg, unsigned int index)
223{ 217{
@@ -231,29 +225,54 @@ static inline void efx_readd_table(struct efx_nic *efx, efx_dword_t *value,
231#define EFX_PAGED_REG(page, reg) \ 225#define EFX_PAGED_REG(page, reg) \
232 ((page) * EFX_PAGE_BLOCK_SIZE + (reg)) 226 ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
233 227
234/* As for efx_writeo(), but for a page-mapped register. */ 228/* Write the whole of RX_DESC_UPD or TX_DESC_UPD */
235static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value, 229static inline void _efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
236 unsigned int reg, unsigned int page) 230 unsigned int reg, unsigned int page)
237{ 231{
238 efx_writeo(efx, value, EFX_PAGED_REG(page, reg)); 232 reg = EFX_PAGED_REG(page, reg);
239}
240 233
241/* As for efx_writed(), but for a page-mapped register. */ 234 netif_vdbg(efx, hw, efx->net_dev,
242static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value, 235 "writing register %x with " EFX_OWORD_FMT "\n", reg,
243 unsigned int reg, unsigned int page) 236 EFX_OWORD_VAL(*value));
237
238#ifdef EFX_USE_QWORD_IO
239 _efx_writeq(efx, value->u64[0], reg + 0);
240#else
241 _efx_writed(efx, value->u32[0], reg + 0);
242 _efx_writed(efx, value->u32[1], reg + 4);
243#endif
244 _efx_writed(efx, value->u32[2], reg + 8);
245 _efx_writed(efx, value->u32[3], reg + 12);
246}
247#define efx_writeo_page(efx, value, reg, page) \
248 _efx_writeo_page(efx, value, \
249 reg + \
250 BUILD_BUG_ON_ZERO((reg) != 0x830 && (reg) != 0xa10), \
251 page)
252
253/* Write a page-mapped 32-bit CSR (EVQ_RPTR or the high bits of
254 * RX_DESC_UPD or TX_DESC_UPD)
255 */
256static inline void _efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
257 unsigned int reg, unsigned int page)
244{ 258{
245 efx_writed(efx, value, EFX_PAGED_REG(page, reg)); 259 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
246} 260}
247 261#define efx_writed_page(efx, value, reg, page) \
248/* Write dword to page-mapped register with an extra lock. 262 _efx_writed_page(efx, value, \
249 * 263 reg + \
250 * As for efx_writed_page(), but for a register that suffers from 264 BUILD_BUG_ON_ZERO((reg) != 0x400 && (reg) != 0x83c \
251 * SFC bug 3181. Take out a lock so the BIU collector cannot be 265 && (reg) != 0xa1c), \
252 * confused. */ 266 page)
253static inline void efx_writed_page_locked(struct efx_nic *efx, 267
254 efx_dword_t *value, 268/* Write TIMER_COMMAND. This is a page-mapped 32-bit CSR, but a bug
255 unsigned int reg, 269 * in the BIU means that writes to TIMER_COMMAND[0] invalidate the
256 unsigned int page) 270 * collector register.
271 */
272static inline void _efx_writed_page_locked(struct efx_nic *efx,
273 efx_dword_t *value,
274 unsigned int reg,
275 unsigned int page)
257{ 276{
258 unsigned long flags __attribute__ ((unused)); 277 unsigned long flags __attribute__ ((unused));
259 278
@@ -265,5 +284,9 @@ static inline void efx_writed_page_locked(struct efx_nic *efx,
265 efx_writed(efx, value, EFX_PAGED_REG(page, reg)); 284 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
266 } 285 }
267} 286}
287#define efx_writed_page_locked(efx, value, reg, page) \
288 _efx_writed_page_locked(efx, value, \
289 reg + BUILD_BUG_ON_ZERO((reg) != 0x420), \
290 page)
268 291
269#endif /* EFX_IO_H */ 292#endif /* EFX_IO_H */
diff --git a/drivers/net/sfc/mcdi.c b/drivers/net/sfc/mcdi.c
index 12cf910c2ce7..b716e827b291 100644
--- a/drivers/net/sfc/mcdi.c
+++ b/drivers/net/sfc/mcdi.c
@@ -381,7 +381,7 @@ int efx_mcdi_rpc(struct efx_nic *efx, unsigned cmd,
381 -rc); 381 -rc);
382 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE); 382 efx_schedule_reset(efx, RESET_TYPE_MC_FAILURE);
383 } else 383 } else
384 netif_err(efx, hw, efx->net_dev, 384 netif_dbg(efx, hw, efx->net_dev,
385 "MC command 0x%x inlen %d failed rc=%d\n", 385 "MC command 0x%x inlen %d failed rc=%d\n",
386 cmd, (int)inlen, -rc); 386 cmd, (int)inlen, -rc);
387 } 387 }
@@ -463,6 +463,7 @@ static void efx_mcdi_ev_death(struct efx_nic *efx, int rc)
463 if (mcdi->mode == MCDI_MODE_EVENTS) { 463 if (mcdi->mode == MCDI_MODE_EVENTS) {
464 mcdi->resprc = rc; 464 mcdi->resprc = rc;
465 mcdi->resplen = 0; 465 mcdi->resplen = 0;
466 ++mcdi->credits;
466 } 467 }
467 } else 468 } else
468 /* Nobody was waiting for an MCDI request, so trigger a reset */ 469 /* Nobody was waiting for an MCDI request, so trigger a reset */
diff --git a/drivers/net/sfc/mcdi_phy.c b/drivers/net/sfc/mcdi_phy.c
index c992742446b1..0e97eed663c6 100644
--- a/drivers/net/sfc/mcdi_phy.c
+++ b/drivers/net/sfc/mcdi_phy.c
@@ -16,7 +16,6 @@
16#include "phy.h" 16#include "phy.h"
17#include "mcdi.h" 17#include "mcdi.h"
18#include "mcdi_pcol.h" 18#include "mcdi_pcol.h"
19#include "mdio_10g.h"
20#include "nic.h" 19#include "nic.h"
21#include "selftest.h" 20#include "selftest.h"
22 21
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 98d946020429..56b0266b441f 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -15,7 +15,6 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "workarounds.h" 17#include "workarounds.h"
18#include "nic.h"
19 18
20unsigned efx_mdio_id_oui(u32 id) 19unsigned efx_mdio_id_oui(u32 id)
21{ 20{
diff --git a/drivers/net/sfc/mtd.c b/drivers/net/sfc/mtd.c
index 02e54b4f701f..d38627448c22 100644
--- a/drivers/net/sfc/mtd.c
+++ b/drivers/net/sfc/mtd.c
@@ -321,14 +321,15 @@ static int falcon_mtd_read(struct mtd_info *mtd, loff_t start,
321 struct efx_mtd *efx_mtd = mtd->priv; 321 struct efx_mtd *efx_mtd = mtd->priv;
322 const struct efx_spi_device *spi = efx_mtd->spi; 322 const struct efx_spi_device *spi = efx_mtd->spi;
323 struct efx_nic *efx = efx_mtd->efx; 323 struct efx_nic *efx = efx_mtd->efx;
324 struct falcon_nic_data *nic_data = efx->nic_data;
324 int rc; 325 int rc;
325 326
326 rc = mutex_lock_interruptible(&efx->spi_lock); 327 rc = mutex_lock_interruptible(&nic_data->spi_lock);
327 if (rc) 328 if (rc)
328 return rc; 329 return rc;
329 rc = falcon_spi_read(efx, spi, part->offset + start, len, 330 rc = falcon_spi_read(efx, spi, part->offset + start, len,
330 retlen, buffer); 331 retlen, buffer);
331 mutex_unlock(&efx->spi_lock); 332 mutex_unlock(&nic_data->spi_lock);
332 return rc; 333 return rc;
333} 334}
334 335
@@ -337,13 +338,14 @@ static int falcon_mtd_erase(struct mtd_info *mtd, loff_t start, size_t len)
337 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 338 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
338 struct efx_mtd *efx_mtd = mtd->priv; 339 struct efx_mtd *efx_mtd = mtd->priv;
339 struct efx_nic *efx = efx_mtd->efx; 340 struct efx_nic *efx = efx_mtd->efx;
341 struct falcon_nic_data *nic_data = efx->nic_data;
340 int rc; 342 int rc;
341 343
342 rc = mutex_lock_interruptible(&efx->spi_lock); 344 rc = mutex_lock_interruptible(&nic_data->spi_lock);
343 if (rc) 345 if (rc)
344 return rc; 346 return rc;
345 rc = efx_spi_erase(part, part->offset + start, len); 347 rc = efx_spi_erase(part, part->offset + start, len);
346 mutex_unlock(&efx->spi_lock); 348 mutex_unlock(&nic_data->spi_lock);
347 return rc; 349 return rc;
348} 350}
349 351
@@ -354,14 +356,15 @@ static int falcon_mtd_write(struct mtd_info *mtd, loff_t start,
354 struct efx_mtd *efx_mtd = mtd->priv; 356 struct efx_mtd *efx_mtd = mtd->priv;
355 const struct efx_spi_device *spi = efx_mtd->spi; 357 const struct efx_spi_device *spi = efx_mtd->spi;
356 struct efx_nic *efx = efx_mtd->efx; 358 struct efx_nic *efx = efx_mtd->efx;
359 struct falcon_nic_data *nic_data = efx->nic_data;
357 int rc; 360 int rc;
358 361
359 rc = mutex_lock_interruptible(&efx->spi_lock); 362 rc = mutex_lock_interruptible(&nic_data->spi_lock);
360 if (rc) 363 if (rc)
361 return rc; 364 return rc;
362 rc = falcon_spi_write(efx, spi, part->offset + start, len, 365 rc = falcon_spi_write(efx, spi, part->offset + start, len,
363 retlen, buffer); 366 retlen, buffer);
364 mutex_unlock(&efx->spi_lock); 367 mutex_unlock(&nic_data->spi_lock);
365 return rc; 368 return rc;
366} 369}
367 370
@@ -370,11 +373,12 @@ static int falcon_mtd_sync(struct mtd_info *mtd)
370 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd); 373 struct efx_mtd_partition *part = to_efx_mtd_partition(mtd);
371 struct efx_mtd *efx_mtd = mtd->priv; 374 struct efx_mtd *efx_mtd = mtd->priv;
372 struct efx_nic *efx = efx_mtd->efx; 375 struct efx_nic *efx = efx_mtd->efx;
376 struct falcon_nic_data *nic_data = efx->nic_data;
373 int rc; 377 int rc;
374 378
375 mutex_lock(&efx->spi_lock); 379 mutex_lock(&nic_data->spi_lock);
376 rc = efx_spi_slow_wait(part, true); 380 rc = efx_spi_slow_wait(part, true);
377 mutex_unlock(&efx->spi_lock); 381 mutex_unlock(&nic_data->spi_lock);
378 return rc; 382 return rc;
379} 383}
380 384
@@ -387,35 +391,67 @@ static struct efx_mtd_ops falcon_mtd_ops = {
387 391
388static int falcon_mtd_probe(struct efx_nic *efx) 392static int falcon_mtd_probe(struct efx_nic *efx)
389{ 393{
390 struct efx_spi_device *spi = efx->spi_flash; 394 struct falcon_nic_data *nic_data = efx->nic_data;
395 struct efx_spi_device *spi;
391 struct efx_mtd *efx_mtd; 396 struct efx_mtd *efx_mtd;
392 int rc; 397 int rc = -ENODEV;
393 398
394 ASSERT_RTNL(); 399 ASSERT_RTNL();
395 400
396 if (!spi || spi->size <= FALCON_FLASH_BOOTCODE_START) 401 spi = &nic_data->spi_flash;
397 return -ENODEV; 402 if (efx_spi_present(spi) && spi->size > FALCON_FLASH_BOOTCODE_START) {
398 403 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
399 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]), 404 GFP_KERNEL);
400 GFP_KERNEL); 405 if (!efx_mtd)
401 if (!efx_mtd) 406 return -ENOMEM;
402 return -ENOMEM; 407
403 408 efx_mtd->spi = spi;
404 efx_mtd->spi = spi; 409 efx_mtd->name = "flash";
405 efx_mtd->name = "flash"; 410 efx_mtd->ops = &falcon_mtd_ops;
406 efx_mtd->ops = &falcon_mtd_ops; 411
412 efx_mtd->n_parts = 1;
413 efx_mtd->part[0].mtd.type = MTD_NORFLASH;
414 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH;
415 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START;
416 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
417 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START;
418 efx_mtd->part[0].type_name = "sfc_flash_bootrom";
419
420 rc = efx_mtd_probe_device(efx, efx_mtd);
421 if (rc) {
422 kfree(efx_mtd);
423 return rc;
424 }
425 }
407 426
408 efx_mtd->n_parts = 1; 427 spi = &nic_data->spi_eeprom;
409 efx_mtd->part[0].mtd.type = MTD_NORFLASH; 428 if (efx_spi_present(spi) && spi->size > EFX_EEPROM_BOOTCONFIG_START) {
410 efx_mtd->part[0].mtd.flags = MTD_CAP_NORFLASH; 429 efx_mtd = kzalloc(sizeof(*efx_mtd) + sizeof(efx_mtd->part[0]),
411 efx_mtd->part[0].mtd.size = spi->size - FALCON_FLASH_BOOTCODE_START; 430 GFP_KERNEL);
412 efx_mtd->part[0].mtd.erasesize = spi->erase_size; 431 if (!efx_mtd)
413 efx_mtd->part[0].offset = FALCON_FLASH_BOOTCODE_START; 432 return -ENOMEM;
414 efx_mtd->part[0].type_name = "sfc_flash_bootrom"; 433
434 efx_mtd->spi = spi;
435 efx_mtd->name = "EEPROM";
436 efx_mtd->ops = &falcon_mtd_ops;
437
438 efx_mtd->n_parts = 1;
439 efx_mtd->part[0].mtd.type = MTD_RAM;
440 efx_mtd->part[0].mtd.flags = MTD_CAP_RAM;
441 efx_mtd->part[0].mtd.size =
442 min(spi->size, EFX_EEPROM_BOOTCONFIG_END) -
443 EFX_EEPROM_BOOTCONFIG_START;
444 efx_mtd->part[0].mtd.erasesize = spi->erase_size;
445 efx_mtd->part[0].offset = EFX_EEPROM_BOOTCONFIG_START;
446 efx_mtd->part[0].type_name = "sfc_bootconfig";
447
448 rc = efx_mtd_probe_device(efx, efx_mtd);
449 if (rc) {
450 kfree(efx_mtd);
451 return rc;
452 }
453 }
415 454
416 rc = efx_mtd_probe_device(efx, efx_mtd);
417 if (rc)
418 kfree(efx_mtd);
419 return rc; 455 return rc;
420} 456}
421 457
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index b137c889152b..bdce66ddf93a 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -136,14 +136,19 @@ struct efx_tx_buffer {
136 * @efx: The associated Efx NIC 136 * @efx: The associated Efx NIC
137 * @queue: DMA queue number 137 * @queue: DMA queue number
138 * @channel: The associated channel 138 * @channel: The associated channel
139 * @core_txq: The networking core TX queue structure
139 * @buffer: The software buffer ring 140 * @buffer: The software buffer ring
140 * @txd: The hardware descriptor ring 141 * @txd: The hardware descriptor ring
141 * @ptr_mask: The size of the ring minus 1. 142 * @ptr_mask: The size of the ring minus 1.
142 * @flushed: Used when handling queue flushing 143 * @flushed: Used when handling queue flushing
143 * @read_count: Current read pointer. 144 * @read_count: Current read pointer.
144 * This is the number of buffers that have been removed from both rings. 145 * This is the number of buffers that have been removed from both rings.
145 * @stopped: Stopped count. 146 * @old_write_count: The value of @write_count when last checked.
146 * Set if this TX queue is currently stopping its port. 147 * This is here for performance reasons. The xmit path will
148 * only get the up-to-date value of @write_count if this
149 * variable indicates that the queue is empty. This is to
150 * avoid cache-line ping-pong between the xmit path and the
151 * completion path.
147 * @insert_count: Current insert pointer 152 * @insert_count: Current insert pointer
148 * This is the number of buffers that have been added to the 153 * This is the number of buffers that have been added to the
149 * software ring. 154 * software ring.
@@ -163,13 +168,17 @@ struct efx_tx_buffer {
163 * @tso_long_headers: Number of packets with headers too long for standard 168 * @tso_long_headers: Number of packets with headers too long for standard
164 * blocks 169 * blocks
165 * @tso_packets: Number of packets via the TSO xmit path 170 * @tso_packets: Number of packets via the TSO xmit path
171 * @pushes: Number of times the TX push feature has been used
172 * @empty_read_count: If the completion path has seen the queue as empty
173 * and the transmission path has not yet checked this, the value of
174 * @read_count bitwise-added to %EFX_EMPTY_COUNT_VALID; otherwise 0.
166 */ 175 */
167struct efx_tx_queue { 176struct efx_tx_queue {
168 /* Members which don't change on the fast path */ 177 /* Members which don't change on the fast path */
169 struct efx_nic *efx ____cacheline_aligned_in_smp; 178 struct efx_nic *efx ____cacheline_aligned_in_smp;
170 unsigned queue; 179 unsigned queue;
171 struct efx_channel *channel; 180 struct efx_channel *channel;
172 struct efx_nic *nic; 181 struct netdev_queue *core_txq;
173 struct efx_tx_buffer *buffer; 182 struct efx_tx_buffer *buffer;
174 struct efx_special_buffer txd; 183 struct efx_special_buffer txd;
175 unsigned int ptr_mask; 184 unsigned int ptr_mask;
@@ -177,7 +186,7 @@ struct efx_tx_queue {
177 186
178 /* Members used mainly on the completion path */ 187 /* Members used mainly on the completion path */
179 unsigned int read_count ____cacheline_aligned_in_smp; 188 unsigned int read_count ____cacheline_aligned_in_smp;
180 int stopped; 189 unsigned int old_write_count;
181 190
182 /* Members used only on the xmit path */ 191 /* Members used only on the xmit path */
183 unsigned int insert_count ____cacheline_aligned_in_smp; 192 unsigned int insert_count ____cacheline_aligned_in_smp;
@@ -187,6 +196,11 @@ struct efx_tx_queue {
187 unsigned int tso_bursts; 196 unsigned int tso_bursts;
188 unsigned int tso_long_headers; 197 unsigned int tso_long_headers;
189 unsigned int tso_packets; 198 unsigned int tso_packets;
199 unsigned int pushes;
200
201 /* Members shared between paths and sometimes updated */
202 unsigned int empty_read_count ____cacheline_aligned_in_smp;
203#define EFX_EMPTY_COUNT_VALID 0x80000000
190}; 204};
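A sketch of the handshake the comments above describe, not taken from this patch: the completion path flags an empty ring via @empty_read_count, and the xmit path can then detect emptiness without touching the completion path's cache line on every packet. The real logic lives in tx.c/nic.c; the sketch_ names and the write_count parameter are illustrative.

/* Completion path: the ring has drained, publish the read pointer. */
static inline void sketch_completion_saw_empty(struct efx_tx_queue *tx_queue)
{
	tx_queue->empty_read_count =
		tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
}

/* Xmit path: the queue was empty iff the flagged read pointer matches
 * our own write pointer (passed in, since the xmit path owns it). */
static inline bool sketch_xmit_saw_empty(struct efx_tx_queue *tx_queue,
					 unsigned int write_count)
{
	unsigned int empty = ACCESS_ONCE(tx_queue->empty_read_count);

	return (empty & EFX_EMPTY_COUNT_VALID) &&
	       ((empty ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
}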
191 205
192/** 206/**
@@ -305,7 +319,6 @@ enum efx_rx_alloc_method {
305 * @irq_moderation: IRQ moderation value (in hardware ticks) 319 * @irq_moderation: IRQ moderation value (in hardware ticks)
306 * @napi_dev: Net device used with NAPI 320 * @napi_dev: Net device used with NAPI
307 * @napi_str: NAPI control structure 321 * @napi_str: NAPI control structure
308 * @reset_work: Scheduled reset work thread
309 * @work_pending: Is work pending via NAPI? 322 * @work_pending: Is work pending via NAPI?
310 * @eventq: Event queue buffer 323 * @eventq: Event queue buffer
311 * @eventq_mask: Event queue pointer mask 324 * @eventq_mask: Event queue pointer mask
@@ -326,8 +339,6 @@ enum efx_rx_alloc_method {
326 * @n_rx_overlength: Count of RX_OVERLENGTH errors 339 * @n_rx_overlength: Count of RX_OVERLENGTH errors
327 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun 340 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
328 * @rx_queue: RX queue for this channel 341 * @rx_queue: RX queue for this channel
329 * @tx_stop_count: Core TX queue stop count
330 * @tx_stop_lock: Core TX queue stop lock
331 * @tx_queue: TX queues for this channel 342 * @tx_queue: TX queues for this channel
332 */ 343 */
333struct efx_channel { 344struct efx_channel {
@@ -366,10 +377,6 @@ struct efx_channel {
366 bool rx_pkt_csummed; 377 bool rx_pkt_csummed;
367 378
368 struct efx_rx_queue rx_queue; 379 struct efx_rx_queue rx_queue;
369
370 atomic_t tx_stop_count;
371 spinlock_t tx_stop_lock;
372
373 struct efx_tx_queue tx_queue[2]; 380 struct efx_tx_queue tx_queue[2];
374}; 381};
375 382
@@ -626,10 +633,8 @@ struct efx_filter_state;
626 * Work items do not hold and must not acquire RTNL. 633 * Work items do not hold and must not acquire RTNL.
627 * @workqueue_name: Name of workqueue 634 * @workqueue_name: Name of workqueue
628 * @reset_work: Scheduled reset workitem 635 * @reset_work: Scheduled reset workitem
629 * @monitor_work: Hardware monitor workitem
630 * @membase_phys: Memory BAR value as physical address 636 * @membase_phys: Memory BAR value as physical address
631 * @membase: Memory BAR value 637 * @membase: Memory BAR value
632 * @biu_lock: BIU (bus interface unit) lock
633 * @interrupt_mode: Interrupt mode 638 * @interrupt_mode: Interrupt mode
634 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues 639 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
635 * @irq_rx_moderation: IRQ moderation time for RX event queues 640 * @irq_rx_moderation: IRQ moderation time for RX event queues
@@ -648,23 +653,14 @@ struct efx_filter_state;
648 * @n_tx_channels: Number of channels used for TX 653 * @n_tx_channels: Number of channels used for TX
649 * @rx_buffer_len: RX buffer length 654 * @rx_buffer_len: RX buffer length
650 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 655 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
656 * @rx_hash_key: Toeplitz hash key for RSS
651 * @rx_indir_table: Indirection table for RSS 657 * @rx_indir_table: Indirection table for RSS
652 * @int_error_count: Number of internal errors seen recently 658 * @int_error_count: Number of internal errors seen recently
653 * @int_error_expire: Time at which error count will be expired 659 * @int_error_expire: Time at which error count will be expired
654 * @irq_status: Interrupt status buffer 660 * @irq_status: Interrupt status buffer
655 * @last_irq_cpu: Last CPU to handle interrupt.
656 * This register is written with the SMP processor ID whenever an
657 * interrupt is handled. It is used by efx_nic_test_interrupt()
658 * to verify that an interrupt has occurred.
659 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0 661 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
660 * @fatal_irq_level: IRQ level (bit number) used for serious errors 662 * @fatal_irq_level: IRQ level (bit number) used for serious errors
661 * @spi_flash: SPI flash device
662 * This field will be %NULL if no flash device is present (or for Siena).
663 * @spi_eeprom: SPI EEPROM device
664 * This field will be %NULL if no EEPROM device is present (or for Siena).
665 * @spi_lock: SPI bus lock
666 * @mtd_list: List of MTDs attached to the NIC 663 * @mtd_list: List of MTDs attached to the NIC
667 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
 668 * @nic_data: Hardware dependent state 664
669 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode, 665 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
670 * @port_inhibited, efx_monitor() and efx_reconfigure_port() 666 * @port_inhibited, efx_monitor() and efx_reconfigure_port()
@@ -677,21 +673,14 @@ struct efx_filter_state;
677 * @port_initialized: Port initialized? 673 * @port_initialized: Port initialized?
678 * @net_dev: Operating system network device. Consider holding the rtnl lock 674 * @net_dev: Operating system network device. Consider holding the rtnl lock
679 * @rx_checksum_enabled: RX checksumming enabled 675 * @rx_checksum_enabled: RX checksumming enabled
680 * @mac_stats: MAC statistics. These include all statistics the MACs
681 * can provide. Generic code converts these into a standard
682 * &struct net_device_stats.
683 * @stats_buffer: DMA buffer for statistics 676 * @stats_buffer: DMA buffer for statistics
684 * @stats_lock: Statistics update lock. Serialises statistics fetches
685 * @mac_op: MAC interface 677 * @mac_op: MAC interface
686 * @mac_address: Permanent MAC address
687 * @phy_type: PHY type 678 * @phy_type: PHY type
688 * @mdio_lock: MDIO lock
689 * @phy_op: PHY interface 679 * @phy_op: PHY interface
690 * @phy_data: PHY private data (including PHY-specific stats) 680 * @phy_data: PHY private data (including PHY-specific stats)
691 * @mdio: PHY MDIO interface 681 * @mdio: PHY MDIO interface
692 * @mdio_bus: PHY MDIO bus ID (only used by Siena) 682 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
693 * @phy_mode: PHY operating mode. Serialised by @mac_lock. 683 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
694 * @xmac_poll_required: XMAC link state needs polling
695 * @link_advertising: Autonegotiation advertising flags 684 * @link_advertising: Autonegotiation advertising flags
696 * @link_state: Current state of the link 685 * @link_state: Current state of the link
697 * @n_link_state_changes: Number of times the link has changed state 686 * @n_link_state_changes: Number of times the link has changed state
@@ -702,10 +691,23 @@ struct efx_filter_state;
702 * @loopback_mode: Loopback status 691 * @loopback_mode: Loopback status
703 * @loopback_modes: Supported loopback mode bitmask 692 * @loopback_modes: Supported loopback mode bitmask
704 * @loopback_selftest: Offline self-test private state 693 * @loopback_selftest: Offline self-test private state
694 * @monitor_work: Hardware monitor workitem
695 * @biu_lock: BIU (bus interface unit) lock
696 * @last_irq_cpu: Last CPU to handle interrupt.
697 * This register is written with the SMP processor ID whenever an
698 * interrupt is handled. It is used by efx_nic_test_interrupt()
699 * to verify that an interrupt has occurred.
700 * @n_rx_nodesc_drop_cnt: RX no descriptor drop count
701 * @mac_stats: MAC statistics. These include all statistics the MACs
702 * can provide. Generic code converts these into a standard
703 * &struct net_device_stats.
704 * @stats_lock: Statistics update lock. Serialises statistics fetches
705 * 705 *
706 * This is stored in the private area of the &struct net_device. 706 * This is stored in the private area of the &struct net_device.
707 */ 707 */
708struct efx_nic { 708struct efx_nic {
709 /* The following fields should be written very rarely */
710
709 char name[IFNAMSIZ]; 711 char name[IFNAMSIZ];
710 struct pci_dev *pci_dev; 712 struct pci_dev *pci_dev;
711 const struct efx_nic_type *type; 713 const struct efx_nic_type *type;
@@ -714,10 +716,9 @@ struct efx_nic {
714 struct workqueue_struct *workqueue; 716 struct workqueue_struct *workqueue;
715 char workqueue_name[16]; 717 char workqueue_name[16];
716 struct work_struct reset_work; 718 struct work_struct reset_work;
717 struct delayed_work monitor_work;
718 resource_size_t membase_phys; 719 resource_size_t membase_phys;
719 void __iomem *membase; 720 void __iomem *membase;
720 spinlock_t biu_lock; 721
721 enum efx_int_mode interrupt_mode; 722 enum efx_int_mode interrupt_mode;
722 bool irq_rx_adaptive; 723 bool irq_rx_adaptive;
723 unsigned int irq_rx_moderation; 724 unsigned int irq_rx_moderation;
@@ -744,19 +745,13 @@ struct efx_nic {
744 unsigned long int_error_expire; 745 unsigned long int_error_expire;
745 746
746 struct efx_buffer irq_status; 747 struct efx_buffer irq_status;
747 volatile signed int last_irq_cpu;
748 unsigned irq_zero_count; 748 unsigned irq_zero_count;
749 unsigned fatal_irq_level; 749 unsigned fatal_irq_level;
750 750
751 struct efx_spi_device *spi_flash;
752 struct efx_spi_device *spi_eeprom;
753 struct mutex spi_lock;
754#ifdef CONFIG_SFC_MTD 751#ifdef CONFIG_SFC_MTD
755 struct list_head mtd_list; 752 struct list_head mtd_list;
756#endif 753#endif
757 754
758 unsigned n_rx_nodesc_drop_cnt;
759
760 void *nic_data; 755 void *nic_data;
761 756
762 struct mutex mac_lock; 757 struct mutex mac_lock;
@@ -768,22 +763,17 @@ struct efx_nic {
768 struct net_device *net_dev; 763 struct net_device *net_dev;
769 bool rx_checksum_enabled; 764 bool rx_checksum_enabled;
770 765
771 struct efx_mac_stats mac_stats;
772 struct efx_buffer stats_buffer; 766 struct efx_buffer stats_buffer;
773 spinlock_t stats_lock;
774 767
775 struct efx_mac_operations *mac_op; 768 struct efx_mac_operations *mac_op;
776 unsigned char mac_address[ETH_ALEN];
777 769
778 unsigned int phy_type; 770 unsigned int phy_type;
779 struct mutex mdio_lock;
780 struct efx_phy_operations *phy_op; 771 struct efx_phy_operations *phy_op;
781 void *phy_data; 772 void *phy_data;
782 struct mdio_if_info mdio; 773 struct mdio_if_info mdio;
783 unsigned int mdio_bus; 774 unsigned int mdio_bus;
784 enum efx_phy_mode phy_mode; 775 enum efx_phy_mode phy_mode;
785 776
786 bool xmac_poll_required;
787 u32 link_advertising; 777 u32 link_advertising;
788 struct efx_link_state link_state; 778 struct efx_link_state link_state;
789 unsigned int n_link_state_changes; 779 unsigned int n_link_state_changes;
@@ -799,6 +789,15 @@ struct efx_nic {
799 void *loopback_selftest; 789 void *loopback_selftest;
800 790
801 struct efx_filter_state *filter_state; 791 struct efx_filter_state *filter_state;
792
793 /* The following fields may be written more often */
794
795 struct delayed_work monitor_work ____cacheline_aligned_in_smp;
796 spinlock_t biu_lock;
797 volatile signed int last_irq_cpu;
798 unsigned n_rx_nodesc_drop_cnt;
799 struct efx_mac_stats mac_stats;
800 spinlock_t stats_lock;
802}; 801};
803 802
804static inline int efx_dev_registered(struct efx_nic *efx) 803static inline int efx_dev_registered(struct efx_nic *efx)
@@ -831,6 +830,7 @@ static inline unsigned int efx_port_num(struct efx_nic *efx)
831 * be called while the controller is uninitialised. 830 * be called while the controller is uninitialised.
832 * @probe_port: Probe the MAC and PHY 831 * @probe_port: Probe the MAC and PHY
833 * @remove_port: Free resources allocated by probe_port() 832 * @remove_port: Free resources allocated by probe_port()
833 * @handle_global_event: Handle a "global" event (may be %NULL)
834 * @prepare_flush: Prepare the hardware for flushing the DMA queues 834 * @prepare_flush: Prepare the hardware for flushing the DMA queues
835 * @update_stats: Update statistics not provided by event handling 835 * @update_stats: Update statistics not provided by event handling
836 * @start_stats: Start the regular fetching of statistics 836 * @start_stats: Start the regular fetching of statistics
@@ -875,6 +875,7 @@ struct efx_nic_type {
875 int (*reset)(struct efx_nic *efx, enum reset_type method); 875 int (*reset)(struct efx_nic *efx, enum reset_type method);
876 int (*probe_port)(struct efx_nic *efx); 876 int (*probe_port)(struct efx_nic *efx);
877 void (*remove_port)(struct efx_nic *efx); 877 void (*remove_port)(struct efx_nic *efx);
878 bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
878 void (*prepare_flush)(struct efx_nic *efx); 879 void (*prepare_flush)(struct efx_nic *efx);
879 void (*update_stats)(struct efx_nic *efx); 880 void (*update_stats)(struct efx_nic *efx);
880 void (*start_stats)(struct efx_nic *efx); 881 void (*start_stats)(struct efx_nic *efx);
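
The struct efx_nic reshuffle above follows a simple layout rule: fields written once at probe time stay at the top, while fields dirtied on every interrupt or statistics fetch are grouped at the end and start on their own cache line, so hot-path writes do not bounce the line holding read-mostly configuration. A minimal sketch of that pattern (illustrative field names, not the driver's actual layout):

#include <linux/cache.h>
#include <linux/spinlock.h>

struct example_nic_state {
	/* Written very rarely (probe/configuration time) */
	void __iomem *membase;
	unsigned int irq_moderation_us;

	/* Written frequently (IRQ and statistics paths); the alignment
	 * starts these on a separate cache line from the fields above,
	 * so updating them does not invalidate the read-mostly line. */
	spinlock_t stats_lock ____cacheline_aligned_in_smp;
	int last_irq_cpu;
	unsigned long rx_nodesc_drops;
};
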
diff --git a/drivers/net/sfc/nic.c b/drivers/net/sfc/nic.c
index 67cb0c96838c..da386599ab68 100644
--- a/drivers/net/sfc/nic.c
+++ b/drivers/net/sfc/nic.c
@@ -362,6 +362,35 @@ static inline void efx_notify_tx_desc(struct efx_tx_queue *tx_queue)
362 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue); 362 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
363} 363}
364 364
365/* Write pointer and first descriptor for TX descriptor ring */
366static inline void efx_push_tx_desc(struct efx_tx_queue *tx_queue,
367 const efx_qword_t *txd)
368{
369 unsigned write_ptr;
370 efx_oword_t reg;
371
372 BUILD_BUG_ON(FRF_AZ_TX_DESC_LBN != 0);
373 BUILD_BUG_ON(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0);
374
375 write_ptr = tx_queue->write_count & tx_queue->ptr_mask;
376 EFX_POPULATE_OWORD_2(reg, FRF_AZ_TX_DESC_PUSH_CMD, true,
377 FRF_AZ_TX_DESC_WPTR, write_ptr);
378 reg.qword[0] = *txd;
379 efx_writeo_page(tx_queue->efx, &reg,
380 FR_BZ_TX_DESC_UPD_P0, tx_queue->queue);
381}
382
383static inline bool
384efx_may_push_tx_desc(struct efx_tx_queue *tx_queue, unsigned int write_count)
385{
386 unsigned empty_read_count = ACCESS_ONCE(tx_queue->empty_read_count);
387
388 if (empty_read_count == 0)
389 return false;
390
391 tx_queue->empty_read_count = 0;
392 return ((empty_read_count ^ write_count) & ~EFX_EMPTY_COUNT_VALID) == 0;
393}
365 394
366/* For each entry inserted into the software descriptor ring, create a 395/* For each entry inserted into the software descriptor ring, create a
367 * descriptor in the hardware TX descriptor ring (in host memory), and 396 * descriptor in the hardware TX descriptor ring (in host memory), and
@@ -373,6 +402,7 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
373 struct efx_tx_buffer *buffer; 402 struct efx_tx_buffer *buffer;
374 efx_qword_t *txd; 403 efx_qword_t *txd;
375 unsigned write_ptr; 404 unsigned write_ptr;
405 unsigned old_write_count = tx_queue->write_count;
376 406
377 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 407 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
378 408
@@ -391,7 +421,15 @@ void efx_nic_push_buffers(struct efx_tx_queue *tx_queue)
391 } while (tx_queue->write_count != tx_queue->insert_count); 421 } while (tx_queue->write_count != tx_queue->insert_count);
392 422
393 wmb(); /* Ensure descriptors are written before they are fetched */ 423 wmb(); /* Ensure descriptors are written before they are fetched */
394 efx_notify_tx_desc(tx_queue); 424
425 if (efx_may_push_tx_desc(tx_queue, old_write_count)) {
426 txd = efx_tx_desc(tx_queue,
427 old_write_count & tx_queue->ptr_mask);
428 efx_push_tx_desc(tx_queue, txd);
429 ++tx_queue->pushes;
430 } else {
431 efx_notify_tx_desc(tx_queue);
432 }
395} 433}
396 434
397/* Allocate hardware resources for a TX queue */ 435/* Allocate hardware resources for a TX queue */
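
The two helpers added above implement "TX push": if the hardware queue was empty the last time the completion path looked at it, the first descriptor of a new batch is written straight into the doorbell register together with the write pointer, saving the NIC a descriptor fetch from host memory. efx_may_push_tx_desc() decides this by comparing a snapshot stored by the completion path (the read_count at which the queue went empty, tagged with a validity bit) against the xmit path's write_count. A standalone sketch of just that check, modelling the validity tag as the top bit (EFX_EMPTY_COUNT_VALID in the driver):

#include <stdbool.h>

#define EMPTY_COUNT_VALID	(1u << 31)	/* illustrative value */

/* The completion path stores (read_count | EMPTY_COUNT_VALID) when it
 * drains the queue, so a stored value of 0 means "no valid snapshot".
 * Pushing is allowed only if nothing has been queued since that
 * snapshot, i.e. the counts match once the flag bit is masked off. */
static bool may_push_tx_desc(unsigned int empty_read_count,
			     unsigned int write_count)
{
	if (empty_read_count == 0)
		return false;
	return ((empty_read_count ^ write_count) & ~EMPTY_COUNT_VALID) == 0;
}
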
@@ -894,46 +932,6 @@ efx_handle_generated_event(struct efx_channel *channel, efx_qword_t *event)
894 channel->channel, EFX_QWORD_VAL(*event)); 932 channel->channel, EFX_QWORD_VAL(*event));
895} 933}
896 934
897/* Global events are basically PHY events */
898static void
899efx_handle_global_event(struct efx_channel *channel, efx_qword_t *event)
900{
901 struct efx_nic *efx = channel->efx;
902 bool handled = false;
903
904 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
905 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
906 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
907 /* Ignored */
908 handled = true;
909 }
910
911 if ((efx_nic_rev(efx) >= EFX_REV_FALCON_B0) &&
912 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
913 efx->xmac_poll_required = true;
914 handled = true;
915 }
916
917 if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ?
918 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
919 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
920 netif_err(efx, rx_err, efx->net_dev,
921 "channel %d seen global RX_RESET event. Resetting.\n",
922 channel->channel);
923
924 atomic_inc(&efx->rx_reset);
925 efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ?
926 RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE);
927 handled = true;
928 }
929
930 if (!handled)
931 netif_err(efx, hw, efx->net_dev,
932 "channel %d unknown global event "
933 EFX_QWORD_FMT "\n", channel->channel,
934 EFX_QWORD_VAL(*event));
935}
936
937static void 935static void
938efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event) 936efx_handle_driver_event(struct efx_channel *channel, efx_qword_t *event)
939{ 937{
@@ -1050,15 +1048,17 @@ int efx_nic_process_eventq(struct efx_channel *channel, int budget)
1050 case FSE_AZ_EV_CODE_DRV_GEN_EV: 1048 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1051 efx_handle_generated_event(channel, &event); 1049 efx_handle_generated_event(channel, &event);
1052 break; 1050 break;
1053 case FSE_AZ_EV_CODE_GLOBAL_EV:
1054 efx_handle_global_event(channel, &event);
1055 break;
1056 case FSE_AZ_EV_CODE_DRIVER_EV: 1051 case FSE_AZ_EV_CODE_DRIVER_EV:
1057 efx_handle_driver_event(channel, &event); 1052 efx_handle_driver_event(channel, &event);
1058 break; 1053 break;
1059 case FSE_CZ_EV_CODE_MCDI_EV: 1054 case FSE_CZ_EV_CODE_MCDI_EV:
1060 efx_mcdi_process_event(channel, &event); 1055 efx_mcdi_process_event(channel, &event);
1061 break; 1056 break;
1057 case FSE_AZ_EV_CODE_GLOBAL_EV:
1058 if (efx->type->handle_global_event &&
1059 efx->type->handle_global_event(channel, &event))
1060 break;
1061 /* else fall through */
1062 default: 1062 default:
1063 netif_err(channel->efx, hw, channel->efx->net_dev, 1063 netif_err(channel->efx, hw, channel->efx->net_dev,
1064 "channel %d unknown event type %d (data " 1064 "channel %d unknown event type %d (data "
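
With efx_handle_global_event() removed from the common event loop, global (mostly PHY) events are routed through an optional per-NIC-type hook, and the GLOBAL_EV case deliberately falls through to the "unknown event" error path when no hook is registered or the hook declines the event. A small self-contained sketch of that dispatch shape (names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

struct event { int code; unsigned long data; };

struct nic_type_ops {
	/* Optional: NULL on hardware that never raises global events. */
	bool (*handle_global_event)(struct event *ev);
};

static void process_event(const struct nic_type_ops *ops, struct event *ev)
{
	switch (ev->code) {
	case 6:	/* GLOBAL_EV code, for illustration only */
		if (ops->handle_global_event && ops->handle_global_event(ev))
			break;
		/* else fall through: unclaimed global events are errors */
	default:
		fprintf(stderr, "unknown event type %d (data %#lx)\n",
			ev->code, ev->data);
		break;
	}
}
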
@@ -1670,7 +1670,7 @@ void efx_nic_init_common(struct efx_nic *efx)
1670 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe); 1670 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
1671 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1); 1671 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
1672 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1); 1672 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
1673 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0); 1673 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 1);
1674 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1); 1674 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
1675 /* Enable SW_EV to inherit in char driver - assume harmless here */ 1675 /* Enable SW_EV to inherit in char driver - assume harmless here */
1676 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1); 1676 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
diff --git a/drivers/net/sfc/nic.h b/drivers/net/sfc/nic.h
index 0438dc98722d..eb0586925b51 100644
--- a/drivers/net/sfc/nic.h
+++ b/drivers/net/sfc/nic.h
@@ -15,6 +15,7 @@
15#include "net_driver.h" 15#include "net_driver.h"
16#include "efx.h" 16#include "efx.h"
17#include "mcdi.h" 17#include "mcdi.h"
18#include "spi.h"
18 19
19/* 20/*
20 * Falcon hardware control 21 * Falcon hardware control
@@ -113,6 +114,11 @@ struct falcon_board {
113 * @stats_pending: Is there a pending DMA of MAC statistics. 114 * @stats_pending: Is there a pending DMA of MAC statistics.
114 * @stats_timer: A timer for regularly fetching MAC statistics. 115 * @stats_timer: A timer for regularly fetching MAC statistics.
115 * @stats_dma_done: Pointer to the flag which indicates DMA completion. 116 * @stats_dma_done: Pointer to the flag which indicates DMA completion.
117 * @spi_flash: SPI flash device
118 * @spi_eeprom: SPI EEPROM device
119 * @spi_lock: SPI bus lock
120 * @mdio_lock: MDIO bus lock
121 * @xmac_poll_required: XMAC link state needs polling
116 */ 122 */
117struct falcon_nic_data { 123struct falcon_nic_data {
118 struct pci_dev *pci_dev2; 124 struct pci_dev *pci_dev2;
@@ -121,6 +127,11 @@ struct falcon_nic_data {
121 bool stats_pending; 127 bool stats_pending;
122 struct timer_list stats_timer; 128 struct timer_list stats_timer;
123 u32 *stats_dma_done; 129 u32 *stats_dma_done;
130 struct efx_spi_device spi_flash;
131 struct efx_spi_device spi_eeprom;
132 struct mutex spi_lock;
133 struct mutex mdio_lock;
134 bool xmac_poll_required;
124}; 135};
125 136
126static inline struct falcon_board *falcon_board(struct efx_nic *efx) 137static inline struct falcon_board *falcon_board(struct efx_nic *efx)
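
The fields added to falcon_nic_data here are the ones dropped from struct efx_nic earlier in the patch: SPI devices, the MDIO lock and the XMAC poll flag are Falcon-only, so they now live behind the opaque nic_data pointer and simply do not exist on Siena. A rough sketch of how Falcon-specific code reaches that state (hypothetical function, assuming the falcon_nic_data layout above):

static void falcon_mdio_access_sketch(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	mutex_lock(&nic_data->mdio_lock);
	/* ... perform the MDIO register accesses for this board ... */
	mutex_unlock(&nic_data->mdio_lock);
}
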
@@ -135,7 +146,6 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
135 * @fw_build: Firmware build number 146 * @fw_build: Firmware build number
136 * @mcdi: Management-Controller-to-Driver Interface 147 * @mcdi: Management-Controller-to-Driver Interface
137 * @wol_filter_id: Wake-on-LAN packet filter id 148 * @wol_filter_id: Wake-on-LAN packet filter id
138 * @ipv6_rss_key: Toeplitz hash key for IPv6 RSS
139 */ 149 */
140struct siena_nic_data { 150struct siena_nic_data {
141 u64 fw_version; 151 u64 fw_version;
diff --git a/drivers/net/sfc/qt202x_phy.c b/drivers/net/sfc/qt202x_phy.c
index 68813d1d85f3..ea3ae0089315 100644
--- a/drivers/net/sfc/qt202x_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -41,6 +41,8 @@
41#define PCS_UC_STATUS_LBN 0 41#define PCS_UC_STATUS_LBN 0
42#define PCS_UC_STATUS_WIDTH 8 42#define PCS_UC_STATUS_WIDTH 8
43#define PCS_UC_STATUS_FW_SAVE 0x20 43#define PCS_UC_STATUS_FW_SAVE 0x20
44#define PMA_PMD_MODE_REG 0xc301
45#define PMA_PMD_RXIN_SEL_LBN 6
44#define PMA_PMD_FTX_CTRL2_REG 0xc309 46#define PMA_PMD_FTX_CTRL2_REG 0xc309
45#define PMA_PMD_FTX_STATIC_LBN 13 47#define PMA_PMD_FTX_STATIC_LBN 13
46#define PMA_PMD_VEND1_REG 0xc001 48#define PMA_PMD_VEND1_REG 0xc001
@@ -282,6 +284,10 @@ static int qt2025c_select_phy_mode(struct efx_nic *efx)
282 * slow) reload of the firmware image (the microcontroller's code 284 * slow) reload of the firmware image (the microcontroller's code
283 * memory is not affected by the microcontroller reset). */ 285 * memory is not affected by the microcontroller reset). */
284 efx_mdio_write(efx, 1, 0xc317, 0x00ff); 286 efx_mdio_write(efx, 1, 0xc317, 0x00ff);
287 /* PMA/PMD loopback sets RXIN to inverse polarity and the firmware
288 * restart doesn't reset it. We need to do that ourselves. */
289 efx_mdio_set_flag(efx, 1, PMA_PMD_MODE_REG,
290 1 << PMA_PMD_RXIN_SEL_LBN, false);
285 efx_mdio_write(efx, 1, 0xc300, 0x0002); 291 efx_mdio_write(efx, 1, 0xc300, 0x0002);
286 msleep(20); 292 msleep(20);
287 293
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 6d0959b5158e..3925fd621177 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -37,7 +37,7 @@
37 * This driver supports two methods for allocating and using RX buffers: 37 * This driver supports two methods for allocating and using RX buffers:
38 * each RX buffer may be backed by an skb or by an order-n page. 38 * each RX buffer may be backed by an skb or by an order-n page.
39 * 39 *
40 * When LRO is in use then the second method has a lower overhead, 40 * When GRO is in use then the second method has a lower overhead,
41 * since we don't have to allocate then free skbs on reassembled frames. 41 * since we don't have to allocate then free skbs on reassembled frames.
42 * 42 *
43 * Values: 43 * Values:
@@ -50,25 +50,25 @@
50 * 50 *
51 * - Since pushing and popping descriptors are separated by the rx_queue 51 * - Since pushing and popping descriptors are separated by the rx_queue
52 * size, so the watermarks should be ~rxd_size. 52 * size, so the watermarks should be ~rxd_size.
53 * - The performance win by using page-based allocation for LRO is less 53 * - The performance win by using page-based allocation for GRO is less
54 * than the performance hit of using page-based allocation of non-LRO, 54 * than the performance hit of using page-based allocation of non-GRO,
55 * so the watermarks should reflect this. 55 * so the watermarks should reflect this.
56 * 56 *
57 * Per channel we maintain a single variable, updated by each channel: 57 * Per channel we maintain a single variable, updated by each channel:
58 * 58 *
59 * rx_alloc_level += (lro_performed ? RX_ALLOC_FACTOR_LRO : 59 * rx_alloc_level += (gro_performed ? RX_ALLOC_FACTOR_GRO :
60 * RX_ALLOC_FACTOR_SKB) 60 * RX_ALLOC_FACTOR_SKB)
61 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which 61 * Per NAPI poll interval, we constrain rx_alloc_level to 0..MAX (which
62 * limits the hysteresis), and update the allocation strategy: 62 * limits the hysteresis), and update the allocation strategy:
63 * 63 *
64 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? 64 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_GRO ?
65 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) 65 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
66 */ 66 */
67static int rx_alloc_method = RX_ALLOC_METHOD_AUTO; 67static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
68 68
69#define RX_ALLOC_LEVEL_LRO 0x2000 69#define RX_ALLOC_LEVEL_GRO 0x2000
70#define RX_ALLOC_LEVEL_MAX 0x3000 70#define RX_ALLOC_LEVEL_MAX 0x3000
71#define RX_ALLOC_FACTOR_LRO 1 71#define RX_ALLOC_FACTOR_GRO 1
72#define RX_ALLOC_FACTOR_SKB (-2) 72#define RX_ALLOC_FACTOR_SKB (-2)
73 73
74/* This is the percentage fill level below which new RX descriptors 74/* This is the percentage fill level below which new RX descriptors
@@ -441,19 +441,19 @@ static void efx_rx_packet__check_len(struct efx_rx_queue *rx_queue,
441 efx_rx_queue_channel(rx_queue)->n_rx_overlength++; 441 efx_rx_queue_channel(rx_queue)->n_rx_overlength++;
442} 442}
443 443
444/* Pass a received packet up through the generic LRO stack 444/* Pass a received packet up through the generic GRO stack
445 * 445 *
446 * Handles driverlink veto, and passes the fragment up via 446 * Handles driverlink veto, and passes the fragment up via
447 * the appropriate LRO method 447 * the appropriate GRO method
448 */ 448 */
449static void efx_rx_packet_lro(struct efx_channel *channel, 449static void efx_rx_packet_gro(struct efx_channel *channel,
450 struct efx_rx_buffer *rx_buf, 450 struct efx_rx_buffer *rx_buf,
451 bool checksummed) 451 bool checksummed)
452{ 452{
453 struct napi_struct *napi = &channel->napi_str; 453 struct napi_struct *napi = &channel->napi_str;
454 gro_result_t gro_result; 454 gro_result_t gro_result;
455 455
456 /* Pass the skb/page into the LRO engine */ 456 /* Pass the skb/page into the GRO engine */
457 if (rx_buf->page) { 457 if (rx_buf->page) {
458 struct efx_nic *efx = channel->efx; 458 struct efx_nic *efx = channel->efx;
459 struct page *page = rx_buf->page; 459 struct page *page = rx_buf->page;
@@ -499,7 +499,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
499 if (gro_result == GRO_NORMAL) { 499 if (gro_result == GRO_NORMAL) {
500 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB; 500 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
501 } else if (gro_result != GRO_DROP) { 501 } else if (gro_result != GRO_DROP) {
502 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO; 502 channel->rx_alloc_level += RX_ALLOC_FACTOR_GRO;
503 channel->irq_mod_score += 2; 503 channel->irq_mod_score += 2;
504 } 504 }
505} 505}
@@ -605,7 +605,7 @@ void __efx_rx_packet(struct efx_channel *channel,
605 } 605 }
606 606
607 if (likely(checksummed || rx_buf->page)) { 607 if (likely(checksummed || rx_buf->page)) {
608 efx_rx_packet_lro(channel, rx_buf, checksummed); 608 efx_rx_packet_gro(channel, rx_buf, checksummed);
609 return; 609 return;
610 } 610 }
611 611
@@ -628,7 +628,7 @@ void efx_rx_strategy(struct efx_channel *channel)
628{ 628{
629 enum efx_rx_alloc_method method = rx_alloc_method; 629 enum efx_rx_alloc_method method = rx_alloc_method;
630 630
631 /* Only makes sense to use page based allocation if LRO is enabled */ 631 /* Only makes sense to use page based allocation if GRO is enabled */
632 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) { 632 if (!(channel->efx->net_dev->features & NETIF_F_GRO)) {
633 method = RX_ALLOC_METHOD_SKB; 633 method = RX_ALLOC_METHOD_SKB;
634 } else if (method == RX_ALLOC_METHOD_AUTO) { 634 } else if (method == RX_ALLOC_METHOD_AUTO) {
@@ -639,7 +639,7 @@ void efx_rx_strategy(struct efx_channel *channel)
639 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX; 639 channel->rx_alloc_level = RX_ALLOC_LEVEL_MAX;
640 640
641 /* Decide on the allocation method */ 641 /* Decide on the allocation method */
642 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_LRO) ? 642 method = ((channel->rx_alloc_level > RX_ALLOC_LEVEL_GRO) ?
643 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB); 643 RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB);
644 } 644 }
645 645
diff --git a/drivers/net/sfc/siena.c b/drivers/net/sfc/siena.c
index 45236f58a258..bf8456176443 100644
--- a/drivers/net/sfc/siena.c
+++ b/drivers/net/sfc/siena.c
@@ -194,13 +194,7 @@ static int siena_reset_hw(struct efx_nic *efx, enum reset_type method)
194 194
195static int siena_probe_nvconfig(struct efx_nic *efx) 195static int siena_probe_nvconfig(struct efx_nic *efx)
196{ 196{
197 int rc; 197 return efx_mcdi_get_board_cfg(efx, efx->net_dev->perm_addr, NULL);
198
199 rc = efx_mcdi_get_board_cfg(efx, efx->mac_address, NULL);
200 if (rc)
201 return rc;
202
203 return 0;
204} 198}
205 199
206static int siena_probe_nic(struct efx_nic *efx) 200static int siena_probe_nic(struct efx_nic *efx)
@@ -562,7 +556,7 @@ static int siena_set_wol(struct efx_nic *efx, u32 type)
562 if (nic_data->wol_filter_id != -1) 556 if (nic_data->wol_filter_id != -1)
563 efx_mcdi_wol_filter_remove(efx, 557 efx_mcdi_wol_filter_remove(efx,
564 nic_data->wol_filter_id); 558 nic_data->wol_filter_id);
565 rc = efx_mcdi_wol_filter_set_magic(efx, efx->mac_address, 559 rc = efx_mcdi_wol_filter_set_magic(efx, efx->net_dev->dev_addr,
566 &nic_data->wol_filter_id); 560 &nic_data->wol_filter_id);
567 if (rc) 561 if (rc)
568 goto fail; 562 goto fail;
diff --git a/drivers/net/sfc/spi.h b/drivers/net/sfc/spi.h
index 8bf4fce0813a..879b7f6bde3d 100644
--- a/drivers/net/sfc/spi.h
+++ b/drivers/net/sfc/spi.h
@@ -61,6 +61,11 @@ struct efx_spi_device {
61 unsigned int block_size; 61 unsigned int block_size;
62}; 62};
63 63
64static inline bool efx_spi_present(const struct efx_spi_device *spi)
65{
66 return spi->size != 0;
67}
68
64int falcon_spi_cmd(struct efx_nic *efx, 69int falcon_spi_cmd(struct efx_nic *efx,
65 const struct efx_spi_device *spi, unsigned int command, 70 const struct efx_spi_device *spi, unsigned int command,
66 int address, const void* in, void *out, size_t len); 71 int address, const void* in, void *out, size_t len);
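
efx_spi_present() exists because the SPI flash and EEPROM descriptors move from pointers in struct efx_nic to structures embedded in falcon_nic_data (see the nic.h hunks earlier in this patch), so "no device fitted" can no longer be a NULL pointer and is instead encoded as size == 0. A rough sketch of the expected caller pattern, assuming the falcon_nic_data layout from this patch (error handling and the MTD registration are stand-ins):

static int falcon_mtd_probe_sketch(struct efx_nic *efx)
{
	struct falcon_nic_data *nic_data = efx->nic_data;

	if (!efx_spi_present(&nic_data->spi_flash))
		return -ENODEV;		/* no flash fitted on this board */

	/* ... size the partition from nic_data->spi_flash.size and
	 * register it as an MTD device ... */
	return 0;
}
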
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index 1bc6c48c96ee..f102912eba91 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -15,9 +15,7 @@
15#include "mdio_10g.h" 15#include "mdio_10g.h"
16#include "nic.h" 16#include "nic.h"
17#include "phy.h" 17#include "phy.h"
18#include "regs.h"
19#include "workarounds.h" 18#include "workarounds.h"
20#include "selftest.h"
21 19
22/* We expect these MMDs to be in the package. */ 20/* We expect these MMDs to be in the package. */
23#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \ 21#define TENXPRESS_REQUIRED_DEVS (MDIO_DEVS_PMAPMD | \
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 11726989fe2d..2f5e9da657bf 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -30,50 +30,6 @@
30 */ 30 */
31#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u) 31#define EFX_TXQ_THRESHOLD(_efx) ((_efx)->txq_entries / 2u)
32 32
33/* We need to be able to nest calls to netif_tx_stop_queue(), partly
34 * because of the 2 hardware queues associated with each core queue,
35 * but also so that we can inhibit TX for reasons other than a full
36 * hardware queue. */
37void efx_stop_queue(struct efx_channel *channel)
38{
39 struct efx_nic *efx = channel->efx;
40 struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
41
42 if (!tx_queue)
43 return;
44
45 spin_lock_bh(&channel->tx_stop_lock);
46 netif_vdbg(efx, tx_queued, efx->net_dev, "stop TX queue\n");
47
48 atomic_inc(&channel->tx_stop_count);
49 netif_tx_stop_queue(
50 netdev_get_tx_queue(efx->net_dev,
51 tx_queue->queue / EFX_TXQ_TYPES));
52
53 spin_unlock_bh(&channel->tx_stop_lock);
54}
55
56/* Decrement core TX queue stop count and wake it if the count is 0 */
57void efx_wake_queue(struct efx_channel *channel)
58{
59 struct efx_nic *efx = channel->efx;
60 struct efx_tx_queue *tx_queue = efx_channel_get_tx_queue(channel, 0);
61
62 if (!tx_queue)
63 return;
64
65 local_bh_disable();
66 if (atomic_dec_and_lock(&channel->tx_stop_count,
67 &channel->tx_stop_lock)) {
68 netif_vdbg(efx, tx_queued, efx->net_dev, "waking TX queue\n");
69 netif_tx_wake_queue(
70 netdev_get_tx_queue(efx->net_dev,
71 tx_queue->queue / EFX_TXQ_TYPES));
72 spin_unlock(&channel->tx_stop_lock);
73 }
74 local_bh_enable();
75}
76
77static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue, 33static void efx_dequeue_buffer(struct efx_tx_queue *tx_queue,
78 struct efx_tx_buffer *buffer) 34 struct efx_tx_buffer *buffer)
79{ 35{
@@ -234,21 +190,22 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
234 * checked. Update the xmit path's 190 * checked. Update the xmit path's
235 * copy of read_count. 191 * copy of read_count.
236 */ 192 */
237 ++tx_queue->stopped; 193 netif_tx_stop_queue(tx_queue->core_txq);
238 /* This memory barrier protects the 194 /* This memory barrier protects the
239 * change of stopped from the access 195 * change of queue state from the access
240 * of read_count. */ 196 * of read_count. */
241 smp_mb(); 197 smp_mb();
242 tx_queue->old_read_count = 198 tx_queue->old_read_count =
243 *(volatile unsigned *) 199 ACCESS_ONCE(tx_queue->read_count);
244 &tx_queue->read_count;
245 fill_level = (tx_queue->insert_count 200 fill_level = (tx_queue->insert_count
246 - tx_queue->old_read_count); 201 - tx_queue->old_read_count);
247 q_space = efx->txq_entries - 1 - fill_level; 202 q_space = efx->txq_entries - 1 - fill_level;
248 if (unlikely(q_space-- <= 0)) 203 if (unlikely(q_space-- <= 0)) {
249 goto stop; 204 rc = NETDEV_TX_BUSY;
205 goto unwind;
206 }
250 smp_mb(); 207 smp_mb();
251 --tx_queue->stopped; 208 netif_tx_start_queue(tx_queue->core_txq);
252 } 209 }
253 210
254 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 211 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
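
The xmit-path change above drops the driver's private stop counting (tx_queue->stopped plus efx_stop_queue()/efx_wake_queue()) in favour of the networking core's per-queue state on tx_queue->core_txq, and replaces the open-coded volatile cast with ACCESS_ONCE(). The ordering argument is unchanged: mark the queue stopped, then re-read read_count behind a barrier, so a completion on another CPU either observes the stopped queue and wakes it, or has already freed enough space for the recheck to pass. A condensed sketch of that sequence, assuming core_txq is the struct netdev_queue backing this hardware queue:

static netdev_tx_t example_tx_maybe_stop(struct efx_tx_queue *tx_queue,
					 unsigned int max_fill)
{
	unsigned int fill_level;

	netif_tx_stop_queue(tx_queue->core_txq);
	smp_mb();	/* order the stop against the re-read of read_count */
	tx_queue->old_read_count = ACCESS_ONCE(tx_queue->read_count);
	fill_level = tx_queue->insert_count - tx_queue->old_read_count;
	if (fill_level >= max_fill)
		return NETDEV_TX_BUSY;	/* still full; caller unwinds */

	/* Space was freed while we were stopping: resume immediately. */
	netif_tx_start_queue(tx_queue->core_txq);
	return NETDEV_TX_OK;
}
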
@@ -308,13 +265,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
308 265
309 /* Mark the packet as transmitted, and free the SKB ourselves */ 266 /* Mark the packet as transmitted, and free the SKB ourselves */
310 dev_kfree_skb_any(skb); 267 dev_kfree_skb_any(skb);
311 goto unwind;
312
313 stop:
314 rc = NETDEV_TX_BUSY;
315
316 if (tx_queue->stopped == 1)
317 efx_stop_queue(tx_queue->channel);
318 268
319 unwind: 269 unwind:
320 /* Work backwards until we hit the original insert pointer value */ 270 /* Work backwards until we hit the original insert pointer value */
@@ -407,22 +357,25 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
407 efx_dequeue_buffers(tx_queue, index); 357 efx_dequeue_buffers(tx_queue, index);
408 358
409 /* See if we need to restart the netif queue. This barrier 359 /* See if we need to restart the netif queue. This barrier
410 * separates the update of read_count from the test of 360 * separates the update of read_count from the test of the
411 * stopped. */ 361 * queue state. */
412 smp_mb(); 362 smp_mb();
413 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { 363 if (unlikely(netif_tx_queue_stopped(tx_queue->core_txq)) &&
364 likely(efx->port_enabled)) {
414 fill_level = tx_queue->insert_count - tx_queue->read_count; 365 fill_level = tx_queue->insert_count - tx_queue->read_count;
415 if (fill_level < EFX_TXQ_THRESHOLD(efx)) { 366 if (fill_level < EFX_TXQ_THRESHOLD(efx)) {
416 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 367 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
368 netif_tx_wake_queue(tx_queue->core_txq);
369 }
370 }
417 371
418 /* Do this under netif_tx_lock(), to avoid racing 372 /* Check whether the hardware queue is now empty */
419 * with efx_xmit(). */ 373 if ((int)(tx_queue->read_count - tx_queue->old_write_count) >= 0) {
420 netif_tx_lock(efx->net_dev); 374 tx_queue->old_write_count = ACCESS_ONCE(tx_queue->write_count);
421 if (tx_queue->stopped) { 375 if (tx_queue->read_count == tx_queue->old_write_count) {
422 tx_queue->stopped = 0; 376 smp_mb();
423 efx_wake_queue(tx_queue->channel); 377 tx_queue->empty_read_count =
424 } 378 tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
425 netif_tx_unlock(efx->net_dev);
426 } 379 }
427 } 380 }
428} 381}
@@ -470,9 +423,10 @@ void efx_init_tx_queue(struct efx_tx_queue *tx_queue)
470 423
471 tx_queue->insert_count = 0; 424 tx_queue->insert_count = 0;
472 tx_queue->write_count = 0; 425 tx_queue->write_count = 0;
426 tx_queue->old_write_count = 0;
473 tx_queue->read_count = 0; 427 tx_queue->read_count = 0;
474 tx_queue->old_read_count = 0; 428 tx_queue->old_read_count = 0;
475 BUG_ON(tx_queue->stopped); 429 tx_queue->empty_read_count = 0 | EFX_EMPTY_COUNT_VALID;
476 430
477 /* Set up TX descriptor ring */ 431 /* Set up TX descriptor ring */
478 efx_nic_init_tx(tx_queue); 432 efx_nic_init_tx(tx_queue);
@@ -508,12 +462,6 @@ void efx_fini_tx_queue(struct efx_tx_queue *tx_queue)
508 462
509 /* Free up TSO header cache */ 463 /* Free up TSO header cache */
510 efx_fini_tso(tx_queue); 464 efx_fini_tso(tx_queue);
511
512 /* Release queue's stop on port, if any */
513 if (tx_queue->stopped) {
514 tx_queue->stopped = 0;
515 efx_wake_queue(tx_queue->channel);
516 }
517} 465}
518 466
519void efx_remove_tx_queue(struct efx_tx_queue *tx_queue) 467void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
@@ -755,12 +703,12 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
755 * since the xmit path last checked. Update 703 * since the xmit path last checked. Update
756 * the xmit path's copy of read_count. 704 * the xmit path's copy of read_count.
757 */ 705 */
758 ++tx_queue->stopped; 706 netif_tx_stop_queue(tx_queue->core_txq);
759 /* This memory barrier protects the change of 707 /* This memory barrier protects the change of
760 * stopped from the access of read_count. */ 708 * queue state from the access of read_count. */
761 smp_mb(); 709 smp_mb();
762 tx_queue->old_read_count = 710 tx_queue->old_read_count =
763 *(volatile unsigned *)&tx_queue->read_count; 711 ACCESS_ONCE(tx_queue->read_count);
764 fill_level = (tx_queue->insert_count 712 fill_level = (tx_queue->insert_count
765 - tx_queue->old_read_count); 713 - tx_queue->old_read_count);
766 q_space = efx->txq_entries - 1 - fill_level; 714 q_space = efx->txq_entries - 1 - fill_level;
@@ -769,7 +717,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
769 return 1; 717 return 1;
770 } 718 }
771 smp_mb(); 719 smp_mb();
772 --tx_queue->stopped; 720 netif_tx_start_queue(tx_queue->core_txq);
773 } 721 }
774 722
775 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask; 723 insert_ptr = tx_queue->insert_count & tx_queue->ptr_mask;
@@ -1109,8 +1057,10 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1109 1057
1110 while (1) { 1058 while (1) {
1111 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state); 1059 rc = tso_fill_packet_with_fragment(tx_queue, skb, &state);
1112 if (unlikely(rc)) 1060 if (unlikely(rc)) {
1113 goto stop; 1061 rc2 = NETDEV_TX_BUSY;
1062 goto unwind;
1063 }
1114 1064
1115 /* Move onto the next fragment? */ 1065 /* Move onto the next fragment? */
1116 if (state.in_len == 0) { 1066 if (state.in_len == 0) {
@@ -1139,14 +1089,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1139 netif_err(efx, tx_err, efx->net_dev, 1089 netif_err(efx, tx_err, efx->net_dev,
1140 "Out of memory for TSO headers, or PCI mapping error\n"); 1090 "Out of memory for TSO headers, or PCI mapping error\n");
1141 dev_kfree_skb_any(skb); 1091 dev_kfree_skb_any(skb);
1142 goto unwind;
1143
1144 stop:
1145 rc2 = NETDEV_TX_BUSY;
1146
1147 /* Stop the queue if it wasn't stopped before. */
1148 if (tx_queue->stopped == 1)
1149 efx_stop_queue(tx_queue->channel);
1150 1092
1151 unwind: 1093 unwind:
1152 /* Free the DMA mapping we were in the process of writing out */ 1094 /* Free the DMA mapping we were in the process of writing out */
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 50259dfec583..819c1750e2ab 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -45,9 +45,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
45 u32 ioaddr = ndev->base_addr; 45 u32 ioaddr = ndev->base_addr;
46 46
47 if (mdp->duplex) /* Full */ 47 if (mdp->duplex) /* Full */
48 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 48 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
49 else /* Half */ 49 else /* Half */
50 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 50 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
51} 51}
52 52
53static void sh_eth_set_rate(struct net_device *ndev) 53static void sh_eth_set_rate(struct net_device *ndev)
@@ -57,10 +57,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
57 57
58 switch (mdp->speed) { 58 switch (mdp->speed) {
59 case 10: /* 10BASE */ 59 case 10: /* 10BASE */
60 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR); 60 writel(readl(ioaddr + ECMR) & ~ECMR_RTM, ioaddr + ECMR);
61 break; 61 break;
62 case 100:/* 100BASE */ 62 case 100:/* 100BASE */
63 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR); 63 writel(readl(ioaddr + ECMR) | ECMR_RTM, ioaddr + ECMR);
64 break; 64 break;
65 default: 65 default:
66 break; 66 break;
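
The sh_eth changes from here on are a mechanical switch from the SuperH-specific ctrl_inl()/ctrl_outl() accessors to the generic readl()/writel(), leaving the register logic untouched. A minimal sketch of the resulting read-modify-write pattern; note that the sketch takes a void __iomem * base for correctness, whereas the driver as patched still carries the base address as a plain u32 in ndev->base_addr:

static void example_set_full_duplex(void __iomem *ioaddr, bool full)
{
	u32 ecmr = readl(ioaddr + ECMR);	/* current E-MAC mode */

	if (full)
		ecmr |= ECMR_DM;		/* full duplex */
	else
		ecmr &= ~ECMR_DM;		/* half duplex */
	writel(ecmr, ioaddr + ECMR);
}
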
@@ -96,9 +96,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
96 u32 ioaddr = ndev->base_addr; 96 u32 ioaddr = ndev->base_addr;
97 97
98 if (mdp->duplex) /* Full */ 98 if (mdp->duplex) /* Full */
99 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 99 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
100 else /* Half */ 100 else /* Half */
101 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 101 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
102} 102}
103 103
104static void sh_eth_set_rate(struct net_device *ndev) 104static void sh_eth_set_rate(struct net_device *ndev)
@@ -108,10 +108,10 @@ static void sh_eth_set_rate(struct net_device *ndev)
108 108
109 switch (mdp->speed) { 109 switch (mdp->speed) {
110 case 10: /* 10BASE */ 110 case 10: /* 10BASE */
111 ctrl_outl(0, ioaddr + RTRATE); 111 writel(0, ioaddr + RTRATE);
112 break; 112 break;
113 case 100:/* 100BASE */ 113 case 100:/* 100BASE */
114 ctrl_outl(1, ioaddr + RTRATE); 114 writel(1, ioaddr + RTRATE);
115 break; 115 break;
116 default: 116 default:
117 break; 117 break;
@@ -143,7 +143,7 @@ static struct sh_eth_cpu_data sh_eth_my_cpu_data = {
143static void sh_eth_chip_reset(struct net_device *ndev) 143static void sh_eth_chip_reset(struct net_device *ndev)
144{ 144{
145 /* reset device */ 145 /* reset device */
146 ctrl_outl(ARSTR_ARSTR, ARSTR); 146 writel(ARSTR_ARSTR, ARSTR);
147 mdelay(1); 147 mdelay(1);
148} 148}
149 149
@@ -152,10 +152,10 @@ static void sh_eth_reset(struct net_device *ndev)
152 u32 ioaddr = ndev->base_addr; 152 u32 ioaddr = ndev->base_addr;
153 int cnt = 100; 153 int cnt = 100;
154 154
155 ctrl_outl(EDSR_ENALL, ioaddr + EDSR); 155 writel(EDSR_ENALL, ioaddr + EDSR);
156 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 156 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
157 while (cnt > 0) { 157 while (cnt > 0) {
158 if (!(ctrl_inl(ioaddr + EDMR) & 0x3)) 158 if (!(readl(ioaddr + EDMR) & 0x3))
159 break; 159 break;
160 mdelay(1); 160 mdelay(1);
161 cnt--; 161 cnt--;
@@ -164,14 +164,14 @@ static void sh_eth_reset(struct net_device *ndev)
164 printk(KERN_ERR "Device reset fail\n"); 164 printk(KERN_ERR "Device reset fail\n");
165 165
166 /* Table Init */ 166 /* Table Init */
167 ctrl_outl(0x0, ioaddr + TDLAR); 167 writel(0x0, ioaddr + TDLAR);
168 ctrl_outl(0x0, ioaddr + TDFAR); 168 writel(0x0, ioaddr + TDFAR);
169 ctrl_outl(0x0, ioaddr + TDFXR); 169 writel(0x0, ioaddr + TDFXR);
170 ctrl_outl(0x0, ioaddr + TDFFR); 170 writel(0x0, ioaddr + TDFFR);
171 ctrl_outl(0x0, ioaddr + RDLAR); 171 writel(0x0, ioaddr + RDLAR);
172 ctrl_outl(0x0, ioaddr + RDFAR); 172 writel(0x0, ioaddr + RDFAR);
173 ctrl_outl(0x0, ioaddr + RDFXR); 173 writel(0x0, ioaddr + RDFXR);
174 ctrl_outl(0x0, ioaddr + RDFFR); 174 writel(0x0, ioaddr + RDFFR);
175} 175}
176 176
177static void sh_eth_set_duplex(struct net_device *ndev) 177static void sh_eth_set_duplex(struct net_device *ndev)
@@ -180,9 +180,9 @@ static void sh_eth_set_duplex(struct net_device *ndev)
180 u32 ioaddr = ndev->base_addr; 180 u32 ioaddr = ndev->base_addr;
181 181
182 if (mdp->duplex) /* Full */ 182 if (mdp->duplex) /* Full */
183 ctrl_outl(ctrl_inl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR); 183 writel(readl(ioaddr + ECMR) | ECMR_DM, ioaddr + ECMR);
184 else /* Half */ 184 else /* Half */
185 ctrl_outl(ctrl_inl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR); 185 writel(readl(ioaddr + ECMR) & ~ECMR_DM, ioaddr + ECMR);
186} 186}
187 187
188static void sh_eth_set_rate(struct net_device *ndev) 188static void sh_eth_set_rate(struct net_device *ndev)
@@ -192,13 +192,13 @@ static void sh_eth_set_rate(struct net_device *ndev)
192 192
193 switch (mdp->speed) { 193 switch (mdp->speed) {
194 case 10: /* 10BASE */ 194 case 10: /* 10BASE */
195 ctrl_outl(GECMR_10, ioaddr + GECMR); 195 writel(GECMR_10, ioaddr + GECMR);
196 break; 196 break;
197 case 100:/* 100BASE */ 197 case 100:/* 100BASE */
198 ctrl_outl(GECMR_100, ioaddr + GECMR); 198 writel(GECMR_100, ioaddr + GECMR);
199 break; 199 break;
200 case 1000: /* 1000BASE */ 200 case 1000: /* 1000BASE */
201 ctrl_outl(GECMR_1000, ioaddr + GECMR); 201 writel(GECMR_1000, ioaddr + GECMR);
202 break; 202 break;
203 default: 203 default:
204 break; 204 break;
@@ -283,9 +283,9 @@ static void sh_eth_reset(struct net_device *ndev)
283{ 283{
284 u32 ioaddr = ndev->base_addr; 284 u32 ioaddr = ndev->base_addr;
285 285
286 ctrl_outl(ctrl_inl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR); 286 writel(readl(ioaddr + EDMR) | EDMR_SRST, ioaddr + EDMR);
287 mdelay(3); 287 mdelay(3);
288 ctrl_outl(ctrl_inl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR); 288 writel(readl(ioaddr + EDMR) & ~EDMR_SRST, ioaddr + EDMR);
289} 289}
290#endif 290#endif
291 291
@@ -336,10 +336,10 @@ static void update_mac_address(struct net_device *ndev)
336{ 336{
337 u32 ioaddr = ndev->base_addr; 337 u32 ioaddr = ndev->base_addr;
338 338
339 ctrl_outl((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | 339 writel((ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) |
340 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), 340 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]),
341 ioaddr + MAHR); 341 ioaddr + MAHR);
342 ctrl_outl((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), 342 writel((ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]),
343 ioaddr + MALR); 343 ioaddr + MALR);
344} 344}
345 345
@@ -358,12 +358,12 @@ static void read_mac_address(struct net_device *ndev, unsigned char *mac)
358 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) { 358 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
359 memcpy(ndev->dev_addr, mac, 6); 359 memcpy(ndev->dev_addr, mac, 6);
360 } else { 360 } else {
361 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24); 361 ndev->dev_addr[0] = (readl(ioaddr + MAHR) >> 24);
362 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF; 362 ndev->dev_addr[1] = (readl(ioaddr + MAHR) >> 16) & 0xFF;
363 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF; 363 ndev->dev_addr[2] = (readl(ioaddr + MAHR) >> 8) & 0xFF;
364 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF); 364 ndev->dev_addr[3] = (readl(ioaddr + MAHR) & 0xFF);
365 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF; 365 ndev->dev_addr[4] = (readl(ioaddr + MALR) >> 8) & 0xFF;
366 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF); 366 ndev->dev_addr[5] = (readl(ioaddr + MALR) & 0xFF);
367 } 367 }
368} 368}
369 369
@@ -379,19 +379,19 @@ struct bb_info {
379/* PHY bit set */ 379/* PHY bit set */
380static void bb_set(u32 addr, u32 msk) 380static void bb_set(u32 addr, u32 msk)
381{ 381{
382 ctrl_outl(ctrl_inl(addr) | msk, addr); 382 writel(readl(addr) | msk, addr);
383} 383}
384 384
385/* PHY bit clear */ 385/* PHY bit clear */
386static void bb_clr(u32 addr, u32 msk) 386static void bb_clr(u32 addr, u32 msk)
387{ 387{
388 ctrl_outl((ctrl_inl(addr) & ~msk), addr); 388 writel((readl(addr) & ~msk), addr);
389} 389}
390 390
391/* PHY bit read */ 391/* PHY bit read */
392static int bb_read(u32 addr, u32 msk) 392static int bb_read(u32 addr, u32 msk)
393{ 393{
394 return (ctrl_inl(addr) & msk) != 0; 394 return (readl(addr) & msk) != 0;
395} 395}
396 396
397/* Data I/O pin control */ 397/* Data I/O pin control */
@@ -506,9 +506,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
506 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 506 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
507 /* Rx descriptor address set */ 507 /* Rx descriptor address set */
508 if (i == 0) { 508 if (i == 0) {
509 ctrl_outl(mdp->rx_desc_dma, ioaddr + RDLAR); 509 writel(mdp->rx_desc_dma, ioaddr + RDLAR);
510#if defined(CONFIG_CPU_SUBTYPE_SH7763) 510#if defined(CONFIG_CPU_SUBTYPE_SH7763)
511 ctrl_outl(mdp->rx_desc_dma, ioaddr + RDFAR); 511 writel(mdp->rx_desc_dma, ioaddr + RDFAR);
512#endif 512#endif
513 } 513 }
514 } 514 }
@@ -528,9 +528,9 @@ static void sh_eth_ring_format(struct net_device *ndev)
528 txdesc->buffer_length = 0; 528 txdesc->buffer_length = 0;
529 if (i == 0) { 529 if (i == 0) {
530 /* Tx descriptor address set */ 530 /* Tx descriptor address set */
531 ctrl_outl(mdp->tx_desc_dma, ioaddr + TDLAR); 531 writel(mdp->tx_desc_dma, ioaddr + TDLAR);
532#if defined(CONFIG_CPU_SUBTYPE_SH7763) 532#if defined(CONFIG_CPU_SUBTYPE_SH7763)
533 ctrl_outl(mdp->tx_desc_dma, ioaddr + TDFAR); 533 writel(mdp->tx_desc_dma, ioaddr + TDFAR);
534#endif 534#endif
535 } 535 }
536 } 536 }
@@ -623,71 +623,71 @@ static int sh_eth_dev_init(struct net_device *ndev)
623 /* Descriptor format */ 623 /* Descriptor format */
624 sh_eth_ring_format(ndev); 624 sh_eth_ring_format(ndev);
625 if (mdp->cd->rpadir) 625 if (mdp->cd->rpadir)
626 ctrl_outl(mdp->cd->rpadir_value, ioaddr + RPADIR); 626 writel(mdp->cd->rpadir_value, ioaddr + RPADIR);
627 627
628 /* all sh_eth int mask */ 628 /* all sh_eth int mask */
629 ctrl_outl(0, ioaddr + EESIPR); 629 writel(0, ioaddr + EESIPR);
630 630
631#if defined(__LITTLE_ENDIAN__) 631#if defined(__LITTLE_ENDIAN__)
632 if (mdp->cd->hw_swap) 632 if (mdp->cd->hw_swap)
633 ctrl_outl(EDMR_EL, ioaddr + EDMR); 633 writel(EDMR_EL, ioaddr + EDMR);
634 else 634 else
635#endif 635#endif
636 ctrl_outl(0, ioaddr + EDMR); 636 writel(0, ioaddr + EDMR);
637 637
638 /* FIFO size set */ 638 /* FIFO size set */
639 ctrl_outl(mdp->cd->fdr_value, ioaddr + FDR); 639 writel(mdp->cd->fdr_value, ioaddr + FDR);
640 ctrl_outl(0, ioaddr + TFTR); 640 writel(0, ioaddr + TFTR);
641 641
642 /* Frame recv control */ 642 /* Frame recv control */
643 ctrl_outl(mdp->cd->rmcr_value, ioaddr + RMCR); 643 writel(mdp->cd->rmcr_value, ioaddr + RMCR);
644 644
645 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5; 645 rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
646 tx_int_var = mdp->tx_int_var = DESC_I_TINT2; 646 tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
647 ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER); 647 writel(rx_int_var | tx_int_var, ioaddr + TRSCER);
648 648
649 if (mdp->cd->bculr) 649 if (mdp->cd->bculr)
650 ctrl_outl(0x800, ioaddr + BCULR); /* Burst sycle set */ 650 writel(0x800, ioaddr + BCULR); /* Burst sycle set */
651 651
652 ctrl_outl(mdp->cd->fcftr_value, ioaddr + FCFTR); 652 writel(mdp->cd->fcftr_value, ioaddr + FCFTR);
653 653
654 if (!mdp->cd->no_trimd) 654 if (!mdp->cd->no_trimd)
655 ctrl_outl(0, ioaddr + TRIMD); 655 writel(0, ioaddr + TRIMD);
656 656
657 /* Recv frame limit set register */ 657 /* Recv frame limit set register */
658 ctrl_outl(RFLR_VALUE, ioaddr + RFLR); 658 writel(RFLR_VALUE, ioaddr + RFLR);
659 659
660 ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR); 660 writel(readl(ioaddr + EESR), ioaddr + EESR);
661 ctrl_outl(mdp->cd->eesipr_value, ioaddr + EESIPR); 661 writel(mdp->cd->eesipr_value, ioaddr + EESIPR);
662 662
663 /* PAUSE Prohibition */ 663 /* PAUSE Prohibition */
664 val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) | 664 val = (readl(ioaddr + ECMR) & ECMR_DM) |
665 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE; 665 ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;
666 666
667 ctrl_outl(val, ioaddr + ECMR); 667 writel(val, ioaddr + ECMR);
668 668
669 if (mdp->cd->set_rate) 669 if (mdp->cd->set_rate)
670 mdp->cd->set_rate(ndev); 670 mdp->cd->set_rate(ndev);
671 671
672 /* E-MAC Status Register clear */ 672 /* E-MAC Status Register clear */
673 ctrl_outl(mdp->cd->ecsr_value, ioaddr + ECSR); 673 writel(mdp->cd->ecsr_value, ioaddr + ECSR);
674 674
675 /* E-MAC Interrupt Enable register */ 675 /* E-MAC Interrupt Enable register */
676 ctrl_outl(mdp->cd->ecsipr_value, ioaddr + ECSIPR); 676 writel(mdp->cd->ecsipr_value, ioaddr + ECSIPR);
677 677
678 /* Set MAC address */ 678 /* Set MAC address */
679 update_mac_address(ndev); 679 update_mac_address(ndev);
680 680
681 /* mask reset */ 681 /* mask reset */
682 if (mdp->cd->apr) 682 if (mdp->cd->apr)
683 ctrl_outl(APR_AP, ioaddr + APR); 683 writel(APR_AP, ioaddr + APR);
684 if (mdp->cd->mpr) 684 if (mdp->cd->mpr)
685 ctrl_outl(MPR_MP, ioaddr + MPR); 685 writel(MPR_MP, ioaddr + MPR);
686 if (mdp->cd->tpauser) 686 if (mdp->cd->tpauser)
687 ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER); 687 writel(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
688 688
689 /* Setting the Rx mode will start the Rx process. */ 689 /* Setting the Rx mode will start the Rx process. */
690 ctrl_outl(EDRRR_R, ioaddr + EDRRR); 690 writel(EDRRR_R, ioaddr + EDRRR);
691 691
692 netif_start_queue(ndev); 692 netif_start_queue(ndev);
693 693
@@ -811,8 +811,8 @@ static int sh_eth_rx(struct net_device *ndev)
811 811
812 /* Restart Rx engine if stopped. */ 812 /* Restart Rx engine if stopped. */
813 /* If we don't need to check status, don't. -KDU */ 813 /* If we don't need to check status, don't. -KDU */
814 if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R)) 814 if (!(readl(ndev->base_addr + EDRRR) & EDRRR_R))
815 ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR); 815 writel(EDRRR_R, ndev->base_addr + EDRRR);
816 816
817 return 0; 817 return 0;
818} 818}
@@ -827,8 +827,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
827 u32 mask; 827 u32 mask;
828 828
829 if (intr_status & EESR_ECI) { 829 if (intr_status & EESR_ECI) {
830 felic_stat = ctrl_inl(ioaddr + ECSR); 830 felic_stat = readl(ioaddr + ECSR);
831 ctrl_outl(felic_stat, ioaddr + ECSR); /* clear int */ 831 writel(felic_stat, ioaddr + ECSR); /* clear int */
832 if (felic_stat & ECSR_ICD) 832 if (felic_stat & ECSR_ICD)
833 mdp->stats.tx_carrier_errors++; 833 mdp->stats.tx_carrier_errors++;
834 if (felic_stat & ECSR_LCHNG) { 834 if (felic_stat & ECSR_LCHNG) {
@@ -839,25 +839,25 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
839 else 839 else
840 link_stat = PHY_ST_LINK; 840 link_stat = PHY_ST_LINK;
841 } else { 841 } else {
842 link_stat = (ctrl_inl(ioaddr + PSR)); 842 link_stat = (readl(ioaddr + PSR));
843 if (mdp->ether_link_active_low) 843 if (mdp->ether_link_active_low)
844 link_stat = ~link_stat; 844 link_stat = ~link_stat;
845 } 845 }
846 if (!(link_stat & PHY_ST_LINK)) { 846 if (!(link_stat & PHY_ST_LINK)) {
847 /* Link Down : disable tx and rx */ 847 /* Link Down : disable tx and rx */
848 ctrl_outl(ctrl_inl(ioaddr + ECMR) & 848 writel(readl(ioaddr + ECMR) &
849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR); 849 ~(ECMR_RE | ECMR_TE), ioaddr + ECMR);
850 } else { 850 } else {
851 /* Link Up */ 851 /* Link Up */
852 ctrl_outl(ctrl_inl(ioaddr + EESIPR) & 852 writel(readl(ioaddr + EESIPR) &
853 ~DMAC_M_ECI, ioaddr + EESIPR); 853 ~DMAC_M_ECI, ioaddr + EESIPR);
854 /*clear int */ 854 /*clear int */
855 ctrl_outl(ctrl_inl(ioaddr + ECSR), 855 writel(readl(ioaddr + ECSR),
856 ioaddr + ECSR); 856 ioaddr + ECSR);
857 ctrl_outl(ctrl_inl(ioaddr + EESIPR) | 857 writel(readl(ioaddr + EESIPR) |
858 DMAC_M_ECI, ioaddr + EESIPR); 858 DMAC_M_ECI, ioaddr + EESIPR);
859 /* enable tx and rx */ 859 /* enable tx and rx */
860 ctrl_outl(ctrl_inl(ioaddr + ECMR) | 860 writel(readl(ioaddr + ECMR) |
861 (ECMR_RE | ECMR_TE), ioaddr + ECMR); 861 (ECMR_RE | ECMR_TE), ioaddr + ECMR);
862 } 862 }
863 } 863 }
@@ -888,8 +888,8 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
888 /* Receive Descriptor Empty int */ 888 /* Receive Descriptor Empty int */
889 mdp->stats.rx_over_errors++; 889 mdp->stats.rx_over_errors++;
890 890
891 if (ctrl_inl(ioaddr + EDRRR) ^ EDRRR_R) 891 if (readl(ioaddr + EDRRR) ^ EDRRR_R)
892 ctrl_outl(EDRRR_R, ioaddr + EDRRR); 892 writel(EDRRR_R, ioaddr + EDRRR);
893 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 893 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
894 } 894 }
895 if (intr_status & EESR_RFE) { 895 if (intr_status & EESR_RFE) {
@@ -903,7 +903,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
903 mask &= ~EESR_ADE; 903 mask &= ~EESR_ADE;
904 if (intr_status & mask) { 904 if (intr_status & mask) {
905 /* Tx error */ 905 /* Tx error */
906 u32 edtrr = ctrl_inl(ndev->base_addr + EDTRR); 906 u32 edtrr = readl(ndev->base_addr + EDTRR);
907 /* dmesg */ 907 /* dmesg */
908 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ", 908 dev_err(&ndev->dev, "TX error. status=%8.8x cur_tx=%8.8x ",
909 intr_status, mdp->cur_tx); 909 intr_status, mdp->cur_tx);
@@ -915,7 +915,7 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
915 /* SH7712 BUG */ 915 /* SH7712 BUG */
916 if (edtrr ^ EDTRR_TRNS) { 916 if (edtrr ^ EDTRR_TRNS) {
917 /* tx dma start */ 917 /* tx dma start */
918 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 918 writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
919 } 919 }
920 /* wakeup */ 920 /* wakeup */
921 netif_wake_queue(ndev); 921 netif_wake_queue(ndev);
@@ -934,12 +934,12 @@ static irqreturn_t sh_eth_interrupt(int irq, void *netdev)
934 spin_lock(&mdp->lock); 934 spin_lock(&mdp->lock);
935 935
936 /* Get interrpt stat */ 936 /* Get interrpt stat */
937 intr_status = ctrl_inl(ioaddr + EESR); 937 intr_status = readl(ioaddr + EESR);
938 /* Clear interrupt */ 938 /* Clear interrupt */
939 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF | 939 if (intr_status & (EESR_FRC | EESR_RMAF | EESR_RRF |
940 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF | 940 EESR_RTLF | EESR_RTSF | EESR_PRE | EESR_CERF |
941 cd->tx_check | cd->eesr_err_check)) { 941 cd->tx_check | cd->eesr_err_check)) {
942 ctrl_outl(intr_status, ioaddr + EESR); 942 writel(intr_status, ioaddr + EESR);
943 ret = IRQ_HANDLED; 943 ret = IRQ_HANDLED;
944 } else 944 } else
945 goto other_irq; 945 goto other_irq;
@@ -1000,7 +1000,7 @@ static void sh_eth_adjust_link(struct net_device *ndev)
1000 mdp->cd->set_rate(ndev); 1000 mdp->cd->set_rate(ndev);
1001 } 1001 }
1002 if (mdp->link == PHY_DOWN) { 1002 if (mdp->link == PHY_DOWN) {
1003 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_TXF) 1003 writel((readl(ioaddr + ECMR) & ~ECMR_TXF)
1004 | ECMR_DM, ioaddr + ECMR); 1004 | ECMR_DM, ioaddr + ECMR);
1005 new_state = 1; 1005 new_state = 1;
1006 mdp->link = phydev->link; 1006 mdp->link = phydev->link;
@@ -1125,7 +1125,7 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
1125 1125
1126 /* worning message out. */ 1126 /* worning message out. */
1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x," 1127 printk(KERN_WARNING "%s: transmit timed out, status %8.8x,"
1128 " resetting...\n", ndev->name, (int)ctrl_inl(ioaddr + EESR)); 1128 " resetting...\n", ndev->name, (int)readl(ioaddr + EESR));
1129 1129
1130 /* tx_errors count up */ 1130 /* tx_errors count up */
1131 mdp->stats.tx_errors++; 1131 mdp->stats.tx_errors++;
@@ -1196,8 +1196,8 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1196 1196
1197 mdp->cur_tx++; 1197 mdp->cur_tx++;
1198 1198
1199 if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS)) 1199 if (!(readl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
1200 ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR); 1200 writel(EDTRR_TRNS, ndev->base_addr + EDTRR);
1201 1201
1202 return NETDEV_TX_OK; 1202 return NETDEV_TX_OK;
1203} 1203}
@@ -1212,11 +1212,11 @@ static int sh_eth_close(struct net_device *ndev)
1212 netif_stop_queue(ndev); 1212 netif_stop_queue(ndev);
1213 1213
1214 /* Disable interrupts by clearing the interrupt mask. */ 1214 /* Disable interrupts by clearing the interrupt mask. */
1215 ctrl_outl(0x0000, ioaddr + EESIPR); 1215 writel(0x0000, ioaddr + EESIPR);
1216 1216
1217 /* Stop the chip's Tx and Rx processes. */ 1217 /* Stop the chip's Tx and Rx processes. */
1218 ctrl_outl(0, ioaddr + EDTRR); 1218 writel(0, ioaddr + EDTRR);
1219 ctrl_outl(0, ioaddr + EDRRR); 1219 writel(0, ioaddr + EDRRR);
1220 1220
1221 /* PHY Disconnect */ 1221 /* PHY Disconnect */
1222 if (mdp->phydev) { 1222 if (mdp->phydev) {
@@ -1251,20 +1251,20 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1251 1251
1252 pm_runtime_get_sync(&mdp->pdev->dev); 1252 pm_runtime_get_sync(&mdp->pdev->dev);
1253 1253
1254 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR); 1254 mdp->stats.tx_dropped += readl(ioaddr + TROCR);
1255 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */ 1255 writel(0, ioaddr + TROCR); /* (write clear) */
1256 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR); 1256 mdp->stats.collisions += readl(ioaddr + CDCR);
1257 ctrl_outl(0, ioaddr + CDCR); /* (write clear) */ 1257 writel(0, ioaddr + CDCR); /* (write clear) */
1258 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + LCCR); 1258 mdp->stats.tx_carrier_errors += readl(ioaddr + LCCR);
1259 ctrl_outl(0, ioaddr + LCCR); /* (write clear) */ 1259 writel(0, ioaddr + LCCR); /* (write clear) */
1260#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1260#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1261 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CERCR);/* CERCR */ 1261 mdp->stats.tx_carrier_errors += readl(ioaddr + CERCR);/* CERCR */
1262 ctrl_outl(0, ioaddr + CERCR); /* (write clear) */ 1262 writel(0, ioaddr + CERCR); /* (write clear) */
1263 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CEECR);/* CEECR */ 1263 mdp->stats.tx_carrier_errors += readl(ioaddr + CEECR);/* CEECR */
1264 ctrl_outl(0, ioaddr + CEECR); /* (write clear) */ 1264 writel(0, ioaddr + CEECR); /* (write clear) */
1265#else 1265#else
1266 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR); 1266 mdp->stats.tx_carrier_errors += readl(ioaddr + CNDCR);
1267 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */ 1267 writel(0, ioaddr + CNDCR); /* (write clear) */
1268#endif 1268#endif
1269 pm_runtime_put_sync(&mdp->pdev->dev); 1269 pm_runtime_put_sync(&mdp->pdev->dev);
1270 1270
@@ -1295,11 +1295,11 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
1295 1295
1296 if (ndev->flags & IFF_PROMISC) { 1296 if (ndev->flags & IFF_PROMISC) {
1297 /* Set promiscuous. */ 1297 /* Set promiscuous. */
1298 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM, 1298 writel((readl(ioaddr + ECMR) & ~ECMR_MCT) | ECMR_PRM,
1299 ioaddr + ECMR); 1299 ioaddr + ECMR);
1300 } else { 1300 } else {
1301 /* Normal, unicast/broadcast-only mode. */ 1301 /* Normal, unicast/broadcast-only mode. */
1302 ctrl_outl((ctrl_inl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT, 1302 writel((readl(ioaddr + ECMR) & ~ECMR_PRM) | ECMR_MCT,
1303 ioaddr + ECMR); 1303 ioaddr + ECMR);
1304 } 1304 }
1305} 1305}
@@ -1307,30 +1307,30 @@ static void sh_eth_set_multicast_list(struct net_device *ndev)
1307/* SuperH's TSU register init function */ 1307/* SuperH's TSU register init function */
1308static void sh_eth_tsu_init(u32 ioaddr) 1308static void sh_eth_tsu_init(u32 ioaddr)
1309{ 1309{
1310 ctrl_outl(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */ 1310 writel(0, ioaddr + TSU_FWEN0); /* Disable forward(0->1) */
1311 ctrl_outl(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */ 1311 writel(0, ioaddr + TSU_FWEN1); /* Disable forward(1->0) */
1312 ctrl_outl(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */ 1312 writel(0, ioaddr + TSU_FCM); /* forward fifo 3k-3k */
1313 ctrl_outl(0xc, ioaddr + TSU_BSYSL0); 1313 writel(0xc, ioaddr + TSU_BSYSL0);
1314 ctrl_outl(0xc, ioaddr + TSU_BSYSL1); 1314 writel(0xc, ioaddr + TSU_BSYSL1);
1315 ctrl_outl(0, ioaddr + TSU_PRISL0); 1315 writel(0, ioaddr + TSU_PRISL0);
1316 ctrl_outl(0, ioaddr + TSU_PRISL1); 1316 writel(0, ioaddr + TSU_PRISL1);
1317 ctrl_outl(0, ioaddr + TSU_FWSL0); 1317 writel(0, ioaddr + TSU_FWSL0);
1318 ctrl_outl(0, ioaddr + TSU_FWSL1); 1318 writel(0, ioaddr + TSU_FWSL1);
1319 ctrl_outl(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC); 1319 writel(TSU_FWSLC_POSTENU | TSU_FWSLC_POSTENL, ioaddr + TSU_FWSLC);
1320#if defined(CONFIG_CPU_SUBTYPE_SH7763) 1320#if defined(CONFIG_CPU_SUBTYPE_SH7763)
1321 ctrl_outl(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */ 1321 writel(0, ioaddr + TSU_QTAG0); /* Disable QTAG(0->1) */
1322 ctrl_outl(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */ 1322 writel(0, ioaddr + TSU_QTAG1); /* Disable QTAG(1->0) */
1323#else 1323#else
1324 ctrl_outl(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */ 1324 writel(0, ioaddr + TSU_QTAGM0); /* Disable QTAG(0->1) */
1325 ctrl_outl(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */ 1325 writel(0, ioaddr + TSU_QTAGM1); /* Disable QTAG(1->0) */
1326#endif 1326#endif
1327 ctrl_outl(0, ioaddr + TSU_FWSR); /* all interrupt status clear */ 1327 writel(0, ioaddr + TSU_FWSR); /* all interrupt status clear */
1328 ctrl_outl(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */ 1328 writel(0, ioaddr + TSU_FWINMK); /* Disable all interrupt */
1329 ctrl_outl(0, ioaddr + TSU_TEN); /* Disable all CAM entry */ 1329 writel(0, ioaddr + TSU_TEN); /* Disable all CAM entry */
1330 ctrl_outl(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */ 1330 writel(0, ioaddr + TSU_POST1); /* Disable CAM entry [ 0- 7] */
1331 ctrl_outl(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */ 1331 writel(0, ioaddr + TSU_POST2); /* Disable CAM entry [ 8-15] */
1332 ctrl_outl(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */ 1332 writel(0, ioaddr + TSU_POST3); /* Disable CAM entry [16-23] */
1333 ctrl_outl(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */ 1333 writel(0, ioaddr + TSU_POST4); /* Disable CAM entry [24-31] */
1334} 1334}
1335#endif /* SH_ETH_HAS_TSU */ 1335#endif /* SH_ETH_HAS_TSU */
1336 1336
@@ -1552,7 +1552,6 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
1552 1552
1553 sh_mdio_release(ndev); 1553 sh_mdio_release(ndev);
1554 unregister_netdev(ndev); 1554 unregister_netdev(ndev);
1555 flush_scheduled_work();
1556 pm_runtime_disable(&pdev->dev); 1555 pm_runtime_disable(&pdev->dev);
1557 free_netdev(ndev); 1556 free_netdev(ndev);
1558 platform_set_drvdata(pdev, NULL); 1557 platform_set_drvdata(pdev, NULL);
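
The sh_eth hunks above replace the SuperH-only ctrl_inl()/ctrl_outl() accessors with the generic readl()/writel() MMIO helpers from <linux/io.h>, and drop the flush_scheduled_work() call from the remove path. A minimal sketch of the resulting access pattern; the register offset and bit value here are placeholders, not the driver's real definitions:

    #include <linux/io.h>

    /* Illustrative only: EDTRR/EDTRR_TRNS stand in for the driver's
     * real register offset and bit definitions. */
    #define EDTRR      0x0000
    #define EDTRR_TRNS 0x00000003

    static void kick_tx(void __iomem *ioaddr)
    {
            /* same read-modify-write as sh_eth_start_xmit(), but through
             * the generic MMIO accessors */
            if (!(readl(ioaddr + EDTRR) & EDTRR_TRNS))
                    writel(EDTRR_TRNS, ioaddr + EDTRR);
    }
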
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index 8b47763958f2..efa64221eede 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -26,7 +26,6 @@
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/kernel.h> 27#include <linux/kernel.h>
28#include <linux/spinlock.h> 28#include <linux/spinlock.h>
29#include <linux/workqueue.h>
30#include <linux/netdevice.h> 29#include <linux/netdevice.h>
31#include <linux/phy.h> 30#include <linux/phy.h>
32 31
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index a5d6a6bd0c1a..3406ed870917 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -1915,9 +1915,10 @@ err_release_board:
1915static void __devexit sis190_remove_one(struct pci_dev *pdev) 1915static void __devexit sis190_remove_one(struct pci_dev *pdev)
1916{ 1916{
1917 struct net_device *dev = pci_get_drvdata(pdev); 1917 struct net_device *dev = pci_get_drvdata(pdev);
1918 struct sis190_private *tp = netdev_priv(dev);
1918 1919
1919 sis190_mii_remove(dev); 1920 sis190_mii_remove(dev);
1920 flush_scheduled_work(); 1921 cancel_work_sync(&tp->phy_task);
1921 unregister_netdev(dev); 1922 unregister_netdev(dev);
1922 sis190_release_board(pdev); 1923 sis190_release_board(pdev);
1923 pci_set_drvdata(pdev, NULL); 1924 pci_set_drvdata(pdev, NULL);
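
As in sh_eth above, sis190 stops flushing the shared system workqueue on removal and instead cancels only its own deferred work. A small sketch of that pattern, assuming a driver-private structure that owns a work_struct named phy_task as sis190 does:

    #include <linux/workqueue.h>
    #include <linux/netdevice.h>

    struct example_priv {                  /* stand-in for sis190_private */
            struct work_struct phy_task;
    };

    static void example_remove(struct net_device *dev)
    {
            struct example_priv *tp = netdev_priv(dev);

            /* Cancel (and wait for) our own work item instead of calling
             * the global flush_scheduled_work(). */
            cancel_work_sync(&tp->phy_task);
            unregister_netdev(dev);
    }
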
diff --git a/drivers/net/skfp/skfddi.c b/drivers/net/skfp/skfddi.c
index 0a66fed52e8e..16c62659cdd9 100644
--- a/drivers/net/skfp/skfddi.c
+++ b/drivers/net/skfp/skfddi.c
@@ -412,7 +412,7 @@ static int skfp_driver_init(struct net_device *dev)
412 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev, 412 bp->SharedMemAddr = pci_alloc_consistent(&bp->pdev,
413 bp->SharedMemSize, 413 bp->SharedMemSize,
414 &bp->SharedMemDMA); 414 &bp->SharedMemDMA);
415 if (!bp->SharedMemSize) { 415 if (!bp->SharedMemAddr) {
416 printk("could not allocate mem for "); 416 printk("could not allocate mem for ");
417 printk("hardware module: %ld byte\n", 417 printk("hardware module: %ld byte\n",
418 bp->SharedMemSize); 418 bp->SharedMemSize);
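
The skfddi fix above is a plain logic bug: the driver tested the requested size instead of the pointer returned by pci_alloc_consistent(), so a failed allocation was never caught. A sketch of the corrected check, with illustrative names:

    #include <linux/pci.h>

    static void *alloc_shared_mem(struct pci_dev *pdev, size_t size,
                                  dma_addr_t *dma)
    {
            void *addr = pci_alloc_consistent(pdev, size, dma);

            /* Test the returned pointer, not the size we asked for. */
            if (!addr)
                    printk(KERN_ERR "could not allocate %zu bytes\n", size);
            return addr;
    }
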
diff --git a/drivers/net/skfp/smt.c b/drivers/net/skfp/smt.c
index 2d9941c045bc..1e1bd0c201c8 100644
--- a/drivers/net/skfp/smt.c
+++ b/drivers/net/skfp/smt.c
@@ -1263,7 +1263,7 @@ void smt_set_timestamp(struct s_smc *smc, u_char *p)
1263static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy) 1263static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
1264{ 1264{
1265 int i ; 1265 int i ;
1266 u_char *map ; 1266 const u_char *map ;
1267 u_short in ; 1267 u_short in ;
1268 u_short out ; 1268 u_short out ;
1269 1269
@@ -1271,7 +1271,7 @@ static void smt_fill_policy(struct s_smc *smc, struct smt_p_policy *policy)
1271 * MIB para 101b (fddiSMTConnectionPolicy) coding 1271 * MIB para 101b (fddiSMTConnectionPolicy) coding
1272 * is different from 0005 coding 1272 * is different from 0005 coding
1273 */ 1273 */
1274 static u_char ansi_weirdness[16] = { 1274 static const u_char ansi_weirdness[16] = {
1275 0,7,5,3,8,1,6,4,9,10,2,11,12,13,14,15 1275 0,7,5,3,8,1,6,4,9,10,2,11,12,13,14,15
1276 } ; 1276 } ;
1277 SMTSETPARA(policy,SMT_P_POLICY) ; 1277 SMTSETPARA(policy,SMT_P_POLICY) ;
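
Several cleanups in this series (skfp/smt.c here, skge.c and smc-ultra.c below) mark function-local lookup tables static const, so they live in read-only data instead of being rebuilt on the stack at every call. A sketch using the table values from smt_fill_policy() above:

    static unsigned char map_policy_bit(unsigned int in)
    {
            static const unsigned char ansi_weirdness[16] = {
                    0, 7, 5, 3, 8, 1, 6, 4, 9, 10, 2, 11, 12, 13, 14, 15
            };

            return ansi_weirdness[in & 0xf];
    }
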
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 220e0398f1d5..42daf98ba736 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -1191,7 +1191,7 @@ static void genesis_init(struct skge_hw *hw)
1191 1191
1192static void genesis_reset(struct skge_hw *hw, int port) 1192static void genesis_reset(struct skge_hw *hw, int port)
1193{ 1193{
1194 const u8 zero[8] = { 0 }; 1194 static const u8 zero[8] = { 0 };
1195 u32 reg; 1195 u32 reg;
1196 1196
1197 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0); 1197 skge_write8(hw, SK_REG(port, GMAC_IRQ_MSK), 0);
@@ -1557,7 +1557,7 @@ static void genesis_mac_init(struct skge_hw *hw, int port)
1557 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN; 1557 int jumbo = hw->dev[port]->mtu > ETH_DATA_LEN;
1558 int i; 1558 int i;
1559 u32 r; 1559 u32 r;
1560 const u8 zero[6] = { 0 }; 1560 static const u8 zero[6] = { 0 };
1561 1561
1562 for (i = 0; i < 10; i++) { 1562 for (i = 0; i < 10; i++) {
1563 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1), 1563 skge_write16(hw, SK_REG(port, TX_MFF_CTRL1),
@@ -2764,7 +2764,7 @@ static netdev_tx_t skge_xmit_frame(struct sk_buff *skb,
2764 td->dma_hi = map >> 32; 2764 td->dma_hi = map >> 32;
2765 2765
2766 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2766 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2767 const int offset = skb_transport_offset(skb); 2767 const int offset = skb_checksum_start_offset(skb);
2768 2768
2769 /* This seems backwards, but it is what the sk98lin 2769 /* This seems backwards, but it is what the sk98lin
2770 * does. Looks like hardware is wrong? 2770 * does. Looks like hardware is wrong?
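
skge switches from skb_transport_offset() to skb_checksum_start_offset(), which is the offset at which the hardware checksum engine must actually start for CHECKSUM_PARTIAL packets. A sketch of how such a value is typically fed into transmit descriptors; the output field names are hypothetical:

    #include <linux/skbuff.h>

    static void fill_csum_hints(const struct sk_buff *skb,
                                u16 *csum_start, u16 *csum_write)
    {
            const int offset = skb_checksum_start_offset(skb);

            *csum_start = offset;                     /* start summing here */
            *csum_write = offset + skb->csum_offset;  /* store result here  */
    }
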
@@ -4012,8 +4012,6 @@ static void __devexit skge_remove(struct pci_dev *pdev)
4012 if (!hw) 4012 if (!hw)
4013 return; 4013 return;
4014 4014
4015 flush_scheduled_work();
4016
4017 dev1 = hw->dev[1]; 4015 dev1 = hw->dev[1];
4018 if (dev1) 4016 if (dev1)
4019 unregister_netdev(dev1); 4017 unregister_netdev(dev1);
@@ -4044,53 +4042,40 @@ static void __devexit skge_remove(struct pci_dev *pdev)
4044} 4042}
4045 4043
4046#ifdef CONFIG_PM 4044#ifdef CONFIG_PM
4047static int skge_suspend(struct pci_dev *pdev, pm_message_t state) 4045static int skge_suspend(struct device *dev)
4048{ 4046{
4047 struct pci_dev *pdev = to_pci_dev(dev);
4049 struct skge_hw *hw = pci_get_drvdata(pdev); 4048 struct skge_hw *hw = pci_get_drvdata(pdev);
4050 int i, err, wol = 0; 4049 int i;
4051 4050
4052 if (!hw) 4051 if (!hw)
4053 return 0; 4052 return 0;
4054 4053
4055 err = pci_save_state(pdev);
4056 if (err)
4057 return err;
4058
4059 for (i = 0; i < hw->ports; i++) { 4054 for (i = 0; i < hw->ports; i++) {
4060 struct net_device *dev = hw->dev[i]; 4055 struct net_device *dev = hw->dev[i];
4061 struct skge_port *skge = netdev_priv(dev); 4056 struct skge_port *skge = netdev_priv(dev);
4062 4057
4063 if (netif_running(dev)) 4058 if (netif_running(dev))
4064 skge_down(dev); 4059 skge_down(dev);
4060
4065 if (skge->wol) 4061 if (skge->wol)
4066 skge_wol_init(skge); 4062 skge_wol_init(skge);
4067
4068 wol |= skge->wol;
4069 } 4063 }
4070 4064
4071 skge_write32(hw, B0_IMSK, 0); 4065 skge_write32(hw, B0_IMSK, 0);
4072 4066
4073 pci_prepare_to_sleep(pdev);
4074
4075 return 0; 4067 return 0;
4076} 4068}
4077 4069
4078static int skge_resume(struct pci_dev *pdev) 4070static int skge_resume(struct device *dev)
4079{ 4071{
4072 struct pci_dev *pdev = to_pci_dev(dev);
4080 struct skge_hw *hw = pci_get_drvdata(pdev); 4073 struct skge_hw *hw = pci_get_drvdata(pdev);
4081 int i, err; 4074 int i, err;
4082 4075
4083 if (!hw) 4076 if (!hw)
4084 return 0; 4077 return 0;
4085 4078
4086 err = pci_back_from_sleep(pdev);
4087 if (err)
4088 goto out;
4089
4090 err = pci_restore_state(pdev);
4091 if (err)
4092 goto out;
4093
4094 err = skge_reset(hw); 4079 err = skge_reset(hw);
4095 if (err) 4080 if (err)
4096 goto out; 4081 goto out;
@@ -4111,12 +4096,19 @@ static int skge_resume(struct pci_dev *pdev)
4111out: 4096out:
4112 return err; 4097 return err;
4113} 4098}
4099
4100static SIMPLE_DEV_PM_OPS(skge_pm_ops, skge_suspend, skge_resume);
4101#define SKGE_PM_OPS (&skge_pm_ops)
4102
4103#else
4104
4105#define SKGE_PM_OPS NULL
4114#endif 4106#endif
4115 4107
4116static void skge_shutdown(struct pci_dev *pdev) 4108static void skge_shutdown(struct pci_dev *pdev)
4117{ 4109{
4118 struct skge_hw *hw = pci_get_drvdata(pdev); 4110 struct skge_hw *hw = pci_get_drvdata(pdev);
4119 int i, wol = 0; 4111 int i;
4120 4112
4121 if (!hw) 4113 if (!hw)
4122 return; 4114 return;
@@ -4127,15 +4119,10 @@ static void skge_shutdown(struct pci_dev *pdev)
4127 4119
4128 if (skge->wol) 4120 if (skge->wol)
4129 skge_wol_init(skge); 4121 skge_wol_init(skge);
4130 wol |= skge->wol;
4131 } 4122 }
4132 4123
4133 if (pci_enable_wake(pdev, PCI_D3cold, wol)) 4124 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
4134 pci_enable_wake(pdev, PCI_D3hot, wol);
4135
4136 pci_disable_device(pdev);
4137 pci_set_power_state(pdev, PCI_D3hot); 4125 pci_set_power_state(pdev, PCI_D3hot);
4138
4139} 4126}
4140 4127
4141static struct pci_driver skge_driver = { 4128static struct pci_driver skge_driver = {
@@ -4143,11 +4130,8 @@ static struct pci_driver skge_driver = {
4143 .id_table = skge_id_table, 4130 .id_table = skge_id_table,
4144 .probe = skge_probe, 4131 .probe = skge_probe,
4145 .remove = __devexit_p(skge_remove), 4132 .remove = __devexit_p(skge_remove),
4146#ifdef CONFIG_PM
4147 .suspend = skge_suspend,
4148 .resume = skge_resume,
4149#endif
4150 .shutdown = skge_shutdown, 4133 .shutdown = skge_shutdown,
4134 .driver.pm = SKGE_PM_OPS,
4151}; 4135};
4152 4136
4153static struct dmi_system_id skge_32bit_dma_boards[] = { 4137static struct dmi_system_id skge_32bit_dma_boards[] = {
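
skge's legacy pci_driver .suspend/.resume callbacks are converted to dev_pm_ops; sky2 below receives the same treatment. Under the new scheme the PCI core saves and restores config space and chooses the power state, so the driver hooks only quiesce and re-initialize the hardware, and the ops pointer is defined to NULL when CONFIG_PM is off, as the patch does with SKGE_PM_OPS. A minimal sketch of the wiring, with placeholder names:

    #include <linux/pci.h>
    #include <linux/pm.h>

    static int example_suspend(struct device *dev)
    {
            /* device-specific quiescing only; no pci_save_state() or
             * pci_set_power_state() here any more */
            dev_dbg(dev, "suspending\n");
            return 0;
    }

    static int example_resume(struct device *dev)
    {
            struct pci_dev *pdev = to_pci_dev(dev);

            /* device-specific re-initialization only */
            dev_dbg(&pdev->dev, "resuming\n");
            return 0;
    }

    static SIMPLE_DEV_PM_OPS(example_pm_ops, example_suspend, example_resume);

    static struct pci_driver example_driver = {
            .name      = "example",
            /* .id_table, .probe, .remove omitted from this sketch */
            .driver.pm = &example_pm_ops,
    };
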
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index d6577084ce70..7d85a38377a1 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -46,10 +46,6 @@
46 46
47#include <asm/irq.h> 47#include <asm/irq.h>
48 48
49#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
50#define SKY2_VLAN_TAG_USED 1
51#endif
52
53#include "sky2.h" 49#include "sky2.h"
54 50
55#define DRV_NAME "sky2" 51#define DRV_NAME "sky2"
@@ -1326,39 +1322,34 @@ static int sky2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1326 return err; 1322 return err;
1327} 1323}
1328 1324
1329#ifdef SKY2_VLAN_TAG_USED 1325#define NETIF_F_ALL_VLAN (NETIF_F_HW_VLAN_TX|NETIF_F_HW_VLAN_RX)
1330static void sky2_set_vlan_mode(struct sky2_hw *hw, u16 port, bool onoff)
1331{
1332 if (onoff) {
1333 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1334 RX_VLAN_STRIP_ON);
1335 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1336 TX_VLAN_TAG_ON);
1337 } else {
1338 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1339 RX_VLAN_STRIP_OFF);
1340 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1341 TX_VLAN_TAG_OFF);
1342 }
1343}
1344 1326
1345static void sky2_vlan_rx_register(struct net_device *dev, struct vlan_group *grp) 1327static void sky2_vlan_mode(struct net_device *dev)
1346{ 1328{
1347 struct sky2_port *sky2 = netdev_priv(dev); 1329 struct sky2_port *sky2 = netdev_priv(dev);
1348 struct sky2_hw *hw = sky2->hw; 1330 struct sky2_hw *hw = sky2->hw;
1349 u16 port = sky2->port; 1331 u16 port = sky2->port;
1350 1332
1351 netif_tx_lock_bh(dev); 1333 if (dev->features & NETIF_F_HW_VLAN_RX)
1352 napi_disable(&hw->napi); 1334 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1335 RX_VLAN_STRIP_ON);
1336 else
1337 sky2_write32(hw, SK_REG(port, RX_GMF_CTRL_T),
1338 RX_VLAN_STRIP_OFF);
1353 1339
1354 sky2->vlgrp = grp; 1340 dev->vlan_features = dev->features &~ NETIF_F_ALL_VLAN;
1355 sky2_set_vlan_mode(hw, port, grp != NULL); 1341 if (dev->features & NETIF_F_HW_VLAN_TX)
1342 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1343 TX_VLAN_TAG_ON);
1344 else {
1345 sky2_write32(hw, SK_REG(port, TX_GMF_CTRL_T),
1346 TX_VLAN_TAG_OFF);
1356 1347
1357 sky2_read32(hw, B0_Y2_SP_LISR); 1348 /* Can't do transmit offload of vlan without hw vlan */
1358 napi_enable(&hw->napi); 1349 dev->vlan_features &= ~(NETIF_F_TSO | NETIF_F_SG
1359 netif_tx_unlock_bh(dev); 1350 | NETIF_F_ALL_CSUM);
1351 }
1360} 1352}
1361#endif
1362 1353
1363/* Amount of required worst case padding in rx buffer */ 1354/* Amount of required worst case padding in rx buffer */
1364static inline unsigned sky2_rx_pad(const struct sky2_hw *hw) 1355static inline unsigned sky2_rx_pad(const struct sky2_hw *hw)
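
The sky2 VLAN rework drops the vlan_group registration model: hardware tag stripping and insertion are now switched purely off dev->features (NETIF_F_HW_VLAN_RX/TX), and on receive a stripped tag is simply attached to the skb. A sketch of the receive side, with the status test replaced by a plain flag since the GMR_FS_* bits are driver-internal:

    #include <linux/if_vlan.h>
    #include <linux/netdevice.h>

    static void example_rx_deliver(struct napi_struct *napi,
                                   struct sk_buff *skb,
                                   bool hw_stripped_tag, u16 tci)
    {
            if (hw_stripped_tag)
                    __vlan_hwaccel_put_tag(skb, tci);  /* two-arg form of this era */

            napi_gro_receive(napi, skb);
    }
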
@@ -1635,9 +1626,7 @@ static void sky2_hw_up(struct sky2_port *sky2)
1635 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map, 1626 sky2_prefetch_init(hw, txqaddr[port], sky2->tx_le_map,
1636 sky2->tx_ring_size - 1); 1627 sky2->tx_ring_size - 1);
1637 1628
1638#ifdef SKY2_VLAN_TAG_USED 1629 sky2_vlan_mode(sky2->netdev);
1639 sky2_set_vlan_mode(hw, port, sky2->vlgrp != NULL);
1640#endif
1641 1630
1642 sky2_rx_start(sky2); 1631 sky2_rx_start(sky2);
1643} 1632}
@@ -1780,7 +1769,7 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1780 } 1769 }
1781 1770
1782 ctrl = 0; 1771 ctrl = 0;
1783#ifdef SKY2_VLAN_TAG_USED 1772
1784 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */ 1773 /* Add VLAN tag, can piggyback on LRGLEN or ADDR64 */
1785 if (vlan_tx_tag_present(skb)) { 1774 if (vlan_tx_tag_present(skb)) {
1786 if (!le) { 1775 if (!le) {
@@ -1792,7 +1781,6 @@ static netdev_tx_t sky2_xmit_frame(struct sk_buff *skb,
1792 le->length = cpu_to_be16(vlan_tx_tag_get(skb)); 1781 le->length = cpu_to_be16(vlan_tx_tag_get(skb));
1793 ctrl |= INS_VLAN; 1782 ctrl |= INS_VLAN;
1794 } 1783 }
1795#endif
1796 1784
1797 /* Handle TCP checksum offload */ 1785 /* Handle TCP checksum offload */
1798 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1786 if (skb->ip_summed == CHECKSUM_PARTIAL) {
@@ -1917,8 +1905,10 @@ static void sky2_tx_complete(struct sky2_port *sky2, u16 done)
1917 netif_printk(sky2, tx_done, KERN_DEBUG, dev, 1905 netif_printk(sky2, tx_done, KERN_DEBUG, dev,
1918 "tx done %u\n", idx); 1906 "tx done %u\n", idx);
1919 1907
1920 dev->stats.tx_packets++; 1908 u64_stats_update_begin(&sky2->tx_stats.syncp);
1921 dev->stats.tx_bytes += skb->len; 1909 ++sky2->tx_stats.packets;
1910 sky2->tx_stats.bytes += skb->len;
1911 u64_stats_update_end(&sky2->tx_stats.syncp);
1922 1912
1923 re->skb = NULL; 1913 re->skb = NULL;
1924 dev_kfree_skb_any(skb); 1914 dev_kfree_skb_any(skb);
@@ -2430,11 +2420,8 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2430 struct sk_buff *skb = NULL; 2420 struct sk_buff *skb = NULL;
2431 u16 count = (status & GMR_FS_LEN) >> 16; 2421 u16 count = (status & GMR_FS_LEN) >> 16;
2432 2422
2433#ifdef SKY2_VLAN_TAG_USED 2423 if (status & GMR_FS_VLAN)
2434 /* Account for vlan tag */ 2424 count -= VLAN_HLEN; /* Account for vlan tag */
2435 if (sky2->vlgrp && (status & GMR_FS_VLAN))
2436 count -= VLAN_HLEN;
2437#endif
2438 2425
2439 netif_printk(sky2, rx_status, KERN_DEBUG, dev, 2426 netif_printk(sky2, rx_status, KERN_DEBUG, dev,
2440 "rx slot %u status 0x%x len %d\n", 2427 "rx slot %u status 0x%x len %d\n",
@@ -2460,7 +2447,7 @@ static struct sk_buff *sky2_receive(struct net_device *dev,
2460 2447
2461 /* if length reported by DMA does not match PHY, packet was truncated */ 2448 /* if length reported by DMA does not match PHY, packet was truncated */
2462 if (length != count) 2449 if (length != count)
2463 goto len_error; 2450 goto error;
2464 2451
2465okay: 2452okay:
2466 if (length < copybreak) 2453 if (length < copybreak)
@@ -2475,34 +2462,13 @@ resubmit:
2475 2462
2476 return skb; 2463 return skb;
2477 2464
2478len_error:
2479 /* Truncation of overlength packets
2480 causes PHY length to not match MAC length */
2481 ++dev->stats.rx_length_errors;
2482 if (net_ratelimit())
2483 netif_info(sky2, rx_err, dev,
2484 "rx length error: status %#x length %d\n",
2485 status, length);
2486 goto resubmit;
2487
2488error: 2465error:
2489 ++dev->stats.rx_errors; 2466 ++dev->stats.rx_errors;
2490 if (status & GMR_FS_RX_FF_OV) {
2491 dev->stats.rx_over_errors++;
2492 goto resubmit;
2493 }
2494 2467
2495 if (net_ratelimit()) 2468 if (net_ratelimit())
2496 netif_info(sky2, rx_err, dev, 2469 netif_info(sky2, rx_err, dev,
2497 "rx error, status 0x%x length %d\n", status, length); 2470 "rx error, status 0x%x length %d\n", status, length);
2498 2471
2499 if (status & (GMR_FS_LONG_ERR | GMR_FS_UN_SIZE))
2500 dev->stats.rx_length_errors++;
2501 if (status & GMR_FS_FRAGMENT)
2502 dev->stats.rx_frame_errors++;
2503 if (status & GMR_FS_CRC_ERR)
2504 dev->stats.rx_crc_errors++;
2505
2506 goto resubmit; 2472 goto resubmit;
2507} 2473}
2508 2474
@@ -2523,17 +2489,9 @@ static inline void sky2_tx_done(struct net_device *dev, u16 last)
2523static inline void sky2_skb_rx(const struct sky2_port *sky2, 2489static inline void sky2_skb_rx(const struct sky2_port *sky2,
2524 u32 status, struct sk_buff *skb) 2490 u32 status, struct sk_buff *skb)
2525{ 2491{
2526#ifdef SKY2_VLAN_TAG_USED 2492 if (status & GMR_FS_VLAN)
2527 u16 vlan_tag = be16_to_cpu(sky2->rx_tag); 2493 __vlan_hwaccel_put_tag(skb, be16_to_cpu(sky2->rx_tag));
2528 if (sky2->vlgrp && (status & GMR_FS_VLAN)) { 2494
2529 if (skb->ip_summed == CHECKSUM_NONE)
2530 vlan_hwaccel_receive_skb(skb, sky2->vlgrp, vlan_tag);
2531 else
2532 vlan_gro_receive(&sky2->hw->napi, sky2->vlgrp,
2533 vlan_tag, skb);
2534 return;
2535 }
2536#endif
2537 if (skb->ip_summed == CHECKSUM_NONE) 2495 if (skb->ip_summed == CHECKSUM_NONE)
2538 netif_receive_skb(skb); 2496 netif_receive_skb(skb);
2539 else 2497 else
@@ -2543,14 +2501,19 @@ static inline void sky2_skb_rx(const struct sky2_port *sky2,
2543static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port, 2501static inline void sky2_rx_done(struct sky2_hw *hw, unsigned port,
2544 unsigned packets, unsigned bytes) 2502 unsigned packets, unsigned bytes)
2545{ 2503{
2546 if (packets) { 2504 struct net_device *dev = hw->dev[port];
2547 struct net_device *dev = hw->dev[port]; 2505 struct sky2_port *sky2 = netdev_priv(dev);
2548 2506
2549 dev->stats.rx_packets += packets; 2507 if (packets == 0)
2550 dev->stats.rx_bytes += bytes; 2508 return;
2551 dev->last_rx = jiffies; 2509
2552 sky2_rx_update(netdev_priv(dev), rxqaddr[port]); 2510 u64_stats_update_begin(&sky2->rx_stats.syncp);
2553 } 2511 sky2->rx_stats.packets += packets;
2512 sky2->rx_stats.bytes += bytes;
2513 u64_stats_update_end(&sky2->rx_stats.syncp);
2514
2515 dev->last_rx = jiffies;
2516 sky2_rx_update(netdev_priv(dev), rxqaddr[port]);
2554} 2517}
2555 2518
2556static void sky2_rx_checksum(struct sky2_port *sky2, u32 status) 2519static void sky2_rx_checksum(struct sky2_port *sky2, u32 status)
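
The per-port rx/tx packet and byte counters above are kept in 64-bit software counters protected by a u64_stats_sync seqcount, so 32-bit readers never observe a torn value. The writer/reader pairing looks like this; the structure mirrors the new sky2_stats:

    #include <linux/u64_stats_sync.h>

    struct example_stats {
            struct u64_stats_sync syncp;
            u64 packets;
            u64 bytes;
    };

    /* Writer, called from NAPI/softirq context. */
    static void example_stats_add(struct example_stats *s,
                                  unsigned int pkts, unsigned int len)
    {
            u64_stats_update_begin(&s->syncp);
            s->packets += pkts;
            s->bytes += len;
            u64_stats_update_end(&s->syncp);
    }

    /* Reader: retry until a consistent snapshot is observed. */
    static void example_stats_read(struct example_stats *s,
                                   u64 *pkts, u64 *bytes)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_bh(&s->syncp);
                    *pkts = s->packets;
                    *bytes = s->bytes;
            } while (u64_stats_fetch_retry_bh(&s->syncp, start));
    }
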
@@ -2645,7 +2608,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2645 goto exit_loop; 2608 goto exit_loop;
2646 break; 2609 break;
2647 2610
2648#ifdef SKY2_VLAN_TAG_USED
2649 case OP_RXVLAN: 2611 case OP_RXVLAN:
2650 sky2->rx_tag = length; 2612 sky2->rx_tag = length;
2651 break; 2613 break;
@@ -2653,7 +2615,6 @@ static int sky2_status_intr(struct sky2_hw *hw, int to_do, u16 idx)
2653 case OP_RXCHKSVLAN: 2615 case OP_RXCHKSVLAN:
2654 sky2->rx_tag = length; 2616 sky2->rx_tag = length;
2655 /* fall through */ 2617 /* fall through */
2656#endif
2657 case OP_RXCHKS: 2618 case OP_RXCHKS:
2658 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM)) 2619 if (likely(sky2->flags & SKY2_FLAG_RX_CHECKSUM))
2659 sky2_rx_checksum(sky2, status); 2620 sky2_rx_checksum(sky2, status);
@@ -3056,6 +3017,10 @@ static int __devinit sky2_init(struct sky2_hw *hw)
3056 | SKY2_HW_NEW_LE 3017 | SKY2_HW_NEW_LE
3057 | SKY2_HW_AUTO_TX_SUM 3018 | SKY2_HW_AUTO_TX_SUM
3058 | SKY2_HW_ADV_POWER_CTL; 3019 | SKY2_HW_ADV_POWER_CTL;
3020
 3021 /* The workaround for FE+ status conflicts with VLAN tag detection. */
3022 if (hw->chip_rev == CHIP_REV_YU_FE2_A0)
3023 hw->flags |= SKY2_HW_VLAN_BROKEN;
3059 break; 3024 break;
3060 3025
3061 case CHIP_ID_YUKON_SUPR: 3026 case CHIP_ID_YUKON_SUPR:
@@ -3398,12 +3363,24 @@ static int sky2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3398{ 3363{
3399 struct sky2_port *sky2 = netdev_priv(dev); 3364 struct sky2_port *sky2 = netdev_priv(dev);
3400 struct sky2_hw *hw = sky2->hw; 3365 struct sky2_hw *hw = sky2->hw;
3366 bool enable_wakeup = false;
3367 int i;
3401 3368
3402 if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) || 3369 if ((wol->wolopts & ~sky2_wol_supported(sky2->hw)) ||
3403 !device_can_wakeup(&hw->pdev->dev)) 3370 !device_can_wakeup(&hw->pdev->dev))
3404 return -EOPNOTSUPP; 3371 return -EOPNOTSUPP;
3405 3372
3406 sky2->wol = wol->wolopts; 3373 sky2->wol = wol->wolopts;
3374
3375 for (i = 0; i < hw->ports; i++) {
3376 struct net_device *dev = hw->dev[i];
3377 struct sky2_port *sky2 = netdev_priv(dev);
3378
3379 if (sky2->wol)
3380 enable_wakeup = true;
3381 }
3382 device_set_wakeup_enable(&hw->pdev->dev, enable_wakeup);
3383
3407 return 0; 3384 return 0;
3408} 3385}
3409 3386
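
sky2_set_wol() now records whether any port still wants Wake-on-LAN and tells the PM core through device_set_wakeup_enable(), instead of recomputing it at suspend time. A sketch of that aggregation, with a simplified structure standing in for sky2_hw:

    #include <linux/device.h>

    struct example_hw {                    /* stand-in for sky2_hw */
            struct device *dev;            /* &pdev->dev in the real driver */
            int ports;
            u32 port_wol[2];               /* per-port WoL option mask */
    };

    static void example_update_wakeup(struct example_hw *hw)
    {
            bool enable = false;
            int i;

            for (i = 0; i < hw->ports; i++)
                    if (hw->port_wol[i])
                            enable = true;

            device_set_wakeup_enable(hw->dev, enable);
    }
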
@@ -3413,18 +3390,15 @@ static u32 sky2_supported_modes(const struct sky2_hw *hw)
3413 u32 modes = SUPPORTED_10baseT_Half 3390 u32 modes = SUPPORTED_10baseT_Half
3414 | SUPPORTED_10baseT_Full 3391 | SUPPORTED_10baseT_Full
3415 | SUPPORTED_100baseT_Half 3392 | SUPPORTED_100baseT_Half
3416 | SUPPORTED_100baseT_Full 3393 | SUPPORTED_100baseT_Full;
3417 | SUPPORTED_Autoneg | SUPPORTED_TP;
3418 3394
3419 if (hw->flags & SKY2_HW_GIGABIT) 3395 if (hw->flags & SKY2_HW_GIGABIT)
3420 modes |= SUPPORTED_1000baseT_Half 3396 modes |= SUPPORTED_1000baseT_Half
3421 | SUPPORTED_1000baseT_Full; 3397 | SUPPORTED_1000baseT_Full;
3422 return modes; 3398 return modes;
3423 } else 3399 } else
3424 return SUPPORTED_1000baseT_Half 3400 return SUPPORTED_1000baseT_Half
3425 | SUPPORTED_1000baseT_Full 3401 | SUPPORTED_1000baseT_Full;
3426 | SUPPORTED_Autoneg
3427 | SUPPORTED_FIBRE;
3428} 3402}
3429 3403
3430static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) 3404static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
@@ -3438,9 +3412,11 @@ static int sky2_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3438 if (sky2_is_copper(hw)) { 3412 if (sky2_is_copper(hw)) {
3439 ecmd->port = PORT_TP; 3413 ecmd->port = PORT_TP;
3440 ecmd->speed = sky2->speed; 3414 ecmd->speed = sky2->speed;
3415 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_TP;
3441 } else { 3416 } else {
3442 ecmd->speed = SPEED_1000; 3417 ecmd->speed = SPEED_1000;
3443 ecmd->port = PORT_FIBRE; 3418 ecmd->port = PORT_FIBRE;
3419 ecmd->supported |= SUPPORTED_Autoneg | SUPPORTED_FIBRE;
3444 } 3420 }
3445 3421
3446 ecmd->advertising = sky2->advertising; 3422 ecmd->advertising = sky2->advertising;
@@ -3457,8 +3433,19 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3457 u32 supported = sky2_supported_modes(hw); 3433 u32 supported = sky2_supported_modes(hw);
3458 3434
3459 if (ecmd->autoneg == AUTONEG_ENABLE) { 3435 if (ecmd->autoneg == AUTONEG_ENABLE) {
3436 if (ecmd->advertising & ~supported)
3437 return -EINVAL;
3438
3439 if (sky2_is_copper(hw))
3440 sky2->advertising = ecmd->advertising |
3441 ADVERTISED_TP |
3442 ADVERTISED_Autoneg;
3443 else
3444 sky2->advertising = ecmd->advertising |
3445 ADVERTISED_FIBRE |
3446 ADVERTISED_Autoneg;
3447
3460 sky2->flags |= SKY2_FLAG_AUTO_SPEED; 3448 sky2->flags |= SKY2_FLAG_AUTO_SPEED;
3461 ecmd->advertising = supported;
3462 sky2->duplex = -1; 3449 sky2->duplex = -1;
3463 sky2->speed = -1; 3450 sky2->speed = -1;
3464 } else { 3451 } else {
@@ -3502,8 +3489,6 @@ static int sky2_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd)
3502 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED; 3489 sky2->flags &= ~SKY2_FLAG_AUTO_SPEED;
3503 } 3490 }
3504 3491
3505 sky2->advertising = ecmd->advertising;
3506
3507 if (netif_running(dev)) { 3492 if (netif_running(dev)) {
3508 sky2_phy_reinit(sky2); 3493 sky2_phy_reinit(sky2);
3509 sky2_set_multicast(dev); 3494 sky2_set_multicast(dev);
@@ -3614,13 +3599,11 @@ static void sky2_phy_stats(struct sky2_port *sky2, u64 * data, unsigned count)
3614 unsigned port = sky2->port; 3599 unsigned port = sky2->port;
3615 int i; 3600 int i;
3616 3601
3617 data[0] = (u64) gma_read32(hw, port, GM_TXO_OK_HI) << 32 3602 data[0] = get_stats64(hw, port, GM_TXO_OK_LO);
3618 | (u64) gma_read32(hw, port, GM_TXO_OK_LO); 3603 data[1] = get_stats64(hw, port, GM_RXO_OK_LO);
3619 data[1] = (u64) gma_read32(hw, port, GM_RXO_OK_HI) << 32
3620 | (u64) gma_read32(hw, port, GM_RXO_OK_LO);
3621 3604
3622 for (i = 2; i < count; i++) 3605 for (i = 2; i < count; i++)
3623 data[i] = (u64) gma_read32(hw, port, sky2_stats[i].offset); 3606 data[i] = get_stats32(hw, port, sky2_stats[i].offset);
3624} 3607}
3625 3608
3626static void sky2_set_msglevel(struct net_device *netdev, u32 value) 3609static void sky2_set_msglevel(struct net_device *netdev, u32 value)
@@ -3738,6 +3721,51 @@ static void sky2_set_multicast(struct net_device *dev)
3738 gma_write16(hw, port, GM_RX_CTRL, reg); 3721 gma_write16(hw, port, GM_RX_CTRL, reg);
3739} 3722}
3740 3723
3724static struct rtnl_link_stats64 *sky2_get_stats(struct net_device *dev,
3725 struct rtnl_link_stats64 *stats)
3726{
3727 struct sky2_port *sky2 = netdev_priv(dev);
3728 struct sky2_hw *hw = sky2->hw;
3729 unsigned port = sky2->port;
3730 unsigned int start;
3731 u64 _bytes, _packets;
3732
3733 do {
3734 start = u64_stats_fetch_begin_bh(&sky2->rx_stats.syncp);
3735 _bytes = sky2->rx_stats.bytes;
3736 _packets = sky2->rx_stats.packets;
3737 } while (u64_stats_fetch_retry_bh(&sky2->rx_stats.syncp, start));
3738
3739 stats->rx_packets = _packets;
3740 stats->rx_bytes = _bytes;
3741
3742 do {
3743 start = u64_stats_fetch_begin_bh(&sky2->tx_stats.syncp);
3744 _bytes = sky2->tx_stats.bytes;
3745 _packets = sky2->tx_stats.packets;
3746 } while (u64_stats_fetch_retry_bh(&sky2->tx_stats.syncp, start));
3747
3748 stats->tx_packets = _packets;
3749 stats->tx_bytes = _bytes;
3750
3751 stats->multicast = get_stats32(hw, port, GM_RXF_MC_OK)
3752 + get_stats32(hw, port, GM_RXF_BC_OK);
3753
3754 stats->collisions = get_stats32(hw, port, GM_TXF_COL);
3755
3756 stats->rx_length_errors = get_stats32(hw, port, GM_RXF_LNG_ERR);
3757 stats->rx_crc_errors = get_stats32(hw, port, GM_RXF_FCS_ERR);
3758 stats->rx_frame_errors = get_stats32(hw, port, GM_RXF_SHT)
3759 + get_stats32(hw, port, GM_RXE_FRAG);
3760 stats->rx_over_errors = get_stats32(hw, port, GM_RXE_FIFO_OV);
3761
3762 stats->rx_dropped = dev->stats.rx_dropped;
3763 stats->rx_fifo_errors = dev->stats.rx_fifo_errors;
3764 stats->tx_fifo_errors = dev->stats.tx_fifo_errors;
3765
3766 return stats;
3767}
3768
 3741/* Can have one global because blinking is controlled by 3769/* Can have one global because blinking is controlled by
 3742 * ethtool and that is always under RTNL mutex 3770 * ethtool and that is always under RTNL mutex
 3743 */ 3771 */
@@ -4188,15 +4216,28 @@ static int sky2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom
4188static int sky2_set_flags(struct net_device *dev, u32 data) 4216static int sky2_set_flags(struct net_device *dev, u32 data)
4189{ 4217{
4190 struct sky2_port *sky2 = netdev_priv(dev); 4218 struct sky2_port *sky2 = netdev_priv(dev);
4191 u32 supported = 4219 unsigned long old_feat = dev->features;
4192 (sky2->hw->flags & SKY2_HW_RSS_BROKEN) ? 0 : ETH_FLAG_RXHASH; 4220 u32 supported = 0;
4193 int rc; 4221 int rc;
4194 4222
4223 if (!(sky2->hw->flags & SKY2_HW_RSS_BROKEN))
4224 supported |= ETH_FLAG_RXHASH;
4225
4226 if (!(sky2->hw->flags & SKY2_HW_VLAN_BROKEN))
4227 supported |= ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN;
4228
4229 printk(KERN_DEBUG "sky2 set_flags: supported %x data %x\n",
4230 supported, data);
4231
4195 rc = ethtool_op_set_flags(dev, data, supported); 4232 rc = ethtool_op_set_flags(dev, data, supported);
4196 if (rc) 4233 if (rc)
4197 return rc; 4234 return rc;
4198 4235
4199 rx_set_rss(dev); 4236 if ((old_feat ^ dev->features) & NETIF_F_RXHASH)
4237 rx_set_rss(dev);
4238
4239 if ((old_feat ^ dev->features) & NETIF_F_ALL_VLAN)
4240 sky2_vlan_mode(dev);
4200 4241
4201 return 0; 4242 return 0;
4202} 4243}
@@ -4232,6 +4273,7 @@ static const struct ethtool_ops sky2_ethtool_ops = {
4232 .get_sset_count = sky2_get_sset_count, 4273 .get_sset_count = sky2_get_sset_count,
4233 .get_ethtool_stats = sky2_get_ethtool_stats, 4274 .get_ethtool_stats = sky2_get_ethtool_stats,
4234 .set_flags = sky2_set_flags, 4275 .set_flags = sky2_set_flags,
4276 .get_flags = ethtool_op_get_flags,
4235}; 4277};
4236 4278
4237#ifdef CONFIG_SKY2_DEBUG 4279#ifdef CONFIG_SKY2_DEBUG
@@ -4512,9 +4554,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4512 .ndo_set_multicast_list = sky2_set_multicast, 4554 .ndo_set_multicast_list = sky2_set_multicast,
4513 .ndo_change_mtu = sky2_change_mtu, 4555 .ndo_change_mtu = sky2_change_mtu,
4514 .ndo_tx_timeout = sky2_tx_timeout, 4556 .ndo_tx_timeout = sky2_tx_timeout,
4515#ifdef SKY2_VLAN_TAG_USED 4557 .ndo_get_stats64 = sky2_get_stats,
4516 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4517#endif
4518#ifdef CONFIG_NET_POLL_CONTROLLER 4558#ifdef CONFIG_NET_POLL_CONTROLLER
4519 .ndo_poll_controller = sky2_netpoll, 4559 .ndo_poll_controller = sky2_netpoll,
4520#endif 4560#endif
@@ -4529,9 +4569,7 @@ static const struct net_device_ops sky2_netdev_ops[2] = {
4529 .ndo_set_multicast_list = sky2_set_multicast, 4569 .ndo_set_multicast_list = sky2_set_multicast,
4530 .ndo_change_mtu = sky2_change_mtu, 4570 .ndo_change_mtu = sky2_change_mtu,
4531 .ndo_tx_timeout = sky2_tx_timeout, 4571 .ndo_tx_timeout = sky2_tx_timeout,
4532#ifdef SKY2_VLAN_TAG_USED 4572 .ndo_get_stats64 = sky2_get_stats,
4533 .ndo_vlan_rx_register = sky2_vlan_rx_register,
4534#endif
4535 }, 4573 },
4536}; 4574};
4537 4575
@@ -4582,7 +4620,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4582 sky2->port = port; 4620 sky2->port = port;
4583 4621
4584 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG 4622 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG
4585 | NETIF_F_TSO | NETIF_F_GRO; 4623 | NETIF_F_TSO | NETIF_F_GRO;
4624
4586 if (highmem) 4625 if (highmem)
4587 dev->features |= NETIF_F_HIGHDMA; 4626 dev->features |= NETIF_F_HIGHDMA;
4588 4627
@@ -4590,13 +4629,8 @@ static __devinit struct net_device *sky2_init_netdev(struct sky2_hw *hw,
4590 if (!(hw->flags & SKY2_HW_RSS_BROKEN)) 4629 if (!(hw->flags & SKY2_HW_RSS_BROKEN))
4591 dev->features |= NETIF_F_RXHASH; 4630 dev->features |= NETIF_F_RXHASH;
4592 4631
4593#ifdef SKY2_VLAN_TAG_USED 4632 if (!(hw->flags & SKY2_HW_VLAN_BROKEN))
4594 /* The workaround for FE+ status conflicts with VLAN tag detection. */
4595 if (!(sky2->hw->chip_id == CHIP_ID_YUKON_FE_P &&
4596 sky2->hw->chip_rev == CHIP_REV_YU_FE2_A0)) {
4597 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX; 4633 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
4598 }
4599#endif
4600 4634
4601 /* read the mac address */ 4635 /* read the mac address */
4602 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN); 4636 memcpy_fromio(dev->dev_addr, hw->regs + B2_MAC_1 + port * 8, ETH_ALEN);
@@ -4920,10 +4954,11 @@ static void __devexit sky2_remove(struct pci_dev *pdev)
4920 pci_set_drvdata(pdev, NULL); 4954 pci_set_drvdata(pdev, NULL);
4921} 4955}
4922 4956
4923static int sky2_suspend(struct pci_dev *pdev, pm_message_t state) 4957static int sky2_suspend(struct device *dev)
4924{ 4958{
4959 struct pci_dev *pdev = to_pci_dev(dev);
4925 struct sky2_hw *hw = pci_get_drvdata(pdev); 4960 struct sky2_hw *hw = pci_get_drvdata(pdev);
4926 int i, wol = 0; 4961 int i;
4927 4962
4928 if (!hw) 4963 if (!hw)
4929 return 0; 4964 return 0;
@@ -4940,41 +4975,24 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
4940 4975
4941 if (sky2->wol) 4976 if (sky2->wol)
4942 sky2_wol_init(sky2); 4977 sky2_wol_init(sky2);
4943
4944 wol |= sky2->wol;
4945 } 4978 }
4946 4979
4947 device_set_wakeup_enable(&pdev->dev, wol != 0);
4948
4949 sky2_power_aux(hw); 4980 sky2_power_aux(hw);
4950 rtnl_unlock(); 4981 rtnl_unlock();
4951 4982
4952 pci_save_state(pdev);
4953 pci_enable_wake(pdev, pci_choose_state(pdev, state), wol);
4954 pci_set_power_state(pdev, pci_choose_state(pdev, state));
4955
4956 return 0; 4983 return 0;
4957} 4984}
4958 4985
4959#ifdef CONFIG_PM 4986#ifdef CONFIG_PM
4960static int sky2_resume(struct pci_dev *pdev) 4987static int sky2_resume(struct device *dev)
4961{ 4988{
4989 struct pci_dev *pdev = to_pci_dev(dev);
4962 struct sky2_hw *hw = pci_get_drvdata(pdev); 4990 struct sky2_hw *hw = pci_get_drvdata(pdev);
4963 int err; 4991 int err;
4964 4992
4965 if (!hw) 4993 if (!hw)
4966 return 0; 4994 return 0;
4967 4995
4968 err = pci_set_power_state(pdev, PCI_D0);
4969 if (err)
4970 goto out;
4971
4972 err = pci_restore_state(pdev);
4973 if (err)
4974 goto out;
4975
4976 pci_enable_wake(pdev, PCI_D0, 0);
4977
4978 /* Re-enable all clocks */ 4996 /* Re-enable all clocks */
4979 err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0); 4997 err = pci_write_config_dword(pdev, PCI_DEV_REG3, 0);
4980 if (err) { 4998 if (err) {
@@ -4994,11 +5012,20 @@ out:
4994 pci_disable_device(pdev); 5012 pci_disable_device(pdev);
4995 return err; 5013 return err;
4996} 5014}
5015
5016static SIMPLE_DEV_PM_OPS(sky2_pm_ops, sky2_suspend, sky2_resume);
5017#define SKY2_PM_OPS (&sky2_pm_ops)
5018
5019#else
5020
5021#define SKY2_PM_OPS NULL
4997#endif 5022#endif
4998 5023
4999static void sky2_shutdown(struct pci_dev *pdev) 5024static void sky2_shutdown(struct pci_dev *pdev)
5000{ 5025{
5001 sky2_suspend(pdev, PMSG_SUSPEND); 5026 sky2_suspend(&pdev->dev);
5027 pci_wake_from_d3(pdev, device_may_wakeup(&pdev->dev));
5028 pci_set_power_state(pdev, PCI_D3hot);
5002} 5029}
5003 5030
5004static struct pci_driver sky2_driver = { 5031static struct pci_driver sky2_driver = {
@@ -5006,11 +5033,8 @@ static struct pci_driver sky2_driver = {
5006 .id_table = sky2_id_table, 5033 .id_table = sky2_id_table,
5007 .probe = sky2_probe, 5034 .probe = sky2_probe,
5008 .remove = __devexit_p(sky2_remove), 5035 .remove = __devexit_p(sky2_remove),
5009#ifdef CONFIG_PM
5010 .suspend = sky2_suspend,
5011 .resume = sky2_resume,
5012#endif
5013 .shutdown = sky2_shutdown, 5036 .shutdown = sky2_shutdown,
5037 .driver.pm = SKY2_PM_OPS,
5014}; 5038};
5015 5039
5016static int __init sky2_init_module(void) 5040static int __init sky2_init_module(void)
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index 61891a6cacc2..6861b0e8db9a 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -2200,6 +2200,12 @@ enum flow_control {
2200 FC_BOTH = 3, 2200 FC_BOTH = 3,
2201}; 2201};
2202 2202
2203struct sky2_stats {
2204 struct u64_stats_sync syncp;
2205 u64 packets;
2206 u64 bytes;
2207};
2208
2203struct sky2_port { 2209struct sky2_port {
2204 struct sky2_hw *hw; 2210 struct sky2_hw *hw;
2205 struct net_device *netdev; 2211 struct net_device *netdev;
@@ -2209,6 +2215,8 @@ struct sky2_port {
2209 2215
2210 struct tx_ring_info *tx_ring; 2216 struct tx_ring_info *tx_ring;
2211 struct sky2_tx_le *tx_le; 2217 struct sky2_tx_le *tx_le;
2218 struct sky2_stats tx_stats;
2219
2212 u16 tx_ring_size; 2220 u16 tx_ring_size;
2213 u16 tx_cons; /* next le to check */ 2221 u16 tx_cons; /* next le to check */
2214 u16 tx_prod; /* next le to use */ 2222 u16 tx_prod; /* next le to use */
@@ -2221,17 +2229,15 @@ struct sky2_port {
2221 2229
2222 struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp; 2230 struct rx_ring_info *rx_ring ____cacheline_aligned_in_smp;
2223 struct sky2_rx_le *rx_le; 2231 struct sky2_rx_le *rx_le;
2232 struct sky2_stats rx_stats;
2224 2233
2225 u16 rx_next; /* next re to check */ 2234 u16 rx_next; /* next re to check */
2226 u16 rx_put; /* next le index to use */ 2235 u16 rx_put; /* next le index to use */
2227 u16 rx_pending; 2236 u16 rx_pending;
2228 u16 rx_data_size; 2237 u16 rx_data_size;
2229 u16 rx_nfrags; 2238 u16 rx_nfrags;
2230
2231#ifdef SKY2_VLAN_TAG_USED
2232 u16 rx_tag; 2239 u16 rx_tag;
2233 struct vlan_group *vlgrp; 2240
2234#endif
2235 struct { 2241 struct {
2236 unsigned long last; 2242 unsigned long last;
2237 u32 mac_rp; 2243 u32 mac_rp;
@@ -2275,6 +2281,7 @@ struct sky2_hw {
2275#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */ 2281#define SKY2_HW_AUTO_TX_SUM 0x00000040 /* new IP decode for Tx */
2276#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */ 2282#define SKY2_HW_ADV_POWER_CTL 0x00000080 /* additional PHY power regs */
2277#define SKY2_HW_RSS_BROKEN 0x00000100 2283#define SKY2_HW_RSS_BROKEN 0x00000100
2284#define SKY2_HW_VLAN_BROKEN 0x00000200
2278 2285
2279 u8 chip_id; 2286 u8 chip_id;
2280 u8 chip_rev; 2287 u8 chip_rev;
@@ -2346,6 +2353,39 @@ static inline u32 gma_read32(struct sky2_hw *hw, unsigned port, unsigned reg)
2346 | (u32) sky2_read16(hw, base+4) << 16; 2353 | (u32) sky2_read16(hw, base+4) << 16;
2347} 2354}
2348 2355
2356static inline u64 gma_read64(struct sky2_hw *hw, unsigned port, unsigned reg)
2357{
2358 unsigned base = SK_GMAC_REG(port, reg);
2359
2360 return (u64) sky2_read16(hw, base)
2361 | (u64) sky2_read16(hw, base+4) << 16
2362 | (u64) sky2_read16(hw, base+8) << 32
2363 | (u64) sky2_read16(hw, base+12) << 48;
2364}
2365
 2366/* There is no way to atomically read 32-bit values from the PHY, so retry */
2367static inline u32 get_stats32(struct sky2_hw *hw, unsigned port, unsigned reg)
2368{
2369 u32 val;
2370
2371 do {
2372 val = gma_read32(hw, port, reg);
2373 } while (gma_read32(hw, port, reg) != val);
2374
2375 return val;
2376}
2377
2378static inline u64 get_stats64(struct sky2_hw *hw, unsigned port, unsigned reg)
2379{
2380 u64 val;
2381
2382 do {
2383 val = gma_read64(hw, port, reg);
2384 } while (gma_read64(hw, port, reg) != val);
2385
2386 return val;
2387}
2388
2349static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v) 2389static inline void gma_write16(const struct sky2_hw *hw, unsigned port, int r, u16 v)
2350{ 2390{
2351 sky2_write16(hw, SK_GMAC_REG(port,r), v); 2391 sky2_write16(hw, SK_GMAC_REG(port,r), v);
diff --git a/drivers/net/smc-ultra.c b/drivers/net/smc-ultra.c
index d2dd8e6113ab..235a3c6c9f91 100644
--- a/drivers/net/smc-ultra.c
+++ b/drivers/net/smc-ultra.c
@@ -277,8 +277,12 @@ static int __init ultra_probe1(struct net_device *dev, int ioaddr)
277 dev->base_addr = ioaddr+ULTRA_NIC_OFFSET; 277 dev->base_addr = ioaddr+ULTRA_NIC_OFFSET;
278 278
279 { 279 {
280 int addr_tbl[4] = {0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000}; 280 static const int addr_tbl[4] = {
281 short num_pages_tbl[4] = {0x20, 0x40, 0x80, 0xff}; 281 0x0C0000, 0x0E0000, 0xFC0000, 0xFE0000
282 };
283 static const short num_pages_tbl[4] = {
284 0x20, 0x40, 0x80, 0xff
285 };
282 286
283 dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ; 287 dev->mem_start = ((addr & 0x0f) << 13) + addr_tbl[(addr >> 6) & 3] ;
284 num_pages = num_pages_tbl[(addr >> 4) & 3]; 288 num_pages = num_pages_tbl[(addr >> 4) & 3];
diff --git a/drivers/net/starfire.c b/drivers/net/starfire.c
index 4adf12422787..a4f2bd52e546 100644
--- a/drivers/net/starfire.c
+++ b/drivers/net/starfire.c
@@ -148,7 +148,7 @@ static int full_duplex[MAX_UNITS] = {0, };
148 * This SUCKS. 148 * This SUCKS.
149 * We need a much better method to determine if dma_addr_t is 64-bit. 149 * We need a much better method to determine if dma_addr_t is 64-bit.
150 */ 150 */
151#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || defined(__mips64__) || (defined(__mips__) && defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT)) 151#if (defined(__i386__) && defined(CONFIG_HIGHMEM64G)) || defined(__x86_64__) || defined (__ia64__) || defined(__alpha__) || (defined(CONFIG_MIPS) && ((defined(CONFIG_HIGHMEM) && defined(CONFIG_64BIT_PHYS_ADDR)) || defined(CONFIG_64BIT))) || (defined(__powerpc64__) || defined(CONFIG_PHYS_64BIT))
152/* 64-bit dma_addr_t */ 152/* 64-bit dma_addr_t */
153#define ADDR_64BITS /* This chip uses 64 bit addresses. */ 153#define ADDR_64BITS /* This chip uses 64 bit addresses. */
154#define netdrv_addr_t __le64 154#define netdrv_addr_t __le64
diff --git a/drivers/net/stmmac/stmmac.h b/drivers/net/stmmac/stmmac.h
index 79bdc2e13224..5f06c4706abe 100644
--- a/drivers/net/stmmac/stmmac.h
+++ b/drivers/net/stmmac/stmmac.h
@@ -20,7 +20,7 @@
20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> 20 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
21*******************************************************************************/ 21*******************************************************************************/
22 22
23#define DRV_MODULE_VERSION "Apr_2010" 23#define DRV_MODULE_VERSION "Nov_2010"
24#include <linux/platform_device.h> 24#include <linux/platform_device.h>
25#include <linux/stmmac.h> 25#include <linux/stmmac.h>
26 26
@@ -37,7 +37,6 @@ struct stmmac_priv {
37 unsigned int cur_tx; 37 unsigned int cur_tx;
38 unsigned int dirty_tx; 38 unsigned int dirty_tx;
39 unsigned int dma_tx_size; 39 unsigned int dma_tx_size;
40 int tx_coe;
41 int tx_coalesce; 40 int tx_coalesce;
42 41
43 struct dma_desc *dma_rx ; 42 struct dma_desc *dma_rx ;
@@ -48,7 +47,6 @@ struct stmmac_priv {
48 struct sk_buff_head rx_recycle; 47 struct sk_buff_head rx_recycle;
49 48
50 struct net_device *dev; 49 struct net_device *dev;
51 int is_gmac;
52 dma_addr_t dma_rx_phy; 50 dma_addr_t dma_rx_phy;
53 unsigned int dma_rx_size; 51 unsigned int dma_rx_size;
54 unsigned int dma_buf_sz; 52 unsigned int dma_buf_sz;
@@ -60,14 +58,11 @@ struct stmmac_priv {
60 struct napi_struct napi; 58 struct napi_struct napi;
61 59
62 phy_interface_t phy_interface; 60 phy_interface_t phy_interface;
63 int pbl;
64 int bus_id;
65 int phy_addr; 61 int phy_addr;
66 int phy_mask; 62 int phy_mask;
67 int (*phy_reset) (void *priv); 63 int (*phy_reset) (void *priv);
68 void (*fix_mac_speed) (void *priv, unsigned int speed); 64 int rx_coe;
69 void (*bus_setup)(void __iomem *ioaddr); 65 int no_csum_insertion;
70 void *bsp_priv;
71 66
72 int phy_irq; 67 int phy_irq;
73 struct phy_device *phydev; 68 struct phy_device *phydev;
@@ -77,47 +72,20 @@ struct stmmac_priv {
77 unsigned int flow_ctrl; 72 unsigned int flow_ctrl;
78 unsigned int pause; 73 unsigned int pause;
79 struct mii_bus *mii; 74 struct mii_bus *mii;
80 int mii_clk_csr;
81 75
82 u32 msg_enable; 76 u32 msg_enable;
83 spinlock_t lock; 77 spinlock_t lock;
84 int wolopts; 78 int wolopts;
85 int wolenabled; 79 int wolenabled;
86 int shutdown;
87#ifdef CONFIG_STMMAC_TIMER 80#ifdef CONFIG_STMMAC_TIMER
88 struct stmmac_timer *tm; 81 struct stmmac_timer *tm;
89#endif 82#endif
90#ifdef STMMAC_VLAN_TAG_USED 83#ifdef STMMAC_VLAN_TAG_USED
91 struct vlan_group *vlgrp; 84 struct vlan_group *vlgrp;
92#endif 85#endif
93 int enh_desc; 86 struct plat_stmmacenet_data *plat;
94 int rx_coe;
95 int bugged_jumbo;
96 int no_csum_insertion;
97}; 87};
98 88
99#ifdef CONFIG_STM_DRIVERS
100#include <linux/stm/pad.h>
101static inline int stmmac_claim_resource(struct platform_device *pdev)
102{
103 int ret = 0;
104 struct plat_stmmacenet_data *plat_dat = pdev->dev.platform_data;
105
106 /* Pad routing setup */
107 if (IS_ERR(devm_stm_pad_claim(&pdev->dev, plat_dat->pad_config,
108 dev_name(&pdev->dev)))) {
109 printk(KERN_ERR "%s: Failed to request pads!\n", __func__);
110 ret = -ENODEV;
111 }
112 return ret;
113}
114#else
115static inline int stmmac_claim_resource(struct platform_device *pdev)
116{
117 return 0;
118}
119#endif
120
121extern int stmmac_mdio_unregister(struct net_device *ndev); 89extern int stmmac_mdio_unregister(struct net_device *ndev);
122extern int stmmac_mdio_register(struct net_device *ndev); 90extern int stmmac_mdio_register(struct net_device *ndev);
123extern void stmmac_set_ethtool_ops(struct net_device *netdev); 91extern void stmmac_set_ethtool_ops(struct net_device *netdev);
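
The stmmac_priv slimming above moves every board-specific knob (bus_id, pbl, has_gmac, tx_coe, bugged_jumbo, fix_mac_speed, bus_setup, bsp_priv, ...) behind a single pointer to the platform data, so the driver reads priv->plat-><field> everywhere. A tiny sketch of the resulting access pattern; the jumbo MTU value is a stand-in for the driver's JUMBO_LEN:

    #include <linux/stmmac.h>
    #include <linux/if_ether.h>

    struct example_priv {                          /* trimmed stmmac_priv */
            struct plat_stmmacenet_data *plat;
    };

    static int example_max_mtu(struct example_priv *priv)
    {
            /* mirrors stmmac_change_mtu(): only GMAC cores take jumbo frames */
            return priv->plat->has_gmac ? 9000 /* ~JUMBO_LEN */ : ETH_DATA_LEN;
    }
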
diff --git a/drivers/net/stmmac/stmmac_ethtool.c b/drivers/net/stmmac/stmmac_ethtool.c
index 6d65482e789a..fd719edc7f7c 100644
--- a/drivers/net/stmmac/stmmac_ethtool.c
+++ b/drivers/net/stmmac/stmmac_ethtool.c
@@ -94,7 +94,7 @@ static void stmmac_ethtool_getdrvinfo(struct net_device *dev,
94{ 94{
95 struct stmmac_priv *priv = netdev_priv(dev); 95 struct stmmac_priv *priv = netdev_priv(dev);
96 96
97 if (!priv->is_gmac) 97 if (!priv->plat->has_gmac)
98 strcpy(info->driver, MAC100_ETHTOOL_NAME); 98 strcpy(info->driver, MAC100_ETHTOOL_NAME);
99 else 99 else
100 strcpy(info->driver, GMAC_ETHTOOL_NAME); 100 strcpy(info->driver, GMAC_ETHTOOL_NAME);
@@ -176,7 +176,7 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
176 176
177 memset(reg_space, 0x0, REG_SPACE_SIZE); 177 memset(reg_space, 0x0, REG_SPACE_SIZE);
178 178
179 if (!priv->is_gmac) { 179 if (!priv->plat->has_gmac) {
180 /* MAC registers */ 180 /* MAC registers */
181 for (i = 0; i < 12; i++) 181 for (i = 0; i < 12; i++)
182 reg_space[i] = readl(priv->ioaddr + (i * 4)); 182 reg_space[i] = readl(priv->ioaddr + (i * 4));
@@ -197,16 +197,6 @@ static void stmmac_ethtool_gregs(struct net_device *dev,
197 } 197 }
198} 198}
199 199
200static int stmmac_ethtool_set_tx_csum(struct net_device *netdev, u32 data)
201{
202 if (data)
203 netdev->features |= NETIF_F_HW_CSUM;
204 else
205 netdev->features &= ~NETIF_F_HW_CSUM;
206
207 return 0;
208}
209
210static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev) 200static u32 stmmac_ethtool_get_rx_csum(struct net_device *dev)
211{ 201{
212 struct stmmac_priv *priv = netdev_priv(dev); 202 struct stmmac_priv *priv = netdev_priv(dev);
@@ -370,7 +360,7 @@ static struct ethtool_ops stmmac_ethtool_ops = {
370 .get_link = ethtool_op_get_link, 360 .get_link = ethtool_op_get_link,
371 .get_rx_csum = stmmac_ethtool_get_rx_csum, 361 .get_rx_csum = stmmac_ethtool_get_rx_csum,
372 .get_tx_csum = ethtool_op_get_tx_csum, 362 .get_tx_csum = ethtool_op_get_tx_csum,
373 .set_tx_csum = stmmac_ethtool_set_tx_csum, 363 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
374 .get_sg = ethtool_op_get_sg, 364 .get_sg = ethtool_op_get_sg,
375 .set_sg = ethtool_op_set_sg, 365 .set_sg = ethtool_op_set_sg,
376 .get_pauseparam = stmmac_get_pauseparam, 366 .get_pauseparam = stmmac_get_pauseparam,
diff --git a/drivers/net/stmmac/stmmac_main.c b/drivers/net/stmmac/stmmac_main.c
index 2114837809e7..34a0af3837f9 100644
--- a/drivers/net/stmmac/stmmac_main.c
+++ b/drivers/net/stmmac/stmmac_main.c
@@ -186,6 +186,18 @@ static inline u32 stmmac_tx_avail(struct stmmac_priv *priv)
186 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1; 186 return priv->dirty_tx + priv->dma_tx_size - priv->cur_tx - 1;
187} 187}
188 188
 189/* On some ST platforms, some HW system configuration registers have to be
190 * set according to the link speed negotiated.
191 */
192static inline void stmmac_hw_fix_mac_speed(struct stmmac_priv *priv)
193{
194 struct phy_device *phydev = priv->phydev;
195
196 if (likely(priv->plat->fix_mac_speed))
197 priv->plat->fix_mac_speed(priv->plat->bsp_priv,
198 phydev->speed);
199}
200
189/** 201/**
190 * stmmac_adjust_link 202 * stmmac_adjust_link
191 * @dev: net device structure 203 * @dev: net device structure
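
stmmac_hw_fix_mac_speed() above simply forwards the negotiated speed to an optional callback supplied through the platform data. A sketch of how a board file might provide that hook; the register poke inside it is purely hypothetical:

    #include <linux/stmmac.h>

    static void example_board_fix_mac_speed(void *bsp_priv, unsigned int speed)
    {
            /* e.g. write a SoC system-configuration register so the MAC
             * clocking matches the PHY's negotiated speed */
    }

    static struct plat_stmmacenet_data example_board_stmmac_pdata = {
            .bus_id         = 0,
            .pbl            = 32,
            .has_gmac       = 1,
            .fix_mac_speed  = example_board_fix_mac_speed,
    };
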
@@ -228,15 +240,13 @@ static void stmmac_adjust_link(struct net_device *dev)
228 new_state = 1; 240 new_state = 1;
229 switch (phydev->speed) { 241 switch (phydev->speed) {
230 case 1000: 242 case 1000:
231 if (likely(priv->is_gmac)) 243 if (likely(priv->plat->has_gmac))
232 ctrl &= ~priv->hw->link.port; 244 ctrl &= ~priv->hw->link.port;
233 if (likely(priv->fix_mac_speed)) 245 stmmac_hw_fix_mac_speed(priv);
234 priv->fix_mac_speed(priv->bsp_priv,
235 phydev->speed);
236 break; 246 break;
237 case 100: 247 case 100:
238 case 10: 248 case 10:
239 if (priv->is_gmac) { 249 if (priv->plat->has_gmac) {
240 ctrl |= priv->hw->link.port; 250 ctrl |= priv->hw->link.port;
241 if (phydev->speed == SPEED_100) { 251 if (phydev->speed == SPEED_100) {
242 ctrl |= priv->hw->link.speed; 252 ctrl |= priv->hw->link.speed;
@@ -246,9 +256,7 @@ static void stmmac_adjust_link(struct net_device *dev)
246 } else { 256 } else {
247 ctrl &= ~priv->hw->link.port; 257 ctrl &= ~priv->hw->link.port;
248 } 258 }
249 if (likely(priv->fix_mac_speed)) 259 stmmac_hw_fix_mac_speed(priv);
250 priv->fix_mac_speed(priv->bsp_priv,
251 phydev->speed);
252 break; 260 break;
253 default: 261 default:
254 if (netif_msg_link(priv)) 262 if (netif_msg_link(priv))
@@ -305,7 +313,7 @@ static int stmmac_init_phy(struct net_device *dev)
305 return 0; 313 return 0;
306 } 314 }
307 315
308 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 316 snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
309 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id, 317 snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
310 priv->phy_addr); 318 priv->phy_addr);
311 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id); 319 pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);
@@ -552,7 +560,7 @@ static void free_dma_desc_resources(struct stmmac_priv *priv)
552 */ 560 */
553static void stmmac_dma_operation_mode(struct stmmac_priv *priv) 561static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
554{ 562{
555 if (likely((priv->tx_coe) && (!priv->no_csum_insertion))) { 563 if (likely((priv->plat->tx_coe) && (!priv->no_csum_insertion))) {
556 /* In case of GMAC, SF mode has to be enabled 564 /* In case of GMAC, SF mode has to be enabled
557 * to perform the TX COE. This depends on: 565 * to perform the TX COE. This depends on:
558 * 1) TX COE if actually supported 566 * 1) TX COE if actually supported
@@ -814,7 +822,7 @@ static int stmmac_open(struct net_device *dev)
814 init_dma_desc_rings(dev); 822 init_dma_desc_rings(dev);
815 823
816 /* DMA initialization and SW reset */ 824 /* DMA initialization and SW reset */
817 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->pbl, 825 if (unlikely(priv->hw->dma->init(priv->ioaddr, priv->plat->pbl,
818 priv->dma_tx_phy, 826 priv->dma_tx_phy,
819 priv->dma_rx_phy) < 0)) { 827 priv->dma_rx_phy) < 0)) {
820 828
@@ -825,19 +833,17 @@ static int stmmac_open(struct net_device *dev)
825 /* Copy the MAC addr into the HW */ 833 /* Copy the MAC addr into the HW */
826 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); 834 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
827 /* If required, perform hw setup of the bus. */ 835 /* If required, perform hw setup of the bus. */
828 if (priv->bus_setup) 836 if (priv->plat->bus_setup)
829 priv->bus_setup(priv->ioaddr); 837 priv->plat->bus_setup(priv->ioaddr);
830 /* Initialize the MAC Core */ 838 /* Initialize the MAC Core */
831 priv->hw->mac->core_init(priv->ioaddr); 839 priv->hw->mac->core_init(priv->ioaddr);
832 840
833 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr); 841 priv->rx_coe = priv->hw->mac->rx_coe(priv->ioaddr);
834 if (priv->rx_coe) 842 if (priv->rx_coe)
835 pr_info("stmmac: Rx Checksum Offload Engine supported\n"); 843 pr_info("stmmac: Rx Checksum Offload Engine supported\n");
836 if (priv->tx_coe) 844 if (priv->plat->tx_coe)
837 pr_info("\tTX Checksum insertion supported\n"); 845 pr_info("\tTX Checksum insertion supported\n");
838 846
839 priv->shutdown = 0;
840
841 /* Initialise the MMC (if present) to disable all interrupts. */ 847 /* Initialise the MMC (if present) to disable all interrupts. */
842 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK); 848 writel(0xffffffff, priv->ioaddr + MMC_HIGH_INTR_MASK);
843 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK); 849 writel(0xffffffff, priv->ioaddr + MMC_LOW_INTR_MASK);
@@ -943,7 +949,7 @@ static int stmmac_sw_tso(struct stmmac_priv *priv, struct sk_buff *skb)
943 skb, skb->len); 949 skb, skb->len);
944 950
945 segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO); 951 segs = skb_gso_segment(skb, priv->dev->features & ~NETIF_F_TSO);
946 if (unlikely(IS_ERR(segs))) 952 if (IS_ERR(segs))
947 goto sw_tso_end; 953 goto sw_tso_end;
948 954
949 do { 955 do {
@@ -1042,7 +1048,8 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1042 return stmmac_sw_tso(priv, skb); 1048 return stmmac_sw_tso(priv, skb);
1043 1049
1044 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) { 1050 if (likely((skb->ip_summed == CHECKSUM_PARTIAL))) {
1045 if (unlikely((!priv->tx_coe) || (priv->no_csum_insertion))) 1051 if (unlikely((!priv->plat->tx_coe) ||
1052 (priv->no_csum_insertion)))
1046 skb_checksum_help(skb); 1053 skb_checksum_help(skb);
1047 else 1054 else
1048 csum_insertion = 1; 1055 csum_insertion = 1;
@@ -1146,7 +1153,7 @@ static inline void stmmac_rx_refill(struct stmmac_priv *priv)
1146 DMA_FROM_DEVICE); 1153 DMA_FROM_DEVICE);
1147 1154
1148 (p + entry)->des2 = priv->rx_skbuff_dma[entry]; 1155 (p + entry)->des2 = priv->rx_skbuff_dma[entry];
1149 if (unlikely(priv->is_gmac)) { 1156 if (unlikely(priv->plat->has_gmac)) {
1150 if (bfsize >= BUF_SIZE_8KiB) 1157 if (bfsize >= BUF_SIZE_8KiB)
1151 (p + entry)->des3 = 1158 (p + entry)->des3 =
1152 (p + entry)->des2 + BUF_SIZE_8KiB; 1159 (p + entry)->des2 + BUF_SIZE_8KiB;
@@ -1356,7 +1363,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1356 return -EBUSY; 1363 return -EBUSY;
1357 } 1364 }
1358 1365
1359 if (priv->is_gmac) 1366 if (priv->plat->has_gmac)
1360 max_mtu = JUMBO_LEN; 1367 max_mtu = JUMBO_LEN;
1361 else 1368 else
1362 max_mtu = ETH_DATA_LEN; 1369 max_mtu = ETH_DATA_LEN;
@@ -1370,7 +1377,7 @@ static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
1370 * needs to have the Tx COE disabled for oversized frames 1377 * needs to have the Tx COE disabled for oversized frames
1371 * (due to limited buffer sizes). In this case we disable 1378 * (due to limited buffer sizes). In this case we disable
1372 * the TX csum insertion in the TDES and not use SF. */ 1379 * the TX csum insertion in the TDES and not use SF. */
1373 if ((priv->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN)) 1380 if ((priv->plat->bugged_jumbo) && (priv->dev->mtu > ETH_DATA_LEN))
1374 priv->no_csum_insertion = 1; 1381 priv->no_csum_insertion = 1;
1375 else 1382 else
1376 priv->no_csum_insertion = 0; 1383 priv->no_csum_insertion = 0;
@@ -1390,7 +1397,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
1390 return IRQ_NONE; 1397 return IRQ_NONE;
1391 } 1398 }
1392 1399
1393 if (priv->is_gmac) 1400 if (priv->plat->has_gmac)
1394 /* To handle GMAC own interrupts */ 1401 /* To handle GMAC own interrupts */
1395 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr); 1402 priv->hw->mac->host_irq_status((void __iomem *) dev->base_addr);
1396 1403
@@ -1487,7 +1494,8 @@ static int stmmac_probe(struct net_device *dev)
1487 dev->netdev_ops = &stmmac_netdev_ops; 1494 dev->netdev_ops = &stmmac_netdev_ops;
1488 stmmac_set_ethtool_ops(dev); 1495 stmmac_set_ethtool_ops(dev);
1489 1496
1490 dev->features |= (NETIF_F_SG | NETIF_F_HW_CSUM | NETIF_F_HIGHDMA); 1497 dev->features |= NETIF_F_SG | NETIF_F_HIGHDMA |
1498 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
1491 dev->watchdog_timeo = msecs_to_jiffies(watchdog); 1499 dev->watchdog_timeo = msecs_to_jiffies(watchdog);
1492#ifdef STMMAC_VLAN_TAG_USED 1500#ifdef STMMAC_VLAN_TAG_USED
1493 /* Both mac100 and gmac support receive VLAN tag detection */ 1501 /* Both mac100 and gmac support receive VLAN tag detection */
@@ -1520,7 +1528,7 @@ static int stmmac_probe(struct net_device *dev)
1520 1528
1521 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n", 1529 DBG(probe, DEBUG, "%s: Scatter/Gather: %s - HW checksums: %s\n",
1522 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off", 1530 dev->name, (dev->features & NETIF_F_SG) ? "on" : "off",
1523 (dev->features & NETIF_F_HW_CSUM) ? "on" : "off"); 1531 (dev->features & NETIF_F_IP_CSUM) ? "on" : "off");
1524 1532
1525 return ret; 1533 return ret;
1526} 1534}
@@ -1536,7 +1544,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1536 1544
1537 struct mac_device_info *device; 1545 struct mac_device_info *device;
1538 1546
1539 if (priv->is_gmac) 1547 if (priv->plat->has_gmac)
1540 device = dwmac1000_setup(priv->ioaddr); 1548 device = dwmac1000_setup(priv->ioaddr);
1541 else 1549 else
1542 device = dwmac100_setup(priv->ioaddr); 1550 device = dwmac100_setup(priv->ioaddr);
@@ -1544,7 +1552,7 @@ static int stmmac_mac_device_setup(struct net_device *dev)
1544 if (!device) 1552 if (!device)
1545 return -ENOMEM; 1553 return -ENOMEM;
1546 1554
1547 if (priv->enh_desc) { 1555 if (priv->plat->enh_desc) {
1548 device->desc = &enh_desc_ops; 1556 device->desc = &enh_desc_ops;
1549 pr_info("\tEnhanced descriptor structure\n"); 1557 pr_info("\tEnhanced descriptor structure\n");
1550 } else 1558 } else
@@ -1598,7 +1606,7 @@ static int stmmac_associate_phy(struct device *dev, void *data)
1598 plat_dat->bus_id); 1606 plat_dat->bus_id);
1599 1607
1600 /* Check that this phy is for the MAC being initialised */ 1608 /* Check that this phy is for the MAC being initialised */
1601 if (priv->bus_id != plat_dat->bus_id) 1609 if (priv->plat->bus_id != plat_dat->bus_id)
1602 return 0; 1610 return 0;
1603 1611
1604 /* OK, this PHY is connected to the MAC. 1612 /* OK, this PHY is connected to the MAC.
@@ -1634,15 +1642,13 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1634 struct resource *res; 1642 struct resource *res;
1635 void __iomem *addr = NULL; 1643 void __iomem *addr = NULL;
1636 struct net_device *ndev = NULL; 1644 struct net_device *ndev = NULL;
1637 struct stmmac_priv *priv; 1645 struct stmmac_priv *priv = NULL;
1638 struct plat_stmmacenet_data *plat_dat; 1646 struct plat_stmmacenet_data *plat_dat;
1639 1647
1640 pr_info("STMMAC driver:\n\tplatform registration... "); 1648 pr_info("STMMAC driver:\n\tplatform registration... ");
1641 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1649 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1642 if (!res) { 1650 if (!res)
1643 ret = -ENODEV; 1651 return -ENODEV;
1644 goto out;
1645 }
1646 pr_info("\tdone!\n"); 1652 pr_info("\tdone!\n");
1647 1653
1648 if (!request_mem_region(res->start, resource_size(res), 1654 if (!request_mem_region(res->start, resource_size(res),
@@ -1650,22 +1656,21 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1650 pr_err("%s: ERROR: memory allocation failed " 1656 pr_err("%s: ERROR: memory allocation failed "
1651 "cannot get the I/O addr 0x%x\n", 1657 "cannot get the I/O addr 0x%x\n",
1652 __func__, (unsigned int)res->start); 1658 __func__, (unsigned int)res->start);
1653 ret = -EBUSY; 1659 return -EBUSY;
1654 goto out;
1655 } 1660 }
1656 1661
1657 addr = ioremap(res->start, resource_size(res)); 1662 addr = ioremap(res->start, resource_size(res));
1658 if (!addr) { 1663 if (!addr) {
1659 pr_err("%s: ERROR: memory mapping failed\n", __func__); 1664 pr_err("%s: ERROR: memory mapping failed\n", __func__);
1660 ret = -ENOMEM; 1665 ret = -ENOMEM;
1661 goto out; 1666 goto out_release_region;
1662 } 1667 }
1663 1668
1664 ndev = alloc_etherdev(sizeof(struct stmmac_priv)); 1669 ndev = alloc_etherdev(sizeof(struct stmmac_priv));
1665 if (!ndev) { 1670 if (!ndev) {
1666 pr_err("%s: ERROR: allocating the device\n", __func__); 1671 pr_err("%s: ERROR: allocating the device\n", __func__);
1667 ret = -ENOMEM; 1672 ret = -ENOMEM;
1668 goto out; 1673 goto out_unmap;
1669 } 1674 }
1670 1675
1671 SET_NETDEV_DEV(ndev, &pdev->dev); 1676 SET_NETDEV_DEV(ndev, &pdev->dev);
@@ -1675,21 +1680,17 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1675 if (ndev->irq == -ENXIO) { 1680 if (ndev->irq == -ENXIO) {
1676 pr_err("%s: ERROR: MAC IRQ configuration " 1681 pr_err("%s: ERROR: MAC IRQ configuration "
1677 "information not found\n", __func__); 1682 "information not found\n", __func__);
1678 ret = -ENODEV; 1683 ret = -ENXIO;
1679 goto out; 1684 goto out_free_ndev;
1680 } 1685 }
1681 1686
1682 priv = netdev_priv(ndev); 1687 priv = netdev_priv(ndev);
1683 priv->device = &(pdev->dev); 1688 priv->device = &(pdev->dev);
1684 priv->dev = ndev; 1689 priv->dev = ndev;
1685 plat_dat = pdev->dev.platform_data; 1690 plat_dat = pdev->dev.platform_data;
1686 priv->bus_id = plat_dat->bus_id; 1691
1687 priv->pbl = plat_dat->pbl; /* TLI */ 1692 priv->plat = plat_dat;
1688 priv->mii_clk_csr = plat_dat->clk_csr; 1693
1689 priv->tx_coe = plat_dat->tx_coe;
1690 priv->bugged_jumbo = plat_dat->bugged_jumbo;
1691 priv->is_gmac = plat_dat->has_gmac; /* GMAC is on board */
1692 priv->enh_desc = plat_dat->enh_desc;
1693 priv->ioaddr = addr; 1694 priv->ioaddr = addr;
1694 1695
1695 /* PMT module is not integrated in all the MAC devices. */ 1696 /* PMT module is not integrated in all the MAC devices. */
@@ -1703,20 +1704,22 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1703 /* Set the I/O base addr */ 1704 /* Set the I/O base addr */
1704 ndev->base_addr = (unsigned long)addr; 1705 ndev->base_addr = (unsigned long)addr;
1705 1706
1706 /* Verify embedded resource for the platform */ 1707 /* Custom initialisation */
1707 ret = stmmac_claim_resource(pdev); 1708 if (priv->plat->init) {
1708 if (ret < 0) 1709 ret = priv->plat->init(pdev);
1709 goto out; 1710 if (unlikely(ret))
1711 goto out_free_ndev;
1712 }
1710 1713
1711 /* MAC HW device detection */ 1714 /* MAC HW device detection */
1712 ret = stmmac_mac_device_setup(ndev); 1715 ret = stmmac_mac_device_setup(ndev);
1713 if (ret < 0) 1716 if (ret < 0)
1714 goto out; 1717 goto out_plat_exit;
1715 1718
1716 /* Network Device Registration */ 1719 /* Network Device Registration */
1717 ret = stmmac_probe(ndev); 1720 ret = stmmac_probe(ndev);
1718 if (ret < 0) 1721 if (ret < 0)
1719 goto out; 1722 goto out_plat_exit;
1720 1723
1721 /* associate a PHY - it is provided by another platform bus */ 1724 /* associate a PHY - it is provided by another platform bus */
1722 if (!driver_for_each_device 1725 if (!driver_for_each_device
@@ -1724,31 +1727,33 @@ static int stmmac_dvr_probe(struct platform_device *pdev)
1724 stmmac_associate_phy)) { 1727 stmmac_associate_phy)) {
1725 pr_err("No PHY device is associated with this MAC!\n"); 1728 pr_err("No PHY device is associated with this MAC!\n");
1726 ret = -ENODEV; 1729 ret = -ENODEV;
1727 goto out; 1730 goto out_unregister;
1728 } 1731 }
1729 1732
1730 priv->fix_mac_speed = plat_dat->fix_mac_speed;
1731 priv->bus_setup = plat_dat->bus_setup;
1732 priv->bsp_priv = plat_dat->bsp_priv;
1733
1734 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n" 1733 pr_info("\t%s - (dev. name: %s - id: %d, IRQ #%d\n"
1735 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name, 1734 "\tIO base addr: 0x%p)\n", ndev->name, pdev->name,
1736 pdev->id, ndev->irq, addr); 1735 pdev->id, ndev->irq, addr);
1737 1736
1738 /* MDIO bus Registration */ 1737 /* MDIO bus Registration */
1739 pr_debug("\tMDIO bus (id: %d)...", priv->bus_id); 1738 pr_debug("\tMDIO bus (id: %d)...", priv->plat->bus_id);
1740 ret = stmmac_mdio_register(ndev); 1739 ret = stmmac_mdio_register(ndev);
1741 if (ret < 0) 1740 if (ret < 0)
1742 goto out; 1741 goto out_unregister;
1743 pr_debug("registered!\n"); 1742 pr_debug("registered!\n");
1743 return 0;
1744 1744
1745out: 1745out_unregister:
1746 if (ret < 0) { 1746 unregister_netdev(ndev);
1747 platform_set_drvdata(pdev, NULL); 1747out_plat_exit:
1748 release_mem_region(res->start, resource_size(res)); 1748 if (priv->plat->exit)
1749 if (addr != NULL) 1749 priv->plat->exit(pdev);
1750 iounmap(addr); 1750out_free_ndev:
1751 } 1751 free_netdev(ndev);
1752 platform_set_drvdata(pdev, NULL);
1753out_unmap:
1754 iounmap(addr);
1755out_release_region:
1756 release_mem_region(res->start, resource_size(res));
1752 1757
1753 return ret; 1758 return ret;
1754} 1759}
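The probe unwinding above follows the usual kernel idiom: each error label releases exactly what was acquired before the failing step, in reverse order. A minimal sketch of that pattern with placeholder names (not the stmmac code itself):

#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <linux/io.h>
#include <linux/ioport.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *addr;
	struct net_device *ndev;
	int ret;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;			/* nothing to undo yet */

	if (!request_mem_region(res->start, resource_size(res), "example"))
		return -EBUSY;

	addr = ioremap(res->start, resource_size(res));
	if (!addr) {
		ret = -ENOMEM;
		goto out_release_region;	/* undo request_mem_region() only */
	}

	ndev = alloc_etherdev(0);
	if (!ndev) {
		ret = -ENOMEM;
		goto out_unmap;			/* also undo ioremap() */
	}

	ret = register_netdev(ndev);
	if (ret)
		goto out_free_ndev;

	return 0;

out_free_ndev:
	free_netdev(ndev);
out_unmap:
	iounmap(addr);
out_release_region:
	release_mem_region(res->start, resource_size(res));
	return ret;
}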
@@ -1777,6 +1782,9 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1777 1782
1778 stmmac_mdio_unregister(ndev); 1783 stmmac_mdio_unregister(ndev);
1779 1784
1785 if (priv->plat->exit)
1786 priv->plat->exit(pdev);
1787
1780 platform_set_drvdata(pdev, NULL); 1788 platform_set_drvdata(pdev, NULL);
1781 unregister_netdev(ndev); 1789 unregister_netdev(ndev);
1782 1790
@@ -1790,69 +1798,54 @@ static int stmmac_dvr_remove(struct platform_device *pdev)
1790} 1798}
1791 1799
1792#ifdef CONFIG_PM 1800#ifdef CONFIG_PM
1793static int stmmac_suspend(struct platform_device *pdev, pm_message_t state) 1801static int stmmac_suspend(struct device *dev)
1794{ 1802{
1795 struct net_device *dev = platform_get_drvdata(pdev); 1803 struct net_device *ndev = dev_get_drvdata(dev);
1796 struct stmmac_priv *priv = netdev_priv(dev); 1804 struct stmmac_priv *priv = netdev_priv(ndev);
1797 int dis_ic = 0; 1805 int dis_ic = 0;
1798 1806
1799 if (!dev || !netif_running(dev)) 1807 if (!ndev || !netif_running(ndev))
1800 return 0; 1808 return 0;
1801 1809
1802 spin_lock(&priv->lock); 1810 spin_lock(&priv->lock);
1803 1811
1804 if (state.event == PM_EVENT_SUSPEND) { 1812 netif_device_detach(ndev);
1805 netif_device_detach(dev); 1813 netif_stop_queue(ndev);
1806 netif_stop_queue(dev); 1814 if (priv->phydev)
1807 if (priv->phydev) 1815 phy_stop(priv->phydev);
1808 phy_stop(priv->phydev);
1809 1816
1810#ifdef CONFIG_STMMAC_TIMER 1817#ifdef CONFIG_STMMAC_TIMER
1811 priv->tm->timer_stop(); 1818 priv->tm->timer_stop();
1812 if (likely(priv->tm->enable)) 1819 if (likely(priv->tm->enable))
1813 dis_ic = 1; 1820 dis_ic = 1;
1814#endif 1821#endif
1815 napi_disable(&priv->napi); 1822 napi_disable(&priv->napi);
1816 1823
1817 /* Stop TX/RX DMA */ 1824 /* Stop TX/RX DMA */
1818 priv->hw->dma->stop_tx(priv->ioaddr); 1825 priv->hw->dma->stop_tx(priv->ioaddr);
1819 priv->hw->dma->stop_rx(priv->ioaddr); 1826 priv->hw->dma->stop_rx(priv->ioaddr);
1820 /* Clear the Rx/Tx descriptors */ 1827 /* Clear the Rx/Tx descriptors */
1821 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size, 1828 priv->hw->desc->init_rx_desc(priv->dma_rx, priv->dma_rx_size,
1822 dis_ic); 1829 dis_ic);
1823 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size); 1830 priv->hw->desc->init_tx_desc(priv->dma_tx, priv->dma_tx_size);
1824 1831
1825 /* Enable Power down mode by programming the PMT regs */ 1832 /* Enable Power down mode by programming the PMT regs */
1826 if (device_can_wakeup(priv->device)) 1833 if (device_may_wakeup(priv->device))
1827 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts); 1834 priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
1828 else 1835 else
1829 stmmac_disable_mac(priv->ioaddr); 1836 stmmac_disable_mac(priv->ioaddr);
1830 } else {
1831 priv->shutdown = 1;
1832 /* Although this can appear slightly redundant it actually
1833 * makes fast the standby operation and guarantees the driver
1834 * working if hibernation is on media. */
1835 stmmac_release(dev);
1836 }
1837 1837
1838 spin_unlock(&priv->lock); 1838 spin_unlock(&priv->lock);
1839 return 0; 1839 return 0;
1840} 1840}
1841 1841
1842static int stmmac_resume(struct platform_device *pdev) 1842static int stmmac_resume(struct device *dev)
1843{ 1843{
1844 struct net_device *dev = platform_get_drvdata(pdev); 1844 struct net_device *ndev = dev_get_drvdata(dev);
1845 struct stmmac_priv *priv = netdev_priv(dev); 1845 struct stmmac_priv *priv = netdev_priv(ndev);
1846
1847 if (!netif_running(dev))
1848 return 0;
1849 1846
1850 if (priv->shutdown) { 1847 if (!netif_running(ndev))
1851 /* Re-open the interface and re-init the MAC/DMA
1852 and the rings (i.e. on hibernation stage) */
1853 stmmac_open(dev);
1854 return 0; 1848 return 0;
1855 }
1856 1849
1857 spin_lock(&priv->lock); 1850 spin_lock(&priv->lock);
1858 1851
@@ -1861,10 +1854,10 @@ static int stmmac_resume(struct platform_device *pdev)
1861 * is received. Anyway, it's better to manually clear 1854 * is received. Anyway, it's better to manually clear
1862 * this bit because it can generate problems while resuming 1855 * this bit because it can generate problems while resuming
1863 * from other devices (e.g. serial console). */ 1856 * from other devices (e.g. serial console). */
1864 if (device_can_wakeup(priv->device)) 1857 if (device_may_wakeup(priv->device))
1865 priv->hw->mac->pmt(priv->ioaddr, 0); 1858 priv->hw->mac->pmt(priv->ioaddr, 0);
1866 1859
1867 netif_device_attach(dev); 1860 netif_device_attach(ndev);
1868 1861
1869 /* Enable the MAC and DMA */ 1862 /* Enable the MAC and DMA */
1870 stmmac_enable_mac(priv->ioaddr); 1863 stmmac_enable_mac(priv->ioaddr);
@@ -1872,31 +1865,59 @@ static int stmmac_resume(struct platform_device *pdev)
1872 priv->hw->dma->start_rx(priv->ioaddr); 1865 priv->hw->dma->start_rx(priv->ioaddr);
1873 1866
1874#ifdef CONFIG_STMMAC_TIMER 1867#ifdef CONFIG_STMMAC_TIMER
1875 priv->tm->timer_start(tmrate); 1868 if (likely(priv->tm->enable))
1869 priv->tm->timer_start(tmrate);
1876#endif 1870#endif
1877 napi_enable(&priv->napi); 1871 napi_enable(&priv->napi);
1878 1872
1879 if (priv->phydev) 1873 if (priv->phydev)
1880 phy_start(priv->phydev); 1874 phy_start(priv->phydev);
1881 1875
1882 netif_start_queue(dev); 1876 netif_start_queue(ndev);
1883 1877
1884 spin_unlock(&priv->lock); 1878 spin_unlock(&priv->lock);
1885 return 0; 1879 return 0;
1886} 1880}
1887#endif
1888 1881
1889static struct platform_driver stmmac_driver = { 1882static int stmmac_freeze(struct device *dev)
1890 .driver = { 1883{
1891 .name = STMMAC_RESOURCE_NAME, 1884 struct net_device *ndev = dev_get_drvdata(dev);
1892 }, 1885
1893 .probe = stmmac_dvr_probe, 1886 if (!ndev || !netif_running(ndev))
1894 .remove = stmmac_dvr_remove, 1887 return 0;
1895#ifdef CONFIG_PM 1888
1889 return stmmac_release(ndev);
1890}
1891
1892static int stmmac_restore(struct device *dev)
1893{
1894 struct net_device *ndev = dev_get_drvdata(dev);
1895
1896 if (!ndev || !netif_running(ndev))
1897 return 0;
1898
1899 return stmmac_open(ndev);
1900}
1901
1902static const struct dev_pm_ops stmmac_pm_ops = {
1896 .suspend = stmmac_suspend, 1903 .suspend = stmmac_suspend,
1897 .resume = stmmac_resume, 1904 .resume = stmmac_resume,
1898#endif 1905 .freeze = stmmac_freeze,
1906 .thaw = stmmac_restore,
1907 .restore = stmmac_restore,
1908};
1909#else
1910static const struct dev_pm_ops stmmac_pm_ops;
1911#endif /* CONFIG_PM */
1899 1912
1913static struct platform_driver stmmac_driver = {
1914 .probe = stmmac_dvr_probe,
1915 .remove = stmmac_dvr_remove,
1916 .driver = {
1917 .name = STMMAC_RESOURCE_NAME,
1918 .owner = THIS_MODULE,
1919 .pm = &stmmac_pm_ops,
1920 },
1900}; 1921};
1901 1922
1902/** 1923/**
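The driver registration above moves from the legacy platform suspend/resume hooks to a dev_pm_ops table, which is what lets hibernation (freeze/thaw/restore) take a different path from ordinary suspend. A reduced sketch of that wiring, with hypothetical callback bodies:

#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>

static int example_probe(struct platform_device *pdev)  { return 0; }
static int example_remove(struct platform_device *pdev) { return 0; }

#ifdef CONFIG_PM
/* System suspend/resume: quiesce DMA but keep the interface configured. */
static int example_suspend(struct device *dev) { return 0; }
static int example_resume(struct device *dev)  { return 0; }
/* Hibernation: full close before the image is written, full re-open on restore. */
static int example_freeze(struct device *dev)  { return 0; }
static int example_restore(struct device *dev) { return 0; }

static const struct dev_pm_ops example_pm_ops = {
	.suspend = example_suspend,
	.resume	 = example_resume,
	.freeze	 = example_freeze,
	.thaw	 = example_restore,
	.restore = example_restore,
};
#else
static const struct dev_pm_ops example_pm_ops;
#endif

static struct platform_driver example_driver = {
	.probe	= example_probe,
	.remove	= example_remove,
	.driver	= {
		.name	= "example",
		.owner	= THIS_MODULE,
		.pm	= &example_pm_ops,
	},
};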
diff --git a/drivers/net/stmmac/stmmac_mdio.c b/drivers/net/stmmac/stmmac_mdio.c
index d7441616357d..234b4068a1fc 100644
--- a/drivers/net/stmmac/stmmac_mdio.c
+++ b/drivers/net/stmmac/stmmac_mdio.c
@@ -53,7 +53,7 @@ static int stmmac_mdio_read(struct mii_bus *bus, int phyaddr, int phyreg)
53 int data; 53 int data;
54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) | 54 u16 regValue = (((phyaddr << 11) & (0x0000F800)) |
55 ((phyreg << 6) & (0x000007C0))); 55 ((phyreg << 6) & (0x000007C0)));
56 regValue |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2); 56 regValue |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
57 57
58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1); 58 do {} while (((readl(priv->ioaddr + mii_address)) & MII_BUSY) == 1);
59 writel(regValue, priv->ioaddr + mii_address); 59 writel(regValue, priv->ioaddr + mii_address);
@@ -85,7 +85,7 @@ static int stmmac_mdio_write(struct mii_bus *bus, int phyaddr, int phyreg,
85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0))) 85 (((phyaddr << 11) & (0x0000F800)) | ((phyreg << 6) & (0x000007C0)))
86 | MII_WRITE; 86 | MII_WRITE;
87 87
88 value |= MII_BUSY | ((priv->mii_clk_csr & 7) << 2); 88 value |= MII_BUSY | ((priv->plat->clk_csr & 7) << 2);
89 89
90 90
91 /* Wait until any existing MII operation is complete */ 91 /* Wait until any existing MII operation is complete */
@@ -114,7 +114,7 @@ static int stmmac_mdio_reset(struct mii_bus *bus)
114 114
115 if (priv->phy_reset) { 115 if (priv->phy_reset) {
116 pr_debug("stmmac_mdio_reset: calling phy_reset\n"); 116 pr_debug("stmmac_mdio_reset: calling phy_reset\n");
117 priv->phy_reset(priv->bsp_priv); 117 priv->phy_reset(priv->plat->bsp_priv);
118 } 118 }
119 119
120 /* This is a workaround for problems with the STE101P PHY. 120 /* This is a workaround for problems with the STE101P PHY.
@@ -157,7 +157,7 @@ int stmmac_mdio_register(struct net_device *ndev)
157 new_bus->read = &stmmac_mdio_read; 157 new_bus->read = &stmmac_mdio_read;
158 new_bus->write = &stmmac_mdio_write; 158 new_bus->write = &stmmac_mdio_write;
159 new_bus->reset = &stmmac_mdio_reset; 159 new_bus->reset = &stmmac_mdio_reset;
160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->bus_id); 160 snprintf(new_bus->id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
161 new_bus->priv = ndev; 161 new_bus->priv = ndev;
162 new_bus->irq = irqlist; 162 new_bus->irq = irqlist;
163 new_bus->phy_mask = priv->phy_mask; 163 new_bus->phy_mask = priv->phy_mask;
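The MDIO hunks only change where the CSR clock divider comes from (the platform data rather than a private copy); the surrounding busy-wait protocol is unchanged. For reference, a simplified read path using the same bit layout as above; the register offsets here are illustrative, not the real GMAC ones:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/processor.h>

#define EX_MII_ADDR	0x10	/* hypothetical MII address register offset */
#define EX_MII_DATA	0x14	/* hypothetical MII data register offset */
#define EX_MII_BUSY	0x1

static int example_mdio_read(void __iomem *ioaddr, int phyaddr, int phyreg,
			     unsigned int clk_csr)
{
	/* PA in bits [15:11], GR in [10:6], CSR clock range in [4:2],
	 * busy flag in bit 0 -- the same layout as the hunk above. */
	u32 value = ((phyaddr << 11) & 0x0000f800) |
		    ((phyreg << 6) & 0x000007c0) |
		    ((clk_csr & 7) << 2) | EX_MII_BUSY;

	while (readl(ioaddr + EX_MII_ADDR) & EX_MII_BUSY)
		cpu_relax();		/* wait out any previous transaction */
	writel(value, ioaddr + EX_MII_ADDR);
	while (readl(ioaddr + EX_MII_ADDR) & EX_MII_BUSY)
		cpu_relax();		/* wait for this read to complete */

	return readl(ioaddr + EX_MII_DATA) & 0xffff;
}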
diff --git a/drivers/net/sundance.c b/drivers/net/sundance.c
index 3ed2a67bd6d3..4793df843c24 100644
--- a/drivers/net/sundance.c
+++ b/drivers/net/sundance.c
@@ -294,6 +294,9 @@ enum alta_offsets {
294 /* Aliased and bogus values! */ 294 /* Aliased and bogus values! */
295 RxStatus = 0x0c, 295 RxStatus = 0x0c,
296}; 296};
297
298#define ASIC_HI_WORD(x) ((x) + 2)
299
297enum ASICCtrl_HiWord_bit { 300enum ASICCtrl_HiWord_bit {
298 GlobalReset = 0x0001, 301 GlobalReset = 0x0001,
299 RxReset = 0x0002, 302 RxReset = 0x0002,
@@ -431,6 +434,7 @@ static void netdev_error(struct net_device *dev, int intr_status);
431static void netdev_error(struct net_device *dev, int intr_status); 434static void netdev_error(struct net_device *dev, int intr_status);
432static void set_rx_mode(struct net_device *dev); 435static void set_rx_mode(struct net_device *dev);
433static int __set_mac_addr(struct net_device *dev); 436static int __set_mac_addr(struct net_device *dev);
437static int sundance_set_mac_addr(struct net_device *dev, void *data);
434static struct net_device_stats *get_stats(struct net_device *dev); 438static struct net_device_stats *get_stats(struct net_device *dev);
435static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 439static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
436static int netdev_close(struct net_device *dev); 440static int netdev_close(struct net_device *dev);
@@ -464,7 +468,7 @@ static const struct net_device_ops netdev_ops = {
464 .ndo_do_ioctl = netdev_ioctl, 468 .ndo_do_ioctl = netdev_ioctl,
465 .ndo_tx_timeout = tx_timeout, 469 .ndo_tx_timeout = tx_timeout,
466 .ndo_change_mtu = change_mtu, 470 .ndo_change_mtu = change_mtu,
467 .ndo_set_mac_address = eth_mac_addr, 471 .ndo_set_mac_address = sundance_set_mac_addr,
468 .ndo_validate_addr = eth_validate_addr, 472 .ndo_validate_addr = eth_validate_addr,
469}; 473};
470 474
@@ -1016,7 +1020,7 @@ static void init_ring(struct net_device *dev)
1016 1020
1017 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1021 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1018 for (i = 0; i < RX_RING_SIZE; i++) { 1022 for (i = 0; i < RX_RING_SIZE; i++) {
1019 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz); 1023 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz + 2);
1020 np->rx_skbuff[i] = skb; 1024 np->rx_skbuff[i] = skb;
1021 if (skb == NULL) 1025 if (skb == NULL)
1022 break; 1026 break;
@@ -1407,7 +1411,7 @@ static void refill_rx (struct net_device *dev)
1407 struct sk_buff *skb; 1411 struct sk_buff *skb;
1408 entry = np->dirty_rx % RX_RING_SIZE; 1412 entry = np->dirty_rx % RX_RING_SIZE;
1409 if (np->rx_skbuff[entry] == NULL) { 1413 if (np->rx_skbuff[entry] == NULL) {
1410 skb = dev_alloc_skb(np->rx_buf_sz); 1414 skb = dev_alloc_skb(np->rx_buf_sz + 2);
1411 np->rx_skbuff[entry] = skb; 1415 np->rx_skbuff[entry] = skb;
1412 if (skb == NULL) 1416 if (skb == NULL)
1413 break; /* Better luck next round. */ 1417 break; /* Better luck next round. */
@@ -1592,6 +1596,19 @@ static int __set_mac_addr(struct net_device *dev)
1592 return 0; 1596 return 0;
1593} 1597}
1594 1598
1599/* Invoked with rtnl_lock held */
1600static int sundance_set_mac_addr(struct net_device *dev, void *data)
1601{
1602 const struct sockaddr *addr = data;
1603
1604 if (!is_valid_ether_addr(addr->sa_data))
1605 return -EINVAL;
1606 memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);
1607 __set_mac_addr(dev);
1608
1609 return 0;
1610}
1611
1595static const struct { 1612static const struct {
1596 const char name[ETH_GSTRING_LEN]; 1613 const char name[ETH_GSTRING_LEN];
1597} sundance_stats[] = { 1614} sundance_stats[] = {
@@ -1772,10 +1789,10 @@ static int netdev_close(struct net_device *dev)
1772 } 1789 }
1773 1790
1774 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset, 1791 iowrite16(GlobalReset | DMAReset | FIFOReset | NetworkReset,
1775 ioaddr +ASICCtrl + 2); 1792 ioaddr + ASIC_HI_WORD(ASICCtrl));
1776 1793
1777 for (i = 2000; i > 0; i--) { 1794 for (i = 2000; i > 0; i--) {
1778 if ((ioread16(ioaddr + ASICCtrl +2) & ResetBusy) == 0) 1795 if ((ioread16(ioaddr + ASIC_HI_WORD(ASICCtrl)) & ResetBusy) == 0)
1779 break; 1796 break;
1780 mdelay(1); 1797 mdelay(1);
1781 } 1798 }
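The Rx allocation changes in sundance grow each buffer by two bytes; the usual reason is to leave headroom for skb_reserve(skb, 2), so that after the 14-byte Ethernet header the IP header sits on a 4-byte boundary. A generic sketch of that allocation pattern (the buffer size is a placeholder):

#include <linux/skbuff.h>
#include <linux/netdevice.h>

static struct sk_buff *example_alloc_rx_skb(unsigned int rx_buf_sz)
{
	/* Over-allocate by two bytes of headroom... */
	struct sk_buff *skb = dev_alloc_skb(rx_buf_sz + 2);

	if (!skb)
		return NULL;	/* caller retries on the next refill pass */

	/* ...and shift the data pointer so the IP header, which follows the
	 * 14-byte Ethernet header, ends up 32-bit aligned. */
	skb_reserve(skb, 2);
	return skb;
}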
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 4ceb3cf6a9a9..1c5408f83937 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1004,7 +1004,7 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
1004 1004
1005 ctrl = 0; 1005 ctrl = 0;
1006 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1006 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1007 const u64 csum_start_off = skb_transport_offset(skb); 1007 const u64 csum_start_off = skb_checksum_start_offset(skb);
1008 const u64 csum_stuff_off = csum_start_off + skb->csum_offset; 1008 const u64 csum_stuff_off = csum_start_off + skb->csum_offset;
1009 1009
1010 ctrl = (TXDCTRL_CENAB | 1010 ctrl = (TXDCTRL_CENAB |
@@ -2380,10 +2380,8 @@ static int gem_suspend(struct pci_dev *pdev, pm_message_t state)
2380 */ 2380 */
2381 mutex_unlock(&gp->pm_mutex); 2381 mutex_unlock(&gp->pm_mutex);
2382 2382
2383 /* Wait for a pending reset task to complete */ 2383 /* Wait for the pending reset task to complete */
2384 while (gp->reset_task_pending) 2384 flush_work_sync(&gp->reset_task);
2385 yield();
2386 flush_scheduled_work();
2387 2385
2388 /* Shut the PHY down eventually and setup WOL */ 2386 /* Shut the PHY down eventually and setup WOL */
2389 gem_stop_phy(gp, gp->asleep_wol); 2387 gem_stop_phy(gp, gp->asleep_wol);
@@ -2928,10 +2926,8 @@ static void gem_remove_one(struct pci_dev *pdev)
2928 /* We shouldn't need any locking here */ 2926 /* We shouldn't need any locking here */
2929 gem_get_cell(gp); 2927 gem_get_cell(gp);
2930 2928
2931 /* Wait for a pending reset task to complete */ 2929 /* Cancel reset task */
2932 while (gp->reset_task_pending) 2930 cancel_work_sync(&gp->reset_task);
2933 yield();
2934 flush_scheduled_work();
2935 2931
2936 /* Shut the PHY down */ 2932 /* Shut the PHY down */
2937 gem_stop_phy(gp, 0); 2933 gem_stop_phy(gp, 0);
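The sungem hunks drop the open-coded "wait until reset_task_pending clears, then flush_scheduled_work()" loops in favour of per-work-item synchronisation. A minimal sketch of the two calls and when each fits, assuming a driver with a single reset work item:

#include <linux/workqueue.h>

struct example_priv {
	struct work_struct reset_task;
};

static void example_reset_task(struct work_struct *work)
{
	/* reset the hardware here */
}

static void example_init(struct example_priv *p)
{
	INIT_WORK(&p->reset_task, example_reset_task);
}

static void example_suspend_sync(struct example_priv *p)
{
	/* Suspend: let a pending or already-running reset finish first. */
	flush_work_sync(&p->reset_task);
}

static void example_remove_sync(struct example_priv *p)
{
	/* Remove: discard a pending reset, waiting only for one in flight. */
	cancel_work_sync(&p->reset_task);
}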
diff --git a/drivers/net/sunhme.c b/drivers/net/sunhme.c
index 5e28c414421a..55bbb9c15d96 100644
--- a/drivers/net/sunhme.c
+++ b/drivers/net/sunhme.c
@@ -2266,7 +2266,7 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2266 2266
2267 tx_flags = TXFLAG_OWN; 2267 tx_flags = TXFLAG_OWN;
2268 if (skb->ip_summed == CHECKSUM_PARTIAL) { 2268 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2269 const u32 csum_start_off = skb_transport_offset(skb); 2269 const u32 csum_start_off = skb_checksum_start_offset(skb);
2270 const u32 csum_stuff_off = csum_start_off + skb->csum_offset; 2270 const u32 csum_stuff_off = csum_start_off + skb->csum_offset;
2271 2271
2272 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE | 2272 tx_flags = (TXFLAG_OWN | TXFLAG_CSENABLE |
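Both sungem and sunhme now derive the checksum-start field from skb_checksum_start_offset() instead of skb_transport_offset(). With CHECKSUM_PARTIAL the stack guarantees skb->csum_start (which the former reads), whereas the transport header offset can legitimately differ, e.g. for forwarded traffic. A sketch of the pattern with made-up descriptor flag macros:

#include <linux/skbuff.h>

/* Hypothetical descriptor bit layout, for illustration only. */
#define EX_TX_CSUM_EN		(1u << 31)
#define EX_TX_CSUM_START(x)	(((u32)(x) & 0xff) << 16)
#define EX_TX_CSUM_STUFF(x)	(((u32)(x) & 0xff) << 8)

static u32 example_tx_csum_flags(const struct sk_buff *skb)
{
	u32 flags = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u32 start = skb_checksum_start_offset(skb);
		const u32 stuff = start + skb->csum_offset;

		flags = EX_TX_CSUM_EN |
			EX_TX_CSUM_START(start) |
			EX_TX_CSUM_STUFF(stuff);
	}
	return flags;
}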
diff --git a/drivers/net/sunlance.c b/drivers/net/sunlance.c
index 2cf84e5968b2..767e1e2b210d 100644
--- a/drivers/net/sunlance.c
+++ b/drivers/net/sunlance.c
@@ -1295,17 +1295,9 @@ static void sparc_lance_get_drvinfo(struct net_device *dev, struct ethtool_drvin
1295 strcpy(info->version, "2.02"); 1295 strcpy(info->version, "2.02");
1296} 1296}
1297 1297
1298static u32 sparc_lance_get_link(struct net_device *dev)
1299{
1300 /* We really do not keep track of this, but this
1301 * is better than not reporting anything at all.
1302 */
1303 return 1;
1304}
1305
1306static const struct ethtool_ops sparc_lance_ethtool_ops = { 1298static const struct ethtool_ops sparc_lance_ethtool_ops = {
1307 .get_drvinfo = sparc_lance_get_drvinfo, 1299 .get_drvinfo = sparc_lance_get_drvinfo,
1308 .get_link = sparc_lance_get_link, 1300 .get_link = ethtool_op_get_link,
1309}; 1301};
1310 1302
1311static const struct net_device_ops sparc_lance_ops = { 1303static const struct net_device_ops sparc_lance_ops = {
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index 474652a2f70d..3397618d4d96 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -324,7 +324,7 @@ static int bdx_fw_load(struct bdx_priv *priv)
324 ENTER; 324 ENTER;
325 master = READ_REG(priv, regINIT_SEMAPHORE); 325 master = READ_REG(priv, regINIT_SEMAPHORE);
326 if (!READ_REG(priv, regINIT_STATUS) && master) { 326 if (!READ_REG(priv, regINIT_STATUS) && master) {
327 rc = request_firmware(&fw, "tehuti/firmware.bin", &priv->pdev->dev); 327 rc = request_firmware(&fw, "tehuti/bdx.bin", &priv->pdev->dev);
328 if (rc) 328 if (rc)
329 goto out; 329 goto out;
330 bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size); 330 bdx_tx_push_desc_safe(priv, (char *)fw->data, fw->size);
@@ -2510,4 +2510,4 @@ module_exit(bdx_module_exit);
2510MODULE_LICENSE("GPL"); 2510MODULE_LICENSE("GPL");
2511MODULE_AUTHOR(DRIVER_AUTHOR); 2511MODULE_AUTHOR(DRIVER_AUTHOR);
2512MODULE_DESCRIPTION(BDX_DRV_DESC); 2512MODULE_DESCRIPTION(BDX_DRV_DESC);
2513MODULE_FIRMWARE("tehuti/firmware.bin"); 2513MODULE_FIRMWARE("tehuti/bdx.bin");
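The tehuti change is only a rename of the firmware blob, but it illustrates a rule worth keeping in mind: the name passed to request_firmware() must match the MODULE_FIRMWARE() annotation, since packaging tools use the latter to decide which files ship with the module. A stripped-down sketch with a placeholder name:

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

#define EX_FW_NAME "example/fw.bin"	/* keep in sync with MODULE_FIRMWARE() */

static int example_load_fw(struct device *dev)
{
	const struct firmware *fw;
	int rc;

	rc = request_firmware(&fw, EX_FW_NAME, dev);
	if (rc)
		return rc;		/* file missing or userspace timed out */

	/* ... push fw->data / fw->size to the device here ... */

	release_firmware(fw);
	return 0;
}

MODULE_FIRMWARE(EX_FW_NAME);
MODULE_LICENSE("GPL");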
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 30ccbb6d097a..7841a8f69998 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -32,6 +32,7 @@
32#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
33#include <linux/skbuff.h> 33#include <linux/skbuff.h>
34#include <linux/ethtool.h> 34#include <linux/ethtool.h>
35#include <linux/mdio.h>
35#include <linux/mii.h> 36#include <linux/mii.h>
36#include <linux/phy.h> 37#include <linux/phy.h>
37#include <linux/brcmphy.h> 38#include <linux/brcmphy.h>
@@ -69,10 +70,10 @@
69 70
70#define DRV_MODULE_NAME "tg3" 71#define DRV_MODULE_NAME "tg3"
71#define TG3_MAJ_NUM 3 72#define TG3_MAJ_NUM 3
72#define TG3_MIN_NUM 115 73#define TG3_MIN_NUM 116
73#define DRV_MODULE_VERSION \ 74#define DRV_MODULE_VERSION \
74 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM) 75 __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
75#define DRV_MODULE_RELDATE "October 14, 2010" 76#define DRV_MODULE_RELDATE "December 3, 2010"
76 77
77#define TG3_DEF_MAC_MODE 0 78#define TG3_DEF_MAC_MODE 0
78#define TG3_DEF_RX_MODE 0 79#define TG3_DEF_RX_MODE 0
@@ -1769,9 +1770,9 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1769 1770
1770 if (tp->link_config.autoneg == AUTONEG_ENABLE && 1771 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1771 current_link_up == 1 && 1772 current_link_up == 1 &&
1772 (tp->link_config.active_speed == SPEED_1000 || 1773 tp->link_config.active_duplex == DUPLEX_FULL &&
1773 (tp->link_config.active_speed == SPEED_100 && 1774 (tp->link_config.active_speed == SPEED_100 ||
1774 tp->link_config.active_duplex == DUPLEX_FULL))) { 1775 tp->link_config.active_speed == SPEED_1000)) {
1775 u32 eeectl; 1776 u32 eeectl;
1776 1777
1777 if (tp->link_config.active_speed == SPEED_1000) 1778 if (tp->link_config.active_speed == SPEED_1000)
@@ -1781,7 +1782,8 @@ static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
1781 1782
1782 tw32(TG3_CPMU_EEE_CTRL, eeectl); 1783 tw32(TG3_CPMU_EEE_CTRL, eeectl);
1783 1784
1784 tg3_phy_cl45_read(tp, 0x7, TG3_CL45_D7_EEERES_STAT, &val); 1785 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
1786 TG3_CL45_D7_EEERES_STAT, &val);
1785 1787
1786 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T || 1788 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
1787 val == TG3_CL45_D7_EEERES_STAT_LP_100TX) 1789 val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
@@ -2549,39 +2551,35 @@ static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
2549 tw32(MAC_TX_BACKOFF_SEED, addr_high); 2551 tw32(MAC_TX_BACKOFF_SEED, addr_high);
2550} 2552}
2551 2553
2552static int tg3_set_power_state(struct tg3 *tp, pci_power_t state) 2554static void tg3_enable_register_access(struct tg3 *tp)
2553{ 2555{
2554 u32 misc_host_ctrl; 2556 /*
2555 bool device_should_wake, do_low_power; 2557 * Make sure register accesses (indirect or otherwise) will function
2556 2558 * correctly.
2557 /* Make sure register accesses (indirect or otherwise)
2558 * will function correctly.
2559 */ 2559 */
2560 pci_write_config_dword(tp->pdev, 2560 pci_write_config_dword(tp->pdev,
2561 TG3PCI_MISC_HOST_CTRL, 2561 TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
2562 tp->misc_host_ctrl); 2562}
2563 2563
2564 switch (state) { 2564static int tg3_power_up(struct tg3 *tp)
2565 case PCI_D0: 2565{
2566 pci_enable_wake(tp->pdev, state, false); 2566 tg3_enable_register_access(tp);
2567 pci_set_power_state(tp->pdev, PCI_D0);
2568 2567
2569 /* Switch out of Vaux if it is a NIC */ 2568 pci_set_power_state(tp->pdev, PCI_D0);
2570 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2571 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2572 2569
2573 return 0; 2570 /* Switch out of Vaux if it is a NIC */
2571 if (tp->tg3_flags2 & TG3_FLG2_IS_NIC)
2572 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl, 100);
2574 2573
2575 case PCI_D1: 2574 return 0;
2576 case PCI_D2: 2575}
2577 case PCI_D3hot:
2578 break;
2579 2576
2580 default: 2577static int tg3_power_down_prepare(struct tg3 *tp)
2581 netdev_err(tp->dev, "Invalid power state (D%d) requested\n", 2578{
2582 state); 2579 u32 misc_host_ctrl;
2583 return -EINVAL; 2580 bool device_should_wake, do_low_power;
2584 } 2581
2582 tg3_enable_register_access(tp);
2585 2583
2586 /* Restore the CLKREQ setting. */ 2584 /* Restore the CLKREQ setting. */
2587 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) { 2585 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG) {
@@ -2600,8 +2598,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2600 tw32(TG3PCI_MISC_HOST_CTRL, 2598 tw32(TG3PCI_MISC_HOST_CTRL,
2601 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT); 2599 misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
2602 2600
2603 device_should_wake = pci_pme_capable(tp->pdev, state) && 2601 device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
2604 device_may_wakeup(&tp->pdev->dev) &&
2605 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE); 2602 (tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2606 2603
2607 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 2604 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
@@ -2728,12 +2725,10 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2728 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE))) 2725 (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)))
2729 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL; 2726 mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
2730 2727
2731 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) { 2728 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
2732 mac_mode |= tp->mac_mode & 2729 mac_mode |= MAC_MODE_APE_TX_EN |
2733 (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN); 2730 MAC_MODE_APE_RX_EN |
2734 if (mac_mode & MAC_MODE_APE_TX_EN) 2731 MAC_MODE_TDE_ENABLE;
2735 mac_mode |= MAC_MODE_TDE_ENABLE;
2736 }
2737 2732
2738 tw32_f(MAC_MODE, mac_mode); 2733 tw32_f(MAC_MODE, mac_mode);
2739 udelay(100); 2734 udelay(100);
@@ -2823,13 +2818,15 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2823 2818
2824 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN); 2819 tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
2825 2820
2826 if (device_should_wake) 2821 return 0;
2827 pci_enable_wake(tp->pdev, state, true); 2822}
2828 2823
2829 /* Finally, set the new power state. */ 2824static void tg3_power_down(struct tg3 *tp)
2830 pci_set_power_state(tp->pdev, state); 2825{
2826 tg3_power_down_prepare(tp);
2831 2827
2832 return 0; 2828 pci_wake_from_d3(tp->pdev, tp->tg3_flags & TG3_FLAG_WOL_ENABLE);
2829 pci_set_power_state(tp->pdev, PCI_D3hot);
2833} 2830}
2834 2831
2835static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex) 2832static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
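The hunk above splits the old tg3_set_power_state(tp, state) into tg3_power_up() and tg3_power_down_prepare()/tg3_power_down(), leaving the PCI D-state transition and PME arming to the generic PCI helpers. A reduced sketch of that split, with the device-specific WoL programming elided:

#include <linux/pci.h>

static int example_power_up(struct pci_dev *pdev)
{
	/* Restore register access, then put the function back into D0. */
	pci_set_power_state(pdev, PCI_D0);
	return 0;
}

static void example_power_down(struct pci_dev *pdev, bool wol_enabled)
{
	/* ... device-specific shutdown and WoL setup would go here ... */

	pci_wake_from_d3(pdev, wol_enabled);	/* arm (or disarm) PME from D3 */
	pci_set_power_state(pdev, PCI_D3hot);
}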
@@ -2969,7 +2966,7 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2969 } 2966 }
2970 2967
2971 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) { 2968 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
2972 u32 val = 0; 2969 u32 val;
2973 2970
2974 tw32(TG3_CPMU_EEE_MODE, 2971 tw32(TG3_CPMU_EEE_MODE,
2975 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE); 2972 tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
@@ -2986,19 +2983,18 @@ static void tg3_phy_copper_begin(struct tg3 *tp)
2986 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, 2983 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2,
2987 val | MII_TG3_DSP_CH34TP2_HIBW01); 2984 val | MII_TG3_DSP_CH34TP2_HIBW01);
2988 2985
2986 val = 0;
2989 if (tp->link_config.autoneg == AUTONEG_ENABLE) { 2987 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2990 /* Advertise 100-BaseTX EEE ability */ 2988 /* Advertise 100-BaseTX EEE ability */
2991 if (tp->link_config.advertising & 2989 if (tp->link_config.advertising &
2992 (ADVERTISED_100baseT_Half | 2990 ADVERTISED_100baseT_Full)
2993 ADVERTISED_100baseT_Full)) 2991 val |= MDIO_AN_EEE_ADV_100TX;
2994 val |= TG3_CL45_D7_EEEADV_CAP_100TX;
2995 /* Advertise 1000-BaseT EEE ability */ 2992 /* Advertise 1000-BaseT EEE ability */
2996 if (tp->link_config.advertising & 2993 if (tp->link_config.advertising &
2997 (ADVERTISED_1000baseT_Half | 2994 ADVERTISED_1000baseT_Full)
2998 ADVERTISED_1000baseT_Full)) 2995 val |= MDIO_AN_EEE_ADV_1000T;
2999 val |= TG3_CL45_D7_EEEADV_CAP_1000T;
3000 } 2996 }
3001 tg3_phy_cl45_write(tp, 0x7, TG3_CL45_D7_EEEADV_CAP, val); 2997 tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3002 2998
3003 /* Turn off SM_DSP clock. */ 2999 /* Turn off SM_DSP clock. */
3004 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL | 3000 val = MII_TG3_AUXCTL_SHDWSEL_AUXCTL |
@@ -5763,7 +5759,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5763 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping); 5759 dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
5764 5760
5765 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5761 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
5766 !mss && skb->len > ETH_DATA_LEN) 5762 !mss && skb->len > VLAN_ETH_FRAME_LEN)
5767 base_flags |= TXD_FLAG_JMB_PKT; 5763 base_flags |= TXD_FLAG_JMB_PKT;
5768 5764
5769 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 5765 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5997,7 +5993,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5997#endif 5993#endif
5998 5994
5999 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) && 5995 if ((tp->tg3_flags3 & TG3_FLG3_USE_JUMBO_BDFLAG) &&
6000 !mss && skb->len > ETH_DATA_LEN) 5996 !mss && skb->len > VLAN_ETH_FRAME_LEN)
6001 base_flags |= TXD_FLAG_JMB_PKT; 5997 base_flags |= TXD_FLAG_JMB_PKT;
6002 5998
6003 len = skb_headlen(skb); 5999 len = skb_headlen(skb);
@@ -6339,13 +6335,13 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
6339 kfree(tpr->rx_jmb_buffers); 6335 kfree(tpr->rx_jmb_buffers);
6340 tpr->rx_jmb_buffers = NULL; 6336 tpr->rx_jmb_buffers = NULL;
6341 if (tpr->rx_std) { 6337 if (tpr->rx_std) {
6342 pci_free_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), 6338 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
6343 tpr->rx_std, tpr->rx_std_mapping); 6339 tpr->rx_std, tpr->rx_std_mapping);
6344 tpr->rx_std = NULL; 6340 tpr->rx_std = NULL;
6345 } 6341 }
6346 if (tpr->rx_jmb) { 6342 if (tpr->rx_jmb) {
6347 pci_free_consistent(tp->pdev, TG3_RX_JMB_RING_BYTES(tp), 6343 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
6348 tpr->rx_jmb, tpr->rx_jmb_mapping); 6344 tpr->rx_jmb, tpr->rx_jmb_mapping);
6349 tpr->rx_jmb = NULL; 6345 tpr->rx_jmb = NULL;
6350 } 6346 }
6351} 6347}
@@ -6358,8 +6354,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6358 if (!tpr->rx_std_buffers) 6354 if (!tpr->rx_std_buffers)
6359 return -ENOMEM; 6355 return -ENOMEM;
6360 6356
6361 tpr->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_STD_RING_BYTES(tp), 6357 tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
6362 &tpr->rx_std_mapping); 6358 TG3_RX_STD_RING_BYTES(tp),
6359 &tpr->rx_std_mapping,
6360 GFP_KERNEL);
6363 if (!tpr->rx_std) 6361 if (!tpr->rx_std)
6364 goto err_out; 6362 goto err_out;
6365 6363
@@ -6370,9 +6368,10 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
6370 if (!tpr->rx_jmb_buffers) 6368 if (!tpr->rx_jmb_buffers)
6371 goto err_out; 6369 goto err_out;
6372 6370
6373 tpr->rx_jmb = pci_alloc_consistent(tp->pdev, 6371 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
6374 TG3_RX_JMB_RING_BYTES(tp), 6372 TG3_RX_JMB_RING_BYTES(tp),
6375 &tpr->rx_jmb_mapping); 6373 &tpr->rx_jmb_mapping,
6374 GFP_KERNEL);
6376 if (!tpr->rx_jmb) 6375 if (!tpr->rx_jmb)
6377 goto err_out; 6376 goto err_out;
6378 } 6377 }
@@ -6491,7 +6490,7 @@ static void tg3_free_consistent(struct tg3 *tp)
6491 struct tg3_napi *tnapi = &tp->napi[i]; 6490 struct tg3_napi *tnapi = &tp->napi[i];
6492 6491
6493 if (tnapi->tx_ring) { 6492 if (tnapi->tx_ring) {
6494 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES, 6493 dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
6495 tnapi->tx_ring, tnapi->tx_desc_mapping); 6494 tnapi->tx_ring, tnapi->tx_desc_mapping);
6496 tnapi->tx_ring = NULL; 6495 tnapi->tx_ring = NULL;
6497 } 6496 }
@@ -6500,25 +6499,26 @@ static void tg3_free_consistent(struct tg3 *tp)
6500 tnapi->tx_buffers = NULL; 6499 tnapi->tx_buffers = NULL;
6501 6500
6502 if (tnapi->rx_rcb) { 6501 if (tnapi->rx_rcb) {
6503 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp), 6502 dma_free_coherent(&tp->pdev->dev,
6504 tnapi->rx_rcb, 6503 TG3_RX_RCB_RING_BYTES(tp),
6505 tnapi->rx_rcb_mapping); 6504 tnapi->rx_rcb,
6505 tnapi->rx_rcb_mapping);
6506 tnapi->rx_rcb = NULL; 6506 tnapi->rx_rcb = NULL;
6507 } 6507 }
6508 6508
6509 tg3_rx_prodring_fini(tp, &tnapi->prodring); 6509 tg3_rx_prodring_fini(tp, &tnapi->prodring);
6510 6510
6511 if (tnapi->hw_status) { 6511 if (tnapi->hw_status) {
6512 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE, 6512 dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
6513 tnapi->hw_status, 6513 tnapi->hw_status,
6514 tnapi->status_mapping); 6514 tnapi->status_mapping);
6515 tnapi->hw_status = NULL; 6515 tnapi->hw_status = NULL;
6516 } 6516 }
6517 } 6517 }
6518 6518
6519 if (tp->hw_stats) { 6519 if (tp->hw_stats) {
6520 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats), 6520 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
6521 tp->hw_stats, tp->stats_mapping); 6521 tp->hw_stats, tp->stats_mapping);
6522 tp->hw_stats = NULL; 6522 tp->hw_stats = NULL;
6523 } 6523 }
6524} 6524}
@@ -6531,9 +6531,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6531{ 6531{
6532 int i; 6532 int i;
6533 6533
6534 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6534 tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
6535 sizeof(struct tg3_hw_stats), 6535 sizeof(struct tg3_hw_stats),
6536 &tp->stats_mapping); 6536 &tp->stats_mapping,
6537 GFP_KERNEL);
6537 if (!tp->hw_stats) 6538 if (!tp->hw_stats)
6538 goto err_out; 6539 goto err_out;
6539 6540
@@ -6543,9 +6544,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6543 struct tg3_napi *tnapi = &tp->napi[i]; 6544 struct tg3_napi *tnapi = &tp->napi[i];
6544 struct tg3_hw_status *sblk; 6545 struct tg3_hw_status *sblk;
6545 6546
6546 tnapi->hw_status = pci_alloc_consistent(tp->pdev, 6547 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
6547 TG3_HW_STATUS_SIZE, 6548 TG3_HW_STATUS_SIZE,
6548 &tnapi->status_mapping); 6549 &tnapi->status_mapping,
6550 GFP_KERNEL);
6549 if (!tnapi->hw_status) 6551 if (!tnapi->hw_status)
6550 goto err_out; 6552 goto err_out;
6551 6553
@@ -6566,9 +6568,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6566 if (!tnapi->tx_buffers) 6568 if (!tnapi->tx_buffers)
6567 goto err_out; 6569 goto err_out;
6568 6570
6569 tnapi->tx_ring = pci_alloc_consistent(tp->pdev, 6571 tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
6570 TG3_TX_RING_BYTES, 6572 TG3_TX_RING_BYTES,
6571 &tnapi->tx_desc_mapping); 6573 &tnapi->tx_desc_mapping,
6574 GFP_KERNEL);
6572 if (!tnapi->tx_ring) 6575 if (!tnapi->tx_ring)
6573 goto err_out; 6576 goto err_out;
6574 } 6577 }
@@ -6601,9 +6604,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
6601 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)) 6604 if (!i && (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS))
6602 continue; 6605 continue;
6603 6606
6604 tnapi->rx_rcb = pci_alloc_consistent(tp->pdev, 6607 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
6605 TG3_RX_RCB_RING_BYTES(tp), 6608 TG3_RX_RCB_RING_BYTES(tp),
6606 &tnapi->rx_rcb_mapping); 6609 &tnapi->rx_rcb_mapping,
6610 GFP_KERNEL);
6607 if (!tnapi->rx_rcb) 6611 if (!tnapi->rx_rcb)
6608 goto err_out; 6612 goto err_out;
6609 6613
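The repeated tg3 hunks in this area are a mechanical conversion from the deprecated pci_alloc_consistent()/pci_free_consistent() wrappers to the generic DMA API, which takes the struct device and an explicit gfp_t. A one-ring sketch of the pattern:

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/pci.h>

struct example_ring {
	void		*desc;
	dma_addr_t	desc_mapping;
	size_t		bytes;
};

static int example_ring_alloc(struct pci_dev *pdev, struct example_ring *r,
			      size_t bytes)
{
	r->bytes = bytes;
	r->desc = dma_alloc_coherent(&pdev->dev, bytes, &r->desc_mapping,
				     GFP_KERNEL);
	return r->desc ? 0 : -ENOMEM;
}

static void example_ring_free(struct pci_dev *pdev, struct example_ring *r)
{
	if (!r->desc)
		return;
	dma_free_coherent(&pdev->dev, r->bytes, r->desc, r->desc_mapping);
	r->desc = NULL;
}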
@@ -6987,7 +6991,7 @@ static void tg3_restore_pci_state(struct tg3 *tp)
6987 6991
6988 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) { 6992 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785) {
6989 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) 6993 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)
6990 pcie_set_readrq(tp->pdev, 4096); 6994 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
6991 else { 6995 else {
6992 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, 6996 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
6993 tp->pci_cacheline_sz); 6997 tp->pci_cacheline_sz);
@@ -7181,7 +7185,7 @@ static int tg3_chip_reset(struct tg3 *tp)
7181 tp->pcie_cap + PCI_EXP_DEVCTL, 7185 tp->pcie_cap + PCI_EXP_DEVCTL,
7182 val16); 7186 val16);
7183 7187
7184 pcie_set_readrq(tp->pdev, 4096); 7188 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
7185 7189
7186 /* Clear error status */ 7190 /* Clear error status */
7187 pci_write_config_word(tp->pdev, 7191 pci_write_config_word(tp->pdev,
@@ -7222,19 +7226,21 @@ static int tg3_chip_reset(struct tg3 *tp)
7222 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl); 7226 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7223 } 7227 }
7224 7228
7229 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7230 tp->mac_mode = MAC_MODE_APE_TX_EN |
7231 MAC_MODE_APE_RX_EN |
7232 MAC_MODE_TDE_ENABLE;
7233
7225 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) { 7234 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7226 tp->mac_mode = MAC_MODE_PORT_MODE_TBI; 7235 tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
7227 tw32_f(MAC_MODE, tp->mac_mode); 7236 val = tp->mac_mode;
7228 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) { 7237 } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7229 tp->mac_mode = MAC_MODE_PORT_MODE_GMII; 7238 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7230 tw32_f(MAC_MODE, tp->mac_mode); 7239 val = tp->mac_mode;
7231 } else if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) {
7232 tp->mac_mode &= (MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN);
7233 if (tp->mac_mode & MAC_MODE_APE_TX_EN)
7234 tp->mac_mode |= MAC_MODE_TDE_ENABLE;
7235 tw32_f(MAC_MODE, tp->mac_mode);
7236 } else 7240 } else
7237 tw32_f(MAC_MODE, 0); 7241 val = 0;
7242
7243 tw32_f(MAC_MODE, val);
7238 udelay(40); 7244 udelay(40);
7239 7245
7240 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC); 7246 tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
@@ -7801,6 +7807,37 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7801 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) 7807 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
7802 tg3_abort_hw(tp, 1); 7808 tg3_abort_hw(tp, 1);
7803 7809
7810 /* Enable MAC control of LPI */
7811 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7812 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7813 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7814 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7815
7816 tw32_f(TG3_CPMU_EEE_CTRL,
7817 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7818
7819 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7820 TG3_CPMU_EEEMD_LPI_IN_TX |
7821 TG3_CPMU_EEEMD_LPI_IN_RX |
7822 TG3_CPMU_EEEMD_EEE_ENABLE;
7823
7824 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
7825 val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
7826
7827 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
7828 val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
7829
7830 tw32_f(TG3_CPMU_EEE_MODE, val);
7831
7832 tw32_f(TG3_CPMU_EEE_DBTMR1,
7833 TG3_CPMU_DBTMR1_PCIEXIT_2047US |
7834 TG3_CPMU_DBTMR1_LNKIDLE_2047US);
7835
7836 tw32_f(TG3_CPMU_EEE_DBTMR2,
7837 TG3_CPMU_DBTMR1_APE_TX_2047US |
7838 TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
7839 }
7840
7804 if (reset_phy) 7841 if (reset_phy)
7805 tg3_phy_reset(tp); 7842 tg3_phy_reset(tp);
7806 7843
@@ -7860,18 +7897,21 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7860 tw32(GRC_MODE, grc_mode); 7897 tw32(GRC_MODE, grc_mode);
7861 } 7898 }
7862 7899
7863 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) { 7900 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
7864 u32 grc_mode = tr32(GRC_MODE); 7901 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
7902 u32 grc_mode = tr32(GRC_MODE);
7865 7903
7866 /* Access the lower 1K of PL PCIE block registers. */ 7904 /* Access the lower 1K of PL PCIE block registers. */
7867 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK; 7905 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
7868 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL); 7906 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
7869 7907
7870 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5); 7908 val = tr32(TG3_PCIE_TLDLPL_PORT +
7871 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5, 7909 TG3_PCIE_PL_LO_PHYCTL5);
7872 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ); 7910 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
7911 val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
7873 7912
7874 tw32(GRC_MODE, grc_mode); 7913 tw32(GRC_MODE, grc_mode);
7914 }
7875 7915
7876 val = tr32(TG3_CPMU_LSPD_10MB_CLK); 7916 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
7877 val &= ~CPMU_LSPD_10MB_MACCLK_MASK; 7917 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
@@ -7879,22 +7919,6 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7879 tw32(TG3_CPMU_LSPD_10MB_CLK, val); 7919 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
7880 } 7920 }
7881 7921
7882 /* Enable MAC control of LPI */
7883 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
7884 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
7885 TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
7886 TG3_CPMU_EEE_LNKIDL_UART_IDL);
7887
7888 tw32_f(TG3_CPMU_EEE_CTRL,
7889 TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
7890
7891 tw32_f(TG3_CPMU_EEE_MODE,
7892 TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
7893 TG3_CPMU_EEEMD_LPI_IN_TX |
7894 TG3_CPMU_EEEMD_LPI_IN_RX |
7895 TG3_CPMU_EEEMD_EEE_ENABLE);
7896 }
7897
7898 /* This works around an issue with Athlon chipsets on 7922 /* This works around an issue with Athlon chipsets on
7899 * B3 tigon3 silicon. This bit has no effect on any 7923 * B3 tigon3 silicon. This bit has no effect on any
7900 * other revision. But do not set this on PCI Express 7924 * other revision. But do not set this on PCI Express
@@ -8162,8 +8186,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8162 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB | 8186 RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8163 RDMAC_MODE_LNGREAD_ENAB); 8187 RDMAC_MODE_LNGREAD_ENAB);
8164 8188
8165 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 || 8189 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8166 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8167 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS; 8190 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8168 8191
8169 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 || 8192 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
@@ -8203,6 +8226,10 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8203 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 8226 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8204 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) { 8227 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) {
8205 val = tr32(TG3_RDMA_RSRVCTRL_REG); 8228 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8229 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
8230 val &= ~TG3_RDMA_RSRVCTRL_TXMRGN_MASK;
8231 val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B;
8232 }
8206 tw32(TG3_RDMA_RSRVCTRL_REG, 8233 tw32(TG3_RDMA_RSRVCTRL_REG,
8207 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX); 8234 val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8208 } 8235 }
@@ -8280,7 +8307,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8280 } 8307 }
8281 8308
8282 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 8309 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
8283 tp->mac_mode &= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN; 8310 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
8284 else 8311 else
8285 tp->mac_mode = 0; 8312 tp->mac_mode = 0;
8286 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE | 8313 tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
@@ -9031,8 +9058,14 @@ static bool tg3_enable_msix(struct tg3 *tp)
9031 pci_disable_msix(tp->pdev); 9058 pci_disable_msix(tp->pdev);
9032 return false; 9059 return false;
9033 } 9060 }
9034 if (tp->irq_cnt > 1) 9061
9062 if (tp->irq_cnt > 1) {
9035 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS; 9063 tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
9064 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
9065 tp->tg3_flags3 |= TG3_FLG3_ENABLE_TSS;
9066 netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9067 }
9068 }
9036 9069
9037 return true; 9070 return true;
9038} 9071}
@@ -9101,7 +9134,7 @@ static int tg3_open(struct net_device *dev)
9101 9134
9102 netif_carrier_off(tp->dev); 9135 netif_carrier_off(tp->dev);
9103 9136
9104 err = tg3_set_power_state(tp, PCI_D0); 9137 err = tg3_power_up(tp);
9105 if (err) 9138 if (err)
9106 return err; 9139 return err;
9107 9140
@@ -9266,7 +9299,7 @@ static int tg3_close(struct net_device *dev)
9266 9299
9267 tg3_free_consistent(tp); 9300 tg3_free_consistent(tp);
9268 9301
9269 tg3_set_power_state(tp, PCI_D3hot); 9302 tg3_power_down(tp);
9270 9303
9271 netif_carrier_off(tp->dev); 9304 netif_carrier_off(tp->dev);
9272 9305
@@ -11068,7 +11101,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11068 struct tg3 *tp = netdev_priv(dev); 11101 struct tg3 *tp = netdev_priv(dev);
11069 11102
11070 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11103 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11071 tg3_set_power_state(tp, PCI_D0); 11104 tg3_power_up(tp);
11072 11105
11073 memset(data, 0, sizeof(u64) * TG3_NUM_TEST); 11106 memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11074 11107
@@ -11136,7 +11169,7 @@ static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11136 tg3_phy_start(tp); 11169 tg3_phy_start(tp);
11137 } 11170 }
11138 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) 11171 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
11139 tg3_set_power_state(tp, PCI_D3hot); 11172 tg3_power_down(tp);
11140 11173
11141} 11174}
11142 11175
@@ -12411,8 +12444,9 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
12411 if (cfg2 & (1 << 18)) 12444 if (cfg2 & (1 << 18))
12412 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS; 12445 tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
12413 12446
12414 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 12447 if (((tp->tg3_flags3 & TG3_FLG3_5717_PLUS) ||
12415 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) && 12448 ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
12449 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX))) &&
12416 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN)) 12450 (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
12417 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD; 12451 tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
12418 12452
@@ -12548,9 +12582,11 @@ static int __devinit tg3_phy_probe(struct tg3 *tp)
12548 } 12582 }
12549 } 12583 }
12550 12584
12551 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 || 12585 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
12552 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 && 12586 ((tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
12553 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)) 12587 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
12588 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
12589 tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
12554 tp->phy_flags |= TG3_PHYFLG_EEE_CAP; 12590 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
12555 12591
12556 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) && 12592 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
@@ -12658,7 +12694,7 @@ static void __devinit tg3_read_vpd(struct tg3 *tp)
12658 cnt = pci_read_vpd(tp->pdev, pos, 12694 cnt = pci_read_vpd(tp->pdev, pos,
12659 TG3_NVM_VPD_LEN - pos, 12695 TG3_NVM_VPD_LEN - pos,
12660 &vpd_data[pos]); 12696 &vpd_data[pos]);
12661 if (cnt == -ETIMEDOUT || -EINTR) 12697 if (cnt == -ETIMEDOUT || cnt == -EINTR)
12662 cnt = 0; 12698 cnt = 0;
12663 else if (cnt < 0) 12699 else if (cnt < 0)
12664 goto out_not_found; 12700 goto out_not_found;
@@ -13047,17 +13083,15 @@ static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13047 return 512; 13083 return 512;
13048} 13084}
13049 13085
13086DEFINE_PCI_DEVICE_TABLE(write_reorder_chipsets) = {
13087 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13088 { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13089 { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13090 { },
13091};
13092
13050static int __devinit tg3_get_invariants(struct tg3 *tp) 13093static int __devinit tg3_get_invariants(struct tg3 *tp)
13051{ 13094{
13052 static struct pci_device_id write_reorder_chipsets[] = {
13053 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
13054 PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13055 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
13056 PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13057 { PCI_DEVICE(PCI_VENDOR_ID_VIA,
13058 PCI_DEVICE_ID_VIA_8385_0) },
13059 { },
13060 };
13061 u32 misc_ctrl_reg; 13095 u32 misc_ctrl_reg;
13062 u32 pci_state_reg, grc_misc_cfg; 13096 u32 pci_state_reg, grc_misc_cfg;
13063 u32 val; 13097 u32 val;
@@ -13359,7 +13393,45 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13359 13393
13360 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS; 13394 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
13361 13395
13362 pcie_set_readrq(tp->pdev, 4096); 13396 tp->pcie_readrq = 4096;
13397 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) {
13398 u16 word;
13399
13400 pci_read_config_word(tp->pdev,
13401 tp->pcie_cap + PCI_EXP_LNKSTA,
13402 &word);
13403 switch (word & PCI_EXP_LNKSTA_CLS) {
13404 case PCI_EXP_LNKSTA_CLS_2_5GB:
13405 word &= PCI_EXP_LNKSTA_NLW;
13406 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13407 switch (word) {
13408 case 2:
13409 tp->pcie_readrq = 2048;
13410 break;
13411 case 4:
13412 tp->pcie_readrq = 1024;
13413 break;
13414 }
13415 break;
13416
13417 case PCI_EXP_LNKSTA_CLS_5_0GB:
13418 word &= PCI_EXP_LNKSTA_NLW;
13419 word >>= PCI_EXP_LNKSTA_NLW_SHIFT;
13420 switch (word) {
13421 case 1:
13422 tp->pcie_readrq = 2048;
13423 break;
13424 case 2:
13425 tp->pcie_readrq = 1024;
13426 break;
13427 case 4:
13428 tp->pcie_readrq = 512;
13429 break;
13430 }
13431 }
13432 }
13433
13434 pcie_set_readrq(tp->pdev, tp->pcie_readrq);
13363 13435
13364 pci_read_config_word(tp->pdev, 13436 pci_read_config_word(tp->pdev,
13365 tp->pcie_cap + PCI_EXP_LNKCTL, 13437 tp->pcie_cap + PCI_EXP_LNKCTL,
@@ -13546,7 +13618,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13546 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS)) 13618 (tp->tg3_flags3 & TG3_FLG3_5717_PLUS))
13547 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT; 13619 tp->tg3_flags |= TG3_FLAG_CPMU_PRESENT;
13548 13620
13549	/* Set up tp->grc_local_ctrl before calling tg3_set_power_state(). 13621	/* Set up tp->grc_local_ctrl before calling tg3_power_up().
13550 * GPIO1 driven high will bring 5700's external PHY out of reset. 13622 * GPIO1 driven high will bring 5700's external PHY out of reset.
13551 * It is also used as eeprom write protect on LOMs. 13623 * It is also used as eeprom write protect on LOMs.
13552 */ 13624 */
@@ -13577,7 +13649,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13577 } 13649 }
13578 13650
13579 /* Force the chip into D0. */ 13651 /* Force the chip into D0. */
13580 err = tg3_set_power_state(tp, PCI_D0); 13652 err = tg3_power_up(tp);
13581 if (err) { 13653 if (err) {
13582 dev_err(&tp->pdev->dev, "Transition to D0 failed\n"); 13654 dev_err(&tp->pdev->dev, "Transition to D0 failed\n");
13583 return err; 13655 return err;
@@ -13722,8 +13794,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
13722 13794
13723 /* Preserve the APE MAC_MODE bits */ 13795 /* Preserve the APE MAC_MODE bits */
13724 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE) 13796 if (tp->tg3_flags3 & TG3_FLG3_ENABLE_APE)
13725 tp->mac_mode = tr32(MAC_MODE) | 13797 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13726 MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
13727 else 13798 else
13728 tp->mac_mode = TG3_DEF_MAC_MODE; 13799 tp->mac_mode = TG3_DEF_MAC_MODE;
13729 13800
@@ -14153,13 +14224,19 @@ static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dm
14153 14224
14154#define TEST_BUFFER_SIZE 0x2000 14225#define TEST_BUFFER_SIZE 0x2000
14155 14226
14227DEFINE_PCI_DEVICE_TABLE(dma_wait_state_chipsets) = {
14228 { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14229 { },
14230};
14231
14156static int __devinit tg3_test_dma(struct tg3 *tp) 14232static int __devinit tg3_test_dma(struct tg3 *tp)
14157{ 14233{
14158 dma_addr_t buf_dma; 14234 dma_addr_t buf_dma;
14159 u32 *buf, saved_dma_rwctrl; 14235 u32 *buf, saved_dma_rwctrl;
14160 int ret = 0; 14236 int ret = 0;
14161 14237
14162 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 14238 buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
14239 &buf_dma, GFP_KERNEL);
14163 if (!buf) { 14240 if (!buf) {
14164 ret = -ENOMEM; 14241 ret = -ENOMEM;
14165 goto out_nofree; 14242 goto out_nofree;
@@ -14321,11 +14398,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14321 } 14398 }
14322 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) != 14399 if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
14323 DMA_RWCTRL_WRITE_BNDRY_16) { 14400 DMA_RWCTRL_WRITE_BNDRY_16) {
14324 static struct pci_device_id dma_wait_state_chipsets[] = {
14325 { PCI_DEVICE(PCI_VENDOR_ID_APPLE,
14326 PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
14327 { },
14328 };
14329 14401
14330 /* DMA test passed without adjusting DMA boundary, 14402 /* DMA test passed without adjusting DMA boundary,
14331 * now look for chipsets that are known to expose the 14403 * now look for chipsets that are known to expose the
@@ -14343,7 +14415,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
14343 } 14415 }
14344 14416
14345out: 14417out:
14346 pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma); 14418 dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
14347out_nofree: 14419out_nofree:
14348 return ret; 14420 return ret;
14349} 14421}
@@ -14957,7 +15029,7 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
14957 if (tp->fw) 15029 if (tp->fw)
14958 release_firmware(tp->fw); 15030 release_firmware(tp->fw);
14959 15031
14960 flush_scheduled_work(); 15032 cancel_work_sync(&tp->reset_task);
14961 15033
14962 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 15034 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
14963 tg3_phy_fini(tp); 15035 tg3_phy_fini(tp);
@@ -14980,23 +15052,18 @@ static void __devexit tg3_remove_one(struct pci_dev *pdev)
14980 } 15052 }
14981} 15053}
14982 15054
14983static int tg3_suspend(struct pci_dev *pdev, pm_message_t state) 15055#ifdef CONFIG_PM_SLEEP
15056static int tg3_suspend(struct device *device)
14984{ 15057{
15058 struct pci_dev *pdev = to_pci_dev(device);
14985 struct net_device *dev = pci_get_drvdata(pdev); 15059 struct net_device *dev = pci_get_drvdata(pdev);
14986 struct tg3 *tp = netdev_priv(dev); 15060 struct tg3 *tp = netdev_priv(dev);
14987 pci_power_t target_state;
14988 int err; 15061 int err;
14989 15062
14990 /* PCI register 4 needs to be saved whether netif_running() or not.
14991 * MSI address and data need to be saved if using MSI and
14992 * netif_running().
14993 */
14994 pci_save_state(pdev);
14995
14996 if (!netif_running(dev)) 15063 if (!netif_running(dev))
14997 return 0; 15064 return 0;
14998 15065
14999 flush_scheduled_work(); 15066 flush_work_sync(&tp->reset_task);
15000 tg3_phy_stop(tp); 15067 tg3_phy_stop(tp);
15001 tg3_netif_stop(tp); 15068 tg3_netif_stop(tp);
15002 15069
@@ -15013,9 +15080,7 @@ static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
15013 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE; 15080 tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
15014 tg3_full_unlock(tp); 15081 tg3_full_unlock(tp);
15015 15082
15016 target_state = pdev->pm_cap ? pci_target_state(pdev) : PCI_D3hot; 15083 err = tg3_power_down_prepare(tp);
15017
15018 err = tg3_set_power_state(tp, target_state);
15019 if (err) { 15084 if (err) {
15020 int err2; 15085 int err2;
15021 15086
@@ -15042,21 +15107,16 @@ out:
15042 return err; 15107 return err;
15043} 15108}
15044 15109
15045static int tg3_resume(struct pci_dev *pdev) 15110static int tg3_resume(struct device *device)
15046{ 15111{
15112 struct pci_dev *pdev = to_pci_dev(device);
15047 struct net_device *dev = pci_get_drvdata(pdev); 15113 struct net_device *dev = pci_get_drvdata(pdev);
15048 struct tg3 *tp = netdev_priv(dev); 15114 struct tg3 *tp = netdev_priv(dev);
15049 int err; 15115 int err;
15050 15116
15051 pci_restore_state(tp->pdev);
15052
15053 if (!netif_running(dev)) 15117 if (!netif_running(dev))
15054 return 0; 15118 return 0;
15055 15119
15056 err = tg3_set_power_state(tp, PCI_D0);
15057 if (err)
15058 return err;
15059
15060 netif_device_attach(dev); 15120 netif_device_attach(dev);
15061 15121
15062 tg3_full_lock(tp, 0); 15122 tg3_full_lock(tp, 0);
@@ -15080,13 +15140,21 @@ out:
15080 return err; 15140 return err;
15081} 15141}
15082 15142
15143static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15144#define TG3_PM_OPS (&tg3_pm_ops)
15145
15146#else
15147
15148#define TG3_PM_OPS NULL
15149
15150#endif /* CONFIG_PM_SLEEP */
15151
15083static struct pci_driver tg3_driver = { 15152static struct pci_driver tg3_driver = {
15084 .name = DRV_MODULE_NAME, 15153 .name = DRV_MODULE_NAME,
15085 .id_table = tg3_pci_tbl, 15154 .id_table = tg3_pci_tbl,
15086 .probe = tg3_init_one, 15155 .probe = tg3_init_one,
15087 .remove = __devexit_p(tg3_remove_one), 15156 .remove = __devexit_p(tg3_remove_one),
15088 .suspend = tg3_suspend, 15157 .driver.pm = TG3_PM_OPS,
15089 .resume = tg3_resume
15090}; 15158};
15091 15159
15092static int __init tg3_init(void) 15160static int __init tg3_init(void)
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 4a1974804b9f..d62c8d937c82 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -1094,13 +1094,19 @@
1094/* 0x3664 --> 0x36b0 unused */ 1094/* 0x3664 --> 0x36b0 unused */
1095 1095
1096#define TG3_CPMU_EEE_MODE 0x000036b0 1096#define TG3_CPMU_EEE_MODE 0x000036b0
1097#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008 1097#define TG3_CPMU_EEEMD_APE_TX_DET_EN 0x00000004
1098#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080 1098#define TG3_CPMU_EEEMD_ERLY_L1_XIT_DET 0x00000008
1099#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100 1099#define TG3_CPMU_EEEMD_SND_IDX_DET_EN 0x00000040
1100#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200 1100#define TG3_CPMU_EEEMD_LPI_ENABLE 0x00000080
1101#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000 1101#define TG3_CPMU_EEEMD_LPI_IN_TX 0x00000100
1102/* 0x36b4 --> 0x36b8 unused */ 1102#define TG3_CPMU_EEEMD_LPI_IN_RX 0x00000200
1103 1103#define TG3_CPMU_EEEMD_EEE_ENABLE 0x00100000
1104#define TG3_CPMU_EEE_DBTMR1 0x000036b4
1105#define TG3_CPMU_DBTMR1_PCIEXIT_2047US 0x07ff0000
1106#define TG3_CPMU_DBTMR1_LNKIDLE_2047US 0x000070ff
1107#define TG3_CPMU_EEE_DBTMR2 0x000036b8
1108#define  TG3_CPMU_DBTMR2_APE_TX_2047US	 0x07ff0000
1109#define TG3_CPMU_DBTMR2_TXIDXEQ_2047US 0x000070ff
1104#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc 1110#define TG3_CPMU_EEE_LNKIDL_CTRL 0x000036bc
1105#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000 1111#define TG3_CPMU_EEE_LNKIDL_PCIE_NL0 0x01000000
1106#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004 1112#define TG3_CPMU_EEE_LNKIDL_UART_IDL 0x00000004
@@ -1327,6 +1333,8 @@
1327 1333
1328#define TG3_RDMA_RSRVCTRL_REG 0x00004900 1334#define TG3_RDMA_RSRVCTRL_REG 0x00004900
1329#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004 1335#define TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX 0x00000004
1336#define TG3_RDMA_RSRVCTRL_TXMRGN_320B 0x28000000
1337#define TG3_RDMA_RSRVCTRL_TXMRGN_MASK 0xffe00000
1330/* 0x4904 --> 0x4910 unused */ 1338/* 0x4904 --> 0x4910 unused */
1331 1339
1332#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910 1340#define TG3_LSO_RD_DMA_CRPTEN_CTRL 0x00004910
@@ -2170,9 +2178,6 @@
2170#define MII_TG3_TEST1_CRC_EN 0x8000 2178#define MII_TG3_TEST1_CRC_EN 0x8000
2171 2179
2172/* Clause 45 expansion registers */ 2180/* Clause 45 expansion registers */
2173#define TG3_CL45_D7_EEEADV_CAP 0x003c
2174#define TG3_CL45_D7_EEEADV_CAP_100TX 0x0002
2175#define TG3_CL45_D7_EEEADV_CAP_1000T 0x0004
2176#define TG3_CL45_D7_EEERES_STAT 0x803e 2181#define TG3_CL45_D7_EEERES_STAT 0x803e
2177#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002 2182#define TG3_CL45_D7_EEERES_STAT_LP_100TX 0x0002
2178#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004 2183#define TG3_CL45_D7_EEERES_STAT_LP_1000T 0x0004
@@ -2562,10 +2567,6 @@ struct ring_info {
2562 DEFINE_DMA_UNMAP_ADDR(mapping); 2567 DEFINE_DMA_UNMAP_ADDR(mapping);
2563}; 2568};
2564 2569
2565struct tg3_config_info {
2566 u32 flags;
2567};
2568
2569struct tg3_link_config { 2570struct tg3_link_config {
2570 /* Describes what we're trying to get. */ 2571 /* Describes what we're trying to get. */
2571 u32 advertising; 2572 u32 advertising;
@@ -2713,17 +2714,17 @@ struct tg3_napi {
2713 u32 last_irq_tag; 2714 u32 last_irq_tag;
2714 u32 int_mbox; 2715 u32 int_mbox;
2715 u32 coal_now; 2716 u32 coal_now;
2716 u32 tx_prod;
2717 u32 tx_cons;
2718 u32 tx_pending;
2719 u32 prodmbox;
2720 2717
2721 u32 consmbox; 2718 u32 consmbox ____cacheline_aligned;
2722 u32 rx_rcb_ptr; 2719 u32 rx_rcb_ptr;
2723 u16 *rx_rcb_prod_idx; 2720 u16 *rx_rcb_prod_idx;
2724 struct tg3_rx_prodring_set prodring; 2721 struct tg3_rx_prodring_set prodring;
2725
2726 struct tg3_rx_buffer_desc *rx_rcb; 2722 struct tg3_rx_buffer_desc *rx_rcb;
2723
2724 u32 tx_prod ____cacheline_aligned;
2725 u32 tx_cons;
2726 u32 tx_pending;
2727 u32 prodmbox;
2727 struct tg3_tx_buffer_desc *tx_ring; 2728 struct tg3_tx_buffer_desc *tx_ring;
2728 struct ring_info *tx_buffers; 2729 struct ring_info *tx_buffers;
2729 2730
@@ -2946,6 +2947,7 @@ struct tg3 {
2946 int pcix_cap; 2947 int pcix_cap;
2947 int pcie_cap; 2948 int pcie_cap;
2948 }; 2949 };
2950 int pcie_readrq;
2949 2951
2950 struct mii_bus *mdio_bus; 2952 struct mii_bus *mdio_bus;
2951 int mdio_irq[PHY_MAX_ADDR]; 2953 int mdio_irq[PHY_MAX_ADDR];
diff --git a/drivers/net/tokenring/ibmtr.c b/drivers/net/tokenring/ibmtr.c
index 91e6c78271a3..4786497de03e 100644
--- a/drivers/net/tokenring/ibmtr.c
+++ b/drivers/net/tokenring/ibmtr.c
@@ -657,8 +657,9 @@ static int __devinit ibmtr_probe1(struct net_device *dev, int PIOaddr)
657#ifndef PCMCIA 657#ifndef PCMCIA
658 /* finish figuring the shared RAM address */ 658 /* finish figuring the shared RAM address */
659 if (cardpresent == TR_ISA) { 659 if (cardpresent == TR_ISA) {
660 static __u32 ram_bndry_mask[] = 660 static const __u32 ram_bndry_mask[] = {
661 { 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000 }; 661 0xffffe000, 0xffffc000, 0xffff8000, 0xffff0000
662 };
662 __u32 new_base, rrr_32, chk_base, rbm; 663 __u32 new_base, rrr_32, chk_base, rbm;
663 664
664 rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03; 665 rrr_32=readb(ti->mmio+ACA_OFFSET+ACA_RW+RRR_ODD) >> 2 & 0x03;
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index c78a50586c1d..b13c6b040be3 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -964,7 +964,7 @@ static void de_set_media (struct de_private *de)
964 dw32(MacMode, macmode); 964 dw32(MacMode, macmode);
965} 965}
966 966
967static void de_next_media (struct de_private *de, u32 *media, 967static void de_next_media (struct de_private *de, const u32 *media,
968 unsigned int n_media) 968 unsigned int n_media)
969{ 969{
970 unsigned int i; 970 unsigned int i;
@@ -1008,10 +1008,10 @@ static void de21040_media_timer (unsigned long data)
1008 return; 1008 return;
1009 1009
1010 if (de->media_type == DE_MEDIA_AUI) { 1010 if (de->media_type == DE_MEDIA_AUI) {
1011 u32 next_state = DE_MEDIA_TP; 1011 static const u32 next_state = DE_MEDIA_TP;
1012 de_next_media(de, &next_state, 1); 1012 de_next_media(de, &next_state, 1);
1013 } else { 1013 } else {
1014 u32 next_state = DE_MEDIA_AUI; 1014 static const u32 next_state = DE_MEDIA_AUI;
1015 de_next_media(de, &next_state, 1); 1015 de_next_media(de, &next_state, 1);
1016 } 1016 }
1017 1017
@@ -1136,13 +1136,19 @@ static void de21041_media_timer (unsigned long data)
1136 * simply resets the PHY and reloads the current media settings. 1136 * simply resets the PHY and reloads the current media settings.
1137 */ 1137 */
1138 if (de->media_type == DE_MEDIA_AUI) { 1138 if (de->media_type == DE_MEDIA_AUI) {
1139 u32 next_states[] = { DE_MEDIA_BNC, DE_MEDIA_TP_AUTO }; 1139 static const u32 next_states[] = {
1140 DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1141 };
1140 de_next_media(de, next_states, ARRAY_SIZE(next_states)); 1142 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1141 } else if (de->media_type == DE_MEDIA_BNC) { 1143 } else if (de->media_type == DE_MEDIA_BNC) {
1142 u32 next_states[] = { DE_MEDIA_TP_AUTO, DE_MEDIA_AUI }; 1144 static const u32 next_states[] = {
1145 DE_MEDIA_TP_AUTO, DE_MEDIA_AUI
1146 };
1143 de_next_media(de, next_states, ARRAY_SIZE(next_states)); 1147 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1144 } else { 1148 } else {
1145 u32 next_states[] = { DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO }; 1149 static const u32 next_states[] = {
1150 DE_MEDIA_AUI, DE_MEDIA_BNC, DE_MEDIA_TP_AUTO
1151 };
1146 de_next_media(de, next_states, ARRAY_SIZE(next_states)); 1152 de_next_media(de, next_states, ARRAY_SIZE(next_states));
1147 } 1153 }
1148 1154
diff --git a/drivers/net/tulip/tulip_core.c b/drivers/net/tulip/tulip_core.c
index 2c39f2591216..5c01e260f1ba 100644
--- a/drivers/net/tulip/tulip_core.c
+++ b/drivers/net/tulip/tulip_core.c
@@ -1302,17 +1302,18 @@ static const struct net_device_ops tulip_netdev_ops = {
1302#endif 1302#endif
1303}; 1303};
1304 1304
1305DEFINE_PCI_DEVICE_TABLE(early_486_chipsets) = {
1306 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1307 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1308 { },
1309};
1310
1305static int __devinit tulip_init_one (struct pci_dev *pdev, 1311static int __devinit tulip_init_one (struct pci_dev *pdev,
1306 const struct pci_device_id *ent) 1312 const struct pci_device_id *ent)
1307{ 1313{
1308 struct tulip_private *tp; 1314 struct tulip_private *tp;
1309 /* See note below on the multiport cards. */ 1315 /* See note below on the multiport cards. */
1310 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'}; 1316 static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
1311 static struct pci_device_id early_486_chipsets[] = {
1312 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
1313 { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
1314 { },
1315 };
1316 static int last_irq; 1317 static int last_irq;
1317 static int multiport_cnt; /* For four-port boards w/one EEPROM */ 1318 static int multiport_cnt; /* For four-port boards w/one EEPROM */
1318 int i, irq; 1319 int i, irq;
@@ -1682,7 +1683,9 @@ static int __devinit tulip_init_one (struct pci_dev *pdev,
1682 tp->full_duplex_lock = 1; 1683 tp->full_duplex_lock = 1;
1683 1684
1684 if (tulip_media_cap[tp->default_port] & MediaIsMII) { 1685 if (tulip_media_cap[tp->default_port] & MediaIsMII) {
1685 u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 }; 1686 static const u16 media2advert[] = {
1687 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200
1688 };
1686 tp->mii_advertise = media2advert[tp->default_port - 9]; 1689 tp->mii_advertise = media2advert[tp->default_port - 9];
1687 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */ 1690 tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
1688 } 1691 }
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index e625f9530cf9..b100bd50a0d7 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -757,7 +757,7 @@ static __inline__ ssize_t tun_put_user(struct tun_struct *tun,
757 757
758 if (skb->ip_summed == CHECKSUM_PARTIAL) { 758 if (skb->ip_summed == CHECKSUM_PARTIAL) {
759 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 759 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
760 gso.csum_start = skb->csum_start - skb_headroom(skb); 760 gso.csum_start = skb_checksum_start_offset(skb);
761 gso.csum_offset = skb->csum_offset; 761 gso.csum_offset = skb->csum_offset;
762 } /* else everything is zero */ 762 } /* else everything is zero */
763 763
diff --git a/drivers/net/typhoon.c b/drivers/net/typhoon.c
index 5b83c3f35f47..a3c46f6a15e7 100644
--- a/drivers/net/typhoon.c
+++ b/drivers/net/typhoon.c
@@ -1004,7 +1004,6 @@ typhoon_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1004 } 1004 }
1005 1005
1006 strcpy(info->driver, KBUILD_MODNAME); 1006 strcpy(info->driver, KBUILD_MODNAME);
1007 strcpy(info->version, UTS_RELEASE);
1008 strcpy(info->bus_info, pci_name(pci_dev)); 1007 strcpy(info->bus_info, pci_name(pci_dev));
1009} 1008}
1010 1009
diff --git a/drivers/net/ucc_geth.c b/drivers/net/ucc_geth.c
index acbdab3d66ca..73a3e0d93237 100644
--- a/drivers/net/ucc_geth.c
+++ b/drivers/net/ucc_geth.c
@@ -28,6 +28,7 @@
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/workqueue.h> 29#include <linux/workqueue.h>
30#include <linux/of_mdio.h> 30#include <linux/of_mdio.h>
31#include <linux/of_net.h>
31#include <linux/of_platform.h> 32#include <linux/of_platform.h>
32 33
33#include <asm/uaccess.h> 34#include <asm/uaccess.h>
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 52ffabe6db0e..6f600cced6e1 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -196,6 +196,25 @@ config USB_NET_CDC_EEM
196 IEEE 802 "local assignment" bit is set in the address, a "usbX" 196 IEEE 802 "local assignment" bit is set in the address, a "usbX"
197 name is used instead. 197 name is used instead.
198 198
199config USB_NET_CDC_NCM
200 tristate "CDC NCM support"
201 depends on USB_USBNET
202 default y
203 help
204 This driver provides support for CDC NCM (Network Control Model
205 Device USB Class Specification). The CDC NCM specification is
206 available from <http://www.usb.org/>.
207
208 Say "y" to link the driver statically, or "m" to build a
209 dynamically linked module.
210
211 This driver should work with at least the following devices:
212 * ST-Ericsson M700 LTE FDD/TDD Mobile Broadband Modem (ref. design)
213 * ST-Ericsson M5730 HSPA+ Mobile Broadband Modem (reference design)
214 * ST-Ericsson M570 HSPA+ Mobile Broadband Modem (reference design)
215 * ST-Ericsson M343 HSPA Mobile Broadband Modem (reference design)
216 * Ericsson F5521gw Mobile Broadband Module
217
199config USB_NET_DM9601 218config USB_NET_DM9601
200 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices" 219 tristate "Davicom DM9601 based USB 1.1 10/100 ethernet devices"
201 depends on USB_USBNET 220 depends on USB_USBNET
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index a19b0259ae16..cac170301187 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -26,4 +26,5 @@ obj-$(CONFIG_USB_CDC_PHONET) += cdc-phonet.o
26obj-$(CONFIG_USB_IPHETH) += ipheth.o 26obj-$(CONFIG_USB_IPHETH) += ipheth.o
27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o 27obj-$(CONFIG_USB_SIERRA_NET) += sierra_net.o
28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o 28obj-$(CONFIG_USB_NET_CX82310_ETH) += cx82310_eth.o
29obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
29 30
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index aea4645be7f6..6140b56cce53 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1508,6 +1508,10 @@ static const struct usb_device_id products [] = {
1508 USB_DEVICE (0x0b95, 0x1780), 1508 USB_DEVICE (0x0b95, 0x1780),
1509 .driver_info = (unsigned long) &ax88178_info, 1509 .driver_info = (unsigned long) &ax88178_info,
1510}, { 1510}, {
1511 // Logitec LAN-GTJ/U2A
1512 USB_DEVICE (0x0789, 0x0160),
1513 .driver_info = (unsigned long) &ax88178_info,
1514}, {
1511 // Linksys USB200M Rev 2 1515 // Linksys USB200M Rev 2
1512 USB_DEVICE (0x13b1, 0x0018), 1516 USB_DEVICE (0x13b1, 0x0018),
1513 .driver_info = (unsigned long) &ax88772_info, 1517 .driver_info = (unsigned long) &ax88772_info,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index b3fe0de40469..9a60e415d76b 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -99,9 +99,7 @@ int usbnet_generic_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
99 */ 99 */
100 buf = dev->udev->actconfig->extra; 100 buf = dev->udev->actconfig->extra;
101 len = dev->udev->actconfig->extralen; 101 len = dev->udev->actconfig->extralen;
102 if (len) 102 dev_dbg(&intf->dev, "CDC descriptors on config\n");
103 dev_dbg(&intf->dev,
104 "CDC descriptors on config\n");
105 } 103 }
106 104
107 /* Maybe CDC descriptors are after the endpoint? This bug has 105 /* Maybe CDC descriptors are after the endpoint? This bug has
diff --git a/drivers/net/usb/cdc_ncm.c b/drivers/net/usb/cdc_ncm.c
new file mode 100644
index 000000000000..593c104ab199
--- /dev/null
+++ b/drivers/net/usb/cdc_ncm.c
@@ -0,0 +1,1213 @@
1/*
2 * cdc_ncm.c
3 *
4 * Copyright (C) ST-Ericsson 2010
5 * Contact: Alexey Orishko <alexey.orishko@stericsson.com>
6 * Original author: Hans Petter Selasky <hans.petter.selasky@stericsson.com>
7 *
8 * USB Host Driver for Network Control Model (NCM)
9 * http://www.usb.org/developers/devclass_docs/NCM10.zip
10 *
11 * The NCM encoding, decoding and initialization logic
12 * derives from FreeBSD 8.x. if_cdce.c and if_cdcereg.h
13 *
14 * This software is available to you under a choice of one of two
15 * licenses. You may choose this file to be licensed under the terms
16 * of the GNU General Public License (GPL) Version 2 or the 2-clause
17 * BSD license listed below:
18 *
19 * Redistribution and use in source and binary forms, with or without
20 * modification, are permitted provided that the following conditions
21 * are met:
22 * 1. Redistributions of source code must retain the above copyright
23 * notice, this list of conditions and the following disclaimer.
24 * 2. Redistributions in binary form must reproduce the above copyright
25 * notice, this list of conditions and the following disclaimer in the
26 * documentation and/or other materials provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 * SUCH DAMAGE.
39 */
40
41#include <linux/module.h>
42#include <linux/init.h>
43#include <linux/netdevice.h>
44#include <linux/ctype.h>
45#include <linux/ethtool.h>
46#include <linux/workqueue.h>
47#include <linux/mii.h>
48#include <linux/crc32.h>
49#include <linux/usb.h>
50#include <linux/version.h>
51#include <linux/timer.h>
52#include <linux/spinlock.h>
53#include <linux/atomic.h>
54#include <linux/usb/usbnet.h>
55#include <linux/usb/cdc.h>
56
57#define DRIVER_VERSION "30-Nov-2010"
58
59/* CDC NCM subclass 3.2.1 */
60#define USB_CDC_NCM_NDP16_LENGTH_MIN 0x10
61
62/* Maximum NTB length */
63#define CDC_NCM_NTB_MAX_SIZE_TX 16384 /* bytes */
64#define CDC_NCM_NTB_MAX_SIZE_RX 16384 /* bytes */
65
66/* Minimum value for MaxDatagramSize, ch. 6.2.9 */
67#define CDC_NCM_MIN_DATAGRAM_SIZE 1514 /* bytes */
68
69#define CDC_NCM_MIN_TX_PKT 512 /* bytes */
70
71/* Default value for MaxDatagramSize */
72#define CDC_NCM_MAX_DATAGRAM_SIZE 2048 /* bytes */
73
74/*
 75 * Maximum number of datagrams in the NCM Datagram Pointer Table, not counting
 76 * the last NULL entry. Any additional datagrams in the NTB are discarded.
77 */
78#define CDC_NCM_DPT_DATAGRAMS_MAX 32
79
 80/* Restart the timer if the number of datagrams is less than the given value */
81#define CDC_NCM_RESTART_TIMER_DATAGRAM_CNT 3
82
83/* The following macro defines the minimum header space */
84#define CDC_NCM_MIN_HDR_SIZE \
85 (sizeof(struct usb_cdc_ncm_nth16) + sizeof(struct usb_cdc_ncm_ndp16) + \
86 (CDC_NCM_DPT_DATAGRAMS_MAX + 1) * sizeof(struct usb_cdc_ncm_dpe16))
87
88struct connection_speed_change {
89 __le32 USBitRate; /* holds 3GPP downlink value, bits per second */
90 __le32 DSBitRate; /* holds 3GPP uplink value, bits per second */
91} __attribute__ ((packed));
92
93struct cdc_ncm_data {
94 struct usb_cdc_ncm_nth16 nth16;
95 struct usb_cdc_ncm_ndp16 ndp16;
96 struct usb_cdc_ncm_dpe16 dpe16[CDC_NCM_DPT_DATAGRAMS_MAX + 1];
97};
98
99struct cdc_ncm_ctx {
100 struct cdc_ncm_data rx_ncm;
101 struct cdc_ncm_data tx_ncm;
102 struct usb_cdc_ncm_ntb_parameters ncm_parm;
103 struct timer_list tx_timer;
104
105 const struct usb_cdc_ncm_desc *func_desc;
106 const struct usb_cdc_header_desc *header_desc;
107 const struct usb_cdc_union_desc *union_desc;
108 const struct usb_cdc_ether_desc *ether_desc;
109
110 struct net_device *netdev;
111 struct usb_device *udev;
112 struct usb_host_endpoint *in_ep;
113 struct usb_host_endpoint *out_ep;
114 struct usb_host_endpoint *status_ep;
115 struct usb_interface *intf;
116 struct usb_interface *control;
117 struct usb_interface *data;
118
119 struct sk_buff *tx_curr_skb;
120 struct sk_buff *tx_rem_skb;
121
122 spinlock_t mtx;
123
124 u32 tx_timer_pending;
125 u32 tx_curr_offset;
126 u32 tx_curr_last_offset;
127 u32 tx_curr_frame_num;
128 u32 rx_speed;
129 u32 tx_speed;
130 u32 rx_max;
131 u32 tx_max;
132 u32 max_datagram_size;
133 u16 tx_max_datagrams;
134 u16 tx_remainder;
135 u16 tx_modulus;
136 u16 tx_ndp_modulus;
137 u16 tx_seq;
138 u16 connected;
139 u8 data_claimed;
140 u8 control_claimed;
141};
142
143static void cdc_ncm_tx_timeout(unsigned long arg);
144static const struct driver_info cdc_ncm_info;
145static struct usb_driver cdc_ncm_driver;
146static struct ethtool_ops cdc_ncm_ethtool_ops;
147
148static const struct usb_device_id cdc_devs[] = {
149 { USB_INTERFACE_INFO(USB_CLASS_COMM,
150 USB_CDC_SUBCLASS_NCM, USB_CDC_PROTO_NONE),
151 .driver_info = (unsigned long)&cdc_ncm_info,
152 },
153 {
154 },
155};
156
157MODULE_DEVICE_TABLE(usb, cdc_devs);
158
159static void
160cdc_ncm_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
161{
162 struct usbnet *dev = netdev_priv(net);
163
164 strncpy(info->driver, dev->driver_name, sizeof(info->driver));
165 strncpy(info->version, DRIVER_VERSION, sizeof(info->version));
166 strncpy(info->fw_version, dev->driver_info->description,
167 sizeof(info->fw_version));
168 usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
169}
170
171static int
172cdc_ncm_do_request(struct cdc_ncm_ctx *ctx, struct usb_cdc_notification *req,
173 void *data, u16 flags, u16 *actlen, u16 timeout)
174{
175 int err;
176
177 err = usb_control_msg(ctx->udev, (req->bmRequestType & USB_DIR_IN) ?
178 usb_rcvctrlpipe(ctx->udev, 0) :
179 usb_sndctrlpipe(ctx->udev, 0),
180 req->bNotificationType, req->bmRequestType,
181 req->wValue,
182 req->wIndex, data,
183 req->wLength, timeout);
184
185 if (err < 0) {
186 if (actlen)
187 *actlen = 0;
188 return err;
189 }
190
191 if (actlen)
192 *actlen = err;
193
194 return 0;
195}
196
197static u8 cdc_ncm_setup(struct cdc_ncm_ctx *ctx)
198{
199 struct usb_cdc_notification req;
200 u32 val;
201 __le16 max_datagram_size;
202 u8 flags;
203 u8 iface_no;
204 int err;
205
206 iface_no = ctx->control->cur_altsetting->desc.bInterfaceNumber;
207
208 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
209 req.bNotificationType = USB_CDC_GET_NTB_PARAMETERS;
210 req.wValue = 0;
211 req.wIndex = cpu_to_le16(iface_no);
212 req.wLength = cpu_to_le16(sizeof(ctx->ncm_parm));
213
214 err = cdc_ncm_do_request(ctx, &req, &ctx->ncm_parm, 0, NULL, 1000);
215 if (err) {
216 pr_debug("failed GET_NTB_PARAMETERS\n");
217 return 1;
218 }
219
220 /* read correct set of parameters according to device mode */
221 ctx->rx_max = le32_to_cpu(ctx->ncm_parm.dwNtbInMaxSize);
222 ctx->tx_max = le32_to_cpu(ctx->ncm_parm.dwNtbOutMaxSize);
223 ctx->tx_remainder = le16_to_cpu(ctx->ncm_parm.wNdpOutPayloadRemainder);
224 ctx->tx_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutDivisor);
225 ctx->tx_ndp_modulus = le16_to_cpu(ctx->ncm_parm.wNdpOutAlignment);
226
227 if (ctx->func_desc != NULL)
228 flags = ctx->func_desc->bmNetworkCapabilities;
229 else
230 flags = 0;
231
232 pr_debug("dwNtbInMaxSize=%u dwNtbOutMaxSize=%u "
233 "wNdpOutPayloadRemainder=%u wNdpOutDivisor=%u "
234 "wNdpOutAlignment=%u flags=0x%x\n",
235 ctx->rx_max, ctx->tx_max, ctx->tx_remainder, ctx->tx_modulus,
236 ctx->tx_ndp_modulus, flags);
237
238 /* max count of tx datagrams without terminating NULL entry */
239 ctx->tx_max_datagrams = CDC_NCM_DPT_DATAGRAMS_MAX;
240
241 /* verify maximum size of received NTB in bytes */
242 if ((ctx->rx_max <
243 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
244 (ctx->rx_max > CDC_NCM_NTB_MAX_SIZE_RX)) {
245 pr_debug("Using default maximum receive length=%d\n",
246 CDC_NCM_NTB_MAX_SIZE_RX);
247 ctx->rx_max = CDC_NCM_NTB_MAX_SIZE_RX;
248 }
249
250 /* verify maximum size of transmitted NTB in bytes */
251 if ((ctx->tx_max <
252 (CDC_NCM_MIN_HDR_SIZE + CDC_NCM_MIN_DATAGRAM_SIZE)) ||
253 (ctx->tx_max > CDC_NCM_NTB_MAX_SIZE_TX)) {
254 pr_debug("Using default maximum transmit length=%d\n",
255 CDC_NCM_NTB_MAX_SIZE_TX);
256 ctx->tx_max = CDC_NCM_NTB_MAX_SIZE_TX;
257 }
258
259 /*
260 * verify that the structure alignment is:
261 * - power of two
262 * - not greater than the maximum transmit length
263 * - not less than four bytes
264 */
265 val = ctx->tx_ndp_modulus;
266
267 if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
268 (val != ((-val) & val)) || (val >= ctx->tx_max)) {
269 pr_debug("Using default alignment: 4 bytes\n");
270 ctx->tx_ndp_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
271 }
272
273 /*
274 * verify that the payload alignment is:
275 * - power of two
276 * - not greater than the maximum transmit length
277 * - not less than four bytes
278 */
279 val = ctx->tx_modulus;
280
281 if ((val < USB_CDC_NCM_NDP_ALIGN_MIN_SIZE) ||
282 (val != ((-val) & val)) || (val >= ctx->tx_max)) {
283 pr_debug("Using default transmit modulus: 4 bytes\n");
284 ctx->tx_modulus = USB_CDC_NCM_NDP_ALIGN_MIN_SIZE;
285 }
286
287 /* verify the payload remainder */
288 if (ctx->tx_remainder >= ctx->tx_modulus) {
289 pr_debug("Using default transmit remainder: 0 bytes\n");
290 ctx->tx_remainder = 0;
291 }
292
293 /* adjust TX-remainder according to NCM specification. */
294 ctx->tx_remainder = ((ctx->tx_remainder - ETH_HLEN) &
295 (ctx->tx_modulus - 1));
296
297 /* additional configuration */
298
299 /* set CRC Mode */
300 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
301 req.bNotificationType = USB_CDC_SET_CRC_MODE;
302 req.wValue = cpu_to_le16(USB_CDC_NCM_CRC_NOT_APPENDED);
303 req.wIndex = cpu_to_le16(iface_no);
304 req.wLength = 0;
305
306 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
307 if (err)
308 pr_debug("Setting CRC mode off failed\n");
309
310 /* set NTB format */
311 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_OUT | USB_RECIP_INTERFACE;
312 req.bNotificationType = USB_CDC_SET_NTB_FORMAT;
313 req.wValue = cpu_to_le16(USB_CDC_NCM_NTB16_FORMAT);
314 req.wIndex = cpu_to_le16(iface_no);
315 req.wLength = 0;
316
317 err = cdc_ncm_do_request(ctx, &req, NULL, 0, NULL, 1000);
318 if (err)
319 pr_debug("Setting NTB format to 16-bit failed\n");
320
321 /* set Max Datagram Size (MTU) */
322 req.bmRequestType = USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE;
323 req.bNotificationType = USB_CDC_GET_MAX_DATAGRAM_SIZE;
324 req.wValue = 0;
325 req.wIndex = cpu_to_le16(iface_no);
326 req.wLength = cpu_to_le16(2);
327
328 err = cdc_ncm_do_request(ctx, &req, &max_datagram_size, 0, NULL, 1000);
329 if (err) {
330 pr_debug(" GET_MAX_DATAGRAM_SIZE failed, using size=%u\n",
331 CDC_NCM_MIN_DATAGRAM_SIZE);
332 /* use default */
333 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
334 } else {
335 ctx->max_datagram_size = le16_to_cpu(max_datagram_size);
336
337 if (ctx->max_datagram_size < CDC_NCM_MIN_DATAGRAM_SIZE)
338 ctx->max_datagram_size = CDC_NCM_MIN_DATAGRAM_SIZE;
339 else if (ctx->max_datagram_size > CDC_NCM_MAX_DATAGRAM_SIZE)
340 ctx->max_datagram_size = CDC_NCM_MAX_DATAGRAM_SIZE;
341 }
342
343 if (ctx->netdev->mtu != (ctx->max_datagram_size - ETH_HLEN))
344 ctx->netdev->mtu = ctx->max_datagram_size - ETH_HLEN;
345
346 return 0;
347}
348
349static void
350cdc_ncm_find_endpoints(struct cdc_ncm_ctx *ctx, struct usb_interface *intf)
351{
352 struct usb_host_endpoint *e;
353 u8 ep;
354
355 for (ep = 0; ep < intf->cur_altsetting->desc.bNumEndpoints; ep++) {
356
357 e = intf->cur_altsetting->endpoint + ep;
358 switch (e->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) {
359 case USB_ENDPOINT_XFER_INT:
360 if (usb_endpoint_dir_in(&e->desc)) {
361 if (ctx->status_ep == NULL)
362 ctx->status_ep = e;
363 }
364 break;
365
366 case USB_ENDPOINT_XFER_BULK:
367 if (usb_endpoint_dir_in(&e->desc)) {
368 if (ctx->in_ep == NULL)
369 ctx->in_ep = e;
370 } else {
371 if (ctx->out_ep == NULL)
372 ctx->out_ep = e;
373 }
374 break;
375
376 default:
377 break;
378 }
379 }
380}
381
382static void cdc_ncm_free(struct cdc_ncm_ctx *ctx)
383{
384 if (ctx == NULL)
385 return;
386
387 del_timer_sync(&ctx->tx_timer);
388
389 if (ctx->data_claimed) {
390 usb_set_intfdata(ctx->data, NULL);
391 usb_driver_release_interface(driver_of(ctx->intf), ctx->data);
392 }
393
394 if (ctx->control_claimed) {
395 usb_set_intfdata(ctx->control, NULL);
396 usb_driver_release_interface(driver_of(ctx->intf),
397 ctx->control);
398 }
399
400 if (ctx->tx_rem_skb != NULL) {
401 dev_kfree_skb_any(ctx->tx_rem_skb);
402 ctx->tx_rem_skb = NULL;
403 }
404
405 if (ctx->tx_curr_skb != NULL) {
406 dev_kfree_skb_any(ctx->tx_curr_skb);
407 ctx->tx_curr_skb = NULL;
408 }
409
410 kfree(ctx);
411}
412
413static int cdc_ncm_bind(struct usbnet *dev, struct usb_interface *intf)
414{
415 struct cdc_ncm_ctx *ctx;
416 struct usb_driver *driver;
417 u8 *buf;
418 int len;
419 int temp;
420 u8 iface_no;
421
422 ctx = kmalloc(sizeof(*ctx), GFP_KERNEL);
423 if (ctx == NULL)
424 goto error;
425
426 memset(ctx, 0, sizeof(*ctx));
427
428 init_timer(&ctx->tx_timer);
429 spin_lock_init(&ctx->mtx);
430 ctx->netdev = dev->net;
431
432 /* store ctx pointer in device data field */
433 dev->data[0] = (unsigned long)ctx;
434
435 /* get some pointers */
436 driver = driver_of(intf);
437 buf = intf->cur_altsetting->extra;
438 len = intf->cur_altsetting->extralen;
439
440 ctx->udev = dev->udev;
441 ctx->intf = intf;
442
443 /* parse through descriptors associated with control interface */
444 while ((len > 0) && (buf[0] > 2) && (buf[0] <= len)) {
445
446 if (buf[1] != USB_DT_CS_INTERFACE)
447 goto advance;
448
449 switch (buf[2]) {
450 case USB_CDC_UNION_TYPE:
451 if (buf[0] < sizeof(*(ctx->union_desc)))
452 break;
453
454 ctx->union_desc =
455 (const struct usb_cdc_union_desc *)buf;
456
457 ctx->control = usb_ifnum_to_if(dev->udev,
458 ctx->union_desc->bMasterInterface0);
459 ctx->data = usb_ifnum_to_if(dev->udev,
460 ctx->union_desc->bSlaveInterface0);
461 break;
462
463 case USB_CDC_ETHERNET_TYPE:
464 if (buf[0] < sizeof(*(ctx->ether_desc)))
465 break;
466
467 ctx->ether_desc =
468 (const struct usb_cdc_ether_desc *)buf;
469
470 dev->hard_mtu =
471 le16_to_cpu(ctx->ether_desc->wMaxSegmentSize);
472
473 if (dev->hard_mtu <
474 (CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN))
475 dev->hard_mtu =
476 CDC_NCM_MIN_DATAGRAM_SIZE - ETH_HLEN;
477
478 else if (dev->hard_mtu >
479 (CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN))
480 dev->hard_mtu =
481 CDC_NCM_MAX_DATAGRAM_SIZE - ETH_HLEN;
482 break;
483
484 case USB_CDC_NCM_TYPE:
485 if (buf[0] < sizeof(*(ctx->func_desc)))
486 break;
487
488 ctx->func_desc = (const struct usb_cdc_ncm_desc *)buf;
489 break;
490
491 default:
492 break;
493 }
494advance:
495 /* advance to next descriptor */
496 temp = buf[0];
497 buf += temp;
498 len -= temp;
499 }
500
501 /* check if we got everything */
502 if ((ctx->control == NULL) || (ctx->data == NULL) ||
503 (ctx->ether_desc == NULL))
504 goto error;
505
506 /* claim interfaces, if any */
507 if (ctx->data != intf) {
508 temp = usb_driver_claim_interface(driver, ctx->data, dev);
509 if (temp)
510 goto error;
511 ctx->data_claimed = 1;
512 }
513
514 if (ctx->control != intf) {
515 temp = usb_driver_claim_interface(driver, ctx->control, dev);
516 if (temp)
517 goto error;
518 ctx->control_claimed = 1;
519 }
520
521 iface_no = ctx->data->cur_altsetting->desc.bInterfaceNumber;
522
523 /* reset data interface */
524 temp = usb_set_interface(dev->udev, iface_no, 0);
525 if (temp)
526 goto error;
527
528 /* initialize data interface */
529 if (cdc_ncm_setup(ctx))
530 goto error;
531
532 /* configure data interface */
533 temp = usb_set_interface(dev->udev, iface_no, 1);
534 if (temp)
535 goto error;
536
537 cdc_ncm_find_endpoints(ctx, ctx->data);
538 cdc_ncm_find_endpoints(ctx, ctx->control);
539
540 if ((ctx->in_ep == NULL) || (ctx->out_ep == NULL) ||
541 (ctx->status_ep == NULL))
542 goto error;
543
544 dev->net->ethtool_ops = &cdc_ncm_ethtool_ops;
545
546 usb_set_intfdata(ctx->data, dev);
547 usb_set_intfdata(ctx->control, dev);
548 usb_set_intfdata(ctx->intf, dev);
549
550 temp = usbnet_get_ethernet_addr(dev, ctx->ether_desc->iMACAddress);
551 if (temp)
552 goto error;
553
554 dev_info(&dev->udev->dev, "MAC-Address: "
555 "0x%02x:0x%02x:0x%02x:0x%02x:0x%02x:0x%02x\n",
556 dev->net->dev_addr[0], dev->net->dev_addr[1],
557 dev->net->dev_addr[2], dev->net->dev_addr[3],
558 dev->net->dev_addr[4], dev->net->dev_addr[5]);
559
560 dev->in = usb_rcvbulkpipe(dev->udev,
561 ctx->in_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
562 dev->out = usb_sndbulkpipe(dev->udev,
563 ctx->out_ep->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK);
564 dev->status = ctx->status_ep;
565 dev->rx_urb_size = ctx->rx_max;
566
567 /*
 568	 * We should get an event when the network connection is "connected" or
 569	 * "disconnected". Set the network connection to the "disconnected" state
 570	 * (carrier is OFF) during attach, so the IP network stack does not
 571	 * start IPv6 negotiation and other traffic.
572 */
573 netif_carrier_off(dev->net);
574 ctx->tx_speed = ctx->rx_speed = 0;
575 return 0;
576
577error:
578 cdc_ncm_free((struct cdc_ncm_ctx *)dev->data[0]);
579 dev->data[0] = 0;
580 dev_info(&dev->udev->dev, "Descriptor failure\n");
581 return -ENODEV;
582}
583
584static void cdc_ncm_unbind(struct usbnet *dev, struct usb_interface *intf)
585{
586 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
587 struct usb_driver *driver;
588
589 if (ctx == NULL)
590 return; /* no setup */
591
592 driver = driver_of(intf);
593
594 usb_set_intfdata(ctx->data, NULL);
595 usb_set_intfdata(ctx->control, NULL);
596 usb_set_intfdata(ctx->intf, NULL);
597
598 /* release interfaces, if any */
599 if (ctx->data_claimed) {
600 usb_driver_release_interface(driver, ctx->data);
601 ctx->data_claimed = 0;
602 }
603
604 if (ctx->control_claimed) {
605 usb_driver_release_interface(driver, ctx->control);
606 ctx->control_claimed = 0;
607 }
608
609 cdc_ncm_free(ctx);
610}
611
612static void cdc_ncm_zero_fill(u8 *ptr, u32 first, u32 end, u32 max)
613{
614 if (first >= max)
615 return;
616 if (first >= end)
617 return;
618 if (end > max)
619 end = max;
620 memset(ptr + first, 0, end - first);
621}
622
623static struct sk_buff *
624cdc_ncm_fill_tx_frame(struct cdc_ncm_ctx *ctx, struct sk_buff *skb)
625{
626 struct sk_buff *skb_out;
627 u32 rem;
628 u32 offset;
629 u32 last_offset;
630 u16 n = 0;
631 u8 timeout = 0;
632
633 /* if there is a remaining skb, it gets priority */
634 if (skb != NULL)
635 swap(skb, ctx->tx_rem_skb);
636 else
637 timeout = 1;
638
639 /*
640 * +----------------+
641 * | skb_out |
642 * +----------------+
643 * ^ offset
644 * ^ last_offset
645 */
646
647 /* check if we are resuming an OUT skb */
648 if (ctx->tx_curr_skb != NULL) {
649 /* pop variables */
650 skb_out = ctx->tx_curr_skb;
651 offset = ctx->tx_curr_offset;
652 last_offset = ctx->tx_curr_last_offset;
653 n = ctx->tx_curr_frame_num;
654
655 } else {
656 /* reset variables */
657 skb_out = alloc_skb(ctx->tx_max, GFP_ATOMIC);
658 if (skb_out == NULL) {
659 if (skb != NULL) {
660 dev_kfree_skb_any(skb);
661 ctx->netdev->stats.tx_dropped++;
662 }
663 goto exit_no_skb;
664 }
665
666 /* make room for NTH and NDP */
667 offset = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
668 ctx->tx_ndp_modulus) +
669 sizeof(struct usb_cdc_ncm_ndp16) +
670 (ctx->tx_max_datagrams + 1) *
671 sizeof(struct usb_cdc_ncm_dpe16);
672
673 /* store last valid offset before alignment */
674 last_offset = offset;
675 /* align first Datagram offset correctly */
676 offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
677 /* zero buffer till the first IP datagram */
678 cdc_ncm_zero_fill(skb_out->data, 0, offset, offset);
679 n = 0;
680 ctx->tx_curr_frame_num = 0;
681 }
682
683 for (; n < ctx->tx_max_datagrams; n++) {
684 /* check if end of transmit buffer is reached */
685 if (offset >= ctx->tx_max)
686 break;
687
688 /* compute maximum buffer size */
689 rem = ctx->tx_max - offset;
690
691 if (skb == NULL) {
692 skb = ctx->tx_rem_skb;
693 ctx->tx_rem_skb = NULL;
694
695 /* check for end of skb */
696 if (skb == NULL)
697 break;
698 }
699
700 if (skb->len > rem) {
701 if (n == 0) {
702 /* won't fit, MTU problem? */
703 dev_kfree_skb_any(skb);
704 skb = NULL;
705 ctx->netdev->stats.tx_dropped++;
706 } else {
707 /* no room for skb - store for later */
708 if (ctx->tx_rem_skb != NULL) {
709 dev_kfree_skb_any(ctx->tx_rem_skb);
710 ctx->netdev->stats.tx_dropped++;
711 }
712 ctx->tx_rem_skb = skb;
713 skb = NULL;
714
715 /* loop one more time */
716 timeout = 1;
717 }
718 break;
719 }
720
721 memcpy(((u8 *)skb_out->data) + offset, skb->data, skb->len);
722
723 ctx->tx_ncm.dpe16[n].wDatagramLength = cpu_to_le16(skb->len);
724 ctx->tx_ncm.dpe16[n].wDatagramIndex = cpu_to_le16(offset);
725
726 /* update offset */
727 offset += skb->len;
728
729 /* store last valid offset before alignment */
730 last_offset = offset;
731
732 /* align offset correctly */
733 offset = ALIGN(offset, ctx->tx_modulus) + ctx->tx_remainder;
734
735 /* zero padding */
736 cdc_ncm_zero_fill(skb_out->data, last_offset, offset,
737 ctx->tx_max);
738 dev_kfree_skb_any(skb);
739 skb = NULL;
740 }
741
742 /* free up any dangling skb */
743 if (skb != NULL) {
744 dev_kfree_skb_any(skb);
745 skb = NULL;
746 ctx->netdev->stats.tx_dropped++;
747 }
748
749 ctx->tx_curr_frame_num = n;
750
751 if (n == 0) {
752 /* wait for more frames */
753 /* push variables */
754 ctx->tx_curr_skb = skb_out;
755 ctx->tx_curr_offset = offset;
756 ctx->tx_curr_last_offset = last_offset;
757 goto exit_no_skb;
758
759 } else if ((n < ctx->tx_max_datagrams) && (timeout == 0)) {
760 /* wait for more frames */
761 /* push variables */
762 ctx->tx_curr_skb = skb_out;
763 ctx->tx_curr_offset = offset;
764 ctx->tx_curr_last_offset = last_offset;
765 /* set the pending count */
766 if (n < CDC_NCM_RESTART_TIMER_DATAGRAM_CNT)
767 ctx->tx_timer_pending = 2;
768 goto exit_no_skb;
769
770 } else {
771 /* frame goes out */
772 /* variables will be reset at next call */
773 }
774
775 /* check for overflow */
776 if (last_offset > ctx->tx_max)
777 last_offset = ctx->tx_max;
778
779 /* revert offset */
780 offset = last_offset;
781
782 /*
 783	 * If the collected data size is less than or equal to CDC_NCM_MIN_TX_PKT
 784	 * bytes, we send the buffer as is. If we get more data, it is more
 785	 * efficient for a USB HS mobile device with a DMA engine to receive a
 786	 * full-size NTB than to cancel the DMA transfer and receive a short packet.
787 */
788 if (offset > CDC_NCM_MIN_TX_PKT)
789 offset = ctx->tx_max;
790
791 /* final zero padding */
792 cdc_ncm_zero_fill(skb_out->data, last_offset, offset, ctx->tx_max);
793
794 /* store last offset */
795 last_offset = offset;
796
797 if ((last_offset < ctx->tx_max) && ((last_offset %
798 le16_to_cpu(ctx->out_ep->desc.wMaxPacketSize)) == 0)) {
799 /* force short packet */
800 *(((u8 *)skb_out->data) + last_offset) = 0;
801 last_offset++;
802 }
803
804 /* zero the rest of the DPEs plus the last NULL entry */
805 for (; n <= CDC_NCM_DPT_DATAGRAMS_MAX; n++) {
806 ctx->tx_ncm.dpe16[n].wDatagramLength = 0;
807 ctx->tx_ncm.dpe16[n].wDatagramIndex = 0;
808 }
809
810 /* fill out 16-bit NTB header */
811 ctx->tx_ncm.nth16.dwSignature = cpu_to_le32(USB_CDC_NCM_NTH16_SIGN);
812 ctx->tx_ncm.nth16.wHeaderLength =
813 cpu_to_le16(sizeof(ctx->tx_ncm.nth16));
814 ctx->tx_ncm.nth16.wSequence = cpu_to_le16(ctx->tx_seq);
815 ctx->tx_ncm.nth16.wBlockLength = cpu_to_le16(last_offset);
816 ctx->tx_ncm.nth16.wFpIndex = ALIGN(sizeof(struct usb_cdc_ncm_nth16),
817 ctx->tx_ndp_modulus);
818
819 memcpy(skb_out->data, &(ctx->tx_ncm.nth16), sizeof(ctx->tx_ncm.nth16));
820 ctx->tx_seq++;
821
822 /* fill out 16-bit NDP table */
823 ctx->tx_ncm.ndp16.dwSignature =
824 cpu_to_le32(USB_CDC_NCM_NDP16_NOCRC_SIGN);
825 rem = sizeof(ctx->tx_ncm.ndp16) + ((ctx->tx_curr_frame_num + 1) *
826 sizeof(struct usb_cdc_ncm_dpe16));
827 ctx->tx_ncm.ndp16.wLength = cpu_to_le16(rem);
828 ctx->tx_ncm.ndp16.wNextFpIndex = 0; /* reserved */
829
830 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex,
831 &(ctx->tx_ncm.ndp16),
832 sizeof(ctx->tx_ncm.ndp16));
833
834 memcpy(((u8 *)skb_out->data) + ctx->tx_ncm.nth16.wFpIndex +
835 sizeof(ctx->tx_ncm.ndp16),
836 &(ctx->tx_ncm.dpe16),
837 (ctx->tx_curr_frame_num + 1) *
838 sizeof(struct usb_cdc_ncm_dpe16));
839
840 /* set frame length */
841 skb_put(skb_out, last_offset);
842
843 /* return skb */
844 ctx->tx_curr_skb = NULL;
845 return skb_out;
846
847exit_no_skb:
848 return NULL;
849}
850
851static void cdc_ncm_tx_timeout_start(struct cdc_ncm_ctx *ctx)
852{
853 /* start timer, if not already started */
854 if (timer_pending(&ctx->tx_timer) == 0) {
855 ctx->tx_timer.function = &cdc_ncm_tx_timeout;
856 ctx->tx_timer.data = (unsigned long)ctx;
857 ctx->tx_timer.expires = jiffies + ((HZ + 999) / 1000);
858 add_timer(&ctx->tx_timer);
859 }
860}
861
862static void cdc_ncm_tx_timeout(unsigned long arg)
863{
864 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)arg;
865 u8 restart;
866
867 spin_lock(&ctx->mtx);
868 if (ctx->tx_timer_pending != 0) {
869 ctx->tx_timer_pending--;
870 restart = 1;
871 } else
872 restart = 0;
873
874 spin_unlock(&ctx->mtx);
875
876 if (restart)
877 cdc_ncm_tx_timeout_start(ctx);
878 else if (ctx->netdev != NULL)
879 usbnet_start_xmit(NULL, ctx->netdev);
880}
881
882static struct sk_buff *
883cdc_ncm_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags)
884{
885 struct sk_buff *skb_out;
886 struct cdc_ncm_ctx *ctx = (struct cdc_ncm_ctx *)dev->data[0];
887 u8 need_timer = 0;
888
889 /*
890 * The Ethernet API we are using does not support transmitting
891 * multiple Ethernet frames in a single call. This driver will
892 * accumulate multiple Ethernet frames and send out a larger
 893	 * USB frame when the USB buffer is full or when a one-jiffy
 894	 * timeout expires.
895 */
896 if (ctx == NULL)
897 goto error;
898
899 spin_lock(&ctx->mtx);
900 skb_out = cdc_ncm_fill_tx_frame(ctx, skb);
901 if (ctx->tx_curr_skb != NULL)
902 need_timer = 1;
903 spin_unlock(&ctx->mtx);
904
905 /* Start timer, if there is a remaining skb */
906 if (need_timer)
907 cdc_ncm_tx_timeout_start(ctx);
908
909 if (skb_out)
910 dev->net->stats.tx_packets += ctx->tx_curr_frame_num;
911 return skb_out;
912
913error:
914 if (skb != NULL)
915 dev_kfree_skb_any(skb);
916
917 return NULL;
918}
919
920static int cdc_ncm_rx_fixup(struct usbnet *dev, struct sk_buff *skb_in)
921{
922 struct sk_buff *skb;
923 struct cdc_ncm_ctx *ctx;
924 int sumlen;
925 int actlen;
926 int temp;
927 int nframes;
928 int x;
929 int offset;
930
931 ctx = (struct cdc_ncm_ctx *)dev->data[0];
932 if (ctx == NULL)
933 goto error;
934
935 actlen = skb_in->len;
936 sumlen = CDC_NCM_NTB_MAX_SIZE_RX;
937
938 if (actlen < (sizeof(ctx->rx_ncm.nth16) + sizeof(ctx->rx_ncm.ndp16))) {
939 pr_debug("frame too short\n");
940 goto error;
941 }
942
943 memcpy(&(ctx->rx_ncm.nth16), ((u8 *)skb_in->data),
944 sizeof(ctx->rx_ncm.nth16));
945
946 if (le32_to_cpu(ctx->rx_ncm.nth16.dwSignature) !=
947 USB_CDC_NCM_NTH16_SIGN) {
948 pr_debug("invalid NTH16 signature <%u>\n",
949 le32_to_cpu(ctx->rx_ncm.nth16.dwSignature));
950 goto error;
951 }
952
953 temp = le16_to_cpu(ctx->rx_ncm.nth16.wBlockLength);
954 if (temp > sumlen) {
955 pr_debug("unsupported NTB block length %u/%u\n", temp, sumlen);
956 goto error;
957 }
958
959 temp = le16_to_cpu(ctx->rx_ncm.nth16.wFpIndex);
960 if ((temp + sizeof(ctx->rx_ncm.ndp16)) > actlen) {
961 pr_debug("invalid DPT16 index\n");
962 goto error;
963 }
964
965 memcpy(&(ctx->rx_ncm.ndp16), ((u8 *)skb_in->data) + temp,
966 sizeof(ctx->rx_ncm.ndp16));
967
968 if (le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature) !=
969 USB_CDC_NCM_NDP16_NOCRC_SIGN) {
970 pr_debug("invalid DPT16 signature <%u>\n",
971 le32_to_cpu(ctx->rx_ncm.ndp16.dwSignature));
972 goto error;
973 }
974
975 if (le16_to_cpu(ctx->rx_ncm.ndp16.wLength) <
976 USB_CDC_NCM_NDP16_LENGTH_MIN) {
977 pr_debug("invalid DPT16 length <%u>\n",
 978			le16_to_cpu(ctx->rx_ncm.ndp16.wLength));
979 goto error;
980 }
981
982 nframes = ((le16_to_cpu(ctx->rx_ncm.ndp16.wLength) -
983 sizeof(struct usb_cdc_ncm_ndp16)) /
984 sizeof(struct usb_cdc_ncm_dpe16));
985 nframes--; /* we process NDP entries except for the last one */
986
987 pr_debug("nframes = %u\n", nframes);
988
989 temp += sizeof(ctx->rx_ncm.ndp16);
990
991 if ((temp + nframes * (sizeof(struct usb_cdc_ncm_dpe16))) > actlen) {
992 pr_debug("Invalid nframes = %d\n", nframes);
993 goto error;
994 }
995
996 if (nframes > CDC_NCM_DPT_DATAGRAMS_MAX) {
997 pr_debug("Truncating number of frames from %u to %u\n",
998 nframes, CDC_NCM_DPT_DATAGRAMS_MAX);
999 nframes = CDC_NCM_DPT_DATAGRAMS_MAX;
1000 }
1001
1002 memcpy(&(ctx->rx_ncm.dpe16), ((u8 *)skb_in->data) + temp,
1003 nframes * (sizeof(struct usb_cdc_ncm_dpe16)));
1004
1005 for (x = 0; x < nframes; x++) {
1006 offset = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramIndex);
1007 temp = le16_to_cpu(ctx->rx_ncm.dpe16[x].wDatagramLength);
1008
1009 /*
1010 * CDC NCM ch. 3.7
1011 * All entries after first NULL entry are to be ignored
1012 */
1013 if ((offset == 0) || (temp == 0)) {
1014 if (!x)
1015 goto error; /* empty NTB */
1016 break;
1017 }
1018
1019 /* sanity checking */
1020 if (((offset + temp) > actlen) ||
1021 (temp > CDC_NCM_MAX_DATAGRAM_SIZE) || (temp < ETH_HLEN)) {
 1022			pr_debug("invalid frame detected (ignored) "
1023 "offset[%u]=%u, length=%u, skb=%p\n",
1024 x, offset, temp, skb);
1025 if (!x)
1026 goto error;
1027 break;
1028
1029 } else {
1030 skb = skb_clone(skb_in, GFP_ATOMIC);
1031 skb->len = temp;
1032 skb->data = ((u8 *)skb_in->data) + offset;
1033 skb_set_tail_pointer(skb, temp);
1034 usbnet_skb_return(dev, skb);
1035 }
1036 }
1037 return 1;
1038error:
1039 return 0;
1040}
1041
1042static void
1043cdc_ncm_speed_change(struct cdc_ncm_ctx *ctx,
1044 struct connection_speed_change *data)
1045{
1046 uint32_t rx_speed = le32_to_cpu(data->USBitRate);
1047 uint32_t tx_speed = le32_to_cpu(data->DSBitRate);
1048
1049 /*
1050 * Currently the USB-NET API does not support reporting the actual
 1051	 * device speed, so print it instead.
1052 */
1053 if ((tx_speed != ctx->tx_speed) || (rx_speed != ctx->rx_speed)) {
1054 ctx->tx_speed = tx_speed;
1055 ctx->rx_speed = rx_speed;
1056
1057 if ((tx_speed > 1000000) && (rx_speed > 1000000)) {
1058 printk(KERN_INFO KBUILD_MODNAME
1059 ": %s: %u mbit/s downlink "
1060 "%u mbit/s uplink\n",
1061 ctx->netdev->name,
1062 (unsigned int)(rx_speed / 1000000U),
1063 (unsigned int)(tx_speed / 1000000U));
1064 } else {
1065 printk(KERN_INFO KBUILD_MODNAME
1066 ": %s: %u kbit/s downlink "
1067 "%u kbit/s uplink\n",
1068 ctx->netdev->name,
1069 (unsigned int)(rx_speed / 1000U),
1070 (unsigned int)(tx_speed / 1000U));
1071 }
1072 }
1073}
1074
1075static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
1076{
1077 struct cdc_ncm_ctx *ctx;
1078 struct usb_cdc_notification *event;
1079
1080 ctx = (struct cdc_ncm_ctx *)dev->data[0];
1081
1082 if (urb->actual_length < sizeof(*event))
1083 return;
1084
1085 /* test for split data in 8-byte chunks */
1086 if (test_and_clear_bit(EVENT_STS_SPLIT, &dev->flags)) {
1087 cdc_ncm_speed_change(ctx,
1088 (struct connection_speed_change *)urb->transfer_buffer);
1089 return;
1090 }
1091
1092 event = urb->transfer_buffer;
1093
1094 switch (event->bNotificationType) {
1095 case USB_CDC_NOTIFY_NETWORK_CONNECTION:
1096 /*
1097 * According to the CDC NCM specification ch.7.1
1098 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
1099 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
1100 */
1101 ctx->connected = event->wValue;
1102
1103 printk(KERN_INFO KBUILD_MODNAME ": %s: network connection:"
1104 " %sconnected\n",
1105 ctx->netdev->name, ctx->connected ? "" : "dis");
1106
1107 if (ctx->connected)
1108 netif_carrier_on(dev->net);
1109 else {
1110 netif_carrier_off(dev->net);
1111 ctx->tx_speed = ctx->rx_speed = 0;
1112 }
1113 break;
1114
1115 case USB_CDC_NOTIFY_SPEED_CHANGE:
1116 if (urb->actual_length <
1117 (sizeof(*event) + sizeof(struct connection_speed_change)))
1118 set_bit(EVENT_STS_SPLIT, &dev->flags);
1119 else
1120 cdc_ncm_speed_change(ctx,
1121 (struct connection_speed_change *) &event[1]);
1122 break;
1123
1124 default:
1125 dev_err(&dev->udev->dev, "NCM: unexpected "
1126 "notification 0x%02x!\n", event->bNotificationType);
1127 break;
1128 }
1129}
1130
1131static int cdc_ncm_check_connect(struct usbnet *dev)
1132{
1133 struct cdc_ncm_ctx *ctx;
1134
1135 ctx = (struct cdc_ncm_ctx *)dev->data[0];
1136 if (ctx == NULL)
1137 return 1; /* disconnected */
1138
1139 return !ctx->connected;
1140}
1141
1142static int
1143cdc_ncm_probe(struct usb_interface *udev, const struct usb_device_id *prod)
1144{
1145 return usbnet_probe(udev, prod);
1146}
1147
1148static void cdc_ncm_disconnect(struct usb_interface *intf)
1149{
1150 struct usbnet *dev = usb_get_intfdata(intf);
1151
1152 if (dev == NULL)
1153 return; /* already disconnected */
1154
1155 usbnet_disconnect(intf);
1156}
1157
1158static int cdc_ncm_manage_power(struct usbnet *dev, int status)
1159{
1160 dev->intf->needs_remote_wakeup = status;
1161 return 0;
1162}
1163
1164static const struct driver_info cdc_ncm_info = {
1165 .description = "CDC NCM",
1166 .flags = FLAG_NO_SETINT | FLAG_MULTI_PACKET,
1167 .bind = cdc_ncm_bind,
1168 .unbind = cdc_ncm_unbind,
1169 .check_connect = cdc_ncm_check_connect,
1170 .manage_power = cdc_ncm_manage_power,
1171 .status = cdc_ncm_status,
1172 .rx_fixup = cdc_ncm_rx_fixup,
1173 .tx_fixup = cdc_ncm_tx_fixup,
1174};
1175
1176static struct usb_driver cdc_ncm_driver = {
1177 .name = "cdc_ncm",
1178 .id_table = cdc_devs,
1179 .probe = cdc_ncm_probe,
1180 .disconnect = cdc_ncm_disconnect,
1181 .suspend = usbnet_suspend,
1182 .resume = usbnet_resume,
1183 .supports_autosuspend = 1,
1184};
1185
1186static struct ethtool_ops cdc_ncm_ethtool_ops = {
1187 .get_drvinfo = cdc_ncm_get_drvinfo,
1188 .get_link = usbnet_get_link,
1189 .get_msglevel = usbnet_get_msglevel,
1190 .set_msglevel = usbnet_set_msglevel,
1191 .get_settings = usbnet_get_settings,
1192 .set_settings = usbnet_set_settings,
1193 .nway_reset = usbnet_nway_reset,
1194};
1195
1196static int __init cdc_ncm_init(void)
1197{
1198 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION "\n");
1199 return usb_register(&cdc_ncm_driver);
1200}
1201
1202module_init(cdc_ncm_init);
1203
1204static void __exit cdc_ncm_exit(void)
1205{
1206 usb_deregister(&cdc_ncm_driver);
1207}
1208
1209module_exit(cdc_ncm_exit);
1210
1211MODULE_AUTHOR("Hans Petter Selasky");
1212MODULE_DESCRIPTION("USB CDC NCM host driver");
1213MODULE_LICENSE("Dual BSD/GPL");
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index 812edf85d6d3..bed8fcedff49 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -997,6 +997,18 @@ static void packetizeRx(struct hso_net *odev, unsigned char *ip_pkt,
997 } 997 }
998} 998}
999 999
1000static void fix_crc_bug(struct urb *urb, __le16 max_packet_size)
1001{
1002 static const u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1003 u32 rest = urb->actual_length % le16_to_cpu(max_packet_size);
1004
1005 if (((rest == 5) || (rest == 6)) &&
1006 !memcmp(((u8 *)urb->transfer_buffer) + urb->actual_length - 4,
1007 crc_check, 4)) {
1008 urb->actual_length -= 4;
1009 }
1010}
1011
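
As a worked example of the quirk handled by fix_crc_bug(): with wMaxPacketSize = 64 and urb->actual_length = 133, rest = 133 % 64 = 5, so if the last four bytes of the transfer buffer are DE AD BE EF the bogus trailer is stripped and actual_length drops to 129; transfers whose remainder is neither 5 nor 6 are left untouched.
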
1000/* Moving data from usb to kernel (in interrupt state) */ 1012/* Moving data from usb to kernel (in interrupt state) */
1001static void read_bulk_callback(struct urb *urb) 1013static void read_bulk_callback(struct urb *urb)
1002{ 1014{
@@ -1025,17 +1037,8 @@ static void read_bulk_callback(struct urb *urb)
1025 return; 1037 return;
1026 } 1038 }
1027 1039
1028 if (odev->parent->port_spec & HSO_INFO_CRC_BUG) { 1040 if (odev->parent->port_spec & HSO_INFO_CRC_BUG)
1029 u32 rest; 1041 fix_crc_bug(urb, odev->in_endp->wMaxPacketSize);
1030 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1031 rest = urb->actual_length %
1032 le16_to_cpu(odev->in_endp->wMaxPacketSize);
1033 if (((rest == 5) || (rest == 6)) &&
1034 !memcmp(((u8 *) urb->transfer_buffer) +
1035 urb->actual_length - 4, crc_check, 4)) {
1036 urb->actual_length -= 4;
1037 }
1038 }
1039 1042
1040 /* do we even have a packet? */ 1043 /* do we even have a packet? */
1041 if (urb->actual_length) { 1044 if (urb->actual_length) {
@@ -1227,18 +1230,8 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
1227 return; 1230 return;
1228 1231
1229 if (status == 0) { 1232 if (status == 0) {
1230 if (serial->parent->port_spec & HSO_INFO_CRC_BUG) { 1233 if (serial->parent->port_spec & HSO_INFO_CRC_BUG)
1231 u32 rest; 1234 fix_crc_bug(urb, serial->in_endp->wMaxPacketSize);
1232 u8 crc_check[4] = { 0xDE, 0xAD, 0xBE, 0xEF };
1233 rest =
1234 urb->actual_length %
1235 le16_to_cpu(serial->in_endp->wMaxPacketSize);
1236 if (((rest == 5) || (rest == 6)) &&
1237 !memcmp(((u8 *) urb->transfer_buffer) +
1238 urb->actual_length - 4, crc_check, 4)) {
1239 urb->actual_length -= 4;
1240 }
1241 }
1242 /* Valid data, handle RX data */ 1235 /* Valid data, handle RX data */
1243 spin_lock(&serial->serial_lock); 1236 spin_lock(&serial->serial_lock);
1244 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1; 1237 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 1;
@@ -1741,7 +1734,6 @@ static int hso_serial_ioctl(struct tty_struct *tty, struct file *file,
1741 unsigned int cmd, unsigned long arg) 1734 unsigned int cmd, unsigned long arg)
1742{ 1735{
1743 struct hso_serial *serial = get_serial_by_tty(tty); 1736 struct hso_serial *serial = get_serial_by_tty(tty);
1744 void __user *uarg = (void __user *)arg;
1745 int ret = 0; 1737 int ret = 0;
1746 D4("IOCTL cmd: %d, arg: %ld", cmd, arg); 1738 D4("IOCTL cmd: %d, arg: %ld", cmd, arg);
1747 1739
diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c
index b2bcf99e6f08..7d42f9a2c068 100644
--- a/drivers/net/usb/ipheth.c
+++ b/drivers/net/usb/ipheth.c
@@ -363,7 +363,7 @@ static int ipheth_tx(struct sk_buff *skb, struct net_device *net)
363 363
364 /* Paranoid */ 364 /* Paranoid */
365 if (skb->len > IPHETH_BUF_SIZE) { 365 if (skb->len > IPHETH_BUF_SIZE) {
366 WARN(1, "%s: skb too large: %d bytes", __func__, skb->len); 366 WARN(1, "%s: skb too large: %d bytes\n", __func__, skb->len);
367 dev->net->stats.tx_dropped++; 367 dev->net->stats.tx_dropped++;
368 dev_kfree_skb_irq(skb); 368 dev_kfree_skb_irq(skb);
369 return NETDEV_TX_OK; 369 return NETDEV_TX_OK;
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index a6281e3987b5..2b791392e788 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * MOSCHIP MCS7830 based USB 2.0 Ethernet Devices 2 * MOSCHIP MCS7830 based (7730/7830/7832) USB 2.0 Ethernet Devices
3 * 3 *
4 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver 4 * based on usbnet.c, asix.c and the vendor provided mcs7830 driver
5 * 5 *
@@ -11,6 +11,9 @@
11 * 11 *
12 * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!). 12 * Definitions gathered from MOSCHIP, Data Sheet_7830DA.pdf (thanks!).
13 * 13 *
14 * 2010-12-19: add 7832 USB PID ("functionality same as MCS7830"),
15 * per active notification by manufacturer
16 *
14 * TODO: 17 * TODO:
15 * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?) 18 * - support HIF_REG_CONFIG_SLEEPMODE/HIF_REG_CONFIG_TXENABLE (via autopm?)
16 * - implement ethtool_ops get_pauseparam/set_pauseparam 19 * - implement ethtool_ops get_pauseparam/set_pauseparam
@@ -60,6 +63,7 @@
60#define MCS7830_MAX_MCAST 64 63#define MCS7830_MAX_MCAST 64
61 64
62#define MCS7830_VENDOR_ID 0x9710 65#define MCS7830_VENDOR_ID 0x9710
66#define MCS7832_PRODUCT_ID 0x7832
63#define MCS7830_PRODUCT_ID 0x7830 67#define MCS7830_PRODUCT_ID 0x7830
64#define MCS7730_PRODUCT_ID 0x7730 68#define MCS7730_PRODUCT_ID 0x7730
65 69
@@ -351,7 +355,7 @@ static int mcs7830_set_autoneg(struct usbnet *dev, int ptrUserPhyMode)
351 if (!ret) 355 if (!ret)
352 ret = mcs7830_write_phy(dev, MII_BMCR, 356 ret = mcs7830_write_phy(dev, MII_BMCR,
353 BMCR_ANENABLE | BMCR_ANRESTART ); 357 BMCR_ANENABLE | BMCR_ANRESTART );
354 return ret < 0 ? : 0; 358 return ret;
355} 359}
356 360
357 361
@@ -626,7 +630,7 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
626} 630}
627 631
628static const struct driver_info moschip_info = { 632static const struct driver_info moschip_info = {
629 .description = "MOSCHIP 7830/7730 usb-NET adapter", 633 .description = "MOSCHIP 7830/7832/7730 usb-NET adapter",
630 .bind = mcs7830_bind, 634 .bind = mcs7830_bind,
631 .rx_fixup = mcs7830_rx_fixup, 635 .rx_fixup = mcs7830_rx_fixup,
632 .flags = FLAG_ETHER, 636 .flags = FLAG_ETHER,
@@ -645,6 +649,10 @@ static const struct driver_info sitecom_info = {
645 649
646static const struct usb_device_id products[] = { 650static const struct usb_device_id products[] = {
647 { 651 {
652 USB_DEVICE(MCS7830_VENDOR_ID, MCS7832_PRODUCT_ID),
653 .driver_info = (unsigned long) &moschip_info,
654 },
655 {
648 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID), 656 USB_DEVICE(MCS7830_VENDOR_ID, MCS7830_PRODUCT_ID),
649 .driver_info = (unsigned long) &moschip_info, 657 .driver_info = (unsigned long) &moschip_info,
650 }, 658 },
diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c
index 6710f09346d6..ef3667690b12 100644
--- a/drivers/net/usb/pegasus.c
+++ b/drivers/net/usb/pegasus.c
@@ -359,7 +359,7 @@ fail:
359 359
360static int mdio_read(struct net_device *dev, int phy_id, int loc) 360static int mdio_read(struct net_device *dev, int phy_id, int loc)
361{ 361{
362 pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev); 362 pegasus_t *pegasus = netdev_priv(dev);
363 u16 res; 363 u16 res;
364 364
365 read_mii_word(pegasus, phy_id, loc, &res); 365 read_mii_word(pegasus, phy_id, loc, &res);
@@ -397,7 +397,7 @@ fail:
397 397
398static void mdio_write(struct net_device *dev, int phy_id, int loc, int val) 398static void mdio_write(struct net_device *dev, int phy_id, int loc, int val)
399{ 399{
400 pegasus_t *pegasus = (pegasus_t *) netdev_priv(dev); 400 pegasus_t *pegasus = netdev_priv(dev);
401 401
402 write_mii_word(pegasus, phy_id, loc, val); 402 write_mii_word(pegasus, phy_id, loc, val);
403} 403}
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index d1ac15c95faf..ed1b43210584 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -802,10 +802,9 @@ static void sierra_net_unbind(struct usbnet *dev, struct usb_interface *intf)
802 802
803 dev_dbg(&dev->udev->dev, "%s", __func__); 803 dev_dbg(&dev->udev->dev, "%s", __func__);
804 804
805 /* Kill the timer then flush the work queue */ 805 /* kill the timer and work */
806 del_timer_sync(&priv->sync_timer); 806 del_timer_sync(&priv->sync_timer);
807 807 cancel_work_sync(&priv->sierra_net_kevent);
808 flush_scheduled_work();
809 808
810 /* tell modem we are going away */ 809 /* tell modem we are going away */
811 status = sierra_net_send_cmd(dev, priv->shdwn_msg, 810 status = sierra_net_send_cmd(dev, priv->shdwn_msg,
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 65cb1abfbe57..bc86f4b6ecc2 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1163,9 +1163,8 @@ static int smsc95xx_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
1163 1163
1164static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb) 1164static u32 smsc95xx_calc_csum_preamble(struct sk_buff *skb)
1165{ 1165{
1166 int len = skb->data - skb->head; 1166 u16 low_16 = (u16)skb_checksum_start_offset(skb);
1167 u16 high_16 = (u16)(skb->csum_offset + skb->csum_start - len); 1167 u16 high_16 = low_16 + skb->csum_offset;
1168 u16 low_16 = (u16)(skb->csum_start - len);
1169 return (high_16 << 16) | low_16; 1168 return (high_16 << 16) | low_16;
1170} 1169}
1171 1170
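
The smsc95xx conversions above (and the virtio_net one later in this patch) assume skb_checksum_start_offset() yields the same value as the old open-coded expressions; a minimal equivalent, assuming the usual sk_buff layout where csum_start is counted from skb->head, is:

static inline u16 csum_start_off(const struct sk_buff *skb)
{
	/* subtracting the headroom (skb->data - skb->head) turns the
	 * head-relative csum_start into an offset from skb->data */
	return skb->csum_start - skb_headroom(skb);
}
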
@@ -1193,7 +1192,7 @@ static struct sk_buff *smsc95xx_tx_fixup(struct usbnet *dev,
1193 if (skb->len <= 45) { 1192 if (skb->len <= 45) {
1194 /* workaround - hardware tx checksum does not work 1193 /* workaround - hardware tx checksum does not work
1195 * properly with extremely small packets */ 1194 * properly with extremely small packets */
1196 long csstart = skb->csum_start - skb_headroom(skb); 1195 long csstart = skb_checksum_start_offset(skb);
1197 __wsum calc = csum_partial(skb->data + csstart, 1196 __wsum calc = csum_partial(skb->data + csstart,
1198 skb->len - csstart, 0); 1197 skb->len - csstart, 0);
1199 *((__sum16 *)(skb->data + csstart 1198 *((__sum16 *)(skb->data + csstart
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index c04d49e31f81..ed9a41643ff4 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -391,14 +391,19 @@ static inline void rx_process (struct usbnet *dev, struct sk_buff *skb)
391 goto error; 391 goto error;
392 // else network stack removes extra byte if we forced a short packet 392 // else network stack removes extra byte if we forced a short packet
393 393
394 if (skb->len) 394 if (skb->len) {
395 usbnet_skb_return (dev, skb); 395 /* all data was already cloned from skb inside the driver */
396 else { 396 if (dev->driver_info->flags & FLAG_MULTI_PACKET)
397 netif_dbg(dev, rx_err, dev->net, "drop\n"); 397 dev_kfree_skb_any(skb);
398error: 398 else
399 dev->net->stats.rx_errors++; 399 usbnet_skb_return(dev, skb);
400 skb_queue_tail (&dev->done, skb); 400 return;
401 } 401 }
402
403 netif_dbg(dev, rx_err, dev->net, "drop\n");
404error:
405 dev->net->stats.rx_errors++;
406 skb_queue_tail(&dev->done, skb);
402} 407}
403 408
404/*-------------------------------------------------------------------------*/ 409/*-------------------------------------------------------------------------*/
@@ -971,7 +976,8 @@ static void tx_complete (struct urb *urb)
971 struct usbnet *dev = entry->dev; 976 struct usbnet *dev = entry->dev;
972 977
973 if (urb->status == 0) { 978 if (urb->status == 0) {
974 dev->net->stats.tx_packets++; 979 if (!(dev->driver_info->flags & FLAG_MULTI_PACKET))
980 dev->net->stats.tx_packets++;
975 dev->net->stats.tx_bytes += entry->length; 981 dev->net->stats.tx_bytes += entry->length;
976 } else { 982 } else {
977 dev->net->stats.tx_errors++; 983 dev->net->stats.tx_errors++;
@@ -1044,8 +1050,13 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1044 if (info->tx_fixup) { 1050 if (info->tx_fixup) {
1045 skb = info->tx_fixup (dev, skb, GFP_ATOMIC); 1051 skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
1046 if (!skb) { 1052 if (!skb) {
1047 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n"); 1053 if (netif_msg_tx_err(dev)) {
1048 goto drop; 1054 netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
1055 goto drop;
1056 } else {
1057 /* cdc_ncm collected packet; waits for more */
1058 goto not_drop;
1059 }
1049 } 1060 }
1050 } 1061 }
1051 length = skb->len; 1062 length = skb->len;
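
With FLAG_MULTI_PACKET set, a NULL return from tx_fixup is no longer treated as an unconditional error: the minidriver has taken ownership of the skb and is aggregating it, so the frame must not be counted as dropped. A hypothetical minidriver hook following that convention (helper names are invented for illustration) might look like:

static struct sk_buff *example_tx_fixup(struct usbnet *dev,
					struct sk_buff *skb, gfp_t flags)
{
	/* example_buffer_frame() is a hypothetical helper that takes
	 * ownership of skb and adds it to the pending aggregate */
	example_buffer_frame(dev, skb);

	if (!example_aggregate_full(dev))
		return NULL;	/* nothing to send yet: usbnet takes the not_drop path */

	/* hypothetical helper returning the assembled USB transfer */
	return example_build_aggregate(dev, flags);
}
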
@@ -1067,13 +1078,18 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1067 /* don't assume the hardware handles USB_ZERO_PACKET 1078 /* don't assume the hardware handles USB_ZERO_PACKET
1068 * NOTE: strictly conforming cdc-ether devices should expect 1079 * NOTE: strictly conforming cdc-ether devices should expect
1069 * the ZLP here, but ignore the one-byte packet. 1080 * the ZLP here, but ignore the one-byte packet.
1081 * NOTE2: the CDC NCM specification differs from CDC ECM in its
1082 * handling of ZLP/short packets, so the cdc_ncm driver creates the
1083 * short packet itself when needed.
1070 */ 1084 */
1071 if (length % dev->maxpacket == 0) { 1085 if (length % dev->maxpacket == 0) {
1072 if (!(info->flags & FLAG_SEND_ZLP)) { 1086 if (!(info->flags & FLAG_SEND_ZLP)) {
1073 urb->transfer_buffer_length++; 1087 if (!(info->flags & FLAG_MULTI_PACKET)) {
1074 if (skb_tailroom(skb)) { 1088 urb->transfer_buffer_length++;
1075 skb->data[skb->len] = 0; 1089 if (skb_tailroom(skb)) {
1076 __skb_put(skb, 1); 1090 skb->data[skb->len] = 0;
1091 __skb_put(skb, 1);
1092 }
1077 } 1093 }
1078 } else 1094 } else
1079 urb->transfer_flags |= URB_ZERO_PACKET; 1095 urb->transfer_flags |= URB_ZERO_PACKET;
@@ -1122,6 +1138,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
1122 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval); 1138 netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
1123drop: 1139drop:
1124 dev->net->stats.tx_dropped++; 1140 dev->net->stats.tx_dropped++;
1141not_drop:
1125 if (skb) 1142 if (skb)
1126 dev_kfree_skb_any (skb); 1143 dev_kfree_skb_any (skb);
1127 usb_free_urb (urb); 1144 usb_free_urb (urb);
@@ -1231,8 +1248,7 @@ void usbnet_disconnect (struct usb_interface *intf)
1231 net = dev->net; 1248 net = dev->net;
1232 unregister_netdev (net); 1249 unregister_netdev (net);
1233 1250
1234 /* we don't hold rtnl here ... */ 1251 cancel_work_sync(&dev->kevent);
1235 flush_scheduled_work ();
1236 1252
1237 if (dev->driver_info->unbind) 1253 if (dev->driver_info->unbind)
1238 dev->driver_info->unbind (dev, intf); 1254 dev->driver_info->unbind (dev, intf);
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index 0bbc0c323135..cc83fa71c3ff 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -166,7 +166,9 @@ static netdev_tx_t veth_xmit(struct sk_buff *skb, struct net_device *dev)
166 if (!(rcv->flags & IFF_UP)) 166 if (!(rcv->flags & IFF_UP))
167 goto tx_drop; 167 goto tx_drop;
168 168
169 if (dev->features & NETIF_F_NO_CSUM) 169 /* don't change ip_summed == CHECKSUM_PARTIAL, as that
170 will cause bad checksum on forwarded packets */
171 if (skb->ip_summed == CHECKSUM_NONE)
170 skb->ip_summed = rcv_priv->ip_summed; 172 skb->ip_summed = rcv_priv->ip_summed;
171 173
172 length = skb->len + ETH_HLEN; 174 length = skb->len + ETH_HLEN;
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 4930f9dbc493..5e7f069eab53 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -30,8 +30,8 @@
30*/ 30*/
31 31
32#define DRV_NAME "via-rhine" 32#define DRV_NAME "via-rhine"
33#define DRV_VERSION "1.4.3" 33#define DRV_VERSION "1.5.0"
34#define DRV_RELDATE "2007-03-06" 34#define DRV_RELDATE "2010-10-09"
35 35
36 36
37/* A few user-configurable values. 37/* A few user-configurable values.
@@ -100,6 +100,7 @@ static const int multicast_filter_limit = 32;
100#include <linux/mii.h> 100#include <linux/mii.h>
101#include <linux/ethtool.h> 101#include <linux/ethtool.h>
102#include <linux/crc32.h> 102#include <linux/crc32.h>
103#include <linux/if_vlan.h>
103#include <linux/bitops.h> 104#include <linux/bitops.h>
104#include <linux/workqueue.h> 105#include <linux/workqueue.h>
105#include <asm/processor.h> /* Processor type for cache alignment. */ 106#include <asm/processor.h> /* Processor type for cache alignment. */
@@ -133,6 +134,9 @@ MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
133MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames"); 134MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
134MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)"); 135MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
135 136
137#define MCAM_SIZE 32
138#define VCAM_SIZE 32
139
136/* 140/*
137 Theory of Operation 141 Theory of Operation
138 142
@@ -279,15 +283,16 @@ MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
279/* Offsets to the device registers. */ 283/* Offsets to the device registers. */
280enum register_offsets { 284enum register_offsets {
281 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08, 285 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
282 ChipCmd1=0x09, 286 ChipCmd1=0x09, TQWake=0x0A,
283 IntrStatus=0x0C, IntrEnable=0x0E, 287 IntrStatus=0x0C, IntrEnable=0x0E,
284 MulticastFilter0=0x10, MulticastFilter1=0x14, 288 MulticastFilter0=0x10, MulticastFilter1=0x14,
285 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54, 289 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
286 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, 290 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
287 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74, 291 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
288 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B, 292 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
289 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81, 293 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
290 StickyHW=0x83, IntrStatus2=0x84, 294 StickyHW=0x83, IntrStatus2=0x84,
295 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
291 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4, 296 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
292 WOLcrClr1=0xA6, WOLcgClr=0xA7, 297 WOLcrClr1=0xA6, WOLcgClr=0xA7,
293 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD, 298 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
@@ -299,6 +304,40 @@ enum backoff_bits {
299 BackCaptureEffect=0x04, BackRandom=0x08 304 BackCaptureEffect=0x04, BackRandom=0x08
300}; 305};
301 306
307/* Bits in the TxConfig (TCR) register */
308enum tcr_bits {
309 TCR_PQEN=0x01,
310 TCR_LB0=0x02, /* loopback[0] */
311 TCR_LB1=0x04, /* loopback[1] */
312 TCR_OFSET=0x08,
313 TCR_RTGOPT=0x10,
314 TCR_RTFT0=0x20,
315 TCR_RTFT1=0x40,
316 TCR_RTSF=0x80,
317};
318
319/* Bits in the CamCon (CAMC) register */
320enum camcon_bits {
321 CAMC_CAMEN=0x01,
322 CAMC_VCAMSL=0x02,
323 CAMC_CAMWR=0x04,
324 CAMC_CAMRD=0x08,
325};
326
327/* Bits in the PCIBusConfig1 (BCR1) register */
328enum bcr1_bits {
329 BCR1_POT0=0x01,
330 BCR1_POT1=0x02,
331 BCR1_POT2=0x04,
332 BCR1_CTFT0=0x08,
333 BCR1_CTFT1=0x10,
334 BCR1_CTSF=0x20,
335 BCR1_TXQNOBK=0x40, /* for VT6105 */
336 BCR1_VIDFR=0x80, /* for VT6105 */
337 BCR1_MED0=0x40, /* for VT6102 */
338 BCR1_MED1=0x80, /* for VT6102 */
339};
340
302#ifdef USE_MMIO 341#ifdef USE_MMIO
303/* Registers we check that mmio and reg are the same. */ 342/* Registers we check that mmio and reg are the same. */
304static const int mmio_verify_registers[] = { 343static const int mmio_verify_registers[] = {
@@ -356,6 +395,11 @@ enum desc_status_bits {
356 DescOwn=0x80000000 395 DescOwn=0x80000000
357}; 396};
358 397
398/* Bits in *_desc.*_length */
399enum desc_length_bits {
400 DescTag=0x00010000
401};
402
359/* Bits in ChipCmd. */ 403/* Bits in ChipCmd. */
360enum chip_cmd_bits { 404enum chip_cmd_bits {
361 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08, 405 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
@@ -365,6 +409,9 @@ enum chip_cmd_bits {
365}; 409};
366 410
367struct rhine_private { 411struct rhine_private {
412 /* Bit mask for configured VLAN ids */
413 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
414
368 /* Descriptor rings */ 415 /* Descriptor rings */
369 struct rx_desc *rx_ring; 416 struct rx_desc *rx_ring;
370 struct tx_desc *tx_ring; 417 struct tx_desc *tx_ring;
@@ -405,6 +452,23 @@ struct rhine_private {
405 void __iomem *base; 452 void __iomem *base;
406}; 453};
407 454
455#define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
456#define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
457#define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
458
459#define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
460#define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
461#define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
462
463#define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
464#define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
465#define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
466
467#define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
468#define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
469#define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
470
471
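
These macros are simple read-modify-write accessors over the ioread/iowrite primitives; for illustration, BYTE_REG_BITS_SET(x, m, p) expands to the equivalent of:

	u8 old = ioread8(p);
	iowrite8((old & ~(m)) | (x), p);	/* clear the bits in m, then set the bits in x */

The later hunks use these helpers, for example to toggle BCR1_VIDFR in rhine_set_rx_mode and to update the transmit threshold in rhine_error.
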
408static int mdio_read(struct net_device *dev, int phy_id, int location); 472static int mdio_read(struct net_device *dev, int phy_id, int location);
409static void mdio_write(struct net_device *dev, int phy_id, int location, int value); 473static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
410static int rhine_open(struct net_device *dev); 474static int rhine_open(struct net_device *dev);
@@ -422,6 +486,14 @@ static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
422static const struct ethtool_ops netdev_ethtool_ops; 486static const struct ethtool_ops netdev_ethtool_ops;
423static int rhine_close(struct net_device *dev); 487static int rhine_close(struct net_device *dev);
424static void rhine_shutdown (struct pci_dev *pdev); 488static void rhine_shutdown (struct pci_dev *pdev);
489static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
490static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
491static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr);
492static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr);
493static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask);
494static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask);
495static void rhine_init_cam_filter(struct net_device *dev);
496static void rhine_update_vcam(struct net_device *dev);
425 497
426#define RHINE_WAIT_FOR(condition) do { \ 498#define RHINE_WAIT_FOR(condition) do { \
427 int i=1024; \ 499 int i=1024; \
@@ -629,6 +701,8 @@ static const struct net_device_ops rhine_netdev_ops = {
629 .ndo_set_mac_address = eth_mac_addr, 701 .ndo_set_mac_address = eth_mac_addr,
630 .ndo_do_ioctl = netdev_ioctl, 702 .ndo_do_ioctl = netdev_ioctl,
631 .ndo_tx_timeout = rhine_tx_timeout, 703 .ndo_tx_timeout = rhine_tx_timeout,
704 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
705 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
632#ifdef CONFIG_NET_POLL_CONTROLLER 706#ifdef CONFIG_NET_POLL_CONTROLLER
633 .ndo_poll_controller = rhine_poll, 707 .ndo_poll_controller = rhine_poll,
634#endif 708#endif
@@ -795,6 +869,10 @@ static int __devinit rhine_init_one(struct pci_dev *pdev,
795 if (rp->quirks & rqRhineI) 869 if (rp->quirks & rqRhineI)
796 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM; 870 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
797 871
872 if (pdev->revision >= VT6105M)
873 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
874 NETIF_F_HW_VLAN_FILTER;
875
798 /* dev->name not defined before register_netdev()! */ 876 /* dev->name not defined before register_netdev()! */
799 rc = register_netdev(dev); 877 rc = register_netdev(dev);
800 if (rc) 878 if (rc)
@@ -1040,6 +1118,167 @@ static void rhine_set_carrier(struct mii_if_info *mii)
1040 netif_carrier_ok(mii->dev)); 1118 netif_carrier_ok(mii->dev));
1041} 1119}
1042 1120
1121/**
1122 * rhine_set_cam - set CAM multicast filters
1123 * @ioaddr: register block of this Rhine
1124 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1125 * @addr: multicast address (6 bytes)
1126 *
1127 * Load addresses into multicast filters.
1128 */
1129static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1130{
1131 int i;
1132
1133 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1134 wmb();
1135
1136 /* Paranoid -- idx out of range should never happen */
1137 idx &= (MCAM_SIZE - 1);
1138
1139 iowrite8((u8) idx, ioaddr + CamAddr);
1140
1141 for (i = 0; i < 6; i++, addr++)
1142 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1143 udelay(10);
1144 wmb();
1145
1146 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1147 udelay(10);
1148
1149 iowrite8(0, ioaddr + CamCon);
1150}
1151
1152/**
1153 * rhine_set_vlan_cam - set CAM VLAN filters
1154 * @ioaddr: register block of this Rhine
1155 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1156 * @addr: VLAN ID (2 bytes)
1157 *
1158 * Load addresses into VLAN filters.
1159 */
1160static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1161{
1162 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1163 wmb();
1164
1165 /* Paranoid -- idx out of range should never happen */
1166 idx &= (VCAM_SIZE - 1);
1167
1168 iowrite8((u8) idx, ioaddr + CamAddr);
1169
1170 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1171 udelay(10);
1172 wmb();
1173
1174 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1175 udelay(10);
1176
1177 iowrite8(0, ioaddr + CamCon);
1178}
1179
1180/**
1181 * rhine_set_cam_mask - set multicast CAM mask
1182 * @ioaddr: register block of this Rhine
1183 * @mask: multicast CAM mask
1184 *
1185 * Mask sets multicast filters active/inactive.
1186 */
1187static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1188{
1189 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1190 wmb();
1191
1192 /* write mask */
1193 iowrite32(mask, ioaddr + CamMask);
1194
1195 /* disable CAMEN */
1196 iowrite8(0, ioaddr + CamCon);
1197}
1198
1199/**
1200 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1201 * @ioaddr: register block of this Rhine
1202 * @mask: VLAN CAM mask
1203 *
1204 * Mask sets VLAN filters active/inactive.
1205 */
1206static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1207{
1208 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1209 wmb();
1210
1211 /* write mask */
1212 iowrite32(mask, ioaddr + CamMask);
1213
1214 /* disable CAMEN */
1215 iowrite8(0, ioaddr + CamCon);
1216}
1217
1218/**
1219 * rhine_init_cam_filter - initialize CAM filters
1220 * @dev: network device
1221 *
1222 * Initialize (disable) hardware VLAN and multicast support on this
1223 * Rhine.
1224 */
1225static void rhine_init_cam_filter(struct net_device *dev)
1226{
1227 struct rhine_private *rp = netdev_priv(dev);
1228 void __iomem *ioaddr = rp->base;
1229
1230 /* Disable all CAMs */
1231 rhine_set_vlan_cam_mask(ioaddr, 0);
1232 rhine_set_cam_mask(ioaddr, 0);
1233
1234 /* disable hardware VLAN support */
1235 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1236 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1237}
1238
1239/**
1240 * rhine_update_vcam - update VLAN CAM filters
1241 * @dev: network device
1242 *
1243 * Update VLAN CAM filters to match configuration change.
1244 */
1245static void rhine_update_vcam(struct net_device *dev)
1246{
1247 struct rhine_private *rp = netdev_priv(dev);
1248 void __iomem *ioaddr = rp->base;
1249 u16 vid;
1250 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1251 unsigned int i = 0;
1252
1253 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1254 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1255 vCAMmask |= 1 << i;
1256 if (++i >= VCAM_SIZE)
1257 break;
1258 }
1259 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1260}
1261
1262static void rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1263{
1264 struct rhine_private *rp = netdev_priv(dev);
1265
1266 spin_lock_irq(&rp->lock);
1267 set_bit(vid, rp->active_vlans);
1268 rhine_update_vcam(dev);
1269 spin_unlock_irq(&rp->lock);
1270}
1271
1272static void rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1273{
1274 struct rhine_private *rp = netdev_priv(dev);
1275
1276 spin_lock_irq(&rp->lock);
1277 clear_bit(vid, rp->active_vlans);
1278 rhine_update_vcam(dev);
1279 spin_unlock_irq(&rp->lock);
1280}
1281
1043static void init_registers(struct net_device *dev) 1282static void init_registers(struct net_device *dev)
1044{ 1283{
1045 struct rhine_private *rp = netdev_priv(dev); 1284 struct rhine_private *rp = netdev_priv(dev);
@@ -1061,6 +1300,9 @@ static void init_registers(struct net_device *dev)
1061 1300
1062 rhine_set_rx_mode(dev); 1301 rhine_set_rx_mode(dev);
1063 1302
1303 if (rp->pdev->revision >= VT6105M)
1304 rhine_init_cam_filter(dev);
1305
1064 napi_enable(&rp->napi); 1306 napi_enable(&rp->napi);
1065 1307
1066 /* Enable interrupts by setting the interrupt mask. */ 1308 /* Enable interrupts by setting the interrupt mask. */
@@ -1276,16 +1518,28 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1276 rp->tx_ring[entry].desc_length = 1518 rp->tx_ring[entry].desc_length =
1277 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN)); 1519 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1278 1520
1521 if (unlikely(vlan_tx_tag_present(skb))) {
1522 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1523 /* request tagging */
1524 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1525 }
1526 else
1527 rp->tx_ring[entry].tx_status = 0;
1528
1279 /* lock eth irq */ 1529 /* lock eth irq */
1280 spin_lock_irqsave(&rp->lock, flags); 1530 spin_lock_irqsave(&rp->lock, flags);
1281 wmb(); 1531 wmb();
1282 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn); 1532 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1283 wmb(); 1533 wmb();
1284 1534
1285 rp->cur_tx++; 1535 rp->cur_tx++;
1286 1536
1287 /* Non-x86 Todo: explicitly flush cache lines here. */ 1537 /* Non-x86 Todo: explicitly flush cache lines here. */
1288 1538
1539 if (vlan_tx_tag_present(skb))
1540 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1541 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1542
1289 /* Wake the potentially-idle transmit channel */ 1543 /* Wake the potentially-idle transmit channel */
1290 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 1544 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1291 ioaddr + ChipCmd1); 1545 ioaddr + ChipCmd1);
@@ -1437,6 +1691,21 @@ static void rhine_tx(struct net_device *dev)
1437 spin_unlock(&rp->lock); 1691 spin_unlock(&rp->lock);
1438} 1692}
1439 1693
1694/**
1695 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1696 * @skb: pointer to sk_buff
1697 * @data_size: used data area of the buffer including CRC
1698 *
1699 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1700 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1701 * aligned following the CRC.
1702 */
1703static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1704{
1705 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1706 return ntohs(*(u16 *)trailer);
1707}
1708
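
As a worked example of the offset arithmetic above: for data_size = 61 (57 bytes of frame data plus the 4-byte CRC), (61 + 3) & ~3 rounds up to 64, so the 802.1Q trailer starts at skb->data + 64 and the TCI sits at skb->data + 66, two bytes past the TPID.
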
1440/* Process up to limit frames from receive ring */ 1709/* Process up to limit frames from receive ring */
1441static int rhine_rx(struct net_device *dev, int limit) 1710static int rhine_rx(struct net_device *dev, int limit)
1442{ 1711{
@@ -1454,6 +1723,7 @@ static int rhine_rx(struct net_device *dev, int limit)
1454 for (count = 0; count < limit; ++count) { 1723 for (count = 0; count < limit; ++count) {
1455 struct rx_desc *desc = rp->rx_head_desc; 1724 struct rx_desc *desc = rp->rx_head_desc;
1456 u32 desc_status = le32_to_cpu(desc->rx_status); 1725 u32 desc_status = le32_to_cpu(desc->rx_status);
1726 u32 desc_length = le32_to_cpu(desc->desc_length);
1457 int data_size = desc_status >> 16; 1727 int data_size = desc_status >> 16;
1458 1728
1459 if (desc_status & DescOwn) 1729 if (desc_status & DescOwn)
@@ -1498,6 +1768,7 @@ static int rhine_rx(struct net_device *dev, int limit)
1498 struct sk_buff *skb = NULL; 1768 struct sk_buff *skb = NULL;
1499 /* Length should omit the CRC */ 1769 /* Length should omit the CRC */
1500 int pkt_len = data_size - 4; 1770 int pkt_len = data_size - 4;
1771 u16 vlan_tci = 0;
1501 1772
1502 /* Check if the packet is long enough to accept without 1773 /* Check if the packet is long enough to accept without
1503 copying to a minimally-sized skbuff. */ 1774 copying to a minimally-sized skbuff. */
@@ -1532,7 +1803,14 @@ static int rhine_rx(struct net_device *dev, int limit)
1532 rp->rx_buf_sz, 1803 rp->rx_buf_sz,
1533 PCI_DMA_FROMDEVICE); 1804 PCI_DMA_FROMDEVICE);
1534 } 1805 }
1806
1807 if (unlikely(desc_length & DescTag))
1808 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1809
1535 skb->protocol = eth_type_trans(skb, dev); 1810 skb->protocol = eth_type_trans(skb, dev);
1811
1812 if (unlikely(desc_length & DescTag))
1813 __vlan_hwaccel_put_tag(skb, vlan_tci);
1536 netif_receive_skb(skb); 1814 netif_receive_skb(skb);
1537 dev->stats.rx_bytes += pkt_len; 1815 dev->stats.rx_bytes += pkt_len;
1538 dev->stats.rx_packets++; 1816 dev->stats.rx_packets++;
@@ -1596,6 +1874,11 @@ static void rhine_restart_tx(struct net_device *dev) {
1596 1874
1597 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn, 1875 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1598 ioaddr + ChipCmd); 1876 ioaddr + ChipCmd);
1877
1878 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1879 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1880 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1881
1599 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand, 1882 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1600 ioaddr + ChipCmd1); 1883 ioaddr + ChipCmd1);
1601 IOSYNC; 1884 IOSYNC;
@@ -1631,7 +1914,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
1631 } 1914 }
1632 if (intr_status & IntrTxUnderrun) { 1915 if (intr_status & IntrTxUnderrun) {
1633 if (rp->tx_thresh < 0xE0) 1916 if (rp->tx_thresh < 0xE0)
1634 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig); 1917 BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
1635 if (debug > 1) 1918 if (debug > 1)
1636 printk(KERN_INFO "%s: Transmitter underrun, Tx " 1919 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1637 "threshold now %2.2x.\n", 1920 "threshold now %2.2x.\n",
@@ -1646,7 +1929,7 @@ static void rhine_error(struct net_device *dev, int intr_status)
1646 (intr_status & (IntrTxAborted | 1929 (intr_status & (IntrTxAborted |
1647 IntrTxUnderrun | IntrTxDescRace)) == 0) { 1930 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1648 if (rp->tx_thresh < 0xE0) { 1931 if (rp->tx_thresh < 0xE0) {
1649 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig); 1932 BYTE_REG_BITS_SET((rp->tx_thresh += 0x20), 0x80, ioaddr + TxConfig);
1650 } 1933 }
1651 if (debug > 1) 1934 if (debug > 1)
1652 printk(KERN_INFO "%s: Unspecified error. Tx " 1935 printk(KERN_INFO "%s: Unspecified error. Tx "
@@ -1688,7 +1971,8 @@ static void rhine_set_rx_mode(struct net_device *dev)
1688 struct rhine_private *rp = netdev_priv(dev); 1971 struct rhine_private *rp = netdev_priv(dev);
1689 void __iomem *ioaddr = rp->base; 1972 void __iomem *ioaddr = rp->base;
1690 u32 mc_filter[2]; /* Multicast hash filter */ 1973 u32 mc_filter[2]; /* Multicast hash filter */
1691 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */ 1974 u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
1975 struct netdev_hw_addr *ha;
1692 1976
1693 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */ 1977 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1694 rx_mode = 0x1C; 1978 rx_mode = 0x1C;
@@ -1699,10 +1983,18 @@ static void rhine_set_rx_mode(struct net_device *dev)
1699 /* Too many to match, or accept all multicasts. */ 1983 /* Too many to match, or accept all multicasts. */
1700 iowrite32(0xffffffff, ioaddr + MulticastFilter0); 1984 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1701 iowrite32(0xffffffff, ioaddr + MulticastFilter1); 1985 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1702 rx_mode = 0x0C; 1986 } else if (rp->pdev->revision >= VT6105M) {
1987 int i = 0;
1988 u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
1989 netdev_for_each_mc_addr(ha, dev) {
1990 if (i == MCAM_SIZE)
1991 break;
1992 rhine_set_cam(ioaddr, i, ha->addr);
1993 mCAMmask |= 1 << i;
1994 i++;
1995 }
1996 rhine_set_cam_mask(ioaddr, mCAMmask);
1703 } else { 1997 } else {
1704 struct netdev_hw_addr *ha;
1705
1706 memset(mc_filter, 0, sizeof(mc_filter)); 1998 memset(mc_filter, 0, sizeof(mc_filter));
1707 netdev_for_each_mc_addr(ha, dev) { 1999 netdev_for_each_mc_addr(ha, dev) {
1708 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26; 2000 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
@@ -1711,9 +2003,15 @@ static void rhine_set_rx_mode(struct net_device *dev)
1711 } 2003 }
1712 iowrite32(mc_filter[0], ioaddr + MulticastFilter0); 2004 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1713 iowrite32(mc_filter[1], ioaddr + MulticastFilter1); 2005 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1714 rx_mode = 0x0C;
1715 } 2006 }
1716 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig); 2007 /* enable/disable VLAN receive filtering */
2008 if (rp->pdev->revision >= VT6105M) {
2009 if (dev->flags & IFF_PROMISC)
2010 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2011 else
2012 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2013 }
2014 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
1717} 2015}
1718 2016
1719static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) 2017static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
@@ -1966,7 +2264,7 @@ static int rhine_resume(struct pci_dev *pdev)
1966 if (!netif_running(dev)) 2264 if (!netif_running(dev))
1967 return 0; 2265 return 0;
1968 2266
1969 if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev)) 2267 if (request_irq(dev->irq, rhine_interrupt, IRQF_SHARED, dev->name, dev))
1970 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name); 2268 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1971 2269
1972 ret = pci_set_power_state(pdev, PCI_D0); 2270 ret = pci_set_power_state(pdev, PCI_D0);
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index b6d402806ae6..90a23e410d1b 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -519,7 +519,7 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
519 519
520 if (skb->ip_summed == CHECKSUM_PARTIAL) { 520 if (skb->ip_summed == CHECKSUM_PARTIAL) {
521 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 521 hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
522 hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb); 522 hdr->hdr.csum_start = skb_checksum_start_offset(skb);
523 hdr->hdr.csum_offset = skb->csum_offset; 523 hdr->hdr.csum_offset = skb->csum_offset;
524 } else { 524 } else {
525 hdr->hdr.flags = 0; 525 hdr->hdr.flags = 0;
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 21314e06e6d7..d143e8b72b5b 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -44,6 +44,9 @@ MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);
44 44
45static atomic_t devices_found; 45static atomic_t devices_found;
46 46
47#define VMXNET3_MAX_DEVICES 10
48static int enable_mq = 1;
49static int irq_share_mode;
47 50
48/* 51/*
49 * Enable/Disable the given intr 52 * Enable/Disable the given intr
@@ -99,7 +102,7 @@ vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
99static bool 102static bool
100vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 103vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
101{ 104{
102 return netif_queue_stopped(adapter->netdev); 105 return tq->stopped;
103} 106}
104 107
105 108
@@ -107,7 +110,7 @@ static void
107vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 110vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
108{ 111{
109 tq->stopped = false; 112 tq->stopped = false;
110 netif_start_queue(adapter->netdev); 113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
111} 114}
112 115
113 116
@@ -115,7 +118,7 @@ static void
115vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) 118vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
116{ 119{
117 tq->stopped = false; 120 tq->stopped = false;
118 netif_wake_queue(adapter->netdev); 121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
119} 122}
120 123
121 124
@@ -124,7 +127,7 @@ vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
124{ 127{
125 tq->stopped = true; 128 tq->stopped = true;
126 tq->num_stop++; 129 tq->num_stop++;
127 netif_stop_queue(adapter->netdev); 130 netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
128} 131}
129 132
130 133
@@ -135,6 +138,7 @@ static void
135vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue) 138vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
136{ 139{
137 u32 ret; 140 u32 ret;
141 int i;
138 142
139 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK); 143 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
140 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD); 144 ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
@@ -145,22 +149,28 @@ vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
145 if (!netif_carrier_ok(adapter->netdev)) 149 if (!netif_carrier_ok(adapter->netdev))
146 netif_carrier_on(adapter->netdev); 150 netif_carrier_on(adapter->netdev);
147 151
148 if (affectTxQueue) 152 if (affectTxQueue) {
149 vmxnet3_tq_start(&adapter->tx_queue, adapter); 153 for (i = 0; i < adapter->num_tx_queues; i++)
154 vmxnet3_tq_start(&adapter->tx_queue[i],
155 adapter);
156 }
150 } else { 157 } else {
151 printk(KERN_INFO "%s: NIC Link is Down\n", 158 printk(KERN_INFO "%s: NIC Link is Down\n",
152 adapter->netdev->name); 159 adapter->netdev->name);
153 if (netif_carrier_ok(adapter->netdev)) 160 if (netif_carrier_ok(adapter->netdev))
154 netif_carrier_off(adapter->netdev); 161 netif_carrier_off(adapter->netdev);
155 162
156 if (affectTxQueue) 163 if (affectTxQueue) {
157 vmxnet3_tq_stop(&adapter->tx_queue, adapter); 164 for (i = 0; i < adapter->num_tx_queues; i++)
165 vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
166 }
158 } 167 }
159} 168}
160 169
161static void 170static void
162vmxnet3_process_events(struct vmxnet3_adapter *adapter) 171vmxnet3_process_events(struct vmxnet3_adapter *adapter)
163{ 172{
173 int i;
164 u32 events = le32_to_cpu(adapter->shared->ecr); 174 u32 events = le32_to_cpu(adapter->shared->ecr);
165 if (!events) 175 if (!events)
166 return; 176 return;
@@ -176,16 +186,18 @@ vmxnet3_process_events(struct vmxnet3_adapter *adapter)
176 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, 186 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
177 VMXNET3_CMD_GET_QUEUE_STATUS); 187 VMXNET3_CMD_GET_QUEUE_STATUS);
178 188
179 if (adapter->tqd_start->status.stopped) { 189 for (i = 0; i < adapter->num_tx_queues; i++)
180 printk(KERN_ERR "%s: tq error 0x%x\n", 190 if (adapter->tqd_start[i].status.stopped)
181 adapter->netdev->name, 191 dev_err(&adapter->netdev->dev,
182 le32_to_cpu(adapter->tqd_start->status.error)); 192 "%s: tq[%d] error 0x%x\n",
183 } 193 adapter->netdev->name, i, le32_to_cpu(
184 if (adapter->rqd_start->status.stopped) { 194 adapter->tqd_start[i].status.error));
185 printk(KERN_ERR "%s: rq error 0x%x\n", 195 for (i = 0; i < adapter->num_rx_queues; i++)
186 adapter->netdev->name, 196 if (adapter->rqd_start[i].status.stopped)
187 adapter->rqd_start->status.error); 197 dev_err(&adapter->netdev->dev,
188 } 198 "%s: rq[%d] error 0x%x\n",
199 adapter->netdev->name, i,
200 adapter->rqd_start[i].status.error);
189 201
190 schedule_work(&adapter->work); 202 schedule_work(&adapter->work);
191 } 203 }
@@ -410,7 +422,7 @@ vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
410} 422}
411 423
412 424
413void 425static void
414vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, 426vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
415 struct vmxnet3_adapter *adapter) 427 struct vmxnet3_adapter *adapter)
416{ 428{
@@ -437,6 +449,17 @@ vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
437} 449}
438 450
439 451
452/* Destroy all tx queues */
453void
454vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
455{
456 int i;
457
458 for (i = 0; i < adapter->num_tx_queues; i++)
459 vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
460}
461
462
440static void 463static void
441vmxnet3_tq_init(struct vmxnet3_tx_queue *tq, 464vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
442 struct vmxnet3_adapter *adapter) 465 struct vmxnet3_adapter *adapter)
@@ -518,6 +541,14 @@ err:
518 return -ENOMEM; 541 return -ENOMEM;
519} 542}
520 543
544static void
545vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
546{
547 int i;
548
549 for (i = 0; i < adapter->num_tx_queues; i++)
550 vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
551}
521 552
522/* 553/*
523 * starting from ring->next2fill, allocate rx buffers for the given ring 554 * starting from ring->next2fill, allocate rx buffers for the given ring
@@ -732,6 +763,17 @@ vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
732} 763}
733 764
734 765
766/* Init all tx queues */
767static void
768vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
769{
770 int i;
771
772 for (i = 0; i < adapter->num_tx_queues; i++)
773 vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
774}
775
776
735/* 777/*
736 * parse and copy relevant protocol headers: 778 * parse and copy relevant protocol headers:
737 * For a tso pkt, relevant headers are L2/3/4 including options 779 * For a tso pkt, relevant headers are L2/3/4 including options
@@ -756,7 +798,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
756{ 798{
757 struct Vmxnet3_TxDataDesc *tdd; 799 struct Vmxnet3_TxDataDesc *tdd;
758 800
759 if (ctx->mss) { 801 if (ctx->mss) { /* TSO */
760 ctx->eth_ip_hdr_size = skb_transport_offset(skb); 802 ctx->eth_ip_hdr_size = skb_transport_offset(skb);
761 ctx->l4_hdr_size = ((struct tcphdr *) 803 ctx->l4_hdr_size = ((struct tcphdr *)
762 skb_transport_header(skb))->doff * 4; 804 skb_transport_header(skb))->doff * 4;
@@ -765,7 +807,7 @@ vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
765 unsigned int pull_size; 807 unsigned int pull_size;
766 808
767 if (skb->ip_summed == CHECKSUM_PARTIAL) { 809 if (skb->ip_summed == CHECKSUM_PARTIAL) {
768 ctx->eth_ip_hdr_size = skb_transport_offset(skb); 810 ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);
769 811
770 if (ctx->ipv4) { 812 if (ctx->ipv4) {
771 struct iphdr *iph = (struct iphdr *) 813 struct iphdr *iph = (struct iphdr *)
@@ -903,6 +945,21 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
903 } 945 }
904 } 946 }
905 947
948 spin_lock_irqsave(&tq->tx_lock, flags);
949
950 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
951 tq->stats.tx_ring_full++;
952 dev_dbg(&adapter->netdev->dev,
953 "tx queue stopped on %s, next2comp %u"
954 " next2fill %u\n", adapter->netdev->name,
955 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
956
957 vmxnet3_tq_stop(tq, adapter);
958 spin_unlock_irqrestore(&tq->tx_lock, flags);
959 return NETDEV_TX_BUSY;
960 }
961
962
906 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter); 963 ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
907 if (ret >= 0) { 964 if (ret >= 0) {
908 BUG_ON(ret <= 0 && ctx.copy_size != 0); 965 BUG_ON(ret <= 0 && ctx.copy_size != 0);
@@ -923,21 +980,7 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
923 } 980 }
924 } else { 981 } else {
925 tq->stats.drop_hdr_inspect_err++; 982 tq->stats.drop_hdr_inspect_err++;
926 goto drop_pkt; 983 goto unlock_drop_pkt;
927 }
928
929 spin_lock_irqsave(&tq->tx_lock, flags);
930
931 if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
932 tq->stats.tx_ring_full++;
933 dev_dbg(&adapter->netdev->dev,
934 "tx queue stopped on %s, next2comp %u"
935 " next2fill %u\n", adapter->netdev->name,
936 tq->tx_ring.next2comp, tq->tx_ring.next2fill);
937
938 vmxnet3_tq_stop(tq, adapter);
939 spin_unlock_irqrestore(&tq->tx_lock, flags);
940 return NETDEV_TX_BUSY;
941 } 984 }
942 985
943 /* fill tx descs related to addr & len */ 986 /* fill tx descs related to addr & len */
@@ -1000,7 +1043,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1000 if (le32_to_cpu(tq->shared->txNumDeferred) >= 1043 if (le32_to_cpu(tq->shared->txNumDeferred) >=
1001 le32_to_cpu(tq->shared->txThreshold)) { 1044 le32_to_cpu(tq->shared->txThreshold)) {
1002 tq->shared->txNumDeferred = 0; 1045 tq->shared->txNumDeferred = 0;
1003 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_TXPROD, 1046 VMXNET3_WRITE_BAR0_REG(adapter,
1047 VMXNET3_REG_TXPROD + tq->qid * 8,
1004 tq->tx_ring.next2fill); 1048 tq->tx_ring.next2fill);
1005 } 1049 }
1006 1050
@@ -1008,6 +1052,8 @@ vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
1008 1052
1009hdr_too_big: 1053hdr_too_big:
1010 tq->stats.drop_oversized_hdr++; 1054 tq->stats.drop_oversized_hdr++;
1055unlock_drop_pkt:
1056 spin_unlock_irqrestore(&tq->tx_lock, flags);
1011drop_pkt: 1057drop_pkt:
1012 tq->stats.drop_total++; 1058 tq->stats.drop_total++;
1013 dev_kfree_skb(skb); 1059 dev_kfree_skb(skb);
@@ -1020,7 +1066,10 @@ vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
1020{ 1066{
1021 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1067 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1022 1068
1023 return vmxnet3_tq_xmit(skb, &adapter->tx_queue, adapter, netdev); 1069 BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
1070 return vmxnet3_tq_xmit(skb,
1071 &adapter->tx_queue[skb->queue_mapping],
1072 adapter, netdev);
1024} 1073}
1025 1074
1026 1075
@@ -1082,7 +1131,9 @@ static int
1082vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq, 1131vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1083 struct vmxnet3_adapter *adapter, int quota) 1132 struct vmxnet3_adapter *adapter, int quota)
1084{ 1133{
1085 static u32 rxprod_reg[2] = {VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2}; 1134 static const u32 rxprod_reg[2] = {
1135 VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
1136 };
1086 u32 num_rxd = 0; 1137 u32 num_rxd = 0;
1087 struct Vmxnet3_RxCompDesc *rcd; 1138 struct Vmxnet3_RxCompDesc *rcd;
1088 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx; 1139 struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
@@ -1106,9 +1157,9 @@ vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
1106 break; 1157 break;
1107 } 1158 }
1108 num_rxd++; 1159 num_rxd++;
1109 1160 BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
1110 idx = rcd->rxdIdx; 1161 idx = rcd->rxdIdx;
1111 ring_idx = rcd->rqID == rq->qid ? 0 : 1; 1162 ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
1112 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd, 1163 vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
1113 &rxCmdDesc); 1164 &rxCmdDesc);
1114 rbi = rq->buf_info[ring_idx] + idx; 1165 rbi = rq->buf_info[ring_idx] + idx;
@@ -1260,6 +1311,16 @@ vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1260} 1311}
1261 1312
1262 1313
1314static void
1315vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1316{
1317 int i;
1318
1319 for (i = 0; i < adapter->num_rx_queues; i++)
1320 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1321}
1322
1323
1263void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 1324void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1264 struct vmxnet3_adapter *adapter) 1325 struct vmxnet3_adapter *adapter)
1265{ 1326{
@@ -1351,6 +1412,25 @@ vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1351 1412
1352 1413
1353static int 1414static int
1415vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1416{
1417 int i, err = 0;
1418
1419 for (i = 0; i < adapter->num_rx_queues; i++) {
1420 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1421 if (unlikely(err)) {
1422 dev_err(&adapter->netdev->dev, "%s: failed to "
1423 "initialize rx queue%i\n",
1424 adapter->netdev->name, i);
1425 break;
1426 }
1427 }
1428 return err;
1429
1430}
1431
1432
1433static int
1354vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter) 1434vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1355{ 1435{
1356 int i; 1436 int i;
@@ -1398,33 +1478,177 @@ err:
1398 1478
1399 1479
1400static int 1480static int
1481vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1482{
1483 int i, err = 0;
1484
1485 for (i = 0; i < adapter->num_rx_queues; i++) {
1486 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1487 if (unlikely(err)) {
1488 dev_err(&adapter->netdev->dev,
1489 "%s: failed to create rx queue%i\n",
1490 adapter->netdev->name, i);
1491 goto err_out;
1492 }
1493 }
1494 return err;
1495err_out:
1496 vmxnet3_rq_destroy_all(adapter);
1497 return err;
1498
1499}
1500
1501/* Multiple queue aware polling function for tx and rx */
1502
1503static int
1401vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget) 1504vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1402{ 1505{
1506 int rcd_done = 0, i;
1403 if (unlikely(adapter->shared->ecr)) 1507 if (unlikely(adapter->shared->ecr))
1404 vmxnet3_process_events(adapter); 1508 vmxnet3_process_events(adapter);
1509 for (i = 0; i < adapter->num_tx_queues; i++)
1510 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1405 1511
1406 vmxnet3_tq_tx_complete(&adapter->tx_queue, adapter); 1512 for (i = 0; i < adapter->num_rx_queues; i++)
1407 return vmxnet3_rq_rx_complete(&adapter->rx_queue, adapter, budget); 1513 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1514 adapter, budget);
1515 return rcd_done;
1408} 1516}
1409 1517
1410 1518
1411static int 1519static int
1412vmxnet3_poll(struct napi_struct *napi, int budget) 1520vmxnet3_poll(struct napi_struct *napi, int budget)
1413{ 1521{
1414 struct vmxnet3_adapter *adapter = container_of(napi, 1522 struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1415 struct vmxnet3_adapter, napi); 1523 struct vmxnet3_rx_queue, napi);
1524 int rxd_done;
1525
1526 rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1527
1528 if (rxd_done < budget) {
1529 napi_complete(napi);
1530 vmxnet3_enable_all_intrs(rx_queue->adapter);
1531 }
1532 return rxd_done;
1533}
1534
1535/*
1536 * NAPI polling function for MSI-X mode with multiple Rx queues
1537 * Returns the number of NAPI credits consumed (# of rx descriptors processed)
1538 */
1539
1540static int
1541vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1542{
1543 struct vmxnet3_rx_queue *rq = container_of(napi,
1544 struct vmxnet3_rx_queue, napi);
1545 struct vmxnet3_adapter *adapter = rq->adapter;
1416 int rxd_done; 1546 int rxd_done;
1417 1547
1418 rxd_done = vmxnet3_do_poll(adapter, budget); 1548 /* When sharing interrupt with corresponding tx queue, process
1549 * tx completions in that queue as well
1550 */
1551 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
1552 struct vmxnet3_tx_queue *tq =
1553 &adapter->tx_queue[rq - adapter->rx_queue];
1554 vmxnet3_tq_tx_complete(tq, adapter);
1555 }
1556
1557 rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1419 1558
1420 if (rxd_done < budget) { 1559 if (rxd_done < budget) {
1421 napi_complete(napi); 1560 napi_complete(napi);
1422 vmxnet3_enable_intr(adapter, 0); 1561 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1423 } 1562 }
1424 return rxd_done; 1563 return rxd_done;
1425} 1564}
1426 1565
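As a sketch of the buddy mapping used above (the queue count is an assumption for illustration, not taken from this change):

/*
 * With VMXNET3_INTR_BUDDYSHARE and, say, four tx/rx queue pairs, the napi
 * handler for adapter->rx_queue[2] computes rq - adapter->rx_queue == 2 and
 * therefore also reaps tx completions on adapter->tx_queue[2], the queue that
 * shares its MSI-X vector.
 */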
1427 1566
1567#ifdef CONFIG_PCI_MSI
1568
1569/*
1570 * Handle completion interrupts on tx queues
1571 * Returns whether or not the intr is handled
1572 */
1573
1574static irqreturn_t
1575vmxnet3_msix_tx(int irq, void *data)
1576{
1577 struct vmxnet3_tx_queue *tq = data;
1578 struct vmxnet3_adapter *adapter = tq->adapter;
1579
1580 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1581 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1582
1583		/* Handle the case where only one irq is allocated for all tx queues */
1584 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1585 int i;
1586 for (i = 0; i < adapter->num_tx_queues; i++) {
1587 struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1588 vmxnet3_tq_tx_complete(txq, adapter);
1589 }
1590 } else {
1591 vmxnet3_tq_tx_complete(tq, adapter);
1592 }
1593 vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1594
1595 return IRQ_HANDLED;
1596}
1597
1598
1599/*
1600 * Handle completion interrupts on rx queues. Returns whether or not the
1601 * intr is handled
1602 */
1603
1604static irqreturn_t
1605vmxnet3_msix_rx(int irq, void *data)
1606{
1607 struct vmxnet3_rx_queue *rq = data;
1608 struct vmxnet3_adapter *adapter = rq->adapter;
1609
1610 /* disable intr if needed */
1611 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1612 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1613 napi_schedule(&rq->napi);
1614
1615 return IRQ_HANDLED;
1616}
1617
1618/*
1619 *----------------------------------------------------------------------------
1620 *
1621 * vmxnet3_msix_event --
1622 *
1623 * vmxnet3 msix event intr handler
1624 *
1625 * Result:
1626 * whether or not the intr is handled
1627 *
1628 *----------------------------------------------------------------------------
1629 */
1630
1631static irqreturn_t
1632vmxnet3_msix_event(int irq, void *data)
1633{
1634 struct net_device *dev = data;
1635 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1636
1637 /* disable intr if needed */
1638 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1639 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1640
1641 if (adapter->shared->ecr)
1642 vmxnet3_process_events(adapter);
1643
1644 vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1645
1646 return IRQ_HANDLED;
1647}
1648
1649#endif /* CONFIG_PCI_MSI */
1650
1651
1428/* Interrupt handler for vmxnet3 */ 1652/* Interrupt handler for vmxnet3 */
1429static irqreturn_t 1653static irqreturn_t
1430vmxnet3_intr(int irq, void *dev_id) 1654vmxnet3_intr(int irq, void *dev_id)
@@ -1432,7 +1656,7 @@ vmxnet3_intr(int irq, void *dev_id)
1432 struct net_device *dev = dev_id; 1656 struct net_device *dev = dev_id;
1433 struct vmxnet3_adapter *adapter = netdev_priv(dev); 1657 struct vmxnet3_adapter *adapter = netdev_priv(dev);
1434 1658
1435 if (unlikely(adapter->intr.type == VMXNET3_IT_INTX)) { 1659 if (adapter->intr.type == VMXNET3_IT_INTX) {
1436 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR); 1660 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1437 if (unlikely(icr == 0)) 1661 if (unlikely(icr == 0))
1438 /* not ours */ 1662 /* not ours */
@@ -1442,77 +1666,144 @@ vmxnet3_intr(int irq, void *dev_id)
1442 1666
1443 /* disable intr if needed */ 1667 /* disable intr if needed */
1444 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE) 1668 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1445 vmxnet3_disable_intr(adapter, 0); 1669 vmxnet3_disable_all_intrs(adapter);
1446 1670
1447 napi_schedule(&adapter->napi); 1671 napi_schedule(&adapter->rx_queue[0].napi);
1448 1672
1449 return IRQ_HANDLED; 1673 return IRQ_HANDLED;
1450} 1674}
1451 1675
1452#ifdef CONFIG_NET_POLL_CONTROLLER 1676#ifdef CONFIG_NET_POLL_CONTROLLER
1453 1677
1454
1455/* netpoll callback. */ 1678/* netpoll callback. */
1456static void 1679static void
1457vmxnet3_netpoll(struct net_device *netdev) 1680vmxnet3_netpoll(struct net_device *netdev)
1458{ 1681{
1459 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 1682 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1460 int irq;
1461 1683
1462#ifdef CONFIG_PCI_MSI 1684 if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1463 if (adapter->intr.type == VMXNET3_IT_MSIX) 1685 vmxnet3_disable_all_intrs(adapter);
1464 irq = adapter->intr.msix_entries[0].vector; 1686
1465 else 1687 vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
1466#endif 1688 vmxnet3_enable_all_intrs(adapter);
1467 irq = adapter->pdev->irq;
1468 1689
1469 disable_irq(irq);
1470 vmxnet3_intr(irq, netdev);
1471 enable_irq(irq);
1472} 1690}
1473#endif 1691#endif /* CONFIG_NET_POLL_CONTROLLER */
1474 1692
1475static int 1693static int
1476vmxnet3_request_irqs(struct vmxnet3_adapter *adapter) 1694vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1477{ 1695{
1478 int err; 1696 struct vmxnet3_intr *intr = &adapter->intr;
1697 int err = 0, i;
1698 int vector = 0;
1479 1699
1480#ifdef CONFIG_PCI_MSI 1700#ifdef CONFIG_PCI_MSI
1481 if (adapter->intr.type == VMXNET3_IT_MSIX) { 1701 if (adapter->intr.type == VMXNET3_IT_MSIX) {
1482 /* we only use 1 MSI-X vector */ 1702 for (i = 0; i < adapter->num_tx_queues; i++) {
1483 err = request_irq(adapter->intr.msix_entries[0].vector, 1703 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1484 vmxnet3_intr, 0, adapter->netdev->name, 1704 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
1485 adapter->netdev); 1705 adapter->netdev->name, vector);
1486 } else if (adapter->intr.type == VMXNET3_IT_MSI) { 1706 err = request_irq(
1707 intr->msix_entries[vector].vector,
1708 vmxnet3_msix_tx, 0,
1709 adapter->tx_queue[i].name,
1710 &adapter->tx_queue[i]);
1711 } else {
1712 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
1713 adapter->netdev->name, vector);
1714 }
1715 if (err) {
1716 dev_err(&adapter->netdev->dev,
1717 "Failed to request irq for MSIX, %s, "
1718 "error %d\n",
1719 adapter->tx_queue[i].name, err);
1720 return err;
1721 }
1722
1723 /* Handle the case where only 1 MSIx was allocated for
1724 * all tx queues */
1725 if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1726 for (; i < adapter->num_tx_queues; i++)
1727 adapter->tx_queue[i].comp_ring.intr_idx
1728 = vector;
1729 vector++;
1730 break;
1731 } else {
1732 adapter->tx_queue[i].comp_ring.intr_idx
1733 = vector++;
1734 }
1735 }
1736 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
1737 vector = 0;
1738
1739 for (i = 0; i < adapter->num_rx_queues; i++) {
1740 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
1741 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
1742 adapter->netdev->name, vector);
1743 else
1744 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
1745 adapter->netdev->name, vector);
1746 err = request_irq(intr->msix_entries[vector].vector,
1747 vmxnet3_msix_rx, 0,
1748 adapter->rx_queue[i].name,
1749 &(adapter->rx_queue[i]));
1750 if (err) {
1751 printk(KERN_ERR "Failed to request irq for MSIX"
1752 ", %s, error %d\n",
1753 adapter->rx_queue[i].name, err);
1754 return err;
1755 }
1756
1757 adapter->rx_queue[i].comp_ring.intr_idx = vector++;
1758 }
1759
1760 sprintf(intr->event_msi_vector_name, "%s-event-%d",
1761 adapter->netdev->name, vector);
1762 err = request_irq(intr->msix_entries[vector].vector,
1763 vmxnet3_msix_event, 0,
1764 intr->event_msi_vector_name, adapter->netdev);
1765 intr->event_intr_idx = vector;
1766
1767 } else if (intr->type == VMXNET3_IT_MSI) {
1768 adapter->num_rx_queues = 1;
1487 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0, 1769 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1488 adapter->netdev->name, adapter->netdev); 1770 adapter->netdev->name, adapter->netdev);
1489 } else 1771 } else {
1490#endif 1772#endif
1491 { 1773 adapter->num_rx_queues = 1;
1492 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 1774 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1493 IRQF_SHARED, adapter->netdev->name, 1775 IRQF_SHARED, adapter->netdev->name,
1494 adapter->netdev); 1776 adapter->netdev);
1777#ifdef CONFIG_PCI_MSI
1495 } 1778 }
1496 1779#endif
1497 if (err) 1780 intr->num_intrs = vector + 1;
1781 if (err) {
1498 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error" 1782 printk(KERN_ERR "Failed to request irq %s (intr type:%d), error"
1499 ":%d\n", adapter->netdev->name, adapter->intr.type, err); 1783 ":%d\n", adapter->netdev->name, intr->type, err);
1784 } else {
1785 /* Number of rx queues will not change after this */
1786 for (i = 0; i < adapter->num_rx_queues; i++) {
1787 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1788 rq->qid = i;
1789 rq->qid2 = i + adapter->num_rx_queues;
1790 }
1500 1791
1501 1792
1502 if (!err) {
1503 int i;
1504 /* init our intr settings */
1505 for (i = 0; i < adapter->intr.num_intrs; i++)
1506 adapter->intr.mod_levels[i] = UPT1_IML_ADAPTIVE;
1507 1793
1508 /* next setup intr index for all intr sources */ 1794 /* init our intr settings */
1509 adapter->tx_queue.comp_ring.intr_idx = 0; 1795 for (i = 0; i < intr->num_intrs; i++)
1510 adapter->rx_queue.comp_ring.intr_idx = 0; 1796 intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
1511 adapter->intr.event_intr_idx = 0; 1797 if (adapter->intr.type != VMXNET3_IT_MSIX) {
1798 adapter->intr.event_intr_idx = 0;
1799 for (i = 0; i < adapter->num_tx_queues; i++)
1800 adapter->tx_queue[i].comp_ring.intr_idx = 0;
1801 adapter->rx_queue[0].comp_ring.intr_idx = 0;
1802 }
1512 1803
1513 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors " 1804 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
1514 "allocated\n", adapter->netdev->name, adapter->intr.type, 1805 "allocated\n", adapter->netdev->name, intr->type,
1515 adapter->intr.mask_mode, adapter->intr.num_intrs); 1806 intr->mask_mode, intr->num_intrs);
1516 } 1807 }
1517 1808
1518 return err; 1809 return err;
@@ -1522,18 +1813,32 @@ vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1522static void 1813static void
1523vmxnet3_free_irqs(struct vmxnet3_adapter *adapter) 1814vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1524{ 1815{
1525 BUG_ON(adapter->intr.type == VMXNET3_IT_AUTO || 1816 struct vmxnet3_intr *intr = &adapter->intr;
1526 adapter->intr.num_intrs <= 0); 1817 BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
1527 1818
1528 switch (adapter->intr.type) { 1819 switch (intr->type) {
1529#ifdef CONFIG_PCI_MSI 1820#ifdef CONFIG_PCI_MSI
1530 case VMXNET3_IT_MSIX: 1821 case VMXNET3_IT_MSIX:
1531 { 1822 {
1532 int i; 1823 int i, vector = 0;
1824
1825 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1826 for (i = 0; i < adapter->num_tx_queues; i++) {
1827 free_irq(intr->msix_entries[vector++].vector,
1828 &(adapter->tx_queue[i]));
1829 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
1830 break;
1831 }
1832 }
1533 1833
1534 for (i = 0; i < adapter->intr.num_intrs; i++) 1834 for (i = 0; i < adapter->num_rx_queues; i++) {
1535 free_irq(adapter->intr.msix_entries[i].vector, 1835 free_irq(intr->msix_entries[vector++].vector,
1536 adapter->netdev); 1836 &(adapter->rx_queue[i]));
1837 }
1838
1839 free_irq(intr->msix_entries[vector].vector,
1840 adapter->netdev);
1841 BUG_ON(vector >= intr->num_intrs);
1537 break; 1842 break;
1538 } 1843 }
1539#endif 1844#endif
@@ -1727,6 +2032,15 @@ vmxnet3_set_mc(struct net_device *netdev)
1727 kfree(new_table); 2032 kfree(new_table);
1728} 2033}
1729 2034
2035void
2036vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2037{
2038 int i;
2039
2040 for (i = 0; i < adapter->num_rx_queues; i++)
2041 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2042}
2043
1730 2044
1731/* 2045/*
1732 * Set up driver_shared based on settings in adapter. 2046 * Set up driver_shared based on settings in adapter.
@@ -1774,40 +2088,72 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1774 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu); 2088 devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
1775 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa); 2089 devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
1776 devRead->misc.queueDescLen = cpu_to_le32( 2090 devRead->misc.queueDescLen = cpu_to_le32(
1777 sizeof(struct Vmxnet3_TxQueueDesc) + 2091 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
1778 sizeof(struct Vmxnet3_RxQueueDesc)); 2092 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
1779 2093
1780 /* tx queue settings */ 2094 /* tx queue settings */
1781 BUG_ON(adapter->tx_queue.tx_ring.base == NULL); 2095 devRead->misc.numTxQueues = adapter->num_tx_queues;
1782 2096 for (i = 0; i < adapter->num_tx_queues; i++) {
1783 devRead->misc.numTxQueues = 1; 2097 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
1784 tqc = &adapter->tqd_start->conf; 2098 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
1785 tqc->txRingBasePA = cpu_to_le64(adapter->tx_queue.tx_ring.basePA); 2099 tqc = &adapter->tqd_start[i].conf;
1786 tqc->dataRingBasePA = cpu_to_le64(adapter->tx_queue.data_ring.basePA); 2100 tqc->txRingBasePA = cpu_to_le64(tq->tx_ring.basePA);
1787 tqc->compRingBasePA = cpu_to_le64(adapter->tx_queue.comp_ring.basePA); 2101 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
1788 tqc->ddPA = cpu_to_le64(virt_to_phys( 2102 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
1789 adapter->tx_queue.buf_info)); 2103 tqc->ddPA = cpu_to_le64(virt_to_phys(tq->buf_info));
1790 tqc->txRingSize = cpu_to_le32(adapter->tx_queue.tx_ring.size); 2104 tqc->txRingSize = cpu_to_le32(tq->tx_ring.size);
1791 tqc->dataRingSize = cpu_to_le32(adapter->tx_queue.data_ring.size); 2105 tqc->dataRingSize = cpu_to_le32(tq->data_ring.size);
1792 tqc->compRingSize = cpu_to_le32(adapter->tx_queue.comp_ring.size); 2106 tqc->compRingSize = cpu_to_le32(tq->comp_ring.size);
1793 tqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_tx_buf_info) * 2107 tqc->ddLen = cpu_to_le32(
1794 tqc->txRingSize); 2108 sizeof(struct vmxnet3_tx_buf_info) *
1795 tqc->intrIdx = adapter->tx_queue.comp_ring.intr_idx; 2109 tqc->txRingSize);
2110 tqc->intrIdx = tq->comp_ring.intr_idx;
2111 }
1796 2112
1797 /* rx queue settings */ 2113 /* rx queue settings */
1798 devRead->misc.numRxQueues = 1; 2114 devRead->misc.numRxQueues = adapter->num_rx_queues;
1799 rqc = &adapter->rqd_start->conf; 2115 for (i = 0; i < adapter->num_rx_queues; i++) {
1800 rqc->rxRingBasePA[0] = cpu_to_le64(adapter->rx_queue.rx_ring[0].basePA); 2116 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1801 rqc->rxRingBasePA[1] = cpu_to_le64(adapter->rx_queue.rx_ring[1].basePA); 2117 rqc = &adapter->rqd_start[i].conf;
1802 rqc->compRingBasePA = cpu_to_le64(adapter->rx_queue.comp_ring.basePA); 2118 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
1803 rqc->ddPA = cpu_to_le64(virt_to_phys( 2119 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
1804 adapter->rx_queue.buf_info)); 2120 rqc->compRingBasePA = cpu_to_le64(rq->comp_ring.basePA);
1805 rqc->rxRingSize[0] = cpu_to_le32(adapter->rx_queue.rx_ring[0].size); 2121 rqc->ddPA = cpu_to_le64(virt_to_phys(
1806 rqc->rxRingSize[1] = cpu_to_le32(adapter->rx_queue.rx_ring[1].size); 2122 rq->buf_info));
1807 rqc->compRingSize = cpu_to_le32(adapter->rx_queue.comp_ring.size); 2123 rqc->rxRingSize[0] = cpu_to_le32(rq->rx_ring[0].size);
1808 rqc->ddLen = cpu_to_le32(sizeof(struct vmxnet3_rx_buf_info) * 2124 rqc->rxRingSize[1] = cpu_to_le32(rq->rx_ring[1].size);
1809 (rqc->rxRingSize[0] + rqc->rxRingSize[1])); 2125 rqc->compRingSize = cpu_to_le32(rq->comp_ring.size);
1810 rqc->intrIdx = adapter->rx_queue.comp_ring.intr_idx; 2126 rqc->ddLen = cpu_to_le32(
2127 sizeof(struct vmxnet3_rx_buf_info) *
2128 (rqc->rxRingSize[0] +
2129 rqc->rxRingSize[1]));
2130 rqc->intrIdx = rq->comp_ring.intr_idx;
2131 }
2132
2133#ifdef VMXNET3_RSS
2134 memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2135
2136 if (adapter->rss) {
2137 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2138 devRead->misc.uptFeatures |= UPT1_F_RSS;
2139 devRead->misc.numRxQueues = adapter->num_rx_queues;
2140 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2141 UPT1_RSS_HASH_TYPE_IPV4 |
2142 UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2143 UPT1_RSS_HASH_TYPE_IPV6;
2144 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2145 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2146 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2147 get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
2148 for (i = 0; i < rssConf->indTableSize; i++)
2149 rssConf->indTable[i] = i % adapter->num_rx_queues;
2150
2151 devRead->rssConfDesc.confVer = 1;
2152 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2153 devRead->rssConfDesc.confPA = virt_to_phys(rssConf);
2154 }
2155
2156#endif /* VMXNET3_RSS */
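For illustration of the indirection-table fill above (the queue count is assumed; the table size follows from VMXNET3_RSS_IND_TABLE_SIZE):

/*
 * With num_rx_queues == 4 and a 32-entry indirection table, the loop yields
 * indTable = { 0, 1, 2, 3, 0, 1, 2, 3, ... }, spreading RSS hash buckets
 * evenly across the rx queues.
 */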
1811 2157
1812 /* intr settings */ 2158 /* intr settings */
1813 devRead->intrConf.autoMask = adapter->intr.mask_mode == 2159 devRead->intrConf.autoMask = adapter->intr.mask_mode ==
@@ -1829,18 +2175,18 @@ vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
1829int 2175int
1830vmxnet3_activate_dev(struct vmxnet3_adapter *adapter) 2176vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1831{ 2177{
1832 int err; 2178 int err, i;
1833 u32 ret; 2179 u32 ret;
1834 2180
1835 dev_dbg(&adapter->netdev->dev, 2181 dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
1836 "%s: skb_buf_size %d, rx_buf_per_pkt %d, ring sizes" 2182 " ring sizes %u %u %u\n", adapter->netdev->name,
1837 " %u %u %u\n", adapter->netdev->name, adapter->skb_buf_size, 2183 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
1838 adapter->rx_buf_per_pkt, adapter->tx_queue.tx_ring.size, 2184 adapter->tx_queue[0].tx_ring.size,
1839 adapter->rx_queue.rx_ring[0].size, 2185 adapter->rx_queue[0].rx_ring[0].size,
1840 adapter->rx_queue.rx_ring[1].size); 2186 adapter->rx_queue[0].rx_ring[1].size);
1841 2187
1842 vmxnet3_tq_init(&adapter->tx_queue, adapter); 2188 vmxnet3_tq_init_all(adapter);
1843 err = vmxnet3_rq_init(&adapter->rx_queue, adapter); 2189 err = vmxnet3_rq_init_all(adapter);
1844 if (err) { 2190 if (err) {
1845 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n", 2191 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
1846 adapter->netdev->name, err); 2192 adapter->netdev->name, err);
@@ -1870,10 +2216,15 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1870 err = -EINVAL; 2216 err = -EINVAL;
1871 goto activate_err; 2217 goto activate_err;
1872 } 2218 }
1873 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD, 2219
1874 adapter->rx_queue.rx_ring[0].next2fill); 2220 for (i = 0; i < adapter->num_rx_queues; i++) {
1875 VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_RXPROD2, 2221 VMXNET3_WRITE_BAR0_REG(adapter,
1876 adapter->rx_queue.rx_ring[1].next2fill); 2222 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2223 adapter->rx_queue[i].rx_ring[0].next2fill);
2224 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2225 (i * VMXNET3_REG_ALIGN)),
2226 adapter->rx_queue[i].rx_ring[1].next2fill);
2227 }
1877 2228
1878 	/* Apply the rx filter settings last. */	2229 	/* Apply the rx filter settings last. */
1879 vmxnet3_set_mc(adapter->netdev); 2230 vmxnet3_set_mc(adapter->netdev);
@@ -1883,8 +2234,8 @@ vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
1883 * tx queue if the link is up. 2234 * tx queue if the link is up.
1884 */ 2235 */
1885 vmxnet3_check_link(adapter, true); 2236 vmxnet3_check_link(adapter, true);
1886 2237 for (i = 0; i < adapter->num_rx_queues; i++)
1887 napi_enable(&adapter->napi); 2238 napi_enable(&adapter->rx_queue[i].napi);
1888 vmxnet3_enable_all_intrs(adapter); 2239 vmxnet3_enable_all_intrs(adapter);
1889 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state); 2240 clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
1890 return 0; 2241 return 0;
@@ -1896,7 +2247,7 @@ activate_err:
1896irq_err: 2247irq_err:
1897rq_err: 2248rq_err:
1898 /* free up buffers we allocated */ 2249 /* free up buffers we allocated */
1899 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); 2250 vmxnet3_rq_cleanup_all(adapter);
1900 return err; 2251 return err;
1901} 2252}
1902 2253
@@ -1911,6 +2262,7 @@ vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
1911int 2262int
1912vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter) 2263vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1913{ 2264{
2265 int i;
1914 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state)) 2266 if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
1915 return 0; 2267 return 0;
1916 2268
@@ -1919,13 +2271,14 @@ vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
1919 VMXNET3_CMD_QUIESCE_DEV); 2271 VMXNET3_CMD_QUIESCE_DEV);
1920 vmxnet3_disable_all_intrs(adapter); 2272 vmxnet3_disable_all_intrs(adapter);
1921 2273
1922 napi_disable(&adapter->napi); 2274 for (i = 0; i < adapter->num_rx_queues; i++)
2275 napi_disable(&adapter->rx_queue[i].napi);
1923 netif_tx_disable(adapter->netdev); 2276 netif_tx_disable(adapter->netdev);
1924 adapter->link_speed = 0; 2277 adapter->link_speed = 0;
1925 netif_carrier_off(adapter->netdev); 2278 netif_carrier_off(adapter->netdev);
1926 2279
1927 vmxnet3_tq_cleanup(&adapter->tx_queue, adapter); 2280 vmxnet3_tq_cleanup_all(adapter);
1928 vmxnet3_rq_cleanup(&adapter->rx_queue, adapter); 2281 vmxnet3_rq_cleanup_all(adapter);
1929 vmxnet3_free_irqs(adapter); 2282 vmxnet3_free_irqs(adapter);
1930 return 0; 2283 return 0;
1931} 2284}
@@ -2047,7 +2400,9 @@ vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2047static void 2400static void
2048vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter) 2401vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2049{ 2402{
2050 size_t sz; 2403 size_t sz, i, ring0_size, ring1_size, comp_size;
2404 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2405
2051 2406
2052 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE - 2407 if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2053 VMXNET3_MAX_ETH_HDR_SIZE) { 2408 VMXNET3_MAX_ETH_HDR_SIZE) {
@@ -2069,11 +2424,19 @@ vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2069 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN 2424 * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2070 */ 2425 */
2071 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN; 2426 sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2072 adapter->rx_queue.rx_ring[0].size = (adapter->rx_queue.rx_ring[0].size + 2427 ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2073 sz - 1) / sz * sz; 2428 ring0_size = (ring0_size + sz - 1) / sz * sz;
2074 adapter->rx_queue.rx_ring[0].size = min_t(u32, 2429 ring0_size = min_t(u32, rq->rx_ring[0].size, VMXNET3_RX_RING_MAX_SIZE /
2075 adapter->rx_queue.rx_ring[0].size, 2430 sz * sz);
2076 VMXNET3_RX_RING_MAX_SIZE / sz * sz); 2431 ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2432 comp_size = ring0_size + ring1_size;
2433
2434 for (i = 0; i < adapter->num_rx_queues; i++) {
2435 rq = &adapter->rx_queue[i];
2436 rq->rx_ring[0].size = ring0_size;
2437 rq->rx_ring[1].size = ring1_size;
2438 rq->comp_ring.size = comp_size;
2439 }
2077} 2440}
2078 2441
2079 2442
@@ -2081,29 +2444,53 @@ int
2081vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size, 2444vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2082 u32 rx_ring_size, u32 rx_ring2_size) 2445 u32 rx_ring_size, u32 rx_ring2_size)
2083{ 2446{
2084 int err; 2447 int err = 0, i;
2085 2448
2086 adapter->tx_queue.tx_ring.size = tx_ring_size; 2449 for (i = 0; i < adapter->num_tx_queues; i++) {
2087 adapter->tx_queue.data_ring.size = tx_ring_size; 2450 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2088 adapter->tx_queue.comp_ring.size = tx_ring_size; 2451 tq->tx_ring.size = tx_ring_size;
2089 adapter->tx_queue.shared = &adapter->tqd_start->ctrl; 2452 tq->data_ring.size = tx_ring_size;
2090 adapter->tx_queue.stopped = true; 2453 tq->comp_ring.size = tx_ring_size;
2091 err = vmxnet3_tq_create(&adapter->tx_queue, adapter); 2454 tq->shared = &adapter->tqd_start[i].ctrl;
2092 if (err) 2455 tq->stopped = true;
2093 return err; 2456 tq->adapter = adapter;
2457 tq->qid = i;
2458 err = vmxnet3_tq_create(tq, adapter);
2459 /*
2460		 * Too late to change num_tx_queues; we cannot fall back to
2461		 * fewer queues than we asked for.
2462 */
2463 if (err)
2464 goto queue_err;
2465 }
2094 2466
2095 adapter->rx_queue.rx_ring[0].size = rx_ring_size; 2467 adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2096 adapter->rx_queue.rx_ring[1].size = rx_ring2_size; 2468 adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2097 vmxnet3_adjust_rx_ring_size(adapter); 2469 vmxnet3_adjust_rx_ring_size(adapter);
2098 adapter->rx_queue.comp_ring.size = adapter->rx_queue.rx_ring[0].size + 2470 for (i = 0; i < adapter->num_rx_queues; i++) {
2099 adapter->rx_queue.rx_ring[1].size; 2471 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2100 adapter->rx_queue.qid = 0; 2472 /* qid and qid2 for rx queues will be assigned later when num
2101 adapter->rx_queue.qid2 = 1; 2473 * of rx queues is finalized after allocating intrs */
2102 adapter->rx_queue.shared = &adapter->rqd_start->ctrl; 2474 rq->shared = &adapter->rqd_start[i].ctrl;
2103 err = vmxnet3_rq_create(&adapter->rx_queue, adapter); 2475 rq->adapter = adapter;
2104 if (err) 2476 err = vmxnet3_rq_create(rq, adapter);
2105 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2477 if (err) {
2106 2478 if (i == 0) {
2479				printk(KERN_ERR "Could not allocate any rx "
2480				       "queues. Aborting.\n");
2481 goto queue_err;
2482 } else {
2483 printk(KERN_INFO "Number of rx queues changed "
2484				       "to %d.\n", i);
2485 adapter->num_rx_queues = i;
2486 err = 0;
2487 break;
2488 }
2489 }
2490 }
2491 return err;
2492queue_err:
2493 vmxnet3_tq_destroy_all(adapter);
2107 return err; 2494 return err;
2108} 2495}
2109 2496
@@ -2111,11 +2498,12 @@ static int
2111vmxnet3_open(struct net_device *netdev) 2498vmxnet3_open(struct net_device *netdev)
2112{ 2499{
2113 struct vmxnet3_adapter *adapter; 2500 struct vmxnet3_adapter *adapter;
2114 int err; 2501 int err, i;
2115 2502
2116 adapter = netdev_priv(netdev); 2503 adapter = netdev_priv(netdev);
2117 2504
2118 spin_lock_init(&adapter->tx_queue.tx_lock); 2505 for (i = 0; i < adapter->num_tx_queues; i++)
2506 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2119 2507
2120 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, 2508 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2121 VMXNET3_DEF_RX_RING_SIZE, 2509 VMXNET3_DEF_RX_RING_SIZE,
@@ -2130,8 +2518,8 @@ vmxnet3_open(struct net_device *netdev)
2130 return 0; 2518 return 0;
2131 2519
2132activate_err: 2520activate_err:
2133 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2521 vmxnet3_rq_destroy_all(adapter);
2134 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2522 vmxnet3_tq_destroy_all(adapter);
2135queue_err: 2523queue_err:
2136 return err; 2524 return err;
2137} 2525}
@@ -2151,8 +2539,8 @@ vmxnet3_close(struct net_device *netdev)
2151 2539
2152 vmxnet3_quiesce_dev(adapter); 2540 vmxnet3_quiesce_dev(adapter);
2153 2541
2154 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2542 vmxnet3_rq_destroy_all(adapter);
2155 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 2543 vmxnet3_tq_destroy_all(adapter);
2156 2544
2157 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 2545 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2158 2546
@@ -2164,6 +2552,8 @@ vmxnet3_close(struct net_device *netdev)
2164void 2552void
2165vmxnet3_force_close(struct vmxnet3_adapter *adapter) 2553vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2166{ 2554{
2555 int i;
2556
2167 /* 2557 /*
2168 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise 2558 * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2169 * vmxnet3_close() will deadlock. 2559 * vmxnet3_close() will deadlock.
@@ -2171,7 +2561,8 @@ vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2171 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state)); 2561 BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2172 2562
2173 /* we need to enable NAPI, otherwise dev_close will deadlock */ 2563 /* we need to enable NAPI, otherwise dev_close will deadlock */
2174 napi_enable(&adapter->napi); 2564 for (i = 0; i < adapter->num_rx_queues; i++)
2565 napi_enable(&adapter->rx_queue[i].napi);
2175 dev_close(adapter->netdev); 2566 dev_close(adapter->netdev);
2176} 2567}
2177 2568
@@ -2202,14 +2593,11 @@ vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2202 vmxnet3_reset_dev(adapter); 2593 vmxnet3_reset_dev(adapter);
2203 2594
2204 /* we need to re-create the rx queue based on the new mtu */ 2595 /* we need to re-create the rx queue based on the new mtu */
2205 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 2596 vmxnet3_rq_destroy_all(adapter);
2206 vmxnet3_adjust_rx_ring_size(adapter); 2597 vmxnet3_adjust_rx_ring_size(adapter);
2207 adapter->rx_queue.comp_ring.size = 2598 err = vmxnet3_rq_create_all(adapter);
2208 adapter->rx_queue.rx_ring[0].size +
2209 adapter->rx_queue.rx_ring[1].size;
2210 err = vmxnet3_rq_create(&adapter->rx_queue, adapter);
2211 if (err) { 2599 if (err) {
2212 printk(KERN_ERR "%s: failed to re-create rx queue," 2600 printk(KERN_ERR "%s: failed to re-create rx queues,"
2213 " error %d. Closing it.\n", netdev->name, err); 2601 " error %d. Closing it.\n", netdev->name, err);
2214 goto out; 2602 goto out;
2215 } 2603 }
@@ -2274,6 +2662,55 @@ vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2274 mac[5] = (tmp >> 8) & 0xff; 2662 mac[5] = (tmp >> 8) & 0xff;
2275} 2663}
2276 2664
2665#ifdef CONFIG_PCI_MSI
2666
2667/*
2668 * Enable MSIx vectors.
2669 * Returns :
2670 * 0 on successful enabling of required vectors,
2671 *	VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of required
2672 *	vectors could be enabled,
2673 *	otherwise the number of vectors that could be enabled (this number is
2674 *	smaller than VMXNET3_LINUX_MIN_MSIX_VECT)
2675 */
2676
2677static int
2678vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2679 int vectors)
2680{
2681 int err = 0, vector_threshold;
2682 vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
2683
2684 while (vectors >= vector_threshold) {
2685 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2686 vectors);
2687 if (!err) {
2688 adapter->intr.num_intrs = vectors;
2689 return 0;
2690 } else if (err < 0) {
2691 printk(KERN_ERR "Failed to enable MSI-X for %s, error"
2692 " %d\n", adapter->netdev->name, err);
2693 vectors = 0;
2694 } else if (err < vector_threshold) {
2695 break;
2696 } else {
2697			/* If we fail to enable the required number of MSI-X vectors,
2698			 * try enabling 3 of them: one each for rx, tx and event
2699 */
2700 vectors = vector_threshold;
2701 printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
2702 " %d instead\n", vectors, adapter->netdev->name,
2703 vector_threshold);
2704 }
2705 }
2706
2707	printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
2708	       " is lower than min threshold required.\n");
2709 return err;
2710}
2711
2712
2713#endif /* CONFIG_PCI_MSI */
2277 2714
2278static void 2715static void
2279vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter) 2716vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
@@ -2293,16 +2730,47 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2293 2730
2294#ifdef CONFIG_PCI_MSI 2731#ifdef CONFIG_PCI_MSI
2295 if (adapter->intr.type == VMXNET3_IT_MSIX) { 2732 if (adapter->intr.type == VMXNET3_IT_MSIX) {
2296 int err; 2733 int vector, err = 0;
2297 2734
2298 adapter->intr.msix_entries[0].entry = 0; 2735 adapter->intr.num_intrs = (adapter->share_intr ==
2299 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries, 2736 VMXNET3_INTR_TXSHARE) ? 1 :
2300 VMXNET3_LINUX_MAX_MSIX_VECT); 2737 adapter->num_tx_queues;
2301 if (!err) { 2738 adapter->intr.num_intrs += (adapter->share_intr ==
2302 adapter->intr.num_intrs = 1; 2739 VMXNET3_INTR_BUDDYSHARE) ? 0 :
2303 adapter->intr.type = VMXNET3_IT_MSIX; 2740 adapter->num_rx_queues;
2741 adapter->intr.num_intrs += 1; /* for link event */
2742
2743 adapter->intr.num_intrs = (adapter->intr.num_intrs >
2744 VMXNET3_LINUX_MIN_MSIX_VECT
2745 ? adapter->intr.num_intrs :
2746 VMXNET3_LINUX_MIN_MSIX_VECT);
2747
2748 for (vector = 0; vector < adapter->intr.num_intrs; vector++)
2749 adapter->intr.msix_entries[vector].entry = vector;
2750
2751 err = vmxnet3_acquire_msix_vectors(adapter,
2752 adapter->intr.num_intrs);
2753 /* If we cannot allocate one MSIx vector per queue
2754 * then limit the number of rx queues to 1
2755 */
2756 if (err == VMXNET3_LINUX_MIN_MSIX_VECT) {
2757 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
2758 || adapter->num_rx_queues != 2) {
2759 adapter->share_intr = VMXNET3_INTR_TXSHARE;
2760 printk(KERN_ERR "Number of rx queues : 1\n");
2761 adapter->num_rx_queues = 1;
2762 adapter->intr.num_intrs =
2763 VMXNET3_LINUX_MIN_MSIX_VECT;
2764 }
2304 return; 2765 return;
2305 } 2766 }
2767 if (!err)
2768 return;
2769
2770 /* If we cannot allocate MSIx vectors use only one rx queue */
2771		printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
2772 "#rx queues : 1, try MSI\n", adapter->netdev->name, err);
2773
2306 adapter->intr.type = VMXNET3_IT_MSI; 2774 adapter->intr.type = VMXNET3_IT_MSI;
2307 } 2775 }
2308 2776
@@ -2310,12 +2778,15 @@ vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2310 int err; 2778 int err;
2311 err = pci_enable_msi(adapter->pdev); 2779 err = pci_enable_msi(adapter->pdev);
2312 if (!err) { 2780 if (!err) {
2781 adapter->num_rx_queues = 1;
2313 adapter->intr.num_intrs = 1; 2782 adapter->intr.num_intrs = 1;
2314 return; 2783 return;
2315 } 2784 }
2316 } 2785 }
2317#endif /* CONFIG_PCI_MSI */ 2786#endif /* CONFIG_PCI_MSI */
2318 2787
2788 adapter->num_rx_queues = 1;
2789 printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
2319 adapter->intr.type = VMXNET3_IT_INTX; 2790 adapter->intr.type = VMXNET3_IT_INTX;
2320 2791
2321 /* INT-X related setting */ 2792 /* INT-X related setting */
@@ -2343,6 +2814,7 @@ vmxnet3_tx_timeout(struct net_device *netdev)
2343 2814
2344 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name); 2815 printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2345 schedule_work(&adapter->work); 2816 schedule_work(&adapter->work);
2817 netif_wake_queue(adapter->netdev);
2346} 2818}
2347 2819
2348 2820
@@ -2399,8 +2871,29 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2399 struct net_device *netdev; 2871 struct net_device *netdev;
2400 struct vmxnet3_adapter *adapter; 2872 struct vmxnet3_adapter *adapter;
2401 u8 mac[ETH_ALEN]; 2873 u8 mac[ETH_ALEN];
2874 int size;
2875 int num_tx_queues;
2876 int num_rx_queues;
2877
2878#ifdef VMXNET3_RSS
2879 if (enable_mq)
2880 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2881 (int)num_online_cpus());
2882 else
2883#endif
2884 num_rx_queues = 1;
2885
2886 if (enable_mq)
2887 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2888 (int)num_online_cpus());
2889 else
2890 num_tx_queues = 1;
2891
2892 netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
2893 max(num_tx_queues, num_rx_queues));
2894 printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
2895 num_tx_queues, num_rx_queues);
2402 2896
2403 netdev = alloc_etherdev(sizeof(struct vmxnet3_adapter));
2404 if (!netdev) { 2897 if (!netdev) {
2405 printk(KERN_ERR "Failed to alloc ethernet device for adapter " 2898 printk(KERN_ERR "Failed to alloc ethernet device for adapter "
2406 "%s\n", pci_name(pdev)); 2899 "%s\n", pci_name(pdev));
@@ -2422,9 +2915,12 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2422 goto err_alloc_shared; 2915 goto err_alloc_shared;
2423 } 2916 }
2424 2917
2425 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, 2918 adapter->num_rx_queues = num_rx_queues;
2426 sizeof(struct Vmxnet3_TxQueueDesc) + 2919 adapter->num_tx_queues = num_tx_queues;
2427 sizeof(struct Vmxnet3_RxQueueDesc), 2920
2921 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2922 size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2923 adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2428 &adapter->queue_desc_pa); 2924 &adapter->queue_desc_pa);
2429 2925
2430 if (!adapter->tqd_start) { 2926 if (!adapter->tqd_start) {
@@ -2433,8 +2929,8 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2433 err = -ENOMEM; 2929 err = -ENOMEM;
2434 goto err_alloc_queue_desc; 2930 goto err_alloc_queue_desc;
2435 } 2931 }
2436 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start 2932 adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2437 + 1); 2933 adapter->num_tx_queues);
2438 2934
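A sketch of the resulting queue-descriptor layout (queue counts assumed for illustration):

/*
 * The single pci_alloc_consistent() block above holds all queue descriptors
 * back to back, e.g. with num_tx_queues == 4 and num_rx_queues == 4:
 *   tqd_start[0..3]: 4 * sizeof(struct Vmxnet3_TxQueueDesc)
 *   rqd_start[0..3]: 4 * sizeof(struct Vmxnet3_RxQueueDesc)
 * which is why rqd_start is simply tqd_start + num_tx_queues.
 */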
2439 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL); 2935 adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2440 if (adapter->pm_conf == NULL) { 2936 if (adapter->pm_conf == NULL) {
@@ -2444,6 +2940,17 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2444 goto err_alloc_pm; 2940 goto err_alloc_pm;
2445 } 2941 }
2446 2942
2943#ifdef VMXNET3_RSS
2944
2945 adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2946 if (adapter->rss_conf == NULL) {
2947 printk(KERN_ERR "Failed to allocate memory for %s\n",
2948 pci_name(pdev));
2949 err = -ENOMEM;
2950 goto err_alloc_rss;
2951 }
2952#endif /* VMXNET3_RSS */
2953
2447 err = vmxnet3_alloc_pci_resources(adapter, &dma64); 2954 err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2448 if (err < 0) 2955 if (err < 0)
2449 goto err_alloc_pci; 2956 goto err_alloc_pci;
@@ -2471,18 +2978,48 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2471 vmxnet3_declare_features(adapter, dma64); 2978 vmxnet3_declare_features(adapter, dma64);
2472 2979
2473 adapter->dev_number = atomic_read(&devices_found); 2980 adapter->dev_number = atomic_read(&devices_found);
2981
2982 adapter->share_intr = irq_share_mode;
2983 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
2984 adapter->num_tx_queues != adapter->num_rx_queues)
2985 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
2986
2474 vmxnet3_alloc_intr_resources(adapter); 2987 vmxnet3_alloc_intr_resources(adapter);
2475 2988
2989#ifdef VMXNET3_RSS
2990 if (adapter->num_rx_queues > 1 &&
2991 adapter->intr.type == VMXNET3_IT_MSIX) {
2992 adapter->rss = true;
2993 printk(KERN_INFO "RSS is enabled.\n");
2994 } else {
2995 adapter->rss = false;
2996 }
2997#endif
2998
2476 vmxnet3_read_mac_addr(adapter, mac); 2999 vmxnet3_read_mac_addr(adapter, mac);
2477 memcpy(netdev->dev_addr, mac, netdev->addr_len); 3000 memcpy(netdev->dev_addr, mac, netdev->addr_len);
2478 3001
2479 netdev->netdev_ops = &vmxnet3_netdev_ops; 3002 netdev->netdev_ops = &vmxnet3_netdev_ops;
2480 netdev->watchdog_timeo = 5 * HZ;
2481 vmxnet3_set_ethtool_ops(netdev); 3003 vmxnet3_set_ethtool_ops(netdev);
3004 netdev->watchdog_timeo = 5 * HZ;
2482 3005
2483 INIT_WORK(&adapter->work, vmxnet3_reset_work); 3006 INIT_WORK(&adapter->work, vmxnet3_reset_work);
2484 3007
2485 netif_napi_add(netdev, &adapter->napi, vmxnet3_poll, 64); 3008 if (adapter->intr.type == VMXNET3_IT_MSIX) {
3009 int i;
3010 for (i = 0; i < adapter->num_rx_queues; i++) {
3011 netif_napi_add(adapter->netdev,
3012 &adapter->rx_queue[i].napi,
3013 vmxnet3_poll_rx_only, 64);
3014 }
3015 } else {
3016 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3017 vmxnet3_poll, 64);
3018 }
3019
3020 netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3021 netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3022
2486 SET_NETDEV_DEV(netdev, &pdev->dev); 3023 SET_NETDEV_DEV(netdev, &pdev->dev);
2487 err = register_netdev(netdev); 3024 err = register_netdev(netdev);
2488 3025
@@ -2502,11 +3039,14 @@ err_register:
2502err_ver: 3039err_ver:
2503 vmxnet3_free_pci_resources(adapter); 3040 vmxnet3_free_pci_resources(adapter);
2504err_alloc_pci: 3041err_alloc_pci:
3042#ifdef VMXNET3_RSS
3043 kfree(adapter->rss_conf);
3044err_alloc_rss:
3045#endif
2505 kfree(adapter->pm_conf); 3046 kfree(adapter->pm_conf);
2506err_alloc_pm: 3047err_alloc_pm:
2507 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + 3048 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
2508 sizeof(struct Vmxnet3_RxQueueDesc), 3049 adapter->queue_desc_pa);
2509 adapter->tqd_start, adapter->queue_desc_pa);
2510err_alloc_queue_desc: 3050err_alloc_queue_desc:
2511 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3051 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2512 adapter->shared, adapter->shared_pa); 3052 adapter->shared, adapter->shared_pa);
@@ -2522,17 +3062,32 @@ vmxnet3_remove_device(struct pci_dev *pdev)
2522{ 3062{
2523 struct net_device *netdev = pci_get_drvdata(pdev); 3063 struct net_device *netdev = pci_get_drvdata(pdev);
2524 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 3064 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3065 int size = 0;
3066 int num_rx_queues;
2525 3067
2526 flush_scheduled_work(); 3068#ifdef VMXNET3_RSS
3069 if (enable_mq)
3070 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3071 (int)num_online_cpus());
3072 else
3073#endif
3074 num_rx_queues = 1;
3075
3076 cancel_work_sync(&adapter->work);
2527 3077
2528 unregister_netdev(netdev); 3078 unregister_netdev(netdev);
2529 3079
2530 vmxnet3_free_intr_resources(adapter); 3080 vmxnet3_free_intr_resources(adapter);
2531 vmxnet3_free_pci_resources(adapter); 3081 vmxnet3_free_pci_resources(adapter);
3082#ifdef VMXNET3_RSS
3083 kfree(adapter->rss_conf);
3084#endif
2532 kfree(adapter->pm_conf); 3085 kfree(adapter->pm_conf);
2533 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_TxQueueDesc) + 3086
2534 sizeof(struct Vmxnet3_RxQueueDesc), 3087 size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2535 adapter->tqd_start, adapter->queue_desc_pa); 3088 size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3089 pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3090 adapter->queue_desc_pa);
2536 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared), 3091 pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
2537 adapter->shared, adapter->shared_pa); 3092 adapter->shared, adapter->shared_pa);
2538 free_netdev(netdev); 3093 free_netdev(netdev);
@@ -2563,7 +3118,7 @@ vmxnet3_suspend(struct device *device)
2563 vmxnet3_free_intr_resources(adapter); 3118 vmxnet3_free_intr_resources(adapter);
2564 3119
2565 netif_device_detach(netdev); 3120 netif_device_detach(netdev);
2566 netif_stop_queue(netdev); 3121 netif_tx_stop_all_queues(netdev);
2567 3122
2568 /* Create wake-up filters. */ 3123 /* Create wake-up filters. */
2569 pmConf = adapter->pm_conf; 3124 pmConf = adapter->pm_conf;
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index b79070bcc92e..8e17fc8a7fe7 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -151,44 +151,42 @@ vmxnet3_get_stats(struct net_device *netdev)
151 struct UPT1_TxStats *devTxStats; 151 struct UPT1_TxStats *devTxStats;
152 struct UPT1_RxStats *devRxStats; 152 struct UPT1_RxStats *devRxStats;
153 struct net_device_stats *net_stats = &netdev->stats; 153 struct net_device_stats *net_stats = &netdev->stats;
154 int i;
154 155
155 adapter = netdev_priv(netdev); 156 adapter = netdev_priv(netdev);
156 157
157 /* Collect the dev stats into the shared area */ 158 /* Collect the dev stats into the shared area */
158 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 159 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
159 160
160 /* Assuming that we have a single queue device */
161 devTxStats = &adapter->tqd_start->stats;
162 devRxStats = &adapter->rqd_start->stats;
163
164 /* Get access to the driver stats per queue */
165 drvTxStats = &adapter->tx_queue.stats;
166 drvRxStats = &adapter->rx_queue.stats;
167
168 memset(net_stats, 0, sizeof(*net_stats)); 161 memset(net_stats, 0, sizeof(*net_stats));
162 for (i = 0; i < adapter->num_tx_queues; i++) {
163 devTxStats = &adapter->tqd_start[i].stats;
164 drvTxStats = &adapter->tx_queue[i].stats;
165 net_stats->tx_packets += devTxStats->ucastPktsTxOK +
166 devTxStats->mcastPktsTxOK +
167 devTxStats->bcastPktsTxOK;
168 net_stats->tx_bytes += devTxStats->ucastBytesTxOK +
169 devTxStats->mcastBytesTxOK +
170 devTxStats->bcastBytesTxOK;
171 net_stats->tx_errors += devTxStats->pktsTxError;
172 net_stats->tx_dropped += drvTxStats->drop_total;
173 }
169 174
170 net_stats->rx_packets = devRxStats->ucastPktsRxOK + 175 for (i = 0; i < adapter->num_rx_queues; i++) {
171 devRxStats->mcastPktsRxOK + 176 devRxStats = &adapter->rqd_start[i].stats;
172 devRxStats->bcastPktsRxOK; 177 drvRxStats = &adapter->rx_queue[i].stats;
173 178 net_stats->rx_packets += devRxStats->ucastPktsRxOK +
174 net_stats->tx_packets = devTxStats->ucastPktsTxOK + 179 devRxStats->mcastPktsRxOK +
175 devTxStats->mcastPktsTxOK + 180 devRxStats->bcastPktsRxOK;
176 devTxStats->bcastPktsTxOK;
177
178 net_stats->rx_bytes = devRxStats->ucastBytesRxOK +
179 devRxStats->mcastBytesRxOK +
180 devRxStats->bcastBytesRxOK;
181
182 net_stats->tx_bytes = devTxStats->ucastBytesTxOK +
183 devTxStats->mcastBytesTxOK +
184 devTxStats->bcastBytesTxOK;
185 181
186 net_stats->rx_errors = devRxStats->pktsRxError; 182 net_stats->rx_bytes += devRxStats->ucastBytesRxOK +
187 net_stats->tx_errors = devTxStats->pktsTxError; 183 devRxStats->mcastBytesRxOK +
188 net_stats->rx_dropped = drvRxStats->drop_total; 184 devRxStats->bcastBytesRxOK;
189 net_stats->tx_dropped = drvTxStats->drop_total;
190 net_stats->multicast = devRxStats->mcastPktsRxOK;
191 185
186 net_stats->rx_errors += devRxStats->pktsRxError;
187 net_stats->rx_dropped += drvRxStats->drop_total;
188 net_stats->multicast += devRxStats->mcastPktsRxOK;
189 }
192 return net_stats; 190 return net_stats;
193} 191}
194 192
@@ -307,24 +305,26 @@ vmxnet3_get_ethtool_stats(struct net_device *netdev,
307 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 305 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
308 u8 *base; 306 u8 *base;
309 int i; 307 int i;
308 int j = 0;
310 309
311 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS); 310 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_STATS);
312 311
313 /* this does assume each counter is 64-bit wide */ 312 /* this does assume each counter is 64-bit wide */
313/* TODO change this for multiple queues */
314 314
315 base = (u8 *)&adapter->tqd_start->stats; 315 base = (u8 *)&adapter->tqd_start[j].stats;
316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++) 316 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_dev_stats); i++)
317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset); 317 *buf++ = *(u64 *)(base + vmxnet3_tq_dev_stats[i].offset);
318 318
319 base = (u8 *)&adapter->tx_queue.stats; 319 base = (u8 *)&adapter->tx_queue[j].stats;
320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++) 320 for (i = 0; i < ARRAY_SIZE(vmxnet3_tq_driver_stats); i++)
321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset); 321 *buf++ = *(u64 *)(base + vmxnet3_tq_driver_stats[i].offset);
322 322
323 base = (u8 *)&adapter->rqd_start->stats; 323 base = (u8 *)&adapter->rqd_start[j].stats;
324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++) 324 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_dev_stats); i++)
325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset); 325 *buf++ = *(u64 *)(base + vmxnet3_rq_dev_stats[i].offset);
326 326
327 base = (u8 *)&adapter->rx_queue.stats; 327 base = (u8 *)&adapter->rx_queue[j].stats;
328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++) 328 for (i = 0; i < ARRAY_SIZE(vmxnet3_rq_driver_stats); i++)
329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset); 329 *buf++ = *(u64 *)(base + vmxnet3_rq_driver_stats[i].offset);
330 330
@@ -339,6 +339,7 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
339{ 339{
340 struct vmxnet3_adapter *adapter = netdev_priv(netdev); 340 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
341 u32 *buf = p; 341 u32 *buf = p;
342 int i = 0;
342 343
343 memset(p, 0, vmxnet3_get_regs_len(netdev)); 344 memset(p, 0, vmxnet3_get_regs_len(netdev));
344 345
@@ -347,28 +348,29 @@ vmxnet3_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *p)
347 /* Update vmxnet3_get_regs_len if we want to dump more registers */ 348 /* Update vmxnet3_get_regs_len if we want to dump more registers */
348 349
349 /* make each ring use multiple of 16 bytes */ 350 /* make each ring use multiple of 16 bytes */
350 buf[0] = adapter->tx_queue.tx_ring.next2fill; 351/* TODO change this for multiple queues */
351 buf[1] = adapter->tx_queue.tx_ring.next2comp; 352 buf[0] = adapter->tx_queue[i].tx_ring.next2fill;
352 buf[2] = adapter->tx_queue.tx_ring.gen; 353 buf[1] = adapter->tx_queue[i].tx_ring.next2comp;
354 buf[2] = adapter->tx_queue[i].tx_ring.gen;
353 buf[3] = 0; 355 buf[3] = 0;
354 356
355 buf[4] = adapter->tx_queue.comp_ring.next2proc; 357 buf[4] = adapter->tx_queue[i].comp_ring.next2proc;
356 buf[5] = adapter->tx_queue.comp_ring.gen; 358 buf[5] = adapter->tx_queue[i].comp_ring.gen;
357 buf[6] = adapter->tx_queue.stopped; 359 buf[6] = adapter->tx_queue[i].stopped;
358 buf[7] = 0; 360 buf[7] = 0;
359 361
360 buf[8] = adapter->rx_queue.rx_ring[0].next2fill; 362 buf[8] = adapter->rx_queue[i].rx_ring[0].next2fill;
361 buf[9] = adapter->rx_queue.rx_ring[0].next2comp; 363 buf[9] = adapter->rx_queue[i].rx_ring[0].next2comp;
362 buf[10] = adapter->rx_queue.rx_ring[0].gen; 364 buf[10] = adapter->rx_queue[i].rx_ring[0].gen;
363 buf[11] = 0; 365 buf[11] = 0;
364 366
365 buf[12] = adapter->rx_queue.rx_ring[1].next2fill; 367 buf[12] = adapter->rx_queue[i].rx_ring[1].next2fill;
366 buf[13] = adapter->rx_queue.rx_ring[1].next2comp; 368 buf[13] = adapter->rx_queue[i].rx_ring[1].next2comp;
367 buf[14] = adapter->rx_queue.rx_ring[1].gen; 369 buf[14] = adapter->rx_queue[i].rx_ring[1].gen;
368 buf[15] = 0; 370 buf[15] = 0;
369 371
370 buf[16] = adapter->rx_queue.comp_ring.next2proc; 372 buf[16] = adapter->rx_queue[i].comp_ring.next2proc;
371 buf[17] = adapter->rx_queue.comp_ring.gen; 373 buf[17] = adapter->rx_queue[i].comp_ring.gen;
372 buf[18] = 0; 374 buf[18] = 0;
373 buf[19] = 0; 375 buf[19] = 0;
374} 376}
@@ -435,8 +437,10 @@ vmxnet3_get_ringparam(struct net_device *netdev,
435 param->rx_mini_max_pending = 0; 437 param->rx_mini_max_pending = 0;
436 param->rx_jumbo_max_pending = 0; 438 param->rx_jumbo_max_pending = 0;
437 439
438 param->rx_pending = adapter->rx_queue.rx_ring[0].size; 440 param->rx_pending = adapter->rx_queue[0].rx_ring[0].size *
439 param->tx_pending = adapter->tx_queue.tx_ring.size; 441 adapter->num_rx_queues;
442 param->tx_pending = adapter->tx_queue[0].tx_ring.size *
443 adapter->num_tx_queues;
440 param->rx_mini_pending = 0; 444 param->rx_mini_pending = 0;
441 param->rx_jumbo_pending = 0; 445 param->rx_jumbo_pending = 0;
442} 446}
@@ -480,8 +484,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
480 sz) != 0) 484 sz) != 0)
481 return -EINVAL; 485 return -EINVAL;
482 486
483 if (new_tx_ring_size == adapter->tx_queue.tx_ring.size && 487 if (new_tx_ring_size == adapter->tx_queue[0].tx_ring.size &&
484 new_rx_ring_size == adapter->rx_queue.rx_ring[0].size) { 488 new_rx_ring_size == adapter->rx_queue[0].rx_ring[0].size) {
485 return 0; 489 return 0;
486 } 490 }
487 491
@@ -498,11 +502,12 @@ vmxnet3_set_ringparam(struct net_device *netdev,
498 502
499 /* recreate the rx queue and the tx queue based on the 503 /* recreate the rx queue and the tx queue based on the
500 * new sizes */ 504 * new sizes */
501 vmxnet3_tq_destroy(&adapter->tx_queue, adapter); 505 vmxnet3_tq_destroy_all(adapter);
502 vmxnet3_rq_destroy(&adapter->rx_queue, adapter); 506 vmxnet3_rq_destroy_all(adapter);
503 507
504 err = vmxnet3_create_queues(adapter, new_tx_ring_size, 508 err = vmxnet3_create_queues(adapter, new_tx_ring_size,
505 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE); 509 new_rx_ring_size, VMXNET3_DEF_RX_RING_SIZE);
510
506 if (err) { 511 if (err) {
507 /* failed, most likely because of OOM, try default 512 /* failed, most likely because of OOM, try default
508 * size */ 513 * size */
@@ -535,6 +540,66 @@ out:
535} 540}
536 541
537 542
543static int
544vmxnet3_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *info,
545 void *rules)
546{
547 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
548 switch (info->cmd) {
549 case ETHTOOL_GRXRINGS:
550 info->data = adapter->num_rx_queues;
551 return 0;
552 }
553 return -EOPNOTSUPP;
554}
555
556#ifdef VMXNET3_RSS
557static int
558vmxnet3_get_rss_indir(struct net_device *netdev,
559 struct ethtool_rxfh_indir *p)
560{
561 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
562 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
563 unsigned int n = min_t(unsigned int, p->size, rssConf->indTableSize);
564
565 p->size = rssConf->indTableSize;
566 while (n--)
567 p->ring_index[n] = rssConf->indTable[n];
568 return 0;
569
570}
571
572static int
573vmxnet3_set_rss_indir(struct net_device *netdev,
574 const struct ethtool_rxfh_indir *p)
575{
576 unsigned int i;
577 struct vmxnet3_adapter *adapter = netdev_priv(netdev);
578 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
579
580 if (p->size != rssConf->indTableSize)
581 return -EINVAL;
582 for (i = 0; i < rssConf->indTableSize; i++) {
583 /*
584 * Return with error code if any of the queue indices
585 * is out of range
586 */
587 if (p->ring_index[i] < 0 ||
588 p->ring_index[i] >= adapter->num_rx_queues)
589 return -EINVAL;
590 }
591
592 for (i = 0; i < rssConf->indTableSize; i++)
593 rssConf->indTable[i] = p->ring_index[i];
594
595 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
596 VMXNET3_CMD_UPDATE_RSSIDT);
597
598 return 0;
599
600}
601#endif
602
538static struct ethtool_ops vmxnet3_ethtool_ops = { 603static struct ethtool_ops vmxnet3_ethtool_ops = {
539 .get_settings = vmxnet3_get_settings, 604 .get_settings = vmxnet3_get_settings,
540 .get_drvinfo = vmxnet3_get_drvinfo, 605 .get_drvinfo = vmxnet3_get_drvinfo,
@@ -558,6 +623,11 @@ static struct ethtool_ops vmxnet3_ethtool_ops = {
558 .get_ethtool_stats = vmxnet3_get_ethtool_stats, 623 .get_ethtool_stats = vmxnet3_get_ethtool_stats,
559 .get_ringparam = vmxnet3_get_ringparam, 624 .get_ringparam = vmxnet3_get_ringparam,
560 .set_ringparam = vmxnet3_set_ringparam, 625 .set_ringparam = vmxnet3_set_ringparam,
626 .get_rxnfc = vmxnet3_get_rxnfc,
627#ifdef VMXNET3_RSS
628 .get_rxfh_indir = vmxnet3_get_rss_indir,
629 .set_rxfh_indir = vmxnet3_set_rss_indir,
630#endif
561}; 631};
562 632
563void vmxnet3_set_ethtool_ops(struct net_device *netdev) 633void vmxnet3_set_ethtool_ops(struct net_device *netdev)
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index edf228843afc..7fadeed37f03 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -68,11 +68,15 @@
68/* 68/*
69 * Version numbers 69 * Version numbers
70 */ 70 */
71#define VMXNET3_DRIVER_VERSION_STRING "1.0.14.0-k" 71#define VMXNET3_DRIVER_VERSION_STRING "1.0.16.0-k"
72 72
73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */	73/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
74#define VMXNET3_DRIVER_VERSION_NUM 0x01000E00 74#define VMXNET3_DRIVER_VERSION_NUM 0x01001000
75 75
76#if defined(CONFIG_PCI_MSI)
77 /* RSS only makes sense if MSI-X is supported. */
78 #define VMXNET3_RSS
79#endif
76 80
77/* 81/*
78 * Capabilities 82 * Capabilities
@@ -218,16 +222,19 @@ struct vmxnet3_tx_ctx {
218}; 222};
219 223
220struct vmxnet3_tx_queue { 224struct vmxnet3_tx_queue {
225 char name[IFNAMSIZ+8]; /* To identify interrupt */
226 struct vmxnet3_adapter *adapter;
221 spinlock_t tx_lock; 227 spinlock_t tx_lock;
222 struct vmxnet3_cmd_ring tx_ring; 228 struct vmxnet3_cmd_ring tx_ring;
223 struct vmxnet3_tx_buf_info *buf_info; 229 struct vmxnet3_tx_buf_info *buf_info;
224 struct vmxnet3_tx_data_ring data_ring; 230 struct vmxnet3_tx_data_ring data_ring;
225 struct vmxnet3_comp_ring comp_ring; 231 struct vmxnet3_comp_ring comp_ring;
226 struct Vmxnet3_TxQueueCtrl *shared; 232 struct Vmxnet3_TxQueueCtrl *shared;
227 struct vmxnet3_tq_driver_stats stats; 233 struct vmxnet3_tq_driver_stats stats;
228 bool stopped; 234 bool stopped;
229 int num_stop; /* # of times the queue is 235 int num_stop; /* # of times the queue is
230 * stopped */ 236 * stopped */
237 int qid;
231} __attribute__((__aligned__(SMP_CACHE_BYTES))); 238} __attribute__((__aligned__(SMP_CACHE_BYTES)));
232 239
233enum vmxnet3_rx_buf_type { 240enum vmxnet3_rx_buf_type {
@@ -259,6 +266,9 @@ struct vmxnet3_rq_driver_stats {
259}; 266};
260 267
261struct vmxnet3_rx_queue { 268struct vmxnet3_rx_queue {
269 char name[IFNAMSIZ + 8]; /* To identify interrupt */
270 struct vmxnet3_adapter *adapter;
271 struct napi_struct napi;
262 struct vmxnet3_cmd_ring rx_ring[2]; 272 struct vmxnet3_cmd_ring rx_ring[2];
263 struct vmxnet3_comp_ring comp_ring; 273 struct vmxnet3_comp_ring comp_ring;
264 struct vmxnet3_rx_ctx rx_ctx; 274 struct vmxnet3_rx_ctx rx_ctx;
@@ -271,7 +281,16 @@ struct vmxnet3_rx_queue {
271 struct vmxnet3_rq_driver_stats stats; 281 struct vmxnet3_rq_driver_stats stats;
272} __attribute__((__aligned__(SMP_CACHE_BYTES))); 282} __attribute__((__aligned__(SMP_CACHE_BYTES)));
273 283
274#define VMXNET3_LINUX_MAX_MSIX_VECT 1 284#define VMXNET3_DEVICE_MAX_TX_QUEUES 8
285#define VMXNET3_DEVICE_MAX_RX_QUEUES 8 /* Keep this value as a power of 2 */
286
287/* Should be less than UPT1_RSS_MAX_IND_TABLE_SIZE */
288#define VMXNET3_RSS_IND_TABLE_SIZE (VMXNET3_DEVICE_MAX_RX_QUEUES * 4)
289
290#define VMXNET3_LINUX_MAX_MSIX_VECT (VMXNET3_DEVICE_MAX_TX_QUEUES + \
291 VMXNET3_DEVICE_MAX_RX_QUEUES + 1)
292#define VMXNET3_LINUX_MIN_MSIX_VECT 3 /* 1 for each : tx, rx and event */
293
275 294
276struct vmxnet3_intr { 295struct vmxnet3_intr {
277 enum vmxnet3_intr_mask_mode mask_mode; 296 enum vmxnet3_intr_mask_mode mask_mode;
@@ -279,27 +298,32 @@ struct vmxnet3_intr {
279 u8 num_intrs; /* # of intr vectors */ 298 u8 num_intrs; /* # of intr vectors */
280 u8 event_intr_idx; /* idx of the intr vector for event */ 299 u8 event_intr_idx; /* idx of the intr vector for event */
281 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */ 300 u8 mod_levels[VMXNET3_LINUX_MAX_MSIX_VECT]; /* moderation level */
301 char event_msi_vector_name[IFNAMSIZ+11];
282#ifdef CONFIG_PCI_MSI 302#ifdef CONFIG_PCI_MSI
283 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT]; 303 struct msix_entry msix_entries[VMXNET3_LINUX_MAX_MSIX_VECT];
284#endif 304#endif
285}; 305};
286 306
307/* Interrupt sharing schemes, share_intr */
308#define VMXNET3_INTR_BUDDYSHARE 0 /* Corresponding tx,rx queues share irq */
309#define VMXNET3_INTR_TXSHARE 1 /* All tx queues share one irq */
310#define VMXNET3_INTR_DONTSHARE 2 /* each queue has its own irq */
311
312
287#define VMXNET3_STATE_BIT_RESETTING 0 313#define VMXNET3_STATE_BIT_RESETTING 0
288#define VMXNET3_STATE_BIT_QUIESCED 1 314#define VMXNET3_STATE_BIT_QUIESCED 1
289struct vmxnet3_adapter { 315struct vmxnet3_adapter {
290 struct vmxnet3_tx_queue tx_queue; 316 struct vmxnet3_tx_queue tx_queue[VMXNET3_DEVICE_MAX_TX_QUEUES];
291 struct vmxnet3_rx_queue rx_queue; 317 struct vmxnet3_rx_queue rx_queue[VMXNET3_DEVICE_MAX_RX_QUEUES];
292 struct napi_struct napi; 318 struct vlan_group *vlan_grp;
293 struct vlan_group *vlan_grp; 319 struct vmxnet3_intr intr;
294 320 struct Vmxnet3_DriverShared *shared;
295 struct vmxnet3_intr intr; 321 struct Vmxnet3_PMConf *pm_conf;
296 322 struct Vmxnet3_TxQueueDesc *tqd_start; /* all tx queue desc */
297 struct Vmxnet3_DriverShared *shared; 323 struct Vmxnet3_RxQueueDesc *rqd_start; /* all rx queue desc */
298 struct Vmxnet3_PMConf *pm_conf; 324 struct net_device *netdev;
299 struct Vmxnet3_TxQueueDesc *tqd_start; /* first tx queue desc */ 325 struct net_device_stats net_stats;
300 struct Vmxnet3_RxQueueDesc *rqd_start; /* first rx queue desc */ 326 struct pci_dev *pdev;
301 struct net_device *netdev;
302 struct pci_dev *pdev;
303 327
304 u8 __iomem *hw_addr0; /* for BAR 0 */ 328 u8 __iomem *hw_addr0; /* for BAR 0 */
305 u8 __iomem *hw_addr1; /* for BAR 1 */ 329 u8 __iomem *hw_addr1; /* for BAR 1 */
@@ -308,6 +332,12 @@ struct vmxnet3_adapter {
308 bool rxcsum; 332 bool rxcsum;
309 bool lro; 333 bool lro;
310 bool jumbo_frame; 334 bool jumbo_frame;
335#ifdef VMXNET3_RSS
336 struct UPT1_RSSConf *rss_conf;
337 bool rss;
338#endif
339 u32 num_rx_queues;
340 u32 num_tx_queues;
311 341
312 /* rx buffer related */ 342 /* rx buffer related */
313 unsigned skb_buf_size; 343 unsigned skb_buf_size;
@@ -327,6 +357,7 @@ struct vmxnet3_adapter {
327 unsigned long state; /* VMXNET3_STATE_BIT_xxx */ 357 unsigned long state; /* VMXNET3_STATE_BIT_xxx */
328 358
329 int dev_number; 359 int dev_number;
360 int share_intr;
330}; 361};
331 362
332#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \ 363#define VMXNET3_WRITE_BAR0_REG(adapter, reg, val) \
@@ -366,12 +397,10 @@ void
366vmxnet3_reset_dev(struct vmxnet3_adapter *adapter); 397vmxnet3_reset_dev(struct vmxnet3_adapter *adapter);
367 398
368void 399void
369vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq, 400vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter);
370 struct vmxnet3_adapter *adapter);
371 401
372void 402void
373vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq, 403vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter);
374 struct vmxnet3_adapter *adapter);
375 404
376int 405int
377vmxnet3_create_queues(struct vmxnet3_adapter *adapter, 406vmxnet3_create_queues(struct vmxnet3_adapter *adapter,
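The header now budgets MSI-X vectors for multi-queue: at most one vector per tx queue plus one per rx queue plus one for events (VMXNET3_LINUX_MAX_MSIX_VECT), and never fewer than three (one tx, one rx, one event); share_intr then records how queues fold onto fewer vectors (BUDDYSHARE pairs a tx queue with its rx queue, TXSHARE collapses all tx queues onto one vector, DONTSHARE gives every queue its own). A small stand-alone sketch of that arithmetic, with hypothetical queue counts rather than the driver's probe logic:

#include <stdio.h>

#define MAX_TX_QUEUES 8
#define MAX_RX_QUEUES 8
#define MAX_MSIX_VECT (MAX_TX_QUEUES + MAX_RX_QUEUES + 1)       /* +1 event vector */
#define MIN_MSIX_VECT 3                                         /* 1 tx + 1 rx + 1 event */

/* How many vectors a given queue configuration would request, clamped to the limits. */
static int msix_vectors_wanted(int num_tx, int num_rx)
{
        int want = num_tx + num_rx + 1;

        if (want > MAX_MSIX_VECT)
                want = MAX_MSIX_VECT;
        if (want < MIN_MSIX_VECT)
                want = MIN_MSIX_VECT;
        return want;
}

int main(void)
{
        printf("4 tx / 4 rx -> %d vectors\n", msix_vectors_wanted(4, 4));
        printf("1 tx / 1 rx -> %d vectors\n", msix_vectors_wanted(1, 1));
        printf("8 tx / 8 rx -> %d vectors\n", msix_vectors_wanted(8, 8));
        return 0;
}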
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 906a3ca3676b..01c05f53e2f9 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -19,109 +19,128 @@
19 19
20#include "vxge-traffic.h" 20#include "vxge-traffic.h"
21#include "vxge-config.h" 21#include "vxge-config.h"
22 22#include "vxge-main.h"
23static enum vxge_hw_status 23
24__vxge_hw_fifo_create( 24#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
25 struct __vxge_hw_vpath_handle *vpath_handle, 25 status = __vxge_hw_vpath_stats_access(vpath, \
26 struct vxge_hw_fifo_attr *attr); 26 VXGE_HW_STATS_OP_READ, \
27 27 offset, \
28static enum vxge_hw_status 28 &val64); \
29__vxge_hw_fifo_abort( 29 if (status != VXGE_HW_OK) \
30 struct __vxge_hw_fifo *fifoh); 30 return status; \
31 31}
32static enum vxge_hw_status
33__vxge_hw_fifo_reset(
34 struct __vxge_hw_fifo *ringh);
35
36static enum vxge_hw_status
37__vxge_hw_fifo_delete(
38 struct __vxge_hw_vpath_handle *vpath_handle);
39
40static struct __vxge_hw_blockpool_entry *
41__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *hldev,
42 u32 size);
43 32
44static void 33static void
45__vxge_hw_blockpool_block_free(struct __vxge_hw_device *hldev, 34vxge_hw_vpath_set_zero_rx_frm_len(struct vxge_hw_vpath_reg __iomem *vp_reg)
46 struct __vxge_hw_blockpool_entry *entry); 35{
47 36 u64 val64;
48static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
49 void *block_addr,
50 u32 length,
51 struct pci_dev *dma_h,
52 struct pci_dev *acc_handle);
53
54static enum vxge_hw_status
55__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
56 struct __vxge_hw_blockpool *blockpool,
57 u32 pool_size,
58 u32 pool_max);
59 37
60static void 38 val64 = readq(&vp_reg->rxmac_vcfg0);
61__vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool); 39 val64 &= ~VXGE_HW_RXMAC_VCFG0_RTS_MAX_FRM_LEN(0x3fff);
40 writeq(val64, &vp_reg->rxmac_vcfg0);
41 val64 = readq(&vp_reg->rxmac_vcfg0);
42}
62 43
63static void * 44/*
64__vxge_hw_blockpool_malloc(struct __vxge_hw_device *hldev, 45 * vxge_hw_vpath_wait_receive_idle - Wait for Rx to become idle
65 u32 size, 46 */
66 struct vxge_hw_mempool_dma *dma_object); 47int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id)
48{
49 struct vxge_hw_vpath_reg __iomem *vp_reg;
50 struct __vxge_hw_virtualpath *vpath;
51 u64 val64, rxd_count, rxd_spat;
52 int count = 0, total_count = 0;
67 53
68static void 54 vpath = &hldev->virtual_paths[vp_id];
69__vxge_hw_blockpool_free(struct __vxge_hw_device *hldev, 55 vp_reg = vpath->vp_reg;
70 void *memblock,
71 u32 size,
72 struct vxge_hw_mempool_dma *dma_object);
73 56
57 vxge_hw_vpath_set_zero_rx_frm_len(vp_reg);
74 58
75static struct __vxge_hw_channel* 59 /* Check that the ring controller for this vpath has enough free RxDs
76__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, 60 * to send frames to the host. This is done by reading the
77 enum __vxge_hw_channel_type type, u32 length, 61 * PRC_RXD_DOORBELL_VPn register and comparing the read value to the
78 u32 per_dtr_space, void *userdata); 62 * RXD_SPAT value for the vpath.
63 */
64 val64 = readq(&vp_reg->prc_cfg6);
65 rxd_spat = VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val64) + 1;
66 /* Use a factor of 2 when comparing rxd_count against rxd_spat for some
67 * leg room.
68 */
69 rxd_spat *= 2;
79 70
80static void 71 do {
81__vxge_hw_channel_free( 72 mdelay(1);
82 struct __vxge_hw_channel *channel);
83 73
84static enum vxge_hw_status 74 rxd_count = readq(&vp_reg->prc_rxd_doorbell);
85__vxge_hw_channel_initialize(
86 struct __vxge_hw_channel *channel);
87 75
88static enum vxge_hw_status 76 /* Check that the ring controller for this vpath does
89__vxge_hw_channel_reset( 77 * not have any frame in its pipeline.
90 struct __vxge_hw_channel *channel); 78 */
79 val64 = readq(&vp_reg->frm_in_progress_cnt);
80 if ((rxd_count <= rxd_spat) || (val64 > 0))
81 count = 0;
82 else
83 count++;
84 total_count++;
85 } while ((count < VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT) &&
86 (total_count < VXGE_HW_MAX_POLLING_COUNT));
91 87
92static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp); 88 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
89 printk(KERN_ALERT "%s: Still Receiving traffic. Abort wait\n",
90 __func__);
93 91
94static enum vxge_hw_status 92 return total_count;
95__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config); 93}
96 94
97static enum vxge_hw_status 95/* vxge_hw_device_wait_receive_idle - This function waits until all frames
98__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config); 96 * stored in the frame buffer for each vpath assigned to the given
97 * function (hldev) have been sent to the host.
98 */
99void vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev)
100{
101 int i, total_count = 0;
99 102
100static void 103 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
101__vxge_hw_device_id_get(struct __vxge_hw_device *hldev); 104 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
105 continue;
102 106
103static void 107 total_count += vxge_hw_vpath_wait_receive_idle(hldev, i);
104__vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev); 108 if (total_count >= VXGE_HW_MAX_POLLING_COUNT)
109 break;
110 }
111}
105 112
113/*
114 * __vxge_hw_device_register_poll
115 * Will poll a certain register for a specified amount of time.
116 * Will poll until the masked bit is cleared.
117 */
106static enum vxge_hw_status 118static enum vxge_hw_status
107__vxge_hw_vpath_card_info_get( 119__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
108 u32 vp_id, 120{
109 struct vxge_hw_vpath_reg __iomem *vpath_reg, 121 u64 val64;
110 struct vxge_hw_device_hw_info *hw_info); 122 u32 i = 0;
123 enum vxge_hw_status ret = VXGE_HW_FAIL;
111 124
112static enum vxge_hw_status 125 udelay(10);
113__vxge_hw_device_initialize(struct __vxge_hw_device *hldev);
114 126
115static void 127 do {
116__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev); 128 val64 = readq(reg);
129 if (!(val64 & mask))
130 return VXGE_HW_OK;
131 udelay(100);
132 } while (++i <= 9);
117 133
118static enum vxge_hw_status 134 i = 0;
119__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev); 135 do {
136 val64 = readq(reg);
137 if (!(val64 & mask))
138 return VXGE_HW_OK;
139 mdelay(1);
140 } while (++i <= max_millis);
120 141
121static enum vxge_hw_status 142 return ret;
122__vxge_hw_device_register_poll( 143}
123 void __iomem *reg,
124 u64 mask, u32 max_millis);
125 144
126static inline enum vxge_hw_status 145static inline enum vxge_hw_status
127__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr, 146__vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
@@ -129,139 +148,258 @@ __vxge_hw_pio_mem_write64(u64 val64, void __iomem *addr,
129{ 148{
130 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr); 149 __vxge_hw_pio_mem_write32_lower((u32)vxge_bVALn(val64, 32, 32), addr);
131 wmb(); 150 wmb();
132
133 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr); 151 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), addr);
134 wmb(); 152 wmb();
135 153
136 return __vxge_hw_device_register_poll(addr, mask, max_millis); 154 return __vxge_hw_device_register_poll(addr, mask, max_millis);
137} 155}
138 156
139static struct vxge_hw_mempool*
140__vxge_hw_mempool_create(struct __vxge_hw_device *devh, u32 memblock_size,
141 u32 item_size, u32 private_size, u32 items_initial,
142 u32 items_max, struct vxge_hw_mempool_cbs *mp_callback,
143 void *userdata);
144static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool);
145
146static enum vxge_hw_status 157static enum vxge_hw_status
147__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath, 158vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
148 struct vxge_hw_vpath_stats_hw_info *hw_stats); 159 u32 fw_memo, u32 offset, u64 *data0, u64 *data1,
160 u64 *steer_ctrl)
161{
162 struct vxge_hw_vpath_reg __iomem *vp_reg;
163 enum vxge_hw_status status;
164 u64 val64;
165 u32 retry = 0, max_retry = 100;
149 166
150static enum vxge_hw_status 167 vp_reg = vpath->vp_reg;
151vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vpath_handle);
152 168
153static enum vxge_hw_status 169 if (vpath->vp_open) {
154__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg); 170 max_retry = 3;
171 spin_lock(&vpath->lock);
172 }
155 173
156static u64 174 writeq(*data0, &vp_reg->rts_access_steer_data0);
157__vxge_hw_vpath_pci_func_mode_get(u32 vp_id, 175 writeq(*data1, &vp_reg->rts_access_steer_data1);
158 struct vxge_hw_vpath_reg __iomem *vpath_reg); 176 wmb();
159 177
160static u32 178 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
161__vxge_hw_vpath_func_id_get(u32 vp_id, struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg); 179 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(fw_memo) |
180 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset) |
181 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
182 *steer_ctrl;
162 183
163static enum vxge_hw_status 184 status = __vxge_hw_pio_mem_write64(val64,
164__vxge_hw_vpath_addr_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, 185 &vp_reg->rts_access_steer_ctrl,
165 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN]); 186 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
187 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
188
189 /* The __vxge_hw_device_register_poll can udelay for a significant
190 * amount of time, blocking other processes from the CPU. If it delays
191 * for ~5secs, an NMI error can occur. A way around this is to give up
192 * the processor via msleep, but this is not allowed while under lock.
193 * So, only allow it to sleep for ~4secs if open. Otherwise, delay for
194 * 1sec and sleep for 10ms until the firmware operation has completed
195 * or timed out.
196 */
197 while ((status != VXGE_HW_OK) && retry++ < max_retry) {
198 if (!vpath->vp_open)
199 msleep(20);
200 status = __vxge_hw_device_register_poll(
201 &vp_reg->rts_access_steer_ctrl,
202 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
203 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
204 }
166 205
167static enum vxge_hw_status 206 if (status != VXGE_HW_OK)
168__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath); 207 goto out;
169 208
209 val64 = readq(&vp_reg->rts_access_steer_ctrl);
210 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
211 *data0 = readq(&vp_reg->rts_access_steer_data0);
212 *data1 = readq(&vp_reg->rts_access_steer_data1);
213 *steer_ctrl = val64;
214 } else
215 status = VXGE_HW_FAIL;
170 216
171static enum vxge_hw_status 217out:
172__vxge_hw_vpath_sw_reset(struct __vxge_hw_device *devh, u32 vp_id); 218 if (vpath->vp_open)
219 spin_unlock(&vpath->lock);
220 return status;
221}
173 222
174static enum vxge_hw_status 223enum vxge_hw_status
175__vxge_hw_vpath_fw_ver_get(u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg, 224vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
176 struct vxge_hw_device_hw_info *hw_info); 225 u32 *minor, u32 *build)
226{
227 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
228 struct __vxge_hw_virtualpath *vpath;
229 enum vxge_hw_status status;
177 230
178static enum vxge_hw_status 231 vpath = &hldev->virtual_paths[hldev->first_vp_id];
179__vxge_hw_vpath_mac_configure(struct __vxge_hw_device *devh, u32 vp_id);
180 232
181static void 233 status = vxge_hw_vpath_fw_api(vpath,
182__vxge_hw_vp_terminate(struct __vxge_hw_device *devh, u32 vp_id); 234 VXGE_HW_FW_UPGRADE_ACTION,
235 VXGE_HW_FW_UPGRADE_MEMO,
236 VXGE_HW_FW_UPGRADE_OFFSET_READ,
237 &data0, &data1, &steer_ctrl);
238 if (status != VXGE_HW_OK)
239 return status;
183 240
184static enum vxge_hw_status 241 *major = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
185__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath, 242 *minor = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
186 u32 operation, u32 offset, u64 *stat); 243 *build = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
187 244
188static enum vxge_hw_status 245 return status;
189__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath, 246}
190 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats);
191 247
192static enum vxge_hw_status 248enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev)
193__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath, 249{
194 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats); 250 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
251 struct __vxge_hw_virtualpath *vpath;
252 enum vxge_hw_status status;
253 u32 ret;
195 254
196/* 255 vpath = &hldev->virtual_paths[hldev->first_vp_id];
197 * __vxge_hw_channel_allocate - Allocate memory for channel 256
198 * This function allocates required memory for the channel and various arrays 257 status = vxge_hw_vpath_fw_api(vpath,
199 * in the channel 258 VXGE_HW_FW_UPGRADE_ACTION,
200 */ 259 VXGE_HW_FW_UPGRADE_MEMO,
201struct __vxge_hw_channel* 260 VXGE_HW_FW_UPGRADE_OFFSET_COMMIT,
202__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph, 261 &data0, &data1, &steer_ctrl);
203 enum __vxge_hw_channel_type type, 262 if (status != VXGE_HW_OK) {
204 u32 length, u32 per_dtr_space, void *userdata) 263 vxge_debug_init(VXGE_ERR, "%s: FW upgrade failed", __func__);
264 goto exit;
265 }
266
267 ret = VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(steer_ctrl) & 0x7F;
268 if (ret != 1) {
269 vxge_debug_init(VXGE_ERR, "%s: FW commit failed with error %d",
270 __func__, ret);
271 status = VXGE_HW_FAIL;
272 }
273
274exit:
275 return status;
276}
277
278enum vxge_hw_status
279vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *fwdata, int size)
205{ 280{
206 struct __vxge_hw_channel *channel; 281 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
207 struct __vxge_hw_device *hldev; 282 struct __vxge_hw_virtualpath *vpath;
208 int size = 0; 283 enum vxge_hw_status status;
209 u32 vp_id; 284 int ret_code, sec_code;
210 285
211 hldev = vph->vpath->hldev; 286 vpath = &hldev->virtual_paths[hldev->first_vp_id];
212 vp_id = vph->vpath->vp_id;
213 287
214 switch (type) { 288 /* send upgrade start command */
215 case VXGE_HW_CHANNEL_TYPE_FIFO: 289 status = vxge_hw_vpath_fw_api(vpath,
216 size = sizeof(struct __vxge_hw_fifo); 290 VXGE_HW_FW_UPGRADE_ACTION,
217 break; 291 VXGE_HW_FW_UPGRADE_MEMO,
218 case VXGE_HW_CHANNEL_TYPE_RING: 292 VXGE_HW_FW_UPGRADE_OFFSET_START,
219 size = sizeof(struct __vxge_hw_ring); 293 &data0, &data1, &steer_ctrl);
220 break; 294 if (status != VXGE_HW_OK) {
221 default: 295 vxge_debug_init(VXGE_ERR, " %s: Upgrade start cmd failed",
222 break; 296 __func__);
297 return status;
223 } 298 }
224 299
225 channel = kzalloc(size, GFP_KERNEL); 300 /* Transfer fw image to adapter 16 bytes at a time */
226 if (channel == NULL) 301 for (; size > 0; size -= VXGE_HW_FW_UPGRADE_BLK_SIZE) {
227 goto exit0; 302 steer_ctrl = 0;
228 INIT_LIST_HEAD(&channel->item);
229 303
230 channel->common_reg = hldev->common_reg; 304 /* The next 128bits of fwdata to be loaded onto the adapter */
231 channel->first_vp_id = hldev->first_vp_id; 305 data0 = *((u64 *)fwdata);
232 channel->type = type; 306 data1 = *((u64 *)fwdata + 1);
233 channel->devh = hldev;
234 channel->vph = vph;
235 channel->userdata = userdata;
236 channel->per_dtr_space = per_dtr_space;
237 channel->length = length;
238 channel->vp_id = vp_id;
239 307
240 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); 308 status = vxge_hw_vpath_fw_api(vpath,
241 if (channel->work_arr == NULL) 309 VXGE_HW_FW_UPGRADE_ACTION,
242 goto exit1; 310 VXGE_HW_FW_UPGRADE_MEMO,
311 VXGE_HW_FW_UPGRADE_OFFSET_SEND,
312 &data0, &data1, &steer_ctrl);
313 if (status != VXGE_HW_OK) {
314 vxge_debug_init(VXGE_ERR, "%s: Upgrade send failed",
315 __func__);
316 goto out;
317 }
243 318
244 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); 319 ret_code = VXGE_HW_UPGRADE_GET_RET_ERR_CODE(data0);
245 if (channel->free_arr == NULL) 320 switch (ret_code) {
246 goto exit1; 321 case VXGE_HW_FW_UPGRADE_OK:
247 channel->free_ptr = length; 322 /* All OK, send next 16 bytes. */
323 break;
324 case VXGE_FW_UPGRADE_BYTES2SKIP:
325 /* skip bytes in the stream */
326 fwdata += (data0 >> 8) & 0xFFFFFFFF;
327 break;
328 case VXGE_HW_FW_UPGRADE_DONE:
329 goto out;
330 case VXGE_HW_FW_UPGRADE_ERR:
331 sec_code = VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(data0);
332 switch (sec_code) {
333 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1:
334 case VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7:
335 printk(KERN_ERR
336 "corrupted data from .ncf file\n");
337 break;
338 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3:
339 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4:
340 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5:
341 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6:
342 case VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8:
343 printk(KERN_ERR "invalid .ncf file\n");
344 break;
345 case VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW:
346 printk(KERN_ERR "buffer overflow\n");
347 break;
348 case VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH:
349 printk(KERN_ERR "failed to flash the image\n");
350 break;
351 case VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN:
352 printk(KERN_ERR
353 "generic error. Unknown error type\n");
354 break;
355 default:
356 printk(KERN_ERR "Unknown error of type %d\n",
357 sec_code);
358 break;
359 }
360 status = VXGE_HW_FAIL;
361 goto out;
362 default:
363 printk(KERN_ERR "Unknown FW error: %d\n", ret_code);
364 status = VXGE_HW_FAIL;
365 goto out;
366 }
367 /* point to next 16 bytes */
368 fwdata += VXGE_HW_FW_UPGRADE_BLK_SIZE;
369 }
370out:
371 return status;
372}
248 373
249 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); 374enum vxge_hw_status
250 if (channel->reserve_arr == NULL) 375vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
251 goto exit1; 376 struct eprom_image *img)
252 channel->reserve_ptr = length; 377{
253 channel->reserve_top = 0; 378 u64 data0 = 0, data1 = 0, steer_ctrl = 0;
379 struct __vxge_hw_virtualpath *vpath;
380 enum vxge_hw_status status;
381 int i;
254 382
255 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL); 383 vpath = &hldev->virtual_paths[hldev->first_vp_id];
256 if (channel->orig_arr == NULL)
257 goto exit1;
258 384
259 return channel; 385 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
260exit1: 386 data0 = VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(i);
261 __vxge_hw_channel_free(channel); 387 data1 = steer_ctrl = 0;
262 388
263exit0: 389 status = vxge_hw_vpath_fw_api(vpath,
264 return NULL; 390 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
391 VXGE_HW_FW_API_GET_EPROM_REV,
392 0, &data0, &data1, &steer_ctrl);
393 if (status != VXGE_HW_OK)
394 break;
395
396 img[i].is_valid = VXGE_HW_GET_EPROM_IMAGE_VALID(data0);
397 img[i].index = VXGE_HW_GET_EPROM_IMAGE_INDEX(data0);
398 img[i].type = VXGE_HW_GET_EPROM_IMAGE_TYPE(data0);
399 img[i].version = VXGE_HW_GET_EPROM_IMAGE_REV(data0);
400 }
401
402 return status;
265} 403}
266 404
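__vxge_hw_device_register_poll, now defined near the top of the file instead of being forward-declared, waits in two speeds: a short burst of 100us checks (roughly 1ms in total) and then 1ms checks for up to max_millis. A stand-alone sketch of that fast-then-slow polling shape, with a made-up ready() predicate in place of the readq()/mask test:

#include <stdio.h>
#include <time.h>

/* Hypothetical stand-in for reading the register and testing the mask. */
static int ready(void)
{
        static int calls;
        return ++calls > 50;            /* pretend the bit clears after 50 reads */
}

static void delay_us(long us)
{
        struct timespec ts = { us / 1000000, (us % 1000000) * 1000 };
        nanosleep(&ts, NULL);
}

/* Poll quickly first, then back off to millisecond steps until the timeout. */
static int poll_until_ready(unsigned int max_millis)
{
        unsigned int i;

        for (i = 0; i <= 9; i++) {
                if (ready())
                        return 0;
                delay_us(100);
        }
        for (i = 0; i <= max_millis; i++) {
                if (ready())
                        return 0;
                delay_us(1000);
        }
        return -1;                      /* timed out */
}

int main(void)
{
        printf("poll_until_ready: %d\n", poll_until_ready(100));
        return 0;
}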
267/* 405/*
@@ -269,7 +407,7 @@ exit0:
269 * This function deallocates memory from the channel and various arrays 407 * This function deallocates memory from the channel and various arrays
270 * in the channel 408 * in the channel
271 */ 409 */
272void __vxge_hw_channel_free(struct __vxge_hw_channel *channel) 410static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
273{ 411{
274 kfree(channel->work_arr); 412 kfree(channel->work_arr);
275 kfree(channel->free_arr); 413 kfree(channel->free_arr);
@@ -283,7 +421,7 @@ void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
283 * This function initializes a channel by properly setting the 421 * This function initializes a channel by properly setting the
284 * various references 422 * various references
285 */ 423 */
286enum vxge_hw_status 424static enum vxge_hw_status
287__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel) 425__vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
288{ 426{
289 u32 i; 427 u32 i;
@@ -318,7 +456,7 @@ __vxge_hw_channel_initialize(struct __vxge_hw_channel *channel)
318 * __vxge_hw_channel_reset - Resets a channel 456 * __vxge_hw_channel_reset - Resets a channel
319 * This function resets a channel by properly setting the various references 457 * This function resets a channel by properly setting the various references
320 */ 458 */
321enum vxge_hw_status 459static enum vxge_hw_status
322__vxge_hw_channel_reset(struct __vxge_hw_channel *channel) 460__vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
323{ 461{
324 u32 i; 462 u32 i;
@@ -345,8 +483,7 @@ __vxge_hw_channel_reset(struct __vxge_hw_channel *channel)
345 * Initialize certain PCI/PCI-X configuration registers 483 * Initialize certain PCI/PCI-X configuration registers
346 * with recommended values. Save config space for future hw resets. 484 * with recommended values. Save config space for future hw resets.
347 */ 485 */
348void 486static void __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
349__vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
350{ 487{
351 u16 cmd = 0; 488 u16 cmd = 0;
352 489
@@ -358,39 +495,7 @@ __vxge_hw_device_pci_e_init(struct __vxge_hw_device *hldev)
358 pci_save_state(hldev->pdev); 495 pci_save_state(hldev->pdev);
359} 496}
360 497
361/* 498/* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
362 * __vxge_hw_device_register_poll
363 * Will poll certain register for specified amount of time.
364 * Will poll until masked bit is not cleared.
365 */
366static enum vxge_hw_status
367__vxge_hw_device_register_poll(void __iomem *reg, u64 mask, u32 max_millis)
368{
369 u64 val64;
370 u32 i = 0;
371 enum vxge_hw_status ret = VXGE_HW_FAIL;
372
373 udelay(10);
374
375 do {
376 val64 = readq(reg);
377 if (!(val64 & mask))
378 return VXGE_HW_OK;
379 udelay(100);
380 } while (++i <= 9);
381
382 i = 0;
383 do {
384 val64 = readq(reg);
385 if (!(val64 & mask))
386 return VXGE_HW_OK;
387 mdelay(1);
388 } while (++i <= max_millis);
389
390 return ret;
391}
392
393 /* __vxge_hw_device_vpath_reset_in_prog_check - Check if vpath reset
394 * in progress 499 * in progress
395 * This routine checks the vpath reset in progress register is turned zero 500 * This routine checks the vpath reset in progress register is turned zero
396 */ 501 */
@@ -405,6 +510,60 @@ __vxge_hw_device_vpath_reset_in_prog_check(u64 __iomem *vpath_rst_in_prog)
405} 510}
406 511
407/* 512/*
513 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
514 * Set the swapper bits appropriately for the legacy section.
515 */
516static enum vxge_hw_status
517__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
518{
519 u64 val64;
520 enum vxge_hw_status status = VXGE_HW_OK;
521
522 val64 = readq(&legacy_reg->toc_swapper_fb);
523
524 wmb();
525
526 switch (val64) {
527 case VXGE_HW_SWAPPER_INITIAL_VALUE:
528 return status;
529
530 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
531 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
532 &legacy_reg->pifm_rd_swap_en);
533 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
534 &legacy_reg->pifm_rd_flip_en);
535 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
536 &legacy_reg->pifm_wr_swap_en);
537 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
538 &legacy_reg->pifm_wr_flip_en);
539 break;
540
541 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
542 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
543 &legacy_reg->pifm_rd_swap_en);
544 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
545 &legacy_reg->pifm_wr_swap_en);
546 break;
547
548 case VXGE_HW_SWAPPER_BIT_FLIPPED:
549 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
550 &legacy_reg->pifm_rd_flip_en);
551 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
552 &legacy_reg->pifm_wr_flip_en);
553 break;
554 }
555
556 wmb();
557
558 val64 = readq(&legacy_reg->toc_swapper_fb);
559
560 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
561 status = VXGE_HW_ERR_SWAPPER_CTRL;
562
563 return status;
564}
565
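The swapper setup reads toc_swapper_fb, whose correct contents are known in advance, and from the way that value comes back chooses whether to enable byte swapping, bit flipping, or both on the PIFM read and write paths. A simplified user-space sketch of the same read-back-a-known-pattern idea, handling byte order only and with a made-up magic value (__builtin_bswap64 is a GCC/Clang builtin):

#include <stdio.h>
#include <stdint.h>

#define MAGIC UINT64_C(0x0123456789abcdef)      /* hypothetical known pattern */

/* Decide what fix-up is needed from how a known value reads back. */
static const char *detect_swapper(uint64_t readback)
{
        if (readback == MAGIC)
                return "no swapping needed";
        if (readback == __builtin_bswap64(MAGIC))
                return "enable byte swapping";
        return "unrecognised pattern";
}

int main(void)
{
        printf("%s\n", detect_swapper(MAGIC));
        printf("%s\n", detect_swapper(__builtin_bswap64(MAGIC)));
        printf("%s\n", detect_swapper(0));
        return 0;
}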
566/*
408 * __vxge_hw_device_toc_get 567 * __vxge_hw_device_toc_get
409 * This routine sets the swapper and reads the toc pointer and returns the 568 * This routine sets the swapper and reads the toc pointer and returns the
410 * memory mapped address of the toc 569 * memory mapped address of the toc
@@ -435,7 +594,7 @@ exit:
435 * register location pointers in the device object. It waits until the ric is 594 * register location pointers in the device object. It waits until the ric is
436 * completed initializing registers. 595 * completed initializing registers.
437 */ 596 */
438enum vxge_hw_status 597static enum vxge_hw_status
439__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev) 598__vxge_hw_device_reg_addr_get(struct __vxge_hw_device *hldev)
440{ 599{
441 u64 val64; 600 u64 val64;
@@ -496,26 +655,6 @@ exit:
496} 655}
497 656
498/* 657/*
499 * __vxge_hw_device_id_get
500 * This routine returns sets the device id and revision numbers into the device
501 * structure
502 */
503void __vxge_hw_device_id_get(struct __vxge_hw_device *hldev)
504{
505 u64 val64;
506
507 val64 = readq(&hldev->common_reg->titan_asic_id);
508 hldev->device_id =
509 (u16)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_DEVICE_ID(val64);
510
511 hldev->major_revision =
512 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MAJOR_REVISION(val64);
513
514 hldev->minor_revision =
515 (u8)VXGE_HW_TITAN_ASIC_ID_GET_INITIAL_MINOR_REVISION(val64);
516}
517
518/*
519 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver 658 * __vxge_hw_device_access_rights_get: Get Access Rights of the driver
520 * This routine returns the Access Rights of the driver 659 * This routine returns the Access Rights of the driver
521 */ 660 */
@@ -568,10 +707,25 @@ __vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
568} 707}
569 708
570/* 709/*
710 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
711 * Returns the function number of the vpath.
712 */
713static u32
714__vxge_hw_vpath_func_id_get(struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
715{
716 u64 val64;
717
718 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
719
720 return
721 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
722}
723
724/*
571 * __vxge_hw_device_host_info_get 725 * __vxge_hw_device_host_info_get
572 * This routine returns the host type assignments 726 * This routine returns the host type assignments
573 */ 727 */
574void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev) 728static void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
575{ 729{
576 u64 val64; 730 u64 val64;
577 u32 i; 731 u32 i;
@@ -584,16 +738,18 @@ void __vxge_hw_device_host_info_get(struct __vxge_hw_device *hldev)
584 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments); 738 hldev->vpath_assignments = readq(&hldev->common_reg->vpath_assignments);
585 739
586 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 740 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
587
588 if (!(hldev->vpath_assignments & vxge_mBIT(i))) 741 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
589 continue; 742 continue;
590 743
591 hldev->func_id = 744 hldev->func_id =
592 __vxge_hw_vpath_func_id_get(i, hldev->vpmgmt_reg[i]); 745 __vxge_hw_vpath_func_id_get(hldev->vpmgmt_reg[i]);
593 746
594 hldev->access_rights = __vxge_hw_device_access_rights_get( 747 hldev->access_rights = __vxge_hw_device_access_rights_get(
595 hldev->host_type, hldev->func_id); 748 hldev->host_type, hldev->func_id);
596 749
750 hldev->virtual_paths[i].vp_open = VXGE_HW_VP_NOT_OPEN;
751 hldev->virtual_paths[i].vp_reg = hldev->vpath_reg[i];
752
597 hldev->first_vp_id = i; 753 hldev->first_vp_id = i;
598 break; 754 break;
599 } 755 }
@@ -634,7 +790,8 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
634 * __vxge_hw_device_initialize 790 * __vxge_hw_device_initialize
635 * Initialize Titan-V hardware. 791 * Initialize Titan-V hardware.
636 */ 792 */
637enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev) 793static enum vxge_hw_status
794__vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
638{ 795{
639 enum vxge_hw_status status = VXGE_HW_OK; 796 enum vxge_hw_status status = VXGE_HW_OK;
640 797
@@ -650,6 +807,196 @@ exit:
650 return status; 807 return status;
651} 808}
652 809
810/*
811 * __vxge_hw_vpath_fw_ver_get - Get the fw version
812 * Returns FW Version
813 */
814static enum vxge_hw_status
815__vxge_hw_vpath_fw_ver_get(struct __vxge_hw_virtualpath *vpath,
816 struct vxge_hw_device_hw_info *hw_info)
817{
818 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
819 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
820 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
821 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
822 u64 data0, data1 = 0, steer_ctrl = 0;
823 enum vxge_hw_status status;
824
825 status = vxge_hw_vpath_fw_api(vpath,
826 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
827 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
828 0, &data0, &data1, &steer_ctrl);
829 if (status != VXGE_HW_OK)
830 goto exit;
831
832 fw_date->day =
833 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(data0);
834 fw_date->month =
835 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(data0);
836 fw_date->year =
837 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(data0);
838
839 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
840 fw_date->month, fw_date->day, fw_date->year);
841
842 fw_version->major =
843 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data0);
844 fw_version->minor =
845 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data0);
846 fw_version->build =
847 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data0);
848
849 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
850 fw_version->major, fw_version->minor, fw_version->build);
851
852 flash_date->day =
853 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data1);
854 flash_date->month =
855 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data1);
856 flash_date->year =
857 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data1);
858
859 snprintf(flash_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
860 flash_date->month, flash_date->day, flash_date->year);
861
862 flash_version->major =
863 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data1);
864 flash_version->minor =
865 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data1);
866 flash_version->build =
867 (u32) VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data1);
868
869 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
870 flash_version->major, flash_version->minor,
871 flash_version->build);
872
873exit:
874 return status;
875}
876
877/*
878 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
879 * part number and product description.
880 */
881static enum vxge_hw_status
882__vxge_hw_vpath_card_info_get(struct __vxge_hw_virtualpath *vpath,
883 struct vxge_hw_device_hw_info *hw_info)
884{
885 enum vxge_hw_status status;
886 u64 data0, data1 = 0, steer_ctrl = 0;
887 u8 *serial_number = hw_info->serial_number;
888 u8 *part_number = hw_info->part_number;
889 u8 *product_desc = hw_info->product_desc;
890 u32 i, j = 0;
891
892 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER;
893
894 status = vxge_hw_vpath_fw_api(vpath,
895 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
896 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
897 0, &data0, &data1, &steer_ctrl);
898 if (status != VXGE_HW_OK)
899 return status;
900
901 ((u64 *)serial_number)[0] = be64_to_cpu(data0);
902 ((u64 *)serial_number)[1] = be64_to_cpu(data1);
903
904 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER;
905 data1 = steer_ctrl = 0;
906
907 status = vxge_hw_vpath_fw_api(vpath,
908 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
909 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
910 0, &data0, &data1, &steer_ctrl);
911 if (status != VXGE_HW_OK)
912 return status;
913
914 ((u64 *)part_number)[0] = be64_to_cpu(data0);
915 ((u64 *)part_number)[1] = be64_to_cpu(data1);
916
917 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
918 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
919 data0 = i;
920 data1 = steer_ctrl = 0;
921
922 status = vxge_hw_vpath_fw_api(vpath,
923 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY,
924 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
925 0, &data0, &data1, &steer_ctrl);
926 if (status != VXGE_HW_OK)
927 return status;
928
929 ((u64 *)product_desc)[j++] = be64_to_cpu(data0);
930 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
931 }
932
933 return status;
934}
935
936/*
937 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
938 * Returns pci function mode
939 */
940static enum vxge_hw_status
941__vxge_hw_vpath_pci_func_mode_get(struct __vxge_hw_virtualpath *vpath,
942 struct vxge_hw_device_hw_info *hw_info)
943{
944 u64 data0, data1 = 0, steer_ctrl = 0;
945 enum vxge_hw_status status;
946
947 data0 = 0;
948
949 status = vxge_hw_vpath_fw_api(vpath,
950 VXGE_HW_FW_API_GET_FUNC_MODE,
951 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
952 0, &data0, &data1, &steer_ctrl);
953 if (status != VXGE_HW_OK)
954 return status;
955
956 hw_info->function_mode = VXGE_HW_GET_FUNC_MODE_VAL(data0);
957 return status;
958}
959
960/*
961 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
962 * from MAC address table.
963 */
964static enum vxge_hw_status
965__vxge_hw_vpath_addr_get(struct __vxge_hw_virtualpath *vpath,
966 u8 *macaddr, u8 *macaddr_mask)
967{
968 u64 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
969 data0 = 0, data1 = 0, steer_ctrl = 0;
970 enum vxge_hw_status status;
971 int i;
972
973 do {
974 status = vxge_hw_vpath_fw_api(vpath, action,
975 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
976 0, &data0, &data1, &steer_ctrl);
977 if (status != VXGE_HW_OK)
978 goto exit;
979
980 data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
981 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
982 data1);
983
984 for (i = ETH_ALEN; i > 0; i--) {
985 macaddr[i - 1] = (u8) (data0 & 0xFF);
986 data0 >>= 8;
987
988 macaddr_mask[i - 1] = (u8) (data1 & 0xFF);
989 data1 >>= 8;
990 }
991
992 action = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_NEXT_ENTRY;
993 data0 = 0, data1 = 0, steer_ctrl = 0;
994
995 } while (!is_valid_ether_addr(macaddr));
996exit:
997 return status;
998}
999
653/** 1000/**
654 * vxge_hw_device_hw_info_get - Get the hw information 1001 * vxge_hw_device_hw_info_get - Get the hw information
655 * Returns the vpath mask that has the bits set for each vpath allocated 1002 * Returns the vpath mask that has the bits set for each vpath allocated
@@ -665,9 +1012,9 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
665 struct vxge_hw_toc_reg __iomem *toc; 1012 struct vxge_hw_toc_reg __iomem *toc;
666 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg; 1013 struct vxge_hw_mrpcim_reg __iomem *mrpcim_reg;
667 struct vxge_hw_common_reg __iomem *common_reg; 1014 struct vxge_hw_common_reg __iomem *common_reg;
668 struct vxge_hw_vpath_reg __iomem *vpath_reg;
669 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg; 1015 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
670 enum vxge_hw_status status; 1016 enum vxge_hw_status status;
1017 struct __vxge_hw_virtualpath vpath;
671 1018
672 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info)); 1019 memset(hw_info, 0, sizeof(struct vxge_hw_device_hw_info));
673 1020
@@ -693,7 +1040,6 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
693 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64); 1040 (u32)VXGE_HW_HOST_TYPE_ASSIGNMENTS_GET_HOST_TYPE_ASSIGNMENTS(val64);
694 1041
695 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1042 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
696
697 if (!((hw_info->vpath_mask) & vxge_mBIT(i))) 1043 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
698 continue; 1044 continue;
699 1045
@@ -702,7 +1048,7 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
702 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *) 1048 vpmgmt_reg = (struct vxge_hw_vpmgmt_reg __iomem *)
703 (bar0 + val64); 1049 (bar0 + val64);
704 1050
705 hw_info->func_id = __vxge_hw_vpath_func_id_get(i, vpmgmt_reg); 1051 hw_info->func_id = __vxge_hw_vpath_func_id_get(vpmgmt_reg);
706 if (__vxge_hw_device_access_rights_get(hw_info->host_type, 1052 if (__vxge_hw_device_access_rights_get(hw_info->host_type,
707 hw_info->func_id) & 1053 hw_info->func_id) &
708 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) { 1054 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM) {
@@ -718,16 +1064,19 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
718 1064
719 val64 = readq(&toc->toc_vpath_pointer[i]); 1065 val64 = readq(&toc->toc_vpath_pointer[i]);
720 1066
721 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); 1067 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1068 (bar0 + val64);
1069 vpath.vp_open = 0;
722 1070
723 hw_info->function_mode = 1071 status = __vxge_hw_vpath_pci_func_mode_get(&vpath, hw_info);
724 __vxge_hw_vpath_pci_func_mode_get(i, vpath_reg); 1072 if (status != VXGE_HW_OK)
1073 goto exit;
725 1074
726 status = __vxge_hw_vpath_fw_ver_get(i, vpath_reg, hw_info); 1075 status = __vxge_hw_vpath_fw_ver_get(&vpath, hw_info);
727 if (status != VXGE_HW_OK) 1076 if (status != VXGE_HW_OK)
728 goto exit; 1077 goto exit;
729 1078
730 status = __vxge_hw_vpath_card_info_get(i, vpath_reg, hw_info); 1079 status = __vxge_hw_vpath_card_info_get(&vpath, hw_info);
731 if (status != VXGE_HW_OK) 1080 if (status != VXGE_HW_OK)
732 goto exit; 1081 goto exit;
733 1082
@@ -735,14 +1084,15 @@ vxge_hw_device_hw_info_get(void __iomem *bar0,
735 } 1084 }
736 1085
737 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1086 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
738
739 if (!((hw_info->vpath_mask) & vxge_mBIT(i))) 1087 if (!((hw_info->vpath_mask) & vxge_mBIT(i)))
740 continue; 1088 continue;
741 1089
742 val64 = readq(&toc->toc_vpath_pointer[i]); 1090 val64 = readq(&toc->toc_vpath_pointer[i]);
743 vpath_reg = (struct vxge_hw_vpath_reg __iomem *)(bar0 + val64); 1091 vpath.vp_reg = (struct vxge_hw_vpath_reg __iomem *)
1092 (bar0 + val64);
1093 vpath.vp_open = 0;
744 1094
745 status = __vxge_hw_vpath_addr_get(i, vpath_reg, 1095 status = __vxge_hw_vpath_addr_get(&vpath,
746 hw_info->mac_addrs[i], 1096 hw_info->mac_addrs[i],
747 hw_info->mac_addr_masks[i]); 1097 hw_info->mac_addr_masks[i]);
748 if (status != VXGE_HW_OK) 1098 if (status != VXGE_HW_OK)
@@ -753,6 +1103,218 @@ exit:
753} 1103}
754 1104
755/* 1105/*
1106 * __vxge_hw_blockpool_destroy - Deallocates the block pool
1107 */
1108static void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
1109{
1110 struct __vxge_hw_device *hldev;
1111 struct list_head *p, *n;
1112 u16 ret;
1113
1114 if (blockpool == NULL) {
1115 ret = 1;
1116 goto exit;
1117 }
1118
1119 hldev = blockpool->hldev;
1120
1121 list_for_each_safe(p, n, &blockpool->free_block_list) {
1122 pci_unmap_single(hldev->pdev,
1123 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
1124 ((struct __vxge_hw_blockpool_entry *)p)->length,
1125 PCI_DMA_BIDIRECTIONAL);
1126
1127 vxge_os_dma_free(hldev->pdev,
1128 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
1129 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1130
1131 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1132 kfree(p);
1133 blockpool->pool_size--;
1134 }
1135
1136 list_for_each_safe(p, n, &blockpool->free_entry_list) {
1137 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1138 kfree((void *)p);
1139 }
1140 ret = 0;
1141exit:
1142 return;
1143}
1144
1145/*
1146 * __vxge_hw_blockpool_create - Create block pool
1147 */
1148static enum vxge_hw_status
1149__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
1150 struct __vxge_hw_blockpool *blockpool,
1151 u32 pool_size,
1152 u32 pool_max)
1153{
1154 u32 i;
1155 struct __vxge_hw_blockpool_entry *entry = NULL;
1156 void *memblock;
1157 dma_addr_t dma_addr;
1158 struct pci_dev *dma_handle;
1159 struct pci_dev *acc_handle;
1160 enum vxge_hw_status status = VXGE_HW_OK;
1161
1162 if (blockpool == NULL) {
1163 status = VXGE_HW_FAIL;
1164 goto blockpool_create_exit;
1165 }
1166
1167 blockpool->hldev = hldev;
1168 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
1169 blockpool->pool_size = 0;
1170 blockpool->pool_max = pool_max;
1171 blockpool->req_out = 0;
1172
1173 INIT_LIST_HEAD(&blockpool->free_block_list);
1174 INIT_LIST_HEAD(&blockpool->free_entry_list);
1175
1176 for (i = 0; i < pool_size + pool_max; i++) {
1177 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1178 GFP_KERNEL);
1179 if (entry == NULL) {
1180 __vxge_hw_blockpool_destroy(blockpool);
1181 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1182 goto blockpool_create_exit;
1183 }
1184 list_add(&entry->item, &blockpool->free_entry_list);
1185 }
1186
1187 for (i = 0; i < pool_size; i++) {
1188 memblock = vxge_os_dma_malloc(
1189 hldev->pdev,
1190 VXGE_HW_BLOCK_SIZE,
1191 &dma_handle,
1192 &acc_handle);
1193 if (memblock == NULL) {
1194 __vxge_hw_blockpool_destroy(blockpool);
1195 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1196 goto blockpool_create_exit;
1197 }
1198
1199 dma_addr = pci_map_single(hldev->pdev, memblock,
1200 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
1201 if (unlikely(pci_dma_mapping_error(hldev->pdev,
1202 dma_addr))) {
1203 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
1204 __vxge_hw_blockpool_destroy(blockpool);
1205 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1206 goto blockpool_create_exit;
1207 }
1208
1209 if (!list_empty(&blockpool->free_entry_list))
1210 entry = (struct __vxge_hw_blockpool_entry *)
1211 list_first_entry(&blockpool->free_entry_list,
1212 struct __vxge_hw_blockpool_entry,
1213 item);
1214
1215 if (entry == NULL)
1216 entry =
1217 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
1218 GFP_KERNEL);
1219 if (entry != NULL) {
1220 list_del(&entry->item);
1221 entry->length = VXGE_HW_BLOCK_SIZE;
1222 entry->memblock = memblock;
1223 entry->dma_addr = dma_addr;
1224 entry->acc_handle = acc_handle;
1225 entry->dma_handle = dma_handle;
1226 list_add(&entry->item,
1227 &blockpool->free_block_list);
1228 blockpool->pool_size++;
1229 } else {
1230 __vxge_hw_blockpool_destroy(blockpool);
1231 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1232 goto blockpool_create_exit;
1233 }
1234 }
1235
1236blockpool_create_exit:
1237 return status;
1238}
1239
1240/*
1241 * __vxge_hw_device_fifo_config_check - Check fifo configuration.
1242 * Check the fifo configuration
1243 */
1244static enum vxge_hw_status
1245__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config)
1246{
1247 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) ||
1248 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS))
1249 return VXGE_HW_BADCFG_FIFO_BLOCKS;
1250
1251 return VXGE_HW_OK;
1252}
1253
1254/*
1255 * __vxge_hw_device_vpath_config_check - Check vpath configuration.
1256 * Check the vpath configuration
1257 */
1258static enum vxge_hw_status
1259__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config)
1260{
1261 enum vxge_hw_status status;
1262
1263 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) ||
1264 (vp_config->min_bandwidth > VXGE_HW_VPATH_BANDWIDTH_MAX))
1265 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH;
1266
1267 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo);
1268 if (status != VXGE_HW_OK)
1269 return status;
1270
1271 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) &&
1272 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1273 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1274 return VXGE_HW_BADCFG_VPATH_MTU;
1275
1276 if ((vp_config->rpa_strip_vlan_tag !=
1277 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) &&
1278 (vp_config->rpa_strip_vlan_tag !=
1279 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1280 (vp_config->rpa_strip_vlan_tag !=
1281 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1282 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1283
1284 return VXGE_HW_OK;
1285}
1286
1287/*
1288 * __vxge_hw_device_config_check - Check device configuration.
1289 * Check the device configuration
1290 */
1291static enum vxge_hw_status
1292__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config)
1293{
1294 u32 i;
1295 enum vxge_hw_status status;
1296
1297 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) &&
1298 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) &&
1299 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) &&
1300 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF))
1301 return VXGE_HW_BADCFG_INTR_MODE;
1302
1303 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) &&
1304 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE))
1305 return VXGE_HW_BADCFG_RTS_MAC_EN;
1306
1307 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1308 status = __vxge_hw_device_vpath_config_check(
1309 &new_config->vp_config[i]);
1310 if (status != VXGE_HW_OK)
1311 return status;
1312 }
1313
1314 return VXGE_HW_OK;
1315}
1316
1317/*
756 * vxge_hw_device_initialize - Initialize Titan device. 1318 * vxge_hw_device_initialize - Initialize Titan device.
757 * Initialize Titan device. Note that all the arguments of this public API 1319 * Initialize Titan device. Note that all the arguments of this public API
758 * are 'IN', including @hldev. Driver cooperates with 1320 * are 'IN', including @hldev. Driver cooperates with
@@ -776,14 +1338,12 @@ vxge_hw_device_initialize(
776 if (status != VXGE_HW_OK) 1338 if (status != VXGE_HW_OK)
777 goto exit; 1339 goto exit;
778 1340
779 hldev = (struct __vxge_hw_device *) 1341 hldev = vzalloc(sizeof(struct __vxge_hw_device));
780 vmalloc(sizeof(struct __vxge_hw_device));
781 if (hldev == NULL) { 1342 if (hldev == NULL) {
782 status = VXGE_HW_ERR_OUT_OF_MEMORY; 1343 status = VXGE_HW_ERR_OUT_OF_MEMORY;
783 goto exit; 1344 goto exit;
784 } 1345 }
785 1346
786 memset(hldev, 0, sizeof(struct __vxge_hw_device));
787 hldev->magic = VXGE_HW_DEVICE_MAGIC; 1347 hldev->magic = VXGE_HW_DEVICE_MAGIC;
788 1348
789 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL); 1349 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_ALL);
@@ -806,7 +1366,6 @@ vxge_hw_device_initialize(
806 vfree(hldev); 1366 vfree(hldev);
807 goto exit; 1367 goto exit;
808 } 1368 }
809 __vxge_hw_device_id_get(hldev);
810 1369
811 __vxge_hw_device_host_info_get(hldev); 1370 __vxge_hw_device_host_info_get(hldev);
812 1371
@@ -814,7 +1373,6 @@ vxge_hw_device_initialize(
814 nblocks++; 1373 nblocks++;
815 1374
816 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1375 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
817
818 if (!(hldev->vpath_assignments & vxge_mBIT(i))) 1376 if (!(hldev->vpath_assignments & vxge_mBIT(i)))
819 continue; 1377 continue;
820 1378
@@ -839,7 +1397,6 @@ vxge_hw_device_initialize(
839 } 1397 }
840 1398
841 status = __vxge_hw_device_initialize(hldev); 1399 status = __vxge_hw_device_initialize(hldev);
842
843 if (status != VXGE_HW_OK) { 1400 if (status != VXGE_HW_OK) {
844 vxge_hw_device_terminate(hldev); 1401 vxge_hw_device_terminate(hldev);
845 goto exit; 1402 goto exit;
@@ -865,6 +1422,242 @@ vxge_hw_device_terminate(struct __vxge_hw_device *hldev)
865} 1422}
866 1423
867/* 1424/*
1425 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
1426 * and offset and perform an operation
1427 */
1428static enum vxge_hw_status
1429__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
1430 u32 operation, u32 offset, u64 *stat)
1431{
1432 u64 val64;
1433 enum vxge_hw_status status = VXGE_HW_OK;
1434 struct vxge_hw_vpath_reg __iomem *vp_reg;
1435
1436 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1437 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1438 goto vpath_stats_access_exit;
1439 }
1440
1441 vp_reg = vpath->vp_reg;
1442
1443 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
1444 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
1445 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
1446
1447 status = __vxge_hw_pio_mem_write64(val64,
1448 &vp_reg->xmac_stats_access_cmd,
1449 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
1450 vpath->hldev->config.device_poll_millis);
1451 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
1452 *stat = readq(&vp_reg->xmac_stats_access_data);
1453 else
1454 *stat = 0;
1455
1456vpath_stats_access_exit:
1457 return status;
1458}
1459
1460/*
1461 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
1462 */
1463static enum vxge_hw_status
1464__vxge_hw_vpath_xmac_tx_stats_get(struct __vxge_hw_virtualpath *vpath,
1465 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
1466{
1467 u64 *val64;
1468 int i;
1469 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
1470 enum vxge_hw_status status = VXGE_HW_OK;
1471
1472 val64 = (u64 *)vpath_tx_stats;
1473
1474 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1475 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1476 goto exit;
1477 }
1478
1479 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
1480 status = __vxge_hw_vpath_stats_access(vpath,
1481 VXGE_HW_STATS_OP_READ,
1482 offset, val64);
1483 if (status != VXGE_HW_OK)
1484 goto exit;
1485 offset++;
1486 val64++;
1487 }
1488exit:
1489 return status;
1490}
1491
1492/*
1493 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
1494 */
1495static enum vxge_hw_status
1496__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
1497 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
1498{
1499 u64 *val64;
1500 enum vxge_hw_status status = VXGE_HW_OK;
1501 int i;
1502 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
1503 val64 = (u64 *) vpath_rx_stats;
1504
1505 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1506 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1507 goto exit;
1508 }
1509 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
1510 status = __vxge_hw_vpath_stats_access(vpath,
1511 VXGE_HW_STATS_OP_READ,
1512 offset >> 3, val64);
1513 if (status != VXGE_HW_OK)
1514 goto exit;
1515
1516 offset += 8;
1517 val64++;
1518 }
1519exit:
1520 return status;
1521}
1522
1523/*
1524 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
1525 */
1526static enum vxge_hw_status
1527__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
1528 struct vxge_hw_vpath_stats_hw_info *hw_stats)
1529{
1530 u64 val64;
1531 enum vxge_hw_status status = VXGE_HW_OK;
1532 struct vxge_hw_vpath_reg __iomem *vp_reg;
1533
1534 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
1535 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
1536 goto exit;
1537 }
1538 vp_reg = vpath->vp_reg;
1539
1540 val64 = readq(&vp_reg->vpath_debug_stats0);
1541 hw_stats->ini_num_mwr_sent =
1542 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
1543
1544 val64 = readq(&vp_reg->vpath_debug_stats1);
1545 hw_stats->ini_num_mrd_sent =
1546 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
1547
1548 val64 = readq(&vp_reg->vpath_debug_stats2);
1549 hw_stats->ini_num_cpl_rcvd =
1550 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
1551
1552 val64 = readq(&vp_reg->vpath_debug_stats3);
1553 hw_stats->ini_num_mwr_byte_sent =
1554 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
1555
1556 val64 = readq(&vp_reg->vpath_debug_stats4);
1557 hw_stats->ini_num_cpl_byte_rcvd =
1558 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
1559
1560 val64 = readq(&vp_reg->vpath_debug_stats5);
1561 hw_stats->wrcrdtarb_xoff =
1562 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
1563
1564 val64 = readq(&vp_reg->vpath_debug_stats6);
1565 hw_stats->rdcrdtarb_xoff =
1566 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
1567
1568 val64 = readq(&vp_reg->vpath_genstats_count01);
1569 hw_stats->vpath_genstats_count0 =
1570 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
1571 val64);
1572
1573 val64 = readq(&vp_reg->vpath_genstats_count01);
1574 hw_stats->vpath_genstats_count1 =
1575 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
1576 val64);
1577
1578 val64 = readq(&vp_reg->vpath_genstats_count23);
1579 hw_stats->vpath_genstats_count2 =
1580 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
1581 val64);
1582
1583 val64 = readq(&vp_reg->vpath_genstats_count01);
1584 hw_stats->vpath_genstats_count3 =
1585 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
1586 val64);
1587
1588 val64 = readq(&vp_reg->vpath_genstats_count4);
1589 hw_stats->vpath_genstats_count4 =
1590 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
1591 val64);
1592
1593 val64 = readq(&vp_reg->vpath_genstats_count5);
1594 hw_stats->vpath_genstats_count5 =
1595 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
1596 val64);
1597
1598 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
1599 if (status != VXGE_HW_OK)
1600 goto exit;
1601
1602 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
1603 if (status != VXGE_HW_OK)
1604 goto exit;
1605
1606 VXGE_HW_VPATH_STATS_PIO_READ(
1607 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
1608
1609 hw_stats->prog_event_vnum0 =
1610 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
1611
1612 hw_stats->prog_event_vnum1 =
1613 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
1614
1615 VXGE_HW_VPATH_STATS_PIO_READ(
1616 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
1617
1618 hw_stats->prog_event_vnum2 =
1619 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
1620
1621 hw_stats->prog_event_vnum3 =
1622 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
1623
1624 val64 = readq(&vp_reg->rx_multi_cast_stats);
1625 hw_stats->rx_multi_cast_frame_discard =
1626 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
1627
1628 val64 = readq(&vp_reg->rx_frm_transferred);
1629 hw_stats->rx_frm_transferred =
1630 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
1631
1632 val64 = readq(&vp_reg->rxd_returned);
1633 hw_stats->rxd_returned =
1634 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
1635
1636 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
1637 hw_stats->rx_mpa_len_fail_frms =
1638 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
1639 hw_stats->rx_mpa_mrk_fail_frms =
1640 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
1641 hw_stats->rx_mpa_crc_fail_frms =
1642 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
1643
1644 val64 = readq(&vp_reg->dbg_stats_rx_fau);
1645 hw_stats->rx_permitted_frms =
1646 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
1647 hw_stats->rx_vp_reset_discarded_frms =
1648 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
1649 hw_stats->rx_wol_frms =
1650 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
1651
1652 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
1653 hw_stats->tx_vp_reset_discarded_frms =
1654 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
1655 val64);
1656exit:
1657 return status;
1658}
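
The VXGE_HW_VPATH_STATS_PIO_READ() helper used in __vxge_hw_vpath_stats_get() above is not part of this hunk; judging from the call sites it is assumed to be a thin wrapper around __vxge_hw_vpath_stats_access() that fills val64 and bails out on error, roughly (sketch only, not taken from this patch):

	/* Sketch of the assumed helper; vpath, val64, status and the exit
	 * label come from the enclosing function, as in
	 * __vxge_hw_vpath_stats_get() above. */
	#define VXGE_HW_VPATH_STATS_PIO_READ(offset) {			\
		status = __vxge_hw_vpath_stats_access(vpath,		\
					VXGE_HW_STATS_OP_READ,		\
					offset, &val64);		\
		if (status != VXGE_HW_OK)				\
			goto exit;					\
	}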
1659
1660/*
868 * vxge_hw_device_stats_get - Get the device hw statistics. 1661 * vxge_hw_device_stats_get - Get the device hw statistics.
869 * Returns the vpath h/w stats for the device. 1662 * Returns the vpath h/w stats for the device.
870 */ 1663 */
@@ -876,7 +1669,6 @@ vxge_hw_device_stats_get(struct __vxge_hw_device *hldev,
876 enum vxge_hw_status status = VXGE_HW_OK; 1669 enum vxge_hw_status status = VXGE_HW_OK;
877 1670
878 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 1671 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
879
880 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) || 1672 if (!(hldev->vpaths_deployed & vxge_mBIT(i)) ||
881 (hldev->virtual_paths[i].vp_open == 1673 (hldev->virtual_paths[i].vp_open ==
882 VXGE_HW_VP_NOT_OPEN)) 1674 VXGE_HW_VP_NOT_OPEN))
@@ -1031,7 +1823,6 @@ vxge_hw_device_xmac_stats_get(struct __vxge_hw_device *hldev,
1031 1823
1032 status = vxge_hw_device_xmac_aggr_stats_get(hldev, 1824 status = vxge_hw_device_xmac_aggr_stats_get(hldev,
1033 0, &xmac_stats->aggr_stats[0]); 1825 0, &xmac_stats->aggr_stats[0]);
1034
1035 if (status != VXGE_HW_OK) 1826 if (status != VXGE_HW_OK)
1036 goto exit; 1827 goto exit;
1037 1828
@@ -1165,7 +1956,6 @@ exit:
1165 * It can be used to set or reset Pause frame generation or reception 1956 * It can be used to set or reset Pause frame generation or reception
1166 * support of the NIC. 1957 * support of the NIC.
1167 */ 1958 */
1168
1169enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev, 1959enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1170 u32 port, u32 tx, u32 rx) 1960 u32 port, u32 tx, u32 rx)
1171{ 1961{
@@ -1407,190 +2197,359 @@ exit:
1407} 2197}
1408 2198
1409/* 2199/*
1410 * __vxge_hw_ring_create - Create a Ring 2200 * __vxge_hw_channel_allocate - Allocate memory for channel
1411 * This function creates the Ring and initializes it. 2201 * This function allocates the required memory for the channel and various arrays
1412 * 2202 * in the channel
1413 */ 2203 */
1414static enum vxge_hw_status 2204static struct __vxge_hw_channel *
1415__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp, 2205__vxge_hw_channel_allocate(struct __vxge_hw_vpath_handle *vph,
1416 struct vxge_hw_ring_attr *attr) 2206 enum __vxge_hw_channel_type type,
2207 u32 length, u32 per_dtr_space,
2208 void *userdata)
1417{ 2209{
1418 enum vxge_hw_status status = VXGE_HW_OK; 2210 struct __vxge_hw_channel *channel;
1419 struct __vxge_hw_ring *ring;
1420 u32 ring_length;
1421 struct vxge_hw_ring_config *config;
1422 struct __vxge_hw_device *hldev; 2211 struct __vxge_hw_device *hldev;
2212 int size = 0;
1423 u32 vp_id; 2213 u32 vp_id;
1424 struct vxge_hw_mempool_cbs ring_mp_callback;
1425 2214
1426 if ((vp == NULL) || (attr == NULL)) { 2215 hldev = vph->vpath->hldev;
2216 vp_id = vph->vpath->vp_id;
2217
2218 switch (type) {
2219 case VXGE_HW_CHANNEL_TYPE_FIFO:
2220 size = sizeof(struct __vxge_hw_fifo);
2221 break;
2222 case VXGE_HW_CHANNEL_TYPE_RING:
2223 size = sizeof(struct __vxge_hw_ring);
2224 break;
2225 default:
2226 break;
2227 }
2228
2229 channel = kzalloc(size, GFP_KERNEL);
2230 if (channel == NULL)
2231 goto exit0;
2232 INIT_LIST_HEAD(&channel->item);
2233
2234 channel->common_reg = hldev->common_reg;
2235 channel->first_vp_id = hldev->first_vp_id;
2236 channel->type = type;
2237 channel->devh = hldev;
2238 channel->vph = vph;
2239 channel->userdata = userdata;
2240 channel->per_dtr_space = per_dtr_space;
2241 channel->length = length;
2242 channel->vp_id = vp_id;
2243
2244 channel->work_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2245 if (channel->work_arr == NULL)
2246 goto exit1;
2247
2248 channel->free_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2249 if (channel->free_arr == NULL)
2250 goto exit1;
2251 channel->free_ptr = length;
2252
2253 channel->reserve_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2254 if (channel->reserve_arr == NULL)
2255 goto exit1;
2256 channel->reserve_ptr = length;
2257 channel->reserve_top = 0;
2258
2259 channel->orig_arr = kzalloc(sizeof(void *)*length, GFP_KERNEL);
2260 if (channel->orig_arr == NULL)
2261 goto exit1;
2262
2263 return channel;
2264exit1:
2265 __vxge_hw_channel_free(channel);
2266
2267exit0:
2268 return NULL;
2269}
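
__vxge_hw_channel_free(), called on the error path above, is not shown in this hunk; it is assumed to simply release the channel and the four work arrays allocated here, roughly (sketch):

	/* Sketch of the assumed counterpart to __vxge_hw_channel_allocate();
	 * kfree(NULL) is a no-op, so a partially allocated channel is safe
	 * to pass in from the exit1 path above. */
	static void __vxge_hw_channel_free(struct __vxge_hw_channel *channel)
	{
		kfree(channel->work_arr);
		kfree(channel->free_arr);
		kfree(channel->reserve_arr);
		kfree(channel->orig_arr);
		kfree(channel);
	}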
2270
2271/*
2272 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
2273 * Adds a block to block pool
2274 */
2275static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
2276 void *block_addr,
2277 u32 length,
2278 struct pci_dev *dma_h,
2279 struct pci_dev *acc_handle)
2280{
2281 struct __vxge_hw_blockpool *blockpool;
2282 struct __vxge_hw_blockpool_entry *entry = NULL;
2283 dma_addr_t dma_addr;
2284 enum vxge_hw_status status = VXGE_HW_OK;
2285 u32 req_out;
2286
2287 blockpool = &devh->block_pool;
2288
2289 if (block_addr == NULL) {
2290 blockpool->req_out--;
1427 status = VXGE_HW_FAIL; 2291 status = VXGE_HW_FAIL;
1428 goto exit; 2292 goto exit;
1429 } 2293 }
1430 2294
1431 hldev = vp->vpath->hldev; 2295 dma_addr = pci_map_single(devh->pdev, block_addr, length,
1432 vp_id = vp->vpath->vp_id; 2296 PCI_DMA_BIDIRECTIONAL);
1433 2297
1434 config = &hldev->config.vp_config[vp_id].ring; 2298 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
2299 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
2300 blockpool->req_out--;
2301 status = VXGE_HW_FAIL;
2302 goto exit;
2303 }
1435 2304
1436 ring_length = config->ring_blocks * 2305 if (!list_empty(&blockpool->free_entry_list))
1437 vxge_hw_ring_rxds_per_block_get(config->buffer_mode); 2306 entry = (struct __vxge_hw_blockpool_entry *)
2307 list_first_entry(&blockpool->free_entry_list,
2308 struct __vxge_hw_blockpool_entry,
2309 item);
1438 2310
1439 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp, 2311 if (entry == NULL)
1440 VXGE_HW_CHANNEL_TYPE_RING, 2312 entry = vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
1441 ring_length, 2313 else
1442 attr->per_rxd_space, 2314 list_del(&entry->item);
1443 attr->userdata);
1444 2315
1445 if (ring == NULL) { 2316 if (entry != NULL) {
2317 entry->length = length;
2318 entry->memblock = block_addr;
2319 entry->dma_addr = dma_addr;
2320 entry->acc_handle = acc_handle;
2321 entry->dma_handle = dma_h;
2322 list_add(&entry->item, &blockpool->free_block_list);
2323 blockpool->pool_size++;
2324 status = VXGE_HW_OK;
2325 } else
1446 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2326 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1447 goto exit;
1448 }
1449 2327
1450 vp->vpath->ringh = ring; 2328 blockpool->req_out--;
1451 ring->vp_id = vp_id;
1452 ring->vp_reg = vp->vpath->vp_reg;
1453 ring->common_reg = hldev->common_reg;
1454 ring->stats = &vp->vpath->sw_stats->ring_stats;
1455 ring->config = config;
1456 ring->callback = attr->callback;
1457 ring->rxd_init = attr->rxd_init;
1458 ring->rxd_term = attr->rxd_term;
1459 ring->buffer_mode = config->buffer_mode;
1460 ring->rxds_limit = config->rxds_limit;
1461 2329
1462 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode); 2330 req_out = blockpool->req_out;
1463 ring->rxd_priv_size = 2331exit:
1464 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space; 2332 return;
1465 ring->per_rxd_space = attr->per_rxd_space; 2333}
1466 2334
1467 ring->rxd_priv_size = 2335static inline void
1468 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) / 2336vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh, unsigned long size)
1469 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE; 2337{
2338 gfp_t flags;
2339 void *vaddr;
1470 2340
1471 /* how many RxDs can fit into one block. Depends on configured 2341 if (in_interrupt())
1472 * buffer_mode. */ 2342 flags = GFP_ATOMIC | GFP_DMA;
1473 ring->rxds_per_block = 2343 else
1474 vxge_hw_ring_rxds_per_block_get(config->buffer_mode); 2344 flags = GFP_KERNEL | GFP_DMA;
1475 2345
1476 /* calculate actual RxD block private size */ 2346 vaddr = kmalloc((size), flags);
1477 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
1478 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
1479 ring->mempool = __vxge_hw_mempool_create(hldev,
1480 VXGE_HW_BLOCK_SIZE,
1481 VXGE_HW_BLOCK_SIZE,
1482 ring->rxdblock_priv_size,
1483 ring->config->ring_blocks,
1484 ring->config->ring_blocks,
1485 &ring_mp_callback,
1486 ring);
1487 2347
1488 if (ring->mempool == NULL) { 2348 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
1489 __vxge_hw_ring_delete(vp); 2349}
1490 return VXGE_HW_ERR_OUT_OF_MEMORY;
1491 }
1492 2350
1493 status = __vxge_hw_channel_initialize(&ring->channel); 2351/*
1494 if (status != VXGE_HW_OK) { 2352 * __vxge_hw_blockpool_blocks_add - Request additional blocks
1495 __vxge_hw_ring_delete(vp); 2353 */
1496 goto exit; 2354static
2355void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
2356{
2357 u32 nreq = 0, i;
2358
2359 if ((blockpool->pool_size + blockpool->req_out) <
2360 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
2361 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
2362 blockpool->req_out += nreq;
1497 } 2363 }
1498 2364
1499 /* Note: 2365 for (i = 0; i < nreq; i++)
1500 * Specifying rxd_init callback means two things: 2366 vxge_os_dma_malloc_async(
1501 * 1) rxds need to be initialized by driver at channel-open time; 2367 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
1502 * 2) rxds need to be posted at channel-open time 2368 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
1503 * (that's what the initial_replenish() below does) 2369}
1504 * Currently we don't have a case when the 1) is done without the 2). 2370
1505 */ 2371/*
1506 if (ring->rxd_init) { 2372 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
1507 status = vxge_hw_ring_replenish(ring); 2373 * Allocates a block of memory of given size, either from block pool
1508 if (status != VXGE_HW_OK) { 2374 * or by calling vxge_os_dma_malloc()
1509 __vxge_hw_ring_delete(vp); 2375 */
2376static void *__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
2377 struct vxge_hw_mempool_dma *dma_object)
2378{
2379 struct __vxge_hw_blockpool_entry *entry = NULL;
2380 struct __vxge_hw_blockpool *blockpool;
2381 void *memblock = NULL;
2382 enum vxge_hw_status status = VXGE_HW_OK;
2383
2384 blockpool = &devh->block_pool;
2385
2386 if (size != blockpool->block_size) {
2387
2388 memblock = vxge_os_dma_malloc(devh->pdev, size,
2389 &dma_object->handle,
2390 &dma_object->acc_handle);
2391
2392 if (memblock == NULL) {
2393 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1510 goto exit; 2394 goto exit;
1511 } 2395 }
1512 }
1513 2396
1514 /* initial replenish will increment the counter in its post() routine, 2397 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
1515 * we have to reset it */ 2398 PCI_DMA_BIDIRECTIONAL);
1516 ring->stats->common_stats.usage_cnt = 0; 2399
2400 if (unlikely(pci_dma_mapping_error(devh->pdev,
2401 dma_object->addr))) {
2402 vxge_os_dma_free(devh->pdev, memblock,
2403 &dma_object->acc_handle);
2404 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2405 goto exit;
2406 }
2407
2408 } else {
2409
2410 if (!list_empty(&blockpool->free_block_list))
2411 entry = (struct __vxge_hw_blockpool_entry *)
2412 list_first_entry(&blockpool->free_block_list,
2413 struct __vxge_hw_blockpool_entry,
2414 item);
2415
2416 if (entry != NULL) {
2417 list_del(&entry->item);
2418 dma_object->addr = entry->dma_addr;
2419 dma_object->handle = entry->dma_handle;
2420 dma_object->acc_handle = entry->acc_handle;
2421 memblock = entry->memblock;
2422
2423 list_add(&entry->item,
2424 &blockpool->free_entry_list);
2425 blockpool->pool_size--;
2426 }
2427
2428 if (memblock != NULL)
2429 __vxge_hw_blockpool_blocks_add(blockpool);
2430 }
1517exit: 2431exit:
1518 return status; 2432 return memblock;
1519} 2433}
1520 2434
1521/* 2435/*
1522 * __vxge_hw_ring_abort - Returns the RxD 2436 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
1523 * This function terminates the RxDs of ring
1524 */ 2437 */
1525static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring) 2438static void
2439__vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
1526{ 2440{
1527 void *rxdh; 2441 struct list_head *p, *n;
1528 struct __vxge_hw_channel *channel;
1529
1530 channel = &ring->channel;
1531 2442
1532 for (;;) { 2443 list_for_each_safe(p, n, &blockpool->free_block_list) {
1533 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1534 2444
1535 if (rxdh == NULL) 2445 if (blockpool->pool_size < blockpool->pool_max)
1536 break; 2446 break;
1537 2447
1538 vxge_hw_channel_dtr_complete(channel); 2448 pci_unmap_single(
2449 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
2450 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
2451 ((struct __vxge_hw_blockpool_entry *)p)->length,
2452 PCI_DMA_BIDIRECTIONAL);
1539 2453
1540 if (ring->rxd_term) 2454 vxge_os_dma_free(
1541 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED, 2455 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
1542 channel->userdata); 2456 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
2457 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
1543 2458
1544 vxge_hw_channel_dtr_free(channel, rxdh); 2459 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
1545 }
1546 2460
1547 return VXGE_HW_OK; 2461 list_add(p, &blockpool->free_entry_list);
2462
2463 blockpool->pool_size--;
2464
2465 }
1548} 2466}
1549 2467
1550/* 2468/*
1551 * __vxge_hw_ring_reset - Resets the ring 2469 * __vxge_hw_blockpool_free - Frees the memory allocated with
1552 * This function resets the ring during vpath reset operation 2470 * __vxge_hw_blockpool_malloc
1553 */ 2471 */
1554static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring) 2472static void __vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
2473 void *memblock, u32 size,
2474 struct vxge_hw_mempool_dma *dma_object)
1555{ 2475{
2476 struct __vxge_hw_blockpool_entry *entry = NULL;
2477 struct __vxge_hw_blockpool *blockpool;
1556 enum vxge_hw_status status = VXGE_HW_OK; 2478 enum vxge_hw_status status = VXGE_HW_OK;
1557 struct __vxge_hw_channel *channel;
1558 2479
1559 channel = &ring->channel; 2480 blockpool = &devh->block_pool;
1560 2481
1561 __vxge_hw_ring_abort(ring); 2482 if (size != blockpool->block_size) {
2483 pci_unmap_single(devh->pdev, dma_object->addr, size,
2484 PCI_DMA_BIDIRECTIONAL);
2485 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
2486 } else {
1562 2487
1563 status = __vxge_hw_channel_reset(channel); 2488 if (!list_empty(&blockpool->free_entry_list))
2489 entry = (struct __vxge_hw_blockpool_entry *)
2490 list_first_entry(&blockpool->free_entry_list,
2491 struct __vxge_hw_blockpool_entry,
2492 item);
1564 2493
1565 if (status != VXGE_HW_OK) 2494 if (entry == NULL)
1566 goto exit; 2495 entry = vmalloc(sizeof(
2496 struct __vxge_hw_blockpool_entry));
2497 else
2498 list_del(&entry->item);
1567 2499
1568 if (ring->rxd_init) { 2500 if (entry != NULL) {
1569 status = vxge_hw_ring_replenish(ring); 2501 entry->length = size;
1570 if (status != VXGE_HW_OK) 2502 entry->memblock = memblock;
1571 goto exit; 2503 entry->dma_addr = dma_object->addr;
2504 entry->acc_handle = dma_object->acc_handle;
2505 entry->dma_handle = dma_object->handle;
2506 list_add(&entry->item,
2507 &blockpool->free_block_list);
2508 blockpool->pool_size++;
2509 status = VXGE_HW_OK;
2510 } else
2511 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2512
2513 if (status == VXGE_HW_OK)
2514 __vxge_hw_blockpool_blocks_remove(blockpool);
1572 } 2515 }
1573exit:
1574 return status;
1575} 2516}
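
With __vxge_hw_blockpool_malloc() and __vxge_hw_blockpool_free() in place, a caller's contract is simple: request a block, hand dma_object->addr to the hardware, and return the block when done. A hypothetical caller (example_use_blockpool and its local names are illustrative, not part of this patch) would look roughly like:

	/* Hypothetical usage sketch of the block pool helpers above. */
	static enum vxge_hw_status
	example_use_blockpool(struct __vxge_hw_device *devh)
	{
		struct vxge_hw_mempool_dma dma_obj;
		void *blk;

		/* Allocate from the pool (or directly, when size differs
		 * from the pool's block_size). */
		blk = __vxge_hw_blockpool_malloc(devh, VXGE_HW_BLOCK_SIZE,
						 &dma_obj);
		if (blk == NULL)
			return VXGE_HW_ERR_OUT_OF_MEMORY;

		/* dma_obj.addr is already DMA-mapped and can be programmed
		 * into the NIC's descriptors. */

		__vxge_hw_blockpool_free(devh, blk, VXGE_HW_BLOCK_SIZE,
					 &dma_obj);
		return VXGE_HW_OK;
	}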
1576 2517
1577/* 2518/*
1578 * __vxge_hw_ring_delete - Removes the ring 2519 * __vxge_hw_mempool_destroy
1579 * This function frees up the memory pool and removes the ring
1580 */ 2520 */
1581static enum vxge_hw_status __vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp) 2521static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool)
1582{ 2522{
1583 struct __vxge_hw_ring *ring = vp->vpath->ringh; 2523 u32 i, j;
2524 struct __vxge_hw_device *devh = mempool->devh;
1584 2525
1585 __vxge_hw_ring_abort(ring); 2526 for (i = 0; i < mempool->memblocks_allocated; i++) {
2527 struct vxge_hw_mempool_dma *dma_object;
1586 2528
1587 if (ring->mempool) 2529 vxge_assert(mempool->memblocks_arr[i]);
1588 __vxge_hw_mempool_destroy(ring->mempool); 2530 vxge_assert(mempool->memblocks_dma_arr + i);
1589 2531
1590 vp->vpath->ringh = NULL; 2532 dma_object = mempool->memblocks_dma_arr + i;
1591 __vxge_hw_channel_free(&ring->channel);
1592 2533
1593 return VXGE_HW_OK; 2534 for (j = 0; j < mempool->items_per_memblock; j++) {
2535 u32 index = i * mempool->items_per_memblock + j;
2536
2537 /* to skip last partially filled(if any) memblock */
2538 if (index >= mempool->items_current)
2539 break;
2540 }
2541
2542 vfree(mempool->memblocks_priv_arr[i]);
2543
2544 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i],
2545 mempool->memblock_size, dma_object);
2546 }
2547
2548 vfree(mempool->items_arr);
2549 vfree(mempool->memblocks_dma_arr);
2550 vfree(mempool->memblocks_priv_arr);
2551 vfree(mempool->memblocks_arr);
2552 vfree(mempool);
1594} 2553}
1595 2554
1596/* 2555/*
@@ -1627,15 +2586,12 @@ __vxge_hw_mempool_grow(struct vxge_hw_mempool *mempool, u32 num_allocate,
1627 * allocate new memblock and its private part at once. 2586 * allocate new memblock and its private part at once.
1628 * This helps to minimize memory usage a lot. */ 2587 * This helps to minimize memory usage a lot. */
1629 mempool->memblocks_priv_arr[i] = 2588 mempool->memblocks_priv_arr[i] =
1630 vmalloc(mempool->items_priv_size * n_items); 2589 vzalloc(mempool->items_priv_size * n_items);
1631 if (mempool->memblocks_priv_arr[i] == NULL) { 2590 if (mempool->memblocks_priv_arr[i] == NULL) {
1632 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2591 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1633 goto exit; 2592 goto exit;
1634 } 2593 }
1635 2594
1636 memset(mempool->memblocks_priv_arr[i], 0,
1637 mempool->items_priv_size * n_items);
1638
1639 /* allocate DMA-capable memblock */ 2595 /* allocate DMA-capable memblock */
1640 mempool->memblocks_arr[i] = 2596 mempool->memblocks_arr[i] =
1641 __vxge_hw_blockpool_malloc(mempool->devh, 2597 __vxge_hw_blockpool_malloc(mempool->devh,
@@ -1686,16 +2642,15 @@ exit:
1686 * with size enough to hold %items_initial number of items. Memory is 2642 * with size enough to hold %items_initial number of items. Memory is
1687 * DMA-able but client must map/unmap before interoperating with the device. 2643 * DMA-able but client must map/unmap before interoperating with the device.
1688 */ 2644 */
1689static struct vxge_hw_mempool* 2645static struct vxge_hw_mempool *
1690__vxge_hw_mempool_create( 2646__vxge_hw_mempool_create(struct __vxge_hw_device *devh,
1691 struct __vxge_hw_device *devh, 2647 u32 memblock_size,
1692 u32 memblock_size, 2648 u32 item_size,
1693 u32 item_size, 2649 u32 items_priv_size,
1694 u32 items_priv_size, 2650 u32 items_initial,
1695 u32 items_initial, 2651 u32 items_max,
1696 u32 items_max, 2652 struct vxge_hw_mempool_cbs *mp_callback,
1697 struct vxge_hw_mempool_cbs *mp_callback, 2653 void *userdata)
1698 void *userdata)
1699{ 2654{
1700 enum vxge_hw_status status = VXGE_HW_OK; 2655 enum vxge_hw_status status = VXGE_HW_OK;
1701 u32 memblocks_to_allocate; 2656 u32 memblocks_to_allocate;
@@ -1707,13 +2662,11 @@ __vxge_hw_mempool_create(
1707 goto exit; 2662 goto exit;
1708 } 2663 }
1709 2664
1710 mempool = (struct vxge_hw_mempool *) 2665 mempool = vzalloc(sizeof(struct vxge_hw_mempool));
1711 vmalloc(sizeof(struct vxge_hw_mempool));
1712 if (mempool == NULL) { 2666 if (mempool == NULL) {
1713 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2667 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1714 goto exit; 2668 goto exit;
1715 } 2669 }
1716 memset(mempool, 0, sizeof(struct vxge_hw_mempool));
1717 2670
1718 mempool->devh = devh; 2671 mempool->devh = devh;
1719 mempool->memblock_size = memblock_size; 2672 mempool->memblock_size = memblock_size;
@@ -1733,53 +2686,43 @@ __vxge_hw_mempool_create(
1733 2686
1734 /* allocate array of memblocks */ 2687 /* allocate array of memblocks */
1735 mempool->memblocks_arr = 2688 mempool->memblocks_arr =
1736 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); 2689 vzalloc(sizeof(void *) * mempool->memblocks_max);
1737 if (mempool->memblocks_arr == NULL) { 2690 if (mempool->memblocks_arr == NULL) {
1738 __vxge_hw_mempool_destroy(mempool); 2691 __vxge_hw_mempool_destroy(mempool);
1739 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2692 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1740 mempool = NULL; 2693 mempool = NULL;
1741 goto exit; 2694 goto exit;
1742 } 2695 }
1743 memset(mempool->memblocks_arr, 0,
1744 sizeof(void *) * mempool->memblocks_max);
1745 2696
1746 /* allocate array of private parts of items per memblocks */ 2697 /* allocate array of private parts of items per memblocks */
1747 mempool->memblocks_priv_arr = 2698 mempool->memblocks_priv_arr =
1748 (void **) vmalloc(sizeof(void *) * mempool->memblocks_max); 2699 vzalloc(sizeof(void *) * mempool->memblocks_max);
1749 if (mempool->memblocks_priv_arr == NULL) { 2700 if (mempool->memblocks_priv_arr == NULL) {
1750 __vxge_hw_mempool_destroy(mempool); 2701 __vxge_hw_mempool_destroy(mempool);
1751 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2702 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1752 mempool = NULL; 2703 mempool = NULL;
1753 goto exit; 2704 goto exit;
1754 } 2705 }
1755 memset(mempool->memblocks_priv_arr, 0,
1756 sizeof(void *) * mempool->memblocks_max);
1757 2706
1758 /* allocate array of memblocks DMA objects */ 2707 /* allocate array of memblocks DMA objects */
1759 mempool->memblocks_dma_arr = (struct vxge_hw_mempool_dma *) 2708 mempool->memblocks_dma_arr =
1760 vmalloc(sizeof(struct vxge_hw_mempool_dma) * 2709 vzalloc(sizeof(struct vxge_hw_mempool_dma) *
1761 mempool->memblocks_max); 2710 mempool->memblocks_max);
1762
1763 if (mempool->memblocks_dma_arr == NULL) { 2711 if (mempool->memblocks_dma_arr == NULL) {
1764 __vxge_hw_mempool_destroy(mempool); 2712 __vxge_hw_mempool_destroy(mempool);
1765 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2713 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1766 mempool = NULL; 2714 mempool = NULL;
1767 goto exit; 2715 goto exit;
1768 } 2716 }
1769 memset(mempool->memblocks_dma_arr, 0,
1770 sizeof(struct vxge_hw_mempool_dma) *
1771 mempool->memblocks_max);
1772 2717
1773 /* allocate hash array of items */ 2718 /* allocate hash array of items */
1774 mempool->items_arr = 2719 mempool->items_arr = vzalloc(sizeof(void *) * mempool->items_max);
1775 (void **) vmalloc(sizeof(void *) * mempool->items_max);
1776 if (mempool->items_arr == NULL) { 2720 if (mempool->items_arr == NULL) {
1777 __vxge_hw_mempool_destroy(mempool); 2721 __vxge_hw_mempool_destroy(mempool);
1778 status = VXGE_HW_ERR_OUT_OF_MEMORY; 2722 status = VXGE_HW_ERR_OUT_OF_MEMORY;
1779 mempool = NULL; 2723 mempool = NULL;
1780 goto exit; 2724 goto exit;
1781 } 2725 }
1782 memset(mempool->items_arr, 0, sizeof(void *) * mempool->items_max);
1783 2726
1784 /* calculate initial number of memblocks */ 2727 /* calculate initial number of memblocks */
1785 memblocks_to_allocate = (mempool->items_initial + 2728 memblocks_to_allocate = (mempool->items_initial +
@@ -1801,122 +2744,188 @@ exit:
1801} 2744}
1802 2745
1803/* 2746/*
1804 * vxge_hw_mempool_destroy 2747 * __vxge_hw_ring_abort - Returns the RxD
2748 * This function terminates the RxDs of ring
1805 */ 2749 */
1806static void __vxge_hw_mempool_destroy(struct vxge_hw_mempool *mempool) 2750static enum vxge_hw_status __vxge_hw_ring_abort(struct __vxge_hw_ring *ring)
1807{ 2751{
1808 u32 i, j; 2752 void *rxdh;
1809 struct __vxge_hw_device *devh = mempool->devh; 2753 struct __vxge_hw_channel *channel;
1810
1811 for (i = 0; i < mempool->memblocks_allocated; i++) {
1812 struct vxge_hw_mempool_dma *dma_object;
1813 2754
1814 vxge_assert(mempool->memblocks_arr[i]); 2755 channel = &ring->channel;
1815 vxge_assert(mempool->memblocks_dma_arr + i);
1816 2756
1817 dma_object = mempool->memblocks_dma_arr + i; 2757 for (;;) {
2758 vxge_hw_channel_dtr_try_complete(channel, &rxdh);
1818 2759
1819 for (j = 0; j < mempool->items_per_memblock; j++) { 2760 if (rxdh == NULL)
1820 u32 index = i * mempool->items_per_memblock + j; 2761 break;
1821 2762
1822 /* to skip last partially filled(if any) memblock */ 2763 vxge_hw_channel_dtr_complete(channel);
1823 if (index >= mempool->items_current)
1824 break;
1825 }
1826 2764
1827 vfree(mempool->memblocks_priv_arr[i]); 2765 if (ring->rxd_term)
2766 ring->rxd_term(rxdh, VXGE_HW_RXD_STATE_POSTED,
2767 channel->userdata);
1828 2768
1829 __vxge_hw_blockpool_free(devh, mempool->memblocks_arr[i], 2769 vxge_hw_channel_dtr_free(channel, rxdh);
1830 mempool->memblock_size, dma_object);
1831 } 2770 }
1832 2771
1833 vfree(mempool->items_arr); 2772 return VXGE_HW_OK;
2773}
1834 2774
1835 vfree(mempool->memblocks_dma_arr); 2775/*
2776 * __vxge_hw_ring_reset - Resets the ring
2777 * This function resets the ring during vpath reset operation
2778 */
2779static enum vxge_hw_status __vxge_hw_ring_reset(struct __vxge_hw_ring *ring)
2780{
2781 enum vxge_hw_status status = VXGE_HW_OK;
2782 struct __vxge_hw_channel *channel;
1836 2783
1837 vfree(mempool->memblocks_priv_arr); 2784 channel = &ring->channel;
1838 2785
1839 vfree(mempool->memblocks_arr); 2786 __vxge_hw_ring_abort(ring);
1840 2787
1841 vfree(mempool); 2788 status = __vxge_hw_channel_reset(channel);
2789
2790 if (status != VXGE_HW_OK)
2791 goto exit;
2792
2793 if (ring->rxd_init) {
2794 status = vxge_hw_ring_replenish(ring);
2795 if (status != VXGE_HW_OK)
2796 goto exit;
2797 }
2798exit:
2799 return status;
1842} 2800}
1843 2801
1844/* 2802/*
1845 * __vxge_hw_device_fifo_config_check - Check fifo configuration. 2803 * __vxge_hw_ring_delete - Removes the ring
1846 * Check the fifo configuration 2804 * This function frees up the memory pool and removes the ring
1847 */ 2805 */
1848enum vxge_hw_status 2806static enum vxge_hw_status
1849__vxge_hw_device_fifo_config_check(struct vxge_hw_fifo_config *fifo_config) 2807__vxge_hw_ring_delete(struct __vxge_hw_vpath_handle *vp)
1850{ 2808{
1851 if ((fifo_config->fifo_blocks < VXGE_HW_MIN_FIFO_BLOCKS) || 2809 struct __vxge_hw_ring *ring = vp->vpath->ringh;
1852 (fifo_config->fifo_blocks > VXGE_HW_MAX_FIFO_BLOCKS)) 2810
1853 return VXGE_HW_BADCFG_FIFO_BLOCKS; 2811 __vxge_hw_ring_abort(ring);
2812
2813 if (ring->mempool)
2814 __vxge_hw_mempool_destroy(ring->mempool);
2815
2816 vp->vpath->ringh = NULL;
2817 __vxge_hw_channel_free(&ring->channel);
1854 2818
1855 return VXGE_HW_OK; 2819 return VXGE_HW_OK;
1856} 2820}
1857 2821
1858/* 2822/*
1859 * __vxge_hw_device_vpath_config_check - Check vpath configuration. 2823 * __vxge_hw_ring_create - Create a Ring
1860 * Check the vpath configuration 2824 * This function creates the Ring and initializes it.
1861 */ 2825 */
1862static enum vxge_hw_status 2826static enum vxge_hw_status
1863__vxge_hw_device_vpath_config_check(struct vxge_hw_vp_config *vp_config) 2827__vxge_hw_ring_create(struct __vxge_hw_vpath_handle *vp,
2828 struct vxge_hw_ring_attr *attr)
1864{ 2829{
1865 enum vxge_hw_status status; 2830 enum vxge_hw_status status = VXGE_HW_OK;
2831 struct __vxge_hw_ring *ring;
2832 u32 ring_length;
2833 struct vxge_hw_ring_config *config;
2834 struct __vxge_hw_device *hldev;
2835 u32 vp_id;
2836 struct vxge_hw_mempool_cbs ring_mp_callback;
1866 2837
1867 if ((vp_config->min_bandwidth < VXGE_HW_VPATH_BANDWIDTH_MIN) || 2838 if ((vp == NULL) || (attr == NULL)) {
1868 (vp_config->min_bandwidth > 2839 status = VXGE_HW_FAIL;
1869 VXGE_HW_VPATH_BANDWIDTH_MAX)) 2840 goto exit;
1870 return VXGE_HW_BADCFG_VPATH_MIN_BANDWIDTH; 2841 }
1871 2842
1872 status = __vxge_hw_device_fifo_config_check(&vp_config->fifo); 2843 hldev = vp->vpath->hldev;
1873 if (status != VXGE_HW_OK) 2844 vp_id = vp->vpath->vp_id;
1874 return status;
1875 2845
1876 if ((vp_config->mtu != VXGE_HW_VPATH_USE_FLASH_DEFAULT_INITIAL_MTU) && 2846 config = &hldev->config.vp_config[vp_id].ring;
1877 ((vp_config->mtu < VXGE_HW_VPATH_MIN_INITIAL_MTU) ||
1878 (vp_config->mtu > VXGE_HW_VPATH_MAX_INITIAL_MTU)))
1879 return VXGE_HW_BADCFG_VPATH_MTU;
1880 2847
1881 if ((vp_config->rpa_strip_vlan_tag != 2848 ring_length = config->ring_blocks *
1882 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_USE_FLASH_DEFAULT) && 2849 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1883 (vp_config->rpa_strip_vlan_tag !=
1884 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_ENABLE) &&
1885 (vp_config->rpa_strip_vlan_tag !=
1886 VXGE_HW_VPATH_RPA_STRIP_VLAN_TAG_DISABLE))
1887 return VXGE_HW_BADCFG_VPATH_RPA_STRIP_VLAN_TAG;
1888 2850
1889 return VXGE_HW_OK; 2851 ring = (struct __vxge_hw_ring *)__vxge_hw_channel_allocate(vp,
1890} 2852 VXGE_HW_CHANNEL_TYPE_RING,
2853 ring_length,
2854 attr->per_rxd_space,
2855 attr->userdata);
2856 if (ring == NULL) {
2857 status = VXGE_HW_ERR_OUT_OF_MEMORY;
2858 goto exit;
2859 }
1891 2860
1892/* 2861 vp->vpath->ringh = ring;
1893 * __vxge_hw_device_config_check - Check device configuration. 2862 ring->vp_id = vp_id;
1894 * Check the device configuration 2863 ring->vp_reg = vp->vpath->vp_reg;
1895 */ 2864 ring->common_reg = hldev->common_reg;
1896enum vxge_hw_status 2865 ring->stats = &vp->vpath->sw_stats->ring_stats;
1897__vxge_hw_device_config_check(struct vxge_hw_device_config *new_config) 2866 ring->config = config;
1898{ 2867 ring->callback = attr->callback;
1899 u32 i; 2868 ring->rxd_init = attr->rxd_init;
1900 enum vxge_hw_status status; 2869 ring->rxd_term = attr->rxd_term;
2870 ring->buffer_mode = config->buffer_mode;
2871 ring->rxds_limit = config->rxds_limit;
1901 2872
1902 if ((new_config->intr_mode != VXGE_HW_INTR_MODE_IRQLINE) && 2873 ring->rxd_size = vxge_hw_ring_rxd_size_get(config->buffer_mode);
1903 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX) && 2874 ring->rxd_priv_size =
1904 (new_config->intr_mode != VXGE_HW_INTR_MODE_MSIX_ONE_SHOT) && 2875 sizeof(struct __vxge_hw_ring_rxd_priv) + attr->per_rxd_space;
1905 (new_config->intr_mode != VXGE_HW_INTR_MODE_DEF)) 2876 ring->per_rxd_space = attr->per_rxd_space;
1906 return VXGE_HW_BADCFG_INTR_MODE;
1907 2877
1908 if ((new_config->rts_mac_en != VXGE_HW_RTS_MAC_DISABLE) && 2878 ring->rxd_priv_size =
1909 (new_config->rts_mac_en != VXGE_HW_RTS_MAC_ENABLE)) 2879 ((ring->rxd_priv_size + VXGE_CACHE_LINE_SIZE - 1) /
1910 return VXGE_HW_BADCFG_RTS_MAC_EN; 2880 VXGE_CACHE_LINE_SIZE) * VXGE_CACHE_LINE_SIZE;
1911 2881
1912 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 2882 /* how many RxDs can fit into one block. Depends on configured
1913 status = __vxge_hw_device_vpath_config_check( 2883 * buffer_mode. */
1914 &new_config->vp_config[i]); 2884 ring->rxds_per_block =
1915 if (status != VXGE_HW_OK) 2885 vxge_hw_ring_rxds_per_block_get(config->buffer_mode);
1916 return status; 2886
2887 /* calculate actual RxD block private size */
2888 ring->rxdblock_priv_size = ring->rxd_priv_size * ring->rxds_per_block;
2889 ring_mp_callback.item_func_alloc = __vxge_hw_ring_mempool_item_alloc;
2890 ring->mempool = __vxge_hw_mempool_create(hldev,
2891 VXGE_HW_BLOCK_SIZE,
2892 VXGE_HW_BLOCK_SIZE,
2893 ring->rxdblock_priv_size,
2894 ring->config->ring_blocks,
2895 ring->config->ring_blocks,
2896 &ring_mp_callback,
2897 ring);
2898 if (ring->mempool == NULL) {
2899 __vxge_hw_ring_delete(vp);
2900 return VXGE_HW_ERR_OUT_OF_MEMORY;
1917 } 2901 }
1918 2902
1919 return VXGE_HW_OK; 2903 status = __vxge_hw_channel_initialize(&ring->channel);
2904 if (status != VXGE_HW_OK) {
2905 __vxge_hw_ring_delete(vp);
2906 goto exit;
2907 }
2908
2909 /* Note:
2910 * Specifying rxd_init callback means two things:
2911 * 1) rxds need to be initialized by driver at channel-open time;
2912 * 2) rxds need to be posted at channel-open time
2913 * (that's what the initial_replenish() below does)
2914 * Currently we don't have a case when the 1) is done without the 2).
2915 */
2916 if (ring->rxd_init) {
2917 status = vxge_hw_ring_replenish(ring);
2918 if (status != VXGE_HW_OK) {
2919 __vxge_hw_ring_delete(vp);
2920 goto exit;
2921 }
2922 }
2923
2924 /* initial replenish will increment the counter in its post() routine,
2925 * we have to reset it */
2926 ring->stats->common_stats.usage_cnt = 0;
2927exit:
2928 return status;
1920} 2929}
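
The cache-line rounding of rxd_priv_size in __vxge_hw_ring_create() above is the usual round-up-to-multiple idiom; assuming VXGE_CACHE_LINE_SIZE is a power of two, it is equivalent to the kernel's ALIGN() macro (sketch, not part of this patch):

	/* Equivalent rounding, assuming VXGE_CACHE_LINE_SIZE is a power of two. */
	ring->rxd_priv_size = ALIGN(sizeof(struct __vxge_hw_ring_rxd_priv) +
				    attr->per_rxd_space, VXGE_CACHE_LINE_SIZE);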
1921 2930
1922/* 2931/*
@@ -1938,7 +2947,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
1938 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT; 2947 device_config->rts_mac_en = VXGE_HW_RTS_MAC_DEFAULT;
1939 2948
1940 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 2949 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
1941
1942 device_config->vp_config[i].vp_id = i; 2950 device_config->vp_config[i].vp_id = i;
1943 2951
1944 device_config->vp_config[i].min_bandwidth = 2952 device_config->vp_config[i].min_bandwidth =
@@ -2078,61 +3086,6 @@ vxge_hw_device_config_default_get(struct vxge_hw_device_config *device_config)
2078} 3086}
2079 3087
2080/* 3088/*
2081 * __vxge_hw_legacy_swapper_set - Set the swapper bits for the legacy section.
2082 * Set the swapper bits appropriately for the legacy section.
2083 */
2084static enum vxge_hw_status
2085__vxge_hw_legacy_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg)
2086{
2087 u64 val64;
2088 enum vxge_hw_status status = VXGE_HW_OK;
2089
2090 val64 = readq(&legacy_reg->toc_swapper_fb);
2091
2092 wmb();
2093
2094 switch (val64) {
2095
2096 case VXGE_HW_SWAPPER_INITIAL_VALUE:
2097 return status;
2098
2099 case VXGE_HW_SWAPPER_BYTE_SWAPPED_BIT_FLIPPED:
2100 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2101 &legacy_reg->pifm_rd_swap_en);
2102 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2103 &legacy_reg->pifm_rd_flip_en);
2104 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2105 &legacy_reg->pifm_wr_swap_en);
2106 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2107 &legacy_reg->pifm_wr_flip_en);
2108 break;
2109
2110 case VXGE_HW_SWAPPER_BYTE_SWAPPED:
2111 writeq(VXGE_HW_SWAPPER_READ_BYTE_SWAP_ENABLE,
2112 &legacy_reg->pifm_rd_swap_en);
2113 writeq(VXGE_HW_SWAPPER_WRITE_BYTE_SWAP_ENABLE,
2114 &legacy_reg->pifm_wr_swap_en);
2115 break;
2116
2117 case VXGE_HW_SWAPPER_BIT_FLIPPED:
2118 writeq(VXGE_HW_SWAPPER_READ_BIT_FLAP_ENABLE,
2119 &legacy_reg->pifm_rd_flip_en);
2120 writeq(VXGE_HW_SWAPPER_WRITE_BIT_FLAP_ENABLE,
2121 &legacy_reg->pifm_wr_flip_en);
2122 break;
2123 }
2124
2125 wmb();
2126
2127 val64 = readq(&legacy_reg->toc_swapper_fb);
2128
2129 if (val64 != VXGE_HW_SWAPPER_INITIAL_VALUE)
2130 status = VXGE_HW_ERR_SWAPPER_CTRL;
2131
2132 return status;
2133}
2134
2135/*
2136 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath. 3089 * __vxge_hw_vpath_swapper_set - Set the swapper bits for the vpath.
2137 * Set the swapper bits appropriately for the vpath. 3090 * Set the swapper bits appropriately for the vpath.
2138 */ 3091 */
@@ -2156,9 +3109,8 @@ __vxge_hw_vpath_swapper_set(struct vxge_hw_vpath_reg __iomem *vpath_reg)
2156 * Set the swapper bits appropriately for the vpath. 3109 * Set the swapper bits appropriately for the vpath.
2157 */ 3110 */
2158static enum vxge_hw_status 3111static enum vxge_hw_status
2159__vxge_hw_kdfc_swapper_set( 3112__vxge_hw_kdfc_swapper_set(struct vxge_hw_legacy_reg __iomem *legacy_reg,
2160 struct vxge_hw_legacy_reg __iomem *legacy_reg, 3113 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2161 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2162{ 3114{
2163 u64 val64; 3115 u64 val64;
2164 3116
@@ -2408,6 +3360,69 @@ exit:
2408} 3360}
2409 3361
2410/* 3362/*
3363 * __vxge_hw_fifo_abort - Returns the TxD
3364 * This function terminates the TxDs of fifo
3365 */
3366static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
3367{
3368 void *txdlh;
3369
3370 for (;;) {
3371 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
3372
3373 if (txdlh == NULL)
3374 break;
3375
3376 vxge_hw_channel_dtr_complete(&fifo->channel);
3377
3378 if (fifo->txdl_term) {
3379 fifo->txdl_term(txdlh,
3380 VXGE_HW_TXDL_STATE_POSTED,
3381 fifo->channel.userdata);
3382 }
3383
3384 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
3385 }
3386
3387 return VXGE_HW_OK;
3388}
3389
3390/*
3391 * __vxge_hw_fifo_reset - Resets the fifo
3392 * This function resets the fifo during vpath reset operation
3393 */
3394static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
3395{
3396 enum vxge_hw_status status = VXGE_HW_OK;
3397
3398 __vxge_hw_fifo_abort(fifo);
3399 status = __vxge_hw_channel_reset(&fifo->channel);
3400
3401 return status;
3402}
3403
3404/*
3405 * __vxge_hw_fifo_delete - Removes the FIFO
3406 * This function frees up the memory pool and removes the FIFO
3407 */
3408static enum vxge_hw_status
3409__vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
3410{
3411 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
3412
3413 __vxge_hw_fifo_abort(fifo);
3414
3415 if (fifo->mempool)
3416 __vxge_hw_mempool_destroy(fifo->mempool);
3417
3418 vp->vpath->fifoh = NULL;
3419
3420 __vxge_hw_channel_free(&fifo->channel);
3421
3422 return VXGE_HW_OK;
3423}
3424
3425/*
2411 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD 3426 * __vxge_hw_fifo_mempool_item_alloc - Allocate List blocks for TxD
2412 * list callback 3427 * list callback
2413 * This function is callback passed to __vxge_hw_mempool_create to create memory 3428 * This function is callback passed to __vxge_hw_mempool_create to create memory
@@ -2453,7 +3468,7 @@ __vxge_hw_fifo_mempool_item_alloc(
2453 * __vxge_hw_fifo_create - Create a FIFO 3468 * __vxge_hw_fifo_create - Create a FIFO
2454 * This function creates the FIFO and initializes it. 3469 * This function creates the FIFO and initializes it.
2455 */ 3470 */
2456enum vxge_hw_status 3471static enum vxge_hw_status
2457__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp, 3472__vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
2458 struct vxge_hw_fifo_attr *attr) 3473 struct vxge_hw_fifo_attr *attr)
2459{ 3474{
@@ -2572,68 +3587,6 @@ exit:
2572} 3587}
2573 3588
2574/* 3589/*
2575 * __vxge_hw_fifo_abort - Returns the TxD
2576 * This function terminates the TxDs of fifo
2577 */
2578static enum vxge_hw_status __vxge_hw_fifo_abort(struct __vxge_hw_fifo *fifo)
2579{
2580 void *txdlh;
2581
2582 for (;;) {
2583 vxge_hw_channel_dtr_try_complete(&fifo->channel, &txdlh);
2584
2585 if (txdlh == NULL)
2586 break;
2587
2588 vxge_hw_channel_dtr_complete(&fifo->channel);
2589
2590 if (fifo->txdl_term) {
2591 fifo->txdl_term(txdlh,
2592 VXGE_HW_TXDL_STATE_POSTED,
2593 fifo->channel.userdata);
2594 }
2595
2596 vxge_hw_channel_dtr_free(&fifo->channel, txdlh);
2597 }
2598
2599 return VXGE_HW_OK;
2600}
2601
2602/*
2603 * __vxge_hw_fifo_reset - Resets the fifo
2604 * This function resets the fifo during vpath reset operation
2605 */
2606static enum vxge_hw_status __vxge_hw_fifo_reset(struct __vxge_hw_fifo *fifo)
2607{
2608 enum vxge_hw_status status = VXGE_HW_OK;
2609
2610 __vxge_hw_fifo_abort(fifo);
2611 status = __vxge_hw_channel_reset(&fifo->channel);
2612
2613 return status;
2614}
2615
2616/*
2617 * __vxge_hw_fifo_delete - Removes the FIFO
2618 * This function frees up the memory pool and removes the FIFO
2619 */
2620enum vxge_hw_status __vxge_hw_fifo_delete(struct __vxge_hw_vpath_handle *vp)
2621{
2622 struct __vxge_hw_fifo *fifo = vp->vpath->fifoh;
2623
2624 __vxge_hw_fifo_abort(fifo);
2625
2626 if (fifo->mempool)
2627 __vxge_hw_mempool_destroy(fifo->mempool);
2628
2629 vp->vpath->fifoh = NULL;
2630
2631 __vxge_hw_channel_free(&fifo->channel);
2632
2633 return VXGE_HW_OK;
2634}
2635
2636/*
2637 * __vxge_hw_vpath_pci_read - Read the content of given address 3590 * __vxge_hw_vpath_pci_read - Read the content of given address
2638 * in pci config space. 3591 * in pci config space.
2639 * Read from the vpath pci config space. 3592 * Read from the vpath pci config space.
@@ -2675,297 +3628,6 @@ exit:
2675 return status; 3628 return status;
2676} 3629}
2677 3630
2678/*
2679 * __vxge_hw_vpath_func_id_get - Get the function id of the vpath.
2680 * Returns the function number of the vpath.
2681 */
2682static u32
2683__vxge_hw_vpath_func_id_get(u32 vp_id,
2684 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg)
2685{
2686 u64 val64;
2687
2688 val64 = readq(&vpmgmt_reg->vpath_to_func_map_cfg1);
2689
2690 return
2691 (u32)VXGE_HW_VPATH_TO_FUNC_MAP_CFG1_GET_VPATH_TO_FUNC_MAP_CFG1(val64);
2692}
2693
2694/*
2695 * __vxge_hw_read_rts_ds - Program RTS steering criteria
2696 */
2697static inline void
2698__vxge_hw_read_rts_ds(struct vxge_hw_vpath_reg __iomem *vpath_reg,
2699 u64 dta_struct_sel)
2700{
2701 writeq(0, &vpath_reg->rts_access_steer_ctrl);
2702 wmb();
2703 writeq(dta_struct_sel, &vpath_reg->rts_access_steer_data0);
2704 writeq(0, &vpath_reg->rts_access_steer_data1);
2705 wmb();
2706}
2707
2708
2709/*
2710 * __vxge_hw_vpath_card_info_get - Get the serial numbers,
2711 * part number and product description.
2712 */
2713static enum vxge_hw_status
2714__vxge_hw_vpath_card_info_get(
2715 u32 vp_id,
2716 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2717 struct vxge_hw_device_hw_info *hw_info)
2718{
2719 u32 i, j;
2720 u64 val64;
2721 u64 data1 = 0ULL;
2722 u64 data2 = 0ULL;
2723 enum vxge_hw_status status = VXGE_HW_OK;
2724 u8 *serial_number = hw_info->serial_number;
2725 u8 *part_number = hw_info->part_number;
2726 u8 *product_desc = hw_info->product_desc;
2727
2728 __vxge_hw_read_rts_ds(vpath_reg,
2729 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_SERIAL_NUMBER);
2730
2731 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2732 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2733 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2734 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2735 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2736 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2737
2738 status = __vxge_hw_pio_mem_write64(val64,
2739 &vpath_reg->rts_access_steer_ctrl,
2740 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2741 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2742
2743 if (status != VXGE_HW_OK)
2744 return status;
2745
2746 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2747
2748 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2749 data1 = readq(&vpath_reg->rts_access_steer_data0);
2750 ((u64 *)serial_number)[0] = be64_to_cpu(data1);
2751
2752 data2 = readq(&vpath_reg->rts_access_steer_data1);
2753 ((u64 *)serial_number)[1] = be64_to_cpu(data2);
2754 status = VXGE_HW_OK;
2755 } else
2756 *serial_number = 0;
2757
2758 __vxge_hw_read_rts_ds(vpath_reg,
2759 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PART_NUMBER);
2760
2761 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2762 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2763 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2764 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2765 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2766 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2767
2768 status = __vxge_hw_pio_mem_write64(val64,
2769 &vpath_reg->rts_access_steer_ctrl,
2770 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2771 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2772
2773 if (status != VXGE_HW_OK)
2774 return status;
2775
2776 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2777
2778 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2779
2780 data1 = readq(&vpath_reg->rts_access_steer_data0);
2781 ((u64 *)part_number)[0] = be64_to_cpu(data1);
2782
2783 data2 = readq(&vpath_reg->rts_access_steer_data1);
2784 ((u64 *)part_number)[1] = be64_to_cpu(data2);
2785
2786 status = VXGE_HW_OK;
2787
2788 } else
2789 *part_number = 0;
2790
2791 j = 0;
2792
2793 for (i = VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_0;
2794 i <= VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_DESC_3; i++) {
2795
2796 __vxge_hw_read_rts_ds(vpath_reg, i);
2797
2798 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2799 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2800 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2801 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2802 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2803 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2804
2805 status = __vxge_hw_pio_mem_write64(val64,
2806 &vpath_reg->rts_access_steer_ctrl,
2807 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2808 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2809
2810 if (status != VXGE_HW_OK)
2811 return status;
2812
2813 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2814
2815 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2816
2817 data1 = readq(&vpath_reg->rts_access_steer_data0);
2818 ((u64 *)product_desc)[j++] = be64_to_cpu(data1);
2819
2820 data2 = readq(&vpath_reg->rts_access_steer_data1);
2821 ((u64 *)product_desc)[j++] = be64_to_cpu(data2);
2822
2823 status = VXGE_HW_OK;
2824 } else
2825 *product_desc = 0;
2826 }
2827
2828 return status;
2829}
2830
2831/*
2832 * __vxge_hw_vpath_fw_ver_get - Get the fw version
2833 * Returns FW Version
2834 */
2835static enum vxge_hw_status
2836__vxge_hw_vpath_fw_ver_get(
2837 u32 vp_id,
2838 struct vxge_hw_vpath_reg __iomem *vpath_reg,
2839 struct vxge_hw_device_hw_info *hw_info)
2840{
2841 u64 val64;
2842 u64 data1 = 0ULL;
2843 u64 data2 = 0ULL;
2844 struct vxge_hw_device_version *fw_version = &hw_info->fw_version;
2845 struct vxge_hw_device_date *fw_date = &hw_info->fw_date;
2846 struct vxge_hw_device_version *flash_version = &hw_info->flash_version;
2847 struct vxge_hw_device_date *flash_date = &hw_info->flash_date;
2848 enum vxge_hw_status status = VXGE_HW_OK;
2849
2850 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2851 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY) |
2852 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2853 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2854 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2855 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2856
2857 status = __vxge_hw_pio_mem_write64(val64,
2858 &vpath_reg->rts_access_steer_ctrl,
2859 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2860 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2861
2862 if (status != VXGE_HW_OK)
2863 goto exit;
2864
2865 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2866
2867 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2868
2869 data1 = readq(&vpath_reg->rts_access_steer_data0);
2870 data2 = readq(&vpath_reg->rts_access_steer_data1);
2871
2872 fw_date->day =
2873 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_DAY(
2874 data1);
2875 fw_date->month =
2876 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MONTH(
2877 data1);
2878 fw_date->year =
2879 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_YEAR(
2880 data1);
2881
2882 snprintf(fw_date->date, VXGE_HW_FW_STRLEN, "%2.2d/%2.2d/%4.4d",
2883 fw_date->month, fw_date->day, fw_date->year);
2884
2885 fw_version->major =
2886 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MAJOR(data1);
2887 fw_version->minor =
2888 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_MINOR(data1);
2889 fw_version->build =
2890 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_FW_VER_BUILD(data1);
2891
2892 snprintf(fw_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2893 fw_version->major, fw_version->minor, fw_version->build);
2894
2895 flash_date->day =
2896 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_DAY(data2);
2897 flash_date->month =
2898 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MONTH(data2);
2899 flash_date->year =
2900 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_YEAR(data2);
2901
2902 snprintf(flash_date->date, VXGE_HW_FW_STRLEN,
2903 "%2.2d/%2.2d/%4.4d",
2904 flash_date->month, flash_date->day, flash_date->year);
2905
2906 flash_version->major =
2907 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MAJOR(data2);
2908 flash_version->minor =
2909 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_MINOR(data2);
2910 flash_version->build =
2911 (u32)VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(data2);
2912
2913 snprintf(flash_version->version, VXGE_HW_FW_STRLEN, "%d.%d.%d",
2914 flash_version->major, flash_version->minor,
2915 flash_version->build);
2916
2917 status = VXGE_HW_OK;
2918
2919 } else
2920 status = VXGE_HW_FAIL;
2921exit:
2922 return status;
2923}
2924
2925/*
2926 * __vxge_hw_vpath_pci_func_mode_get - Get the pci mode
2927 * Returns pci function mode
2928 */
2929static u64
2930__vxge_hw_vpath_pci_func_mode_get(
2931 u32 vp_id,
2932 struct vxge_hw_vpath_reg __iomem *vpath_reg)
2933{
2934 u64 val64;
2935 u64 data1 = 0ULL;
2936 enum vxge_hw_status status = VXGE_HW_OK;
2937
2938 __vxge_hw_read_rts_ds(vpath_reg,
2939 VXGE_HW_RTS_ACCESS_STEER_DATA0_MEMO_ITEM_PCI_MODE);
2940
2941 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2942 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_MEMO_ENTRY) |
2943 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
2944 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
2945 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
2946 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
2947
2948 status = __vxge_hw_pio_mem_write64(val64,
2949 &vpath_reg->rts_access_steer_ctrl,
2950 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
2951 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
2952
2953 if (status != VXGE_HW_OK)
2954 goto exit;
2955
2956 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
2957
2958 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
2959 data1 = readq(&vpath_reg->rts_access_steer_data0);
2960 status = VXGE_HW_OK;
2961 } else {
2962 data1 = 0;
2963 status = VXGE_HW_FAIL;
2964 }
2965exit:
2966 return data1;
2967}
2968
2969/** 3631/**
2970 * vxge_hw_device_flick_link_led - Flick (blink) link LED. 3632 * vxge_hw_device_flick_link_led - Flick (blink) link LED.
2971 * @hldev: HW device. 3633 * @hldev: HW device.
@@ -2974,37 +3636,24 @@ exit:
2974 * Flicker the link LED. 3636 * Flicker the link LED.
2975 */ 3637 */
2976enum vxge_hw_status 3638enum vxge_hw_status
2977vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, 3639vxge_hw_device_flick_link_led(struct __vxge_hw_device *hldev, u64 on_off)
2978 u64 on_off)
2979{ 3640{
2980 u64 val64; 3641 struct __vxge_hw_virtualpath *vpath;
2981 enum vxge_hw_status status = VXGE_HW_OK; 3642 u64 data0, data1 = 0, steer_ctrl = 0;
2982 struct vxge_hw_vpath_reg __iomem *vp_reg; 3643 enum vxge_hw_status status;
2983 3644
2984 if (hldev == NULL) { 3645 if (hldev == NULL) {
2985 status = VXGE_HW_ERR_INVALID_DEVICE; 3646 status = VXGE_HW_ERR_INVALID_DEVICE;
2986 goto exit; 3647 goto exit;
2987 } 3648 }
2988 3649
2989 vp_reg = hldev->vpath_reg[hldev->first_vp_id]; 3650 vpath = &hldev->virtual_paths[hldev->first_vp_id];
2990 3651
2991 writeq(0, &vp_reg->rts_access_steer_ctrl); 3652 data0 = on_off;
2992 wmb(); 3653 status = vxge_hw_vpath_fw_api(vpath,
2993 writeq(on_off, &vp_reg->rts_access_steer_data0); 3654 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL,
2994 writeq(0, &vp_reg->rts_access_steer_data1); 3655 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO,
2995 wmb(); 3656 0, &data0, &data1, &steer_ctrl);
2996
2997 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
2998 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LED_CONTROL) |
2999 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3000 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO) |
3001 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3002 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3003
3004 status = __vxge_hw_pio_mem_write64(val64,
3005 &vp_reg->rts_access_steer_ctrl,
3006 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3007 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3008exit: 3657exit:
3009 return status; 3658 return status;
3010} 3659}
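
vxge_hw_vpath_fw_api(), which replaces the open-coded steering-register sequence here, is defined elsewhere in this patch; from the call sites in this hunk its prototype is inferred to be roughly the following (parameter names are illustrative):

	/* Inferred prototype (sketch), based on the call sites in this diff. */
	enum vxge_hw_status
	vxge_hw_vpath_fw_api(struct __vxge_hw_virtualpath *vpath, u32 action,
			     u32 sel, u32 offset, u64 *data0, u64 *data1,
			     u64 *steer_ctrl);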
@@ -3013,63 +3662,38 @@ exit:
3013 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables 3662 * __vxge_hw_vpath_rts_table_get - Get the entries from RTS access tables
3014 */ 3663 */
3015enum vxge_hw_status 3664enum vxge_hw_status
3016__vxge_hw_vpath_rts_table_get( 3665__vxge_hw_vpath_rts_table_get(struct __vxge_hw_vpath_handle *vp,
3017 struct __vxge_hw_vpath_handle *vp, 3666 u32 action, u32 rts_table, u32 offset,
3018 u32 action, u32 rts_table, u32 offset, u64 *data1, u64 *data2) 3667 u64 *data0, u64 *data1)
3019{ 3668{
3020 u64 val64; 3669 enum vxge_hw_status status;
3021 struct __vxge_hw_virtualpath *vpath; 3670 u64 steer_ctrl = 0;
3022 struct vxge_hw_vpath_reg __iomem *vp_reg;
3023
3024 enum vxge_hw_status status = VXGE_HW_OK;
3025 3671
3026 if (vp == NULL) { 3672 if (vp == NULL) {
3027 status = VXGE_HW_ERR_INVALID_HANDLE; 3673 status = VXGE_HW_ERR_INVALID_HANDLE;
3028 goto exit; 3674 goto exit;
3029 } 3675 }
3030 3676
3031 vpath = vp->vpath;
3032 vp_reg = vpath->vp_reg;
3033
3034 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3035 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3036 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3037 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3038
3039 if ((rts_table == 3677 if ((rts_table ==
3040 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) || 3678 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT) ||
3041 (rts_table == 3679 (rts_table ==
3042 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) || 3680 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT) ||
3043 (rts_table == 3681 (rts_table ==
3044 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) || 3682 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK) ||
3045 (rts_table == 3683 (rts_table ==
3046 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) { 3684 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY)) {
3047 val64 = val64 | VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL; 3685 steer_ctrl = VXGE_HW_RTS_ACCESS_STEER_CTRL_TABLE_SEL;
3048 } 3686 }
3049 3687
3050 status = __vxge_hw_pio_mem_write64(val64, 3688 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3051 &vp_reg->rts_access_steer_ctrl, 3689 data0, data1, &steer_ctrl);
3052 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3053 vpath->hldev->config.device_poll_millis);
3054
3055 if (status != VXGE_HW_OK) 3690 if (status != VXGE_HW_OK)
3056 goto exit; 3691 goto exit;
3057 3692
3058 val64 = readq(&vp_reg->rts_access_steer_ctrl); 3693 if ((rts_table != VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) &&
3059 3694 (rts_table !=
3060 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) { 3695 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3061 3696 *data1 = 0;
3062 *data1 = readq(&vp_reg->rts_access_steer_data0);
3063
3064 if ((rts_table ==
3065 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3066 (rts_table ==
3067 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) {
3068 *data2 = readq(&vp_reg->rts_access_steer_data1);
3069 }
3070 status = VXGE_HW_OK;
3071 } else
3072 status = VXGE_HW_FAIL;
3073exit: 3697exit:
3074 return status; 3698 return status;
3075} 3699}
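With the strobe/poll sequence folded into vxge_hw_vpath_fw_api(), a caller of the get wrapper only deals with the two returned data words. A minimal sketch of reading the first MAC entry from the DA table through this wrapper (assumes an open vpath handle vp; the decode macros are the same ones used by the removed __vxge_hw_vpath_addr_get() further down; error handling trimmed):

	u64 data0 = 0, data1 = 0;
	u8 macaddr[ETH_ALEN], macaddr_mask[ETH_ALEN];
	u32 i;
	enum vxge_hw_status status;

	status = __vxge_hw_vpath_rts_table_get(vp,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY,
			VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA,
			0, &data0, &data1);
	if (status == VXGE_HW_OK) {
		data0 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data0);
		data1 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(data1);
		/* unpack the address and its mask, most significant byte first */
		for (i = ETH_ALEN; i > 0; i--, data0 >>= 8, data1 >>= 8) {
			macaddr[i - 1] = (u8)(data0 & 0xFF);
			macaddr_mask[i - 1] = (u8)(data1 & 0xFF);
		}
	}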
@@ -3078,107 +3702,27 @@ exit:
3078 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables 3702 * __vxge_hw_vpath_rts_table_set - Set the entries of RTS access tables
3079 */ 3703 */
3080enum vxge_hw_status 3704enum vxge_hw_status
3081__vxge_hw_vpath_rts_table_set( 3705__vxge_hw_vpath_rts_table_set(struct __vxge_hw_vpath_handle *vp, u32 action,
3082 struct __vxge_hw_vpath_handle *vp, u32 action, u32 rts_table, 3706 u32 rts_table, u32 offset, u64 steer_data0,
3083 u32 offset, u64 data1, u64 data2) 3707 u64 steer_data1)
3084{ 3708{
3085 u64 val64; 3709 u64 data0, data1 = 0, steer_ctrl = 0;
3086 struct __vxge_hw_virtualpath *vpath; 3710 enum vxge_hw_status status;
3087 enum vxge_hw_status status = VXGE_HW_OK;
3088 struct vxge_hw_vpath_reg __iomem *vp_reg;
3089 3711
3090 if (vp == NULL) { 3712 if (vp == NULL) {
3091 status = VXGE_HW_ERR_INVALID_HANDLE; 3713 status = VXGE_HW_ERR_INVALID_HANDLE;
3092 goto exit; 3714 goto exit;
3093 } 3715 }
3094 3716
3095 vpath = vp->vpath; 3717 data0 = steer_data0;
3096 vp_reg = vpath->vp_reg;
3097
3098 writeq(data1, &vp_reg->rts_access_steer_data0);
3099 wmb();
3100 3718
3101 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) || 3719 if ((rts_table == VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) ||
3102 (rts_table == 3720 (rts_table ==
3103 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT)) { 3721 VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT))
3104 writeq(data2, &vp_reg->rts_access_steer_data1); 3722 data1 = steer_data1;
3105 wmb();
3106 }
3107
3108 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(action) |
3109 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(rts_table) |
3110 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3111 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(offset);
3112
3113 status = __vxge_hw_pio_mem_write64(val64,
3114 &vp_reg->rts_access_steer_ctrl,
3115 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3116 vpath->hldev->config.device_poll_millis);
3117
3118 if (status != VXGE_HW_OK)
3119 goto exit;
3120
3121 val64 = readq(&vp_reg->rts_access_steer_ctrl);
3122
3123 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS)
3124 status = VXGE_HW_OK;
3125 else
3126 status = VXGE_HW_FAIL;
3127exit:
3128 return status;
3129}
3130
3131/*
3132 * __vxge_hw_vpath_addr_get - Get the hw address entry for this vpath
3133 * from MAC address table.
3134 */
3135static enum vxge_hw_status
3136__vxge_hw_vpath_addr_get(
3137 u32 vp_id, struct vxge_hw_vpath_reg __iomem *vpath_reg,
3138 u8 (macaddr)[ETH_ALEN], u8 (macaddr_mask)[ETH_ALEN])
3139{
3140 u32 i;
3141 u64 val64;
3142 u64 data1 = 0ULL;
3143 u64 data2 = 0ULL;
3144 enum vxge_hw_status status = VXGE_HW_OK;
3145
3146 val64 = VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION(
3147 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_LIST_FIRST_ENTRY) |
3148 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL(
3149 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DA) |
3150 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE |
3151 VXGE_HW_RTS_ACCESS_STEER_CTRL_OFFSET(0);
3152
3153 status = __vxge_hw_pio_mem_write64(val64,
3154 &vpath_reg->rts_access_steer_ctrl,
3155 VXGE_HW_RTS_ACCESS_STEER_CTRL_STROBE,
3156 VXGE_HW_DEF_DEVICE_POLL_MILLIS);
3157
3158 if (status != VXGE_HW_OK)
3159 goto exit;
3160
3161 val64 = readq(&vpath_reg->rts_access_steer_ctrl);
3162
3163 if (val64 & VXGE_HW_RTS_ACCESS_STEER_CTRL_RMACJ_STATUS) {
3164 3723
3165 data1 = readq(&vpath_reg->rts_access_steer_data0); 3724 status = vxge_hw_vpath_fw_api(vp->vpath, action, rts_table, offset,
3166 data2 = readq(&vpath_reg->rts_access_steer_data1); 3725 &data0, &data1, &steer_ctrl);
3167
3168 data1 = VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(data1);
3169 data2 = VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_DA_MAC_ADDR_MASK(
3170 data2);
3171
3172 for (i = ETH_ALEN; i > 0; i--) {
3173 macaddr[i-1] = (u8)(data1 & 0xFF);
3174 data1 >>= 8;
3175
3176 macaddr_mask[i-1] = (u8)(data2 & 0xFF);
3177 data2 >>= 8;
3178 }
3179 status = VXGE_HW_OK;
3180 } else
3181 status = VXGE_HW_FAIL;
3182exit: 3726exit:
3183 return status; 3727 return status;
3184} 3728}
@@ -3204,6 +3748,8 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
3204 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY, 3748 VXGE_HW_RTS_ACCESS_STEER_CTRL_ACTION_READ_ENTRY,
3205 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG, 3749 VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG,
3206 0, &data0, &data1); 3750 0, &data0, &data1);
3751 if (status != VXGE_HW_OK)
3752 goto exit;
3207 3753
3208 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) | 3754 data0 &= ~(VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_BUCKET_SIZE(0xf) |
3209 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3)); 3755 VXGE_HW_RTS_ACCESS_STEER_DATA0_RTH_GEN_ALG_SEL(0x3));
@@ -3771,10 +4317,10 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3771 vp_reg = vpath->vp_reg; 4317 vp_reg = vpath->vp_reg;
3772 config = vpath->vp_config; 4318 config = vpath->vp_config;
3773 4319
3774 writeq((u64)0, &vp_reg->tim_dest_addr); 4320 writeq(0, &vp_reg->tim_dest_addr);
3775 writeq((u64)0, &vp_reg->tim_vpath_map); 4321 writeq(0, &vp_reg->tim_vpath_map);
3776 writeq((u64)0, &vp_reg->tim_bitmap); 4322 writeq(0, &vp_reg->tim_bitmap);
3777 writeq((u64)0, &vp_reg->tim_remap); 4323 writeq(0, &vp_reg->tim_remap);
3778 4324
3779 if (config->ring.enable == VXGE_HW_RING_ENABLE) 4325 if (config->ring.enable == VXGE_HW_RING_ENABLE)
3780 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM( 4326 writeq(VXGE_HW_TIM_RING_ASSN_INT_NUM(
@@ -3876,8 +4422,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3876 4422
3877 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { 4423 if (config->tti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3878 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); 4424 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3879 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( 4425 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
3880 config->tti.util_sel);
3881 } 4426 }
3882 4427
3883 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { 4428 if (config->tti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -3981,8 +4526,7 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
3981 4526
3982 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) { 4527 if (config->rti.util_sel != VXGE_HW_USE_FLASH_DEFAULT) {
3983 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f); 4528 val64 &= ~VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(0x3f);
3984 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL( 4529 val64 |= VXGE_HW_TIM_CFG3_INT_NUM_UTIL_SEL(vp_id);
3985 config->rti.util_sel);
3986 } 4530 }
3987 4531
3988 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) { 4532 if (config->rti.ltimer_val != VXGE_HW_USE_FLASH_DEFAULT) {
@@ -4003,11 +4547,15 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4003 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]); 4547 writeq(val64, &vp_reg->tim_cfg2_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4004 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]); 4548 writeq(val64, &vp_reg->tim_cfg3_int_num[VXGE_HW_VPATH_INTR_BMAP]);
4005 4549
4550 val64 = VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_PRD(150);
4551 val64 |= VXGE_HW_TIM_WRKLD_CLC_WRKLD_EVAL_DIV(0);
4552 val64 |= VXGE_HW_TIM_WRKLD_CLC_CNT_RX_TX(3);
4553 writeq(val64, &vp_reg->tim_wrkld_clc);
4554
4006 return status; 4555 return status;
4007} 4556}
4008 4557
4009void 4558void vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4010vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4011{ 4559{
4012 struct __vxge_hw_virtualpath *vpath; 4560 struct __vxge_hw_virtualpath *vpath;
4013 struct vxge_hw_vpath_reg __iomem *vp_reg; 4561 struct vxge_hw_vpath_reg __iomem *vp_reg;
@@ -4018,17 +4566,15 @@ vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
4018 vp_reg = vpath->vp_reg; 4566 vp_reg = vpath->vp_reg;
4019 config = vpath->vp_config; 4567 config = vpath->vp_config;
4020 4568
4021 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) { 4569 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE &&
4570 config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
4571 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4022 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]); 4572 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4023 4573 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4024 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) { 4574 writeq(val64, &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4025 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
4026 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
4027 writeq(val64,
4028 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
4029 }
4030 } 4575 }
4031} 4576}
4577
4032/* 4578/*
4033 * __vxge_hw_vpath_initialize 4579 * __vxge_hw_vpath_initialize
4034 * This routine is the final phase of init which initializes the 4580 * This routine is the final phase of init which initializes the
@@ -4052,22 +4598,18 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4052 vp_reg = vpath->vp_reg; 4598 vp_reg = vpath->vp_reg;
4053 4599
4054 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg); 4600 status = __vxge_hw_vpath_swapper_set(vpath->vp_reg);
4055
4056 if (status != VXGE_HW_OK) 4601 if (status != VXGE_HW_OK)
4057 goto exit; 4602 goto exit;
4058 4603
4059 status = __vxge_hw_vpath_mac_configure(hldev, vp_id); 4604 status = __vxge_hw_vpath_mac_configure(hldev, vp_id);
4060
4061 if (status != VXGE_HW_OK) 4605 if (status != VXGE_HW_OK)
4062 goto exit; 4606 goto exit;
4063 4607
4064 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id); 4608 status = __vxge_hw_vpath_kdfc_configure(hldev, vp_id);
4065
4066 if (status != VXGE_HW_OK) 4609 if (status != VXGE_HW_OK)
4067 goto exit; 4610 goto exit;
4068 4611
4069 status = __vxge_hw_vpath_tim_configure(hldev, vp_id); 4612 status = __vxge_hw_vpath_tim_configure(hldev, vp_id);
4070
4071 if (status != VXGE_HW_OK) 4613 if (status != VXGE_HW_OK)
4072 goto exit; 4614 goto exit;
4073 4615
@@ -4075,7 +4617,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4075 4617
4076 /* Get MRRS value from device control */ 4618 /* Get MRRS value from device control */
4077 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32); 4619 status = __vxge_hw_vpath_pci_read(vpath, 1, 0x78, &val32);
4078
4079 if (status == VXGE_HW_OK) { 4620 if (status == VXGE_HW_OK) {
4080 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12; 4621 val32 = (val32 & VXGE_HW_PCI_EXP_DEVCTL_READRQ) >> 12;
4081 val64 &= 4622 val64 &=
@@ -4099,6 +4640,28 @@ exit:
4099} 4640}
4100 4641
4101/* 4642/*
4643 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4644 * This routine closes all channels it opened and frees up memory
4645 */
4646static void __vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4647{
4648 struct __vxge_hw_virtualpath *vpath;
4649
4650 vpath = &hldev->virtual_paths[vp_id];
4651
4652 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4653 goto exit;
4654
4655 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4656 vpath->hldev->tim_int_mask1, vpath->vp_id);
4657 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4658
4659 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4660exit:
4661 return;
4662}
4663
4664/*
4102 * __vxge_hw_vp_initialize - Initialize Virtual Path structure 4665 * __vxge_hw_vp_initialize - Initialize Virtual Path structure
4103 * This routine is the initial phase of init which resets the vpath and 4666 * This routine is the initial phase of init which resets the vpath and
4104 * initializes the software support structures. 4667 * initializes the software support structures.
@@ -4117,6 +4680,7 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4117 4680
4118 vpath = &hldev->virtual_paths[vp_id]; 4681 vpath = &hldev->virtual_paths[vp_id];
4119 4682
4683 spin_lock_init(&hldev->virtual_paths[vp_id].lock);
4120 vpath->vp_id = vp_id; 4684 vpath->vp_id = vp_id;
4121 vpath->vp_open = VXGE_HW_VP_OPEN; 4685 vpath->vp_open = VXGE_HW_VP_OPEN;
4122 vpath->hldev = hldev; 4686 vpath->hldev = hldev;
@@ -4127,14 +4691,12 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4127 __vxge_hw_vpath_reset(hldev, vp_id); 4691 __vxge_hw_vpath_reset(hldev, vp_id);
4128 4692
4129 status = __vxge_hw_vpath_reset_check(vpath); 4693 status = __vxge_hw_vpath_reset_check(vpath);
4130
4131 if (status != VXGE_HW_OK) { 4694 if (status != VXGE_HW_OK) {
4132 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4695 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4133 goto exit; 4696 goto exit;
4134 } 4697 }
4135 4698
4136 status = __vxge_hw_vpath_mgmt_read(hldev, vpath); 4699 status = __vxge_hw_vpath_mgmt_read(hldev, vpath);
4137
4138 if (status != VXGE_HW_OK) { 4700 if (status != VXGE_HW_OK) {
4139 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath)); 4701 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4140 goto exit; 4702 goto exit;
@@ -4148,7 +4710,6 @@ __vxge_hw_vp_initialize(struct __vxge_hw_device *hldev, u32 vp_id,
4148 hldev->tim_int_mask1, vp_id); 4710 hldev->tim_int_mask1, vp_id);
4149 4711
4150 status = __vxge_hw_vpath_initialize(hldev, vp_id); 4712 status = __vxge_hw_vpath_initialize(hldev, vp_id);
4151
4152 if (status != VXGE_HW_OK) 4713 if (status != VXGE_HW_OK)
4153 __vxge_hw_vp_terminate(hldev, vp_id); 4714 __vxge_hw_vp_terminate(hldev, vp_id);
4154exit: 4715exit:
@@ -4156,29 +4717,6 @@ exit:
4156} 4717}
4157 4718
4158/* 4719/*
4159 * __vxge_hw_vp_terminate - Terminate Virtual Path structure
4160 * This routine closes all channels it opened and freeup memory
4161 */
4162static void
4163__vxge_hw_vp_terminate(struct __vxge_hw_device *hldev, u32 vp_id)
4164{
4165 struct __vxge_hw_virtualpath *vpath;
4166
4167 vpath = &hldev->virtual_paths[vp_id];
4168
4169 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN)
4170 goto exit;
4171
4172 VXGE_HW_DEVICE_TIM_INT_MASK_RESET(vpath->hldev->tim_int_mask0,
4173 vpath->hldev->tim_int_mask1, vpath->vp_id);
4174 hldev->stats.hw_dev_info_stats.vpath_info[vpath->vp_id] = NULL;
4175
4176 memset(vpath, 0, sizeof(struct __vxge_hw_virtualpath));
4177exit:
4178 return;
4179}
4180
4181/*
4182 * vxge_hw_vpath_mtu_set - Set MTU. 4720 * vxge_hw_vpath_mtu_set - Set MTU.
4183 * Set new MTU value. Example, to use jumbo frames: 4721 * Set new MTU value. Example, to use jumbo frames:
4184 * vxge_hw_vpath_mtu_set(my_device, 9600); 4722 * vxge_hw_vpath_mtu_set(my_device, 9600);
@@ -4215,6 +4753,64 @@ exit:
4215} 4753}
4216 4754
4217/* 4755/*
4756 * vxge_hw_vpath_stats_enable - Enable vpath h/w statistics.
4757 * Enable the DMA vpath statistics. The function is to be called to re-enable
4758 * the adapter to update stats into the host memory
4759 */
4760static enum vxge_hw_status
4761vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4762{
4763 enum vxge_hw_status status = VXGE_HW_OK;
4764 struct __vxge_hw_virtualpath *vpath;
4765
4766 vpath = vp->vpath;
4767
4768 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4769 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4770 goto exit;
4771 }
4772
4773 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4774 sizeof(struct vxge_hw_vpath_stats_hw_info));
4775
4776 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4777exit:
4778 return status;
4779}
4780
4781/*
4782 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
4783 * This function allocates a block from block pool or from the system
4784 */
4785static struct __vxge_hw_blockpool_entry *
4786__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
4787{
4788 struct __vxge_hw_blockpool_entry *entry = NULL;
4789 struct __vxge_hw_blockpool *blockpool;
4790
4791 blockpool = &devh->block_pool;
4792
4793 if (size == blockpool->block_size) {
4794
4795 if (!list_empty(&blockpool->free_block_list))
4796 entry = (struct __vxge_hw_blockpool_entry *)
4797 list_first_entry(&blockpool->free_block_list,
4798 struct __vxge_hw_blockpool_entry,
4799 item);
4800
4801 if (entry != NULL) {
4802 list_del(&entry->item);
4803 blockpool->pool_size--;
4804 }
4805 }
4806
4807 if (entry != NULL)
4808 __vxge_hw_blockpool_blocks_add(blockpool);
4809
4810 return entry;
4811}
4812
4813/*
4218 * vxge_hw_vpath_open - Open a virtual path on a given adapter 4814 * vxge_hw_vpath_open - Open a virtual path on a given adapter
4219 * This function is used to open access to virtual path of an 4815 * This function is used to open access to virtual path of an
4220 * adapter for offload, GRO operations. This function returns 4816 * adapter for offload, GRO operations. This function returns
@@ -4238,19 +4834,15 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4238 4834
4239 status = __vxge_hw_vp_initialize(hldev, attr->vp_id, 4835 status = __vxge_hw_vp_initialize(hldev, attr->vp_id,
4240 &hldev->config.vp_config[attr->vp_id]); 4836 &hldev->config.vp_config[attr->vp_id]);
4241
4242 if (status != VXGE_HW_OK) 4837 if (status != VXGE_HW_OK)
4243 goto vpath_open_exit1; 4838 goto vpath_open_exit1;
4244 4839
4245 vp = (struct __vxge_hw_vpath_handle *) 4840 vp = vzalloc(sizeof(struct __vxge_hw_vpath_handle));
4246 vmalloc(sizeof(struct __vxge_hw_vpath_handle));
4247 if (vp == NULL) { 4841 if (vp == NULL) {
4248 status = VXGE_HW_ERR_OUT_OF_MEMORY; 4842 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4249 goto vpath_open_exit2; 4843 goto vpath_open_exit2;
4250 } 4844 }
4251 4845
4252 memset(vp, 0, sizeof(struct __vxge_hw_vpath_handle));
4253
4254 vp->vpath = vpath; 4846 vp->vpath = vpath;
4255 4847
4256 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) { 4848 if (vpath->vp_config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
@@ -4273,7 +4865,6 @@ vxge_hw_vpath_open(struct __vxge_hw_device *hldev,
4273 4865
4274 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev, 4866 vpath->stats_block = __vxge_hw_blockpool_block_allocate(hldev,
4275 VXGE_HW_BLOCK_SIZE); 4867 VXGE_HW_BLOCK_SIZE);
4276
4277 if (vpath->stats_block == NULL) { 4868 if (vpath->stats_block == NULL) {
4278 status = VXGE_HW_ERR_OUT_OF_MEMORY; 4869 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4279 goto vpath_open_exit8; 4870 goto vpath_open_exit8;
@@ -4332,19 +4923,20 @@ vpath_open_exit1:
4332 * This function posts the initial count of receive descriptors to the 4923 * This function posts the initial count of receive descriptors to the
4333 * vpath's RxD doorbell. 4924 * vpath's RxD doorbell.
4334 */ 4925 */
4335void 4926void vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4336vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4337{ 4927{
4338 struct __vxge_hw_virtualpath *vpath = NULL; 4928 struct __vxge_hw_virtualpath *vpath = vp->vpath;
4929 struct __vxge_hw_ring *ring = vpath->ringh;
4930 struct vxgedev *vdev = netdev_priv(vpath->hldev->ndev);
4339 u64 new_count, val64, val164; 4931 u64 new_count, val64, val164;
4340 struct __vxge_hw_ring *ring;
4341 4932
4342 vpath = vp->vpath; 4933 if (vdev->titan1) {
4343 ring = vpath->ringh; 4934 new_count = readq(&vpath->vp_reg->rxdmem_size);
4935 new_count &= 0x1fff;
4936 } else
4937 new_count = ring->config->ring_blocks * VXGE_HW_BLOCK_SIZE / 8;
4344 4938
4345 new_count = readq(&vpath->vp_reg->rxdmem_size); 4939 val164 = VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count);
4346 new_count &= 0x1fff;
4347 val164 = (VXGE_HW_RXDMEM_SIZE_PRC_RXDMEM_SIZE(new_count));
4348 4940
4349 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164), 4941 writeq(VXGE_HW_PRC_RXD_DOORBELL_NEW_QW_CNT(val164),
4350 &vpath->vp_reg->prc_rxd_doorbell); 4942 &vpath->vp_reg->prc_rxd_doorbell);
@@ -4367,6 +4959,29 @@ vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp)
4367} 4959}
4368 4960
4369/* 4961/*
4962 * __vxge_hw_blockpool_block_free - Frees a block from block pool
4963 * @devh: Hal device
4964 * @entry: Entry of block to be freed
4965 *
4966 * This function frees a block from block pool
4967 */
4968static void
4969__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
4970 struct __vxge_hw_blockpool_entry *entry)
4971{
4972 struct __vxge_hw_blockpool *blockpool;
4973
4974 blockpool = &devh->block_pool;
4975
4976 if (entry->length == blockpool->block_size) {
4977 list_add(&entry->item, &blockpool->free_block_list);
4978 blockpool->pool_size++;
4979 }
4980
4981 __vxge_hw_blockpool_blocks_remove(blockpool);
4982}
4983
4984/*
4370 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open 4985 * vxge_hw_vpath_close - Close the handle obtained from a previous vpath open
4371 * This function is used to close access to virtual path opened 4986 * This function is used to close access to virtual path opened
4372 * earlier. 4987 * earlier.
@@ -4414,7 +5029,9 @@ enum vxge_hw_status vxge_hw_vpath_close(struct __vxge_hw_vpath_handle *vp)
4414 5029
4415 __vxge_hw_vp_terminate(devh, vp_id); 5030 __vxge_hw_vp_terminate(devh, vp_id);
4416 5031
5032 spin_lock(&vpath->lock);
4417 vpath->vp_open = VXGE_HW_VP_NOT_OPEN; 5033 vpath->vp_open = VXGE_HW_VP_NOT_OPEN;
5034 spin_unlock(&vpath->lock);
4418 5035
4419vpath_close_exit: 5036vpath_close_exit:
4420 return status; 5037 return status;
@@ -4515,730 +5132,3 @@ vxge_hw_vpath_enable(struct __vxge_hw_vpath_handle *vp)
4515 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32), 5132 __vxge_hw_pio_mem_write32_upper((u32)vxge_bVALn(val64, 0, 32),
4516 &hldev->common_reg->cmn_rsthdlr_cfg1); 5133 &hldev->common_reg->cmn_rsthdlr_cfg1);
4517} 5134}
4518
4519/*
4520 * vxge_hw_vpath_stats_enable - Enable vpath h/wstatistics.
4521 * Enable the DMA vpath statistics. The function is to be called to re-enable
4522 * the adapter to update stats into the host memory
4523 */
4524static enum vxge_hw_status
4525vxge_hw_vpath_stats_enable(struct __vxge_hw_vpath_handle *vp)
4526{
4527 enum vxge_hw_status status = VXGE_HW_OK;
4528 struct __vxge_hw_virtualpath *vpath;
4529
4530 vpath = vp->vpath;
4531
4532 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4533 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4534 goto exit;
4535 }
4536
4537 memcpy(vpath->hw_stats_sav, vpath->hw_stats,
4538 sizeof(struct vxge_hw_vpath_stats_hw_info));
4539
4540 status = __vxge_hw_vpath_stats_get(vpath, vpath->hw_stats);
4541exit:
4542 return status;
4543}
4544
4545/*
4546 * __vxge_hw_vpath_stats_access - Get the statistics from the given location
4547 * and offset and perform an operation
4548 */
4549static enum vxge_hw_status
4550__vxge_hw_vpath_stats_access(struct __vxge_hw_virtualpath *vpath,
4551 u32 operation, u32 offset, u64 *stat)
4552{
4553 u64 val64;
4554 enum vxge_hw_status status = VXGE_HW_OK;
4555 struct vxge_hw_vpath_reg __iomem *vp_reg;
4556
4557 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4558 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4559 goto vpath_stats_access_exit;
4560 }
4561
4562 vp_reg = vpath->vp_reg;
4563
4564 val64 = VXGE_HW_XMAC_STATS_ACCESS_CMD_OP(operation) |
4565 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE |
4566 VXGE_HW_XMAC_STATS_ACCESS_CMD_OFFSET_SEL(offset);
4567
4568 status = __vxge_hw_pio_mem_write64(val64,
4569 &vp_reg->xmac_stats_access_cmd,
4570 VXGE_HW_XMAC_STATS_ACCESS_CMD_STROBE,
4571 vpath->hldev->config.device_poll_millis);
4572
4573 if ((status == VXGE_HW_OK) && (operation == VXGE_HW_STATS_OP_READ))
4574 *stat = readq(&vp_reg->xmac_stats_access_data);
4575 else
4576 *stat = 0;
4577
4578vpath_stats_access_exit:
4579 return status;
4580}
4581
4582/*
4583 * __vxge_hw_vpath_xmac_tx_stats_get - Get the TX Statistics of a vpath
4584 */
4585static enum vxge_hw_status
4586__vxge_hw_vpath_xmac_tx_stats_get(
4587 struct __vxge_hw_virtualpath *vpath,
4588 struct vxge_hw_xmac_vpath_tx_stats *vpath_tx_stats)
4589{
4590 u64 *val64;
4591 int i;
4592 u32 offset = VXGE_HW_STATS_VPATH_TX_OFFSET;
4593 enum vxge_hw_status status = VXGE_HW_OK;
4594
4595 val64 = (u64 *) vpath_tx_stats;
4596
4597 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4598 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4599 goto exit;
4600 }
4601
4602 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_tx_stats) / 8; i++) {
4603 status = __vxge_hw_vpath_stats_access(vpath,
4604 VXGE_HW_STATS_OP_READ,
4605 offset, val64);
4606 if (status != VXGE_HW_OK)
4607 goto exit;
4608 offset++;
4609 val64++;
4610 }
4611exit:
4612 return status;
4613}
4614
4615/*
4616 * __vxge_hw_vpath_xmac_rx_stats_get - Get the RX Statistics of a vpath
4617 */
4618static enum vxge_hw_status
4619__vxge_hw_vpath_xmac_rx_stats_get(struct __vxge_hw_virtualpath *vpath,
4620 struct vxge_hw_xmac_vpath_rx_stats *vpath_rx_stats)
4621{
4622 u64 *val64;
4623 enum vxge_hw_status status = VXGE_HW_OK;
4624 int i;
4625 u32 offset = VXGE_HW_STATS_VPATH_RX_OFFSET;
4626 val64 = (u64 *) vpath_rx_stats;
4627
4628 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4629 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4630 goto exit;
4631 }
4632 for (i = 0; i < sizeof(struct vxge_hw_xmac_vpath_rx_stats) / 8; i++) {
4633 status = __vxge_hw_vpath_stats_access(vpath,
4634 VXGE_HW_STATS_OP_READ,
4635 offset >> 3, val64);
4636 if (status != VXGE_HW_OK)
4637 goto exit;
4638
4639 offset += 8;
4640 val64++;
4641 }
4642exit:
4643 return status;
4644}
4645
4646/*
4647 * __vxge_hw_vpath_stats_get - Get the vpath hw statistics.
4648 */
4649static enum vxge_hw_status
4650__vxge_hw_vpath_stats_get(struct __vxge_hw_virtualpath *vpath,
4651 struct vxge_hw_vpath_stats_hw_info *hw_stats)
4652{
4653 u64 val64;
4654 enum vxge_hw_status status = VXGE_HW_OK;
4655 struct vxge_hw_vpath_reg __iomem *vp_reg;
4656
4657 if (vpath->vp_open == VXGE_HW_VP_NOT_OPEN) {
4658 status = VXGE_HW_ERR_VPATH_NOT_OPEN;
4659 goto exit;
4660 }
4661 vp_reg = vpath->vp_reg;
4662
4663 val64 = readq(&vp_reg->vpath_debug_stats0);
4664 hw_stats->ini_num_mwr_sent =
4665 (u32)VXGE_HW_VPATH_DEBUG_STATS0_GET_INI_NUM_MWR_SENT(val64);
4666
4667 val64 = readq(&vp_reg->vpath_debug_stats1);
4668 hw_stats->ini_num_mrd_sent =
4669 (u32)VXGE_HW_VPATH_DEBUG_STATS1_GET_INI_NUM_MRD_SENT(val64);
4670
4671 val64 = readq(&vp_reg->vpath_debug_stats2);
4672 hw_stats->ini_num_cpl_rcvd =
4673 (u32)VXGE_HW_VPATH_DEBUG_STATS2_GET_INI_NUM_CPL_RCVD(val64);
4674
4675 val64 = readq(&vp_reg->vpath_debug_stats3);
4676 hw_stats->ini_num_mwr_byte_sent =
4677 VXGE_HW_VPATH_DEBUG_STATS3_GET_INI_NUM_MWR_BYTE_SENT(val64);
4678
4679 val64 = readq(&vp_reg->vpath_debug_stats4);
4680 hw_stats->ini_num_cpl_byte_rcvd =
4681 VXGE_HW_VPATH_DEBUG_STATS4_GET_INI_NUM_CPL_BYTE_RCVD(val64);
4682
4683 val64 = readq(&vp_reg->vpath_debug_stats5);
4684 hw_stats->wrcrdtarb_xoff =
4685 (u32)VXGE_HW_VPATH_DEBUG_STATS5_GET_WRCRDTARB_XOFF(val64);
4686
4687 val64 = readq(&vp_reg->vpath_debug_stats6);
4688 hw_stats->rdcrdtarb_xoff =
4689 (u32)VXGE_HW_VPATH_DEBUG_STATS6_GET_RDCRDTARB_XOFF(val64);
4690
4691 val64 = readq(&vp_reg->vpath_genstats_count01);
4692 hw_stats->vpath_genstats_count0 =
4693 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT0(
4694 val64);
4695
4696 val64 = readq(&vp_reg->vpath_genstats_count01);
4697 hw_stats->vpath_genstats_count1 =
4698 (u32)VXGE_HW_VPATH_GENSTATS_COUNT01_GET_PPIF_VPATH_GENSTATS_COUNT1(
4699 val64);
4700
4701 val64 = readq(&vp_reg->vpath_genstats_count23);
4702 hw_stats->vpath_genstats_count2 =
4703 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT2(
4704 val64);
4705
4706 val64 = readq(&vp_reg->vpath_genstats_count01);
4707 hw_stats->vpath_genstats_count3 =
4708 (u32)VXGE_HW_VPATH_GENSTATS_COUNT23_GET_PPIF_VPATH_GENSTATS_COUNT3(
4709 val64);
4710
4711 val64 = readq(&vp_reg->vpath_genstats_count4);
4712 hw_stats->vpath_genstats_count4 =
4713 (u32)VXGE_HW_VPATH_GENSTATS_COUNT4_GET_PPIF_VPATH_GENSTATS_COUNT4(
4714 val64);
4715
4716 val64 = readq(&vp_reg->vpath_genstats_count5);
4717 hw_stats->vpath_genstats_count5 =
4718 (u32)VXGE_HW_VPATH_GENSTATS_COUNT5_GET_PPIF_VPATH_GENSTATS_COUNT5(
4719 val64);
4720
4721 status = __vxge_hw_vpath_xmac_tx_stats_get(vpath, &hw_stats->tx_stats);
4722 if (status != VXGE_HW_OK)
4723 goto exit;
4724
4725 status = __vxge_hw_vpath_xmac_rx_stats_get(vpath, &hw_stats->rx_stats);
4726 if (status != VXGE_HW_OK)
4727 goto exit;
4728
4729 VXGE_HW_VPATH_STATS_PIO_READ(
4730 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM0_OFFSET);
4731
4732 hw_stats->prog_event_vnum0 =
4733 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM0(val64);
4734
4735 hw_stats->prog_event_vnum1 =
4736 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM1(val64);
4737
4738 VXGE_HW_VPATH_STATS_PIO_READ(
4739 VXGE_HW_STATS_VPATH_PROG_EVENT_VNUM2_OFFSET);
4740
4741 hw_stats->prog_event_vnum2 =
4742 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM2(val64);
4743
4744 hw_stats->prog_event_vnum3 =
4745 (u32)VXGE_HW_STATS_GET_VPATH_PROG_EVENT_VNUM3(val64);
4746
4747 val64 = readq(&vp_reg->rx_multi_cast_stats);
4748 hw_stats->rx_multi_cast_frame_discard =
4749 (u16)VXGE_HW_RX_MULTI_CAST_STATS_GET_FRAME_DISCARD(val64);
4750
4751 val64 = readq(&vp_reg->rx_frm_transferred);
4752 hw_stats->rx_frm_transferred =
4753 (u32)VXGE_HW_RX_FRM_TRANSFERRED_GET_RX_FRM_TRANSFERRED(val64);
4754
4755 val64 = readq(&vp_reg->rxd_returned);
4756 hw_stats->rxd_returned =
4757 (u16)VXGE_HW_RXD_RETURNED_GET_RXD_RETURNED(val64);
4758
4759 val64 = readq(&vp_reg->dbg_stats_rx_mpa);
4760 hw_stats->rx_mpa_len_fail_frms =
4761 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_LEN_FAIL_FRMS(val64);
4762 hw_stats->rx_mpa_mrk_fail_frms =
4763 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_MRK_FAIL_FRMS(val64);
4764 hw_stats->rx_mpa_crc_fail_frms =
4765 (u16)VXGE_HW_DBG_STATS_GET_RX_MPA_CRC_FAIL_FRMS(val64);
4766
4767 val64 = readq(&vp_reg->dbg_stats_rx_fau);
4768 hw_stats->rx_permitted_frms =
4769 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_PERMITTED_FRMS(val64);
4770 hw_stats->rx_vp_reset_discarded_frms =
4771 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_VP_RESET_DISCARDED_FRMS(val64);
4772 hw_stats->rx_wol_frms =
4773 (u16)VXGE_HW_DBG_STATS_GET_RX_FAU_RX_WOL_FRMS(val64);
4774
4775 val64 = readq(&vp_reg->tx_vp_reset_discarded_frms);
4776 hw_stats->tx_vp_reset_discarded_frms =
4777 (u16)VXGE_HW_TX_VP_RESET_DISCARDED_FRMS_GET_TX_VP_RESET_DISCARDED_FRMS(
4778 val64);
4779exit:
4780 return status;
4781}
4782
4783
4784static void vxge_os_dma_malloc_async(struct pci_dev *pdev, void *devh,
4785 unsigned long size)
4786{
4787 gfp_t flags;
4788 void *vaddr;
4789
4790 if (in_interrupt())
4791 flags = GFP_ATOMIC | GFP_DMA;
4792 else
4793 flags = GFP_KERNEL | GFP_DMA;
4794
4795 vaddr = kmalloc((size), flags);
4796
4797 vxge_hw_blockpool_block_add(devh, vaddr, size, pdev, pdev);
4798}
4799
4800static void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
4801 struct pci_dev **p_dma_acch)
4802{
4803 unsigned long misaligned = *(unsigned long *)p_dma_acch;
4804 u8 *tmp = (u8 *)vaddr;
4805 tmp -= misaligned;
4806 kfree((void *)tmp);
4807}
4808
4809/*
4810 * __vxge_hw_blockpool_create - Create block pool
4811 */
4812
4813enum vxge_hw_status
4814__vxge_hw_blockpool_create(struct __vxge_hw_device *hldev,
4815 struct __vxge_hw_blockpool *blockpool,
4816 u32 pool_size,
4817 u32 pool_max)
4818{
4819 u32 i;
4820 struct __vxge_hw_blockpool_entry *entry = NULL;
4821 void *memblock;
4822 dma_addr_t dma_addr;
4823 struct pci_dev *dma_handle;
4824 struct pci_dev *acc_handle;
4825 enum vxge_hw_status status = VXGE_HW_OK;
4826
4827 if (blockpool == NULL) {
4828 status = VXGE_HW_FAIL;
4829 goto blockpool_create_exit;
4830 }
4831
4832 blockpool->hldev = hldev;
4833 blockpool->block_size = VXGE_HW_BLOCK_SIZE;
4834 blockpool->pool_size = 0;
4835 blockpool->pool_max = pool_max;
4836 blockpool->req_out = 0;
4837
4838 INIT_LIST_HEAD(&blockpool->free_block_list);
4839 INIT_LIST_HEAD(&blockpool->free_entry_list);
4840
4841 for (i = 0; i < pool_size + pool_max; i++) {
4842 entry = kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4843 GFP_KERNEL);
4844 if (entry == NULL) {
4845 __vxge_hw_blockpool_destroy(blockpool);
4846 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4847 goto blockpool_create_exit;
4848 }
4849 list_add(&entry->item, &blockpool->free_entry_list);
4850 }
4851
4852 for (i = 0; i < pool_size; i++) {
4853
4854 memblock = vxge_os_dma_malloc(
4855 hldev->pdev,
4856 VXGE_HW_BLOCK_SIZE,
4857 &dma_handle,
4858 &acc_handle);
4859
4860 if (memblock == NULL) {
4861 __vxge_hw_blockpool_destroy(blockpool);
4862 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4863 goto blockpool_create_exit;
4864 }
4865
4866 dma_addr = pci_map_single(hldev->pdev, memblock,
4867 VXGE_HW_BLOCK_SIZE, PCI_DMA_BIDIRECTIONAL);
4868
4869 if (unlikely(pci_dma_mapping_error(hldev->pdev,
4870 dma_addr))) {
4871
4872 vxge_os_dma_free(hldev->pdev, memblock, &acc_handle);
4873 __vxge_hw_blockpool_destroy(blockpool);
4874 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4875 goto blockpool_create_exit;
4876 }
4877
4878 if (!list_empty(&blockpool->free_entry_list))
4879 entry = (struct __vxge_hw_blockpool_entry *)
4880 list_first_entry(&blockpool->free_entry_list,
4881 struct __vxge_hw_blockpool_entry,
4882 item);
4883
4884 if (entry == NULL)
4885 entry =
4886 kzalloc(sizeof(struct __vxge_hw_blockpool_entry),
4887 GFP_KERNEL);
4888 if (entry != NULL) {
4889 list_del(&entry->item);
4890 entry->length = VXGE_HW_BLOCK_SIZE;
4891 entry->memblock = memblock;
4892 entry->dma_addr = dma_addr;
4893 entry->acc_handle = acc_handle;
4894 entry->dma_handle = dma_handle;
4895 list_add(&entry->item,
4896 &blockpool->free_block_list);
4897 blockpool->pool_size++;
4898 } else {
4899 __vxge_hw_blockpool_destroy(blockpool);
4900 status = VXGE_HW_ERR_OUT_OF_MEMORY;
4901 goto blockpool_create_exit;
4902 }
4903 }
4904
4905blockpool_create_exit:
4906 return status;
4907}
4908
4909/*
4910 * __vxge_hw_blockpool_destroy - Deallocates the block pool
4911 */
4912
4913void __vxge_hw_blockpool_destroy(struct __vxge_hw_blockpool *blockpool)
4914{
4915
4916 struct __vxge_hw_device *hldev;
4917 struct list_head *p, *n;
4918 u16 ret;
4919
4920 if (blockpool == NULL) {
4921 ret = 1;
4922 goto exit;
4923 }
4924
4925 hldev = blockpool->hldev;
4926
4927 list_for_each_safe(p, n, &blockpool->free_block_list) {
4928
4929 pci_unmap_single(hldev->pdev,
4930 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4931 ((struct __vxge_hw_blockpool_entry *)p)->length,
4932 PCI_DMA_BIDIRECTIONAL);
4933
4934 vxge_os_dma_free(hldev->pdev,
4935 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4936 &((struct __vxge_hw_blockpool_entry *) p)->acc_handle);
4937
4938 list_del(
4939 &((struct __vxge_hw_blockpool_entry *)p)->item);
4940 kfree(p);
4941 blockpool->pool_size--;
4942 }
4943
4944 list_for_each_safe(p, n, &blockpool->free_entry_list) {
4945 list_del(
4946 &((struct __vxge_hw_blockpool_entry *)p)->item);
4947 kfree((void *)p);
4948 }
4949 ret = 0;
4950exit:
4951 return;
4952}
4953
4954/*
4955 * __vxge_hw_blockpool_blocks_add - Request additional blocks
4956 */
4957static
4958void __vxge_hw_blockpool_blocks_add(struct __vxge_hw_blockpool *blockpool)
4959{
4960 u32 nreq = 0, i;
4961
4962 if ((blockpool->pool_size + blockpool->req_out) <
4963 VXGE_HW_MIN_DMA_BLOCK_POOL_SIZE) {
4964 nreq = VXGE_HW_INCR_DMA_BLOCK_POOL_SIZE;
4965 blockpool->req_out += nreq;
4966 }
4967
4968 for (i = 0; i < nreq; i++)
4969 vxge_os_dma_malloc_async(
4970 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4971 blockpool->hldev, VXGE_HW_BLOCK_SIZE);
4972}
4973
4974/*
4975 * __vxge_hw_blockpool_blocks_remove - Free additional blocks
4976 */
4977static
4978void __vxge_hw_blockpool_blocks_remove(struct __vxge_hw_blockpool *blockpool)
4979{
4980 struct list_head *p, *n;
4981
4982 list_for_each_safe(p, n, &blockpool->free_block_list) {
4983
4984 if (blockpool->pool_size < blockpool->pool_max)
4985 break;
4986
4987 pci_unmap_single(
4988 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4989 ((struct __vxge_hw_blockpool_entry *)p)->dma_addr,
4990 ((struct __vxge_hw_blockpool_entry *)p)->length,
4991 PCI_DMA_BIDIRECTIONAL);
4992
4993 vxge_os_dma_free(
4994 ((struct __vxge_hw_device *)blockpool->hldev)->pdev,
4995 ((struct __vxge_hw_blockpool_entry *)p)->memblock,
4996 &((struct __vxge_hw_blockpool_entry *)p)->acc_handle);
4997
4998 list_del(&((struct __vxge_hw_blockpool_entry *)p)->item);
4999
5000 list_add(p, &blockpool->free_entry_list);
5001
5002 blockpool->pool_size--;
5003
5004 }
5005}
5006
5007/*
5008 * vxge_hw_blockpool_block_add - callback for vxge_os_dma_malloc_async
5009 * Adds a block to block pool
5010 */
5011static void vxge_hw_blockpool_block_add(struct __vxge_hw_device *devh,
5012 void *block_addr,
5013 u32 length,
5014 struct pci_dev *dma_h,
5015 struct pci_dev *acc_handle)
5016{
5017 struct __vxge_hw_blockpool *blockpool;
5018 struct __vxge_hw_blockpool_entry *entry = NULL;
5019 dma_addr_t dma_addr;
5020 enum vxge_hw_status status = VXGE_HW_OK;
5021 u32 req_out;
5022
5023 blockpool = &devh->block_pool;
5024
5025 if (block_addr == NULL) {
5026 blockpool->req_out--;
5027 status = VXGE_HW_FAIL;
5028 goto exit;
5029 }
5030
5031 dma_addr = pci_map_single(devh->pdev, block_addr, length,
5032 PCI_DMA_BIDIRECTIONAL);
5033
5034 if (unlikely(pci_dma_mapping_error(devh->pdev, dma_addr))) {
5035
5036 vxge_os_dma_free(devh->pdev, block_addr, &acc_handle);
5037 blockpool->req_out--;
5038 status = VXGE_HW_FAIL;
5039 goto exit;
5040 }
5041
5042
5043 if (!list_empty(&blockpool->free_entry_list))
5044 entry = (struct __vxge_hw_blockpool_entry *)
5045 list_first_entry(&blockpool->free_entry_list,
5046 struct __vxge_hw_blockpool_entry,
5047 item);
5048
5049 if (entry == NULL)
5050 entry = (struct __vxge_hw_blockpool_entry *)
5051 vmalloc(sizeof(struct __vxge_hw_blockpool_entry));
5052 else
5053 list_del(&entry->item);
5054
5055 if (entry != NULL) {
5056 entry->length = length;
5057 entry->memblock = block_addr;
5058 entry->dma_addr = dma_addr;
5059 entry->acc_handle = acc_handle;
5060 entry->dma_handle = dma_h;
5061 list_add(&entry->item, &blockpool->free_block_list);
5062 blockpool->pool_size++;
5063 status = VXGE_HW_OK;
5064 } else
5065 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5066
5067 blockpool->req_out--;
5068
5069 req_out = blockpool->req_out;
5070exit:
5071 return;
5072}
5073
5074/*
5075 * __vxge_hw_blockpool_malloc - Allocate a memory block from pool
5076 * Allocates a block of memory of given size, either from block pool
5077 * or by calling vxge_os_dma_malloc()
5078 */
5079void *
5080__vxge_hw_blockpool_malloc(struct __vxge_hw_device *devh, u32 size,
5081 struct vxge_hw_mempool_dma *dma_object)
5082{
5083 struct __vxge_hw_blockpool_entry *entry = NULL;
5084 struct __vxge_hw_blockpool *blockpool;
5085 void *memblock = NULL;
5086 enum vxge_hw_status status = VXGE_HW_OK;
5087
5088 blockpool = &devh->block_pool;
5089
5090 if (size != blockpool->block_size) {
5091
5092 memblock = vxge_os_dma_malloc(devh->pdev, size,
5093 &dma_object->handle,
5094 &dma_object->acc_handle);
5095
5096 if (memblock == NULL) {
5097 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5098 goto exit;
5099 }
5100
5101 dma_object->addr = pci_map_single(devh->pdev, memblock, size,
5102 PCI_DMA_BIDIRECTIONAL);
5103
5104 if (unlikely(pci_dma_mapping_error(devh->pdev,
5105 dma_object->addr))) {
5106 vxge_os_dma_free(devh->pdev, memblock,
5107 &dma_object->acc_handle);
5108 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5109 goto exit;
5110 }
5111
5112 } else {
5113
5114 if (!list_empty(&blockpool->free_block_list))
5115 entry = (struct __vxge_hw_blockpool_entry *)
5116 list_first_entry(&blockpool->free_block_list,
5117 struct __vxge_hw_blockpool_entry,
5118 item);
5119
5120 if (entry != NULL) {
5121 list_del(&entry->item);
5122 dma_object->addr = entry->dma_addr;
5123 dma_object->handle = entry->dma_handle;
5124 dma_object->acc_handle = entry->acc_handle;
5125 memblock = entry->memblock;
5126
5127 list_add(&entry->item,
5128 &blockpool->free_entry_list);
5129 blockpool->pool_size--;
5130 }
5131
5132 if (memblock != NULL)
5133 __vxge_hw_blockpool_blocks_add(blockpool);
5134 }
5135exit:
5136 return memblock;
5137}
5138
5139/*
5140 * __vxge_hw_blockpool_free - Frees the memory allcoated with
5141 __vxge_hw_blockpool_malloc
5142 */
5143void
5144__vxge_hw_blockpool_free(struct __vxge_hw_device *devh,
5145 void *memblock, u32 size,
5146 struct vxge_hw_mempool_dma *dma_object)
5147{
5148 struct __vxge_hw_blockpool_entry *entry = NULL;
5149 struct __vxge_hw_blockpool *blockpool;
5150 enum vxge_hw_status status = VXGE_HW_OK;
5151
5152 blockpool = &devh->block_pool;
5153
5154 if (size != blockpool->block_size) {
5155 pci_unmap_single(devh->pdev, dma_object->addr, size,
5156 PCI_DMA_BIDIRECTIONAL);
5157 vxge_os_dma_free(devh->pdev, memblock, &dma_object->acc_handle);
5158 } else {
5159
5160 if (!list_empty(&blockpool->free_entry_list))
5161 entry = (struct __vxge_hw_blockpool_entry *)
5162 list_first_entry(&blockpool->free_entry_list,
5163 struct __vxge_hw_blockpool_entry,
5164 item);
5165
5166 if (entry == NULL)
5167 entry = (struct __vxge_hw_blockpool_entry *)
5168 vmalloc(sizeof(
5169 struct __vxge_hw_blockpool_entry));
5170 else
5171 list_del(&entry->item);
5172
5173 if (entry != NULL) {
5174 entry->length = size;
5175 entry->memblock = memblock;
5176 entry->dma_addr = dma_object->addr;
5177 entry->acc_handle = dma_object->acc_handle;
5178 entry->dma_handle = dma_object->handle;
5179 list_add(&entry->item,
5180 &blockpool->free_block_list);
5181 blockpool->pool_size++;
5182 status = VXGE_HW_OK;
5183 } else
5184 status = VXGE_HW_ERR_OUT_OF_MEMORY;
5185
5186 if (status == VXGE_HW_OK)
5187 __vxge_hw_blockpool_blocks_remove(blockpool);
5188 }
5189}
5190
5191/*
5192 * __vxge_hw_blockpool_block_allocate - Allocates a block from block pool
5193 * This function allocates a block from block pool or from the system
5194 */
5195struct __vxge_hw_blockpool_entry *
5196__vxge_hw_blockpool_block_allocate(struct __vxge_hw_device *devh, u32 size)
5197{
5198 struct __vxge_hw_blockpool_entry *entry = NULL;
5199 struct __vxge_hw_blockpool *blockpool;
5200
5201 blockpool = &devh->block_pool;
5202
5203 if (size == blockpool->block_size) {
5204
5205 if (!list_empty(&blockpool->free_block_list))
5206 entry = (struct __vxge_hw_blockpool_entry *)
5207 list_first_entry(&blockpool->free_block_list,
5208 struct __vxge_hw_blockpool_entry,
5209 item);
5210
5211 if (entry != NULL) {
5212 list_del(&entry->item);
5213 blockpool->pool_size--;
5214 }
5215 }
5216
5217 if (entry != NULL)
5218 __vxge_hw_blockpool_blocks_add(blockpool);
5219
5220 return entry;
5221}
5222
5223/*
5224 * __vxge_hw_blockpool_block_free - Frees a block from block pool
5225 * @devh: Hal device
5226 * @entry: Entry of block to be freed
5227 *
5228 * This function frees a block from block pool
5229 */
5230void
5231__vxge_hw_blockpool_block_free(struct __vxge_hw_device *devh,
5232 struct __vxge_hw_blockpool_entry *entry)
5233{
5234 struct __vxge_hw_blockpool *blockpool;
5235
5236 blockpool = &devh->block_pool;
5237
5238 if (entry->length == blockpool->block_size) {
5239 list_add(&entry->item, &blockpool->free_block_list);
5240 blockpool->pool_size++;
5241 }
5242
5243 __vxge_hw_blockpool_blocks_remove(blockpool);
5244}
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 5c00861b6c2c..e249e288d160 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -20,13 +20,6 @@
20#define VXGE_CACHE_LINE_SIZE 128 20#define VXGE_CACHE_LINE_SIZE 128
21#endif 21#endif
22 22
23#define vxge_os_vaprintf(level, mask, fmt, ...) { \
24 char buff[255]; \
25 snprintf(buff, 255, fmt, __VA_ARGS__); \
26 printk(buff); \
27 printk("\n"); \
28}
29
30#ifndef VXGE_ALIGN 23#ifndef VXGE_ALIGN
31#define VXGE_ALIGN(adrs, size) \ 24#define VXGE_ALIGN(adrs, size) \
32 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1)) 25 (((size) - (((u64)adrs) & ((size)-1))) & ((size)-1))
@@ -36,8 +29,16 @@
36#define VXGE_HW_MAX_MTU 9600 29#define VXGE_HW_MAX_MTU 9600
37#define VXGE_HW_DEFAULT_MTU 1500 30#define VXGE_HW_DEFAULT_MTU 1500
38 31
39#ifdef VXGE_DEBUG_ASSERT 32#define VXGE_HW_MAX_ROM_IMAGES 8
40 33
34struct eprom_image {
35 u8 is_valid:1;
36 u8 index;
37 u8 type;
38 u16 version;
39};
40
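The table of these records is filled in by vxge_hw_vpath_eprom_img_ver_get(), declared near the end of this header, one slot per expansion-ROM image. A sketch of walking it (field use is inferred from the declarations added in this patch):

	struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
	int i;

	memset(img, 0, sizeof(img));
	if (vxge_hw_vpath_eprom_img_ver_get(hldev, img) == VXGE_HW_OK) {
		for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
			if (!img[i].is_valid)
				continue;
			/* e.g. cache the version for ethtool reporting */
			hldev->eprom_versions[i] = img[i].version;
		}
	}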
41#ifdef VXGE_DEBUG_ASSERT
41/** 42/**
42 * vxge_assert 43 * vxge_assert
43 * @test: C-condition to check 44 * @test: C-condition to check
@@ -48,16 +49,13 @@
48 * compilation 49 * compilation
49 * time. 50 * time.
50 */ 51 */
51#define vxge_assert(test) { \ 52#define vxge_assert(test) BUG_ON(!(test))
52 if (!(test)) \
53 vxge_os_bug("bad cond: "#test" at %s:%d\n", \
54 __FILE__, __LINE__); }
55#else 53#else
56#define vxge_assert(test) 54#define vxge_assert(test)
57#endif /* end of VXGE_DEBUG_ASSERT */ 55#endif /* end of VXGE_DEBUG_ASSERT */
58 56
59/** 57/**
60 * enum enum vxge_debug_level 58 * enum vxge_debug_level
61 * @VXGE_NONE: debug disabled 59 * @VXGE_NONE: debug disabled
62 * @VXGE_ERR: all errors going to be logged out 60 * @VXGE_ERR: all errors going to be logged out
63 * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs 61 * @VXGE_TRACE: all errors plus all kind of verbose tracing print outs
@@ -159,6 +157,47 @@ enum vxge_hw_device_link_state {
159}; 157};
160 158
161/** 159/**
160 * enum vxge_hw_fw_upgrade_code - FW upgrade return codes.
161 * @VXGE_HW_FW_UPGRADE_OK: All OK, send next 16 bytes
162 * @VXGE_HW_FW_UPGRADE_DONE: upload completed
163 * @VXGE_HW_FW_UPGRADE_ERR: upload error
164 * @VXGE_FW_UPGRADE_BYTES2SKIP: skip bytes in the stream
165 *
166 */
167enum vxge_hw_fw_upgrade_code {
168 VXGE_HW_FW_UPGRADE_OK = 0,
169 VXGE_HW_FW_UPGRADE_DONE = 1,
170 VXGE_HW_FW_UPGRADE_ERR = 2,
171 VXGE_FW_UPGRADE_BYTES2SKIP = 3
172};
173
174/**
175 * enum vxge_hw_fw_upgrade_err_code - FW upgrade error codes.
176 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1: corrupt data
177 * @VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW: buffer overflow
178 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3: invalid .ncf file
179 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4: invalid .ncf file
180 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5: invalid .ncf file
181 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6: invalid .ncf file
182 * @VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7: corrupt data
183 * @VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8: invalid .ncf file
184 * @VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN: generic error of unknown type
185 * @VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH: failed to flash the image (image check failed)
186 */
187enum vxge_hw_fw_upgrade_err_code {
188 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_1 = 1,
189 VXGE_HW_FW_UPGRADE_ERR_BUFFER_OVERFLOW = 2,
190 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_3 = 3,
191 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_4 = 4,
192 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_5 = 5,
193 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_6 = 6,
194 VXGE_HW_FW_UPGRADE_ERR_CORRUPT_DATA_7 = 7,
195 VXGE_HW_FW_UPGRADE_ERR_INV_NCF_FILE_8 = 8,
196 VXGE_HW_FW_UPGRADE_ERR_GENERIC_ERROR_UNKNOWN = 9,
197 VXGE_HW_FW_UPGRADE_ERR_FAILED_TO_FLASH = 10
198};
199
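Together the two enums describe the chunked upload handshake: the firmware image is streamed 16 bytes at a time, and each chunk's return code either requests the next 16 bytes, reports completion, asks the caller to skip ahead in the stream, or fails with one of the error sub-codes above. A rough sketch of the loop built around them (hldev, fwdata and size stand for the device handle and image buffer; send_fw_chunk() and fw_skip_count() are placeholders, not driver APIs):

	int off = 0;

	while (off < size) {
		u32 ret = send_fw_chunk(hldev, fwdata + off, 16);	/* placeholder */

		switch (ret) {
		case VXGE_HW_FW_UPGRADE_OK:
			off += 16;				/* send next 16 bytes */
			break;
		case VXGE_HW_FW_UPGRADE_DONE:
			return VXGE_HW_OK;			/* upload completed */
		case VXGE_FW_UPGRADE_BYTES2SKIP:
			off += 16 + fw_skip_count(hldev);	/* placeholder skip count */
			break;
		case VXGE_HW_FW_UPGRADE_ERR:
		default:
			return VXGE_HW_FAIL;	/* error sub-code identifies the cause */
		}
	}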
200/**
162 * struct vxge_hw_device_date - Date Format 201 * struct vxge_hw_device_date - Date Format
163 * @day: Day 202 * @day: Day
164 * @month: Month 203 * @month: Month
@@ -275,9 +314,9 @@ struct vxge_hw_ring_config {
275#define VXGE_HW_RING_DEFAULT 1 314#define VXGE_HW_RING_DEFAULT 1
276 315
277 u32 ring_blocks; 316 u32 ring_blocks;
278#define VXGE_HW_MIN_RING_BLOCKS 1 317#define VXGE_HW_MIN_RING_BLOCKS 1
279#define VXGE_HW_MAX_RING_BLOCKS 128 318#define VXGE_HW_MAX_RING_BLOCKS 128
280#define VXGE_HW_DEF_RING_BLOCKS 2 319#define VXGE_HW_DEF_RING_BLOCKS 2
281 320
282 u32 buffer_mode; 321 u32 buffer_mode;
283#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1 322#define VXGE_HW_RING_RXD_BUFFER_MODE_1 1
@@ -465,7 +504,6 @@ struct vxge_hw_device_config {
465 * See also: vxge_hw_driver_initialize(). 504 * See also: vxge_hw_driver_initialize().
466 */ 505 */
467struct vxge_hw_uld_cbs { 506struct vxge_hw_uld_cbs {
468
469 void (*link_up)(struct __vxge_hw_device *devh); 507 void (*link_up)(struct __vxge_hw_device *devh);
470 void (*link_down)(struct __vxge_hw_device *devh); 508 void (*link_down)(struct __vxge_hw_device *devh);
471 void (*crit_err)(struct __vxge_hw_device *devh, 509 void (*crit_err)(struct __vxge_hw_device *devh,
@@ -652,6 +690,7 @@ struct __vxge_hw_virtualpath {
652 struct vxge_hw_vpath_stats_hw_info *hw_stats; 690 struct vxge_hw_vpath_stats_hw_info *hw_stats;
653 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav; 691 struct vxge_hw_vpath_stats_hw_info *hw_stats_sav;
654 struct vxge_hw_vpath_stats_sw_info *sw_stats; 692 struct vxge_hw_vpath_stats_sw_info *sw_stats;
693 spinlock_t lock;
655}; 694};
656 695
657/* 696/*
@@ -661,7 +700,7 @@ struct __vxge_hw_virtualpath {
661 * 700 *
662 * This structure is used to store the callback information. 701 * This structure is used to store the callback information.
663 */ 702 */
664struct __vxge_hw_vpath_handle{ 703struct __vxge_hw_vpath_handle {
665 struct list_head item; 704 struct list_head item;
666 struct __vxge_hw_virtualpath *vpath; 705 struct __vxge_hw_virtualpath *vpath;
667}; 706};
@@ -674,9 +713,6 @@ struct __vxge_hw_vpath_handle{
674/** 713/**
675 * struct __vxge_hw_device - Hal device object 714 * struct __vxge_hw_device - Hal device object
676 * @magic: Magic Number 715 * @magic: Magic Number
677 * @device_id: PCI Device Id of the adapter
678 * @major_revision: PCI Device major revision
679 * @minor_revision: PCI Device minor revision
680 * @bar0: BAR0 virtual address. 716 * @bar0: BAR0 virtual address.
681 * @pdev: Physical device handle 717 * @pdev: Physical device handle
682 * @config: Configuration passed by the LL driver at initialization 718 * @config: Configuration passed by the LL driver at initialization
@@ -688,9 +724,6 @@ struct __vxge_hw_device {
688 u32 magic; 724 u32 magic;
689#define VXGE_HW_DEVICE_MAGIC 0x12345678 725#define VXGE_HW_DEVICE_MAGIC 0x12345678
690#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD 726#define VXGE_HW_DEVICE_DEAD 0xDEADDEAD
691 u16 device_id;
692 u8 major_revision;
693 u8 minor_revision;
694 void __iomem *bar0; 727 void __iomem *bar0;
695 struct pci_dev *pdev; 728 struct pci_dev *pdev;
696 struct net_device *ndev; 729 struct net_device *ndev;
@@ -731,6 +764,7 @@ struct __vxge_hw_device {
731 u32 debug_level; 764 u32 debug_level;
732 u32 level_err; 765 u32 level_err;
733 u32 level_trace; 766 u32 level_trace;
767 u16 eprom_versions[VXGE_HW_MAX_ROM_IMAGES];
734}; 768};
735 769
736#define VXGE_HW_INFO_LEN 64 770#define VXGE_HW_INFO_LEN 64
@@ -781,8 +815,8 @@ struct vxge_hw_device_hw_info {
781 u8 serial_number[VXGE_HW_INFO_LEN]; 815 u8 serial_number[VXGE_HW_INFO_LEN];
782 u8 part_number[VXGE_HW_INFO_LEN]; 816 u8 part_number[VXGE_HW_INFO_LEN];
783 u8 product_desc[VXGE_HW_INFO_LEN]; 817 u8 product_desc[VXGE_HW_INFO_LEN];
784 u8 (mac_addrs)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; 818 u8 mac_addrs[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
785 u8 (mac_addr_masks)[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN]; 819 u8 mac_addr_masks[VXGE_HW_MAX_VIRTUAL_PATHS][ETH_ALEN];
786}; 820};
787 821
788/** 822/**
@@ -829,20 +863,10 @@ struct vxge_hw_device_attr {
829 loc, \ 863 loc, \
830 offset, \ 864 offset, \
831 &val64); \ 865 &val64); \
832 \
833 if (status != VXGE_HW_OK) \ 866 if (status != VXGE_HW_OK) \
834 return status; \ 867 return status; \
835} 868}
836 869
837#define VXGE_HW_VPATH_STATS_PIO_READ(offset) { \
838 status = __vxge_hw_vpath_stats_access(vpath, \
839 VXGE_HW_STATS_OP_READ, \
840 offset, \
841 &val64); \
842 if (status != VXGE_HW_OK) \
843 return status; \
844}
845
846/* 870/*
847 * struct __vxge_hw_ring - Ring channel. 871 * struct __vxge_hw_ring - Ring channel.
848 * @channel: Channel "base" of this ring, the common part of all HW 872 * @channel: Channel "base" of this ring, the common part of all HW
@@ -1114,7 +1138,7 @@ struct __vxge_hw_non_offload_db_wrapper {
1114 * lookup to determine the transmit port. 1138 * lookup to determine the transmit port.
1115 * 01: Send on physical Port1. 1139 * 01: Send on physical Port1.
1116 * 10: Send on physical Port0. 1140 * 10: Send on physical Port0.
1117 * 11: Send on both ports. 1141 * 11: Send on both ports.
1118 * Bits 18 to 21 - Reserved 1142 * Bits 18 to 21 - Reserved
1119 * Bits 22 to 23 - Gather_Code. This field is set by the host and 1143 * Bits 22 to 23 - Gather_Code. This field is set by the host and
1120 * is used to describe how individual buffers comprise a frame. 1144 * is used to describe how individual buffers comprise a frame.
@@ -1413,12 +1437,12 @@ enum vxge_hw_rth_algoritms {
1413 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get(). 1437 * See also: vxge_hw_vpath_rts_rth_set(), vxge_hw_vpath_rts_rth_get().
1414 */ 1438 */
1415struct vxge_hw_rth_hash_types { 1439struct vxge_hw_rth_hash_types {
1416 u8 hash_type_tcpipv4_en; 1440 u8 hash_type_tcpipv4_en:1,
1417 u8 hash_type_ipv4_en; 1441 hash_type_ipv4_en:1,
1418 u8 hash_type_tcpipv6_en; 1442 hash_type_tcpipv6_en:1,
1419 u8 hash_type_ipv6_en; 1443 hash_type_ipv6_en:1,
1420 u8 hash_type_tcpipv6ex_en; 1444 hash_type_tcpipv6ex_en:1,
1421 u8 hash_type_ipv6ex_en; 1445 hash_type_ipv6ex_en:1;
1422}; 1446};
1423 1447
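Packing the enables into one-bit fields does not change how callers fill the structure; designated initializers still work. A small sketch of enabling IPv4 and TCP-over-IPv4 hashing before handing the struct to vxge_hw_vpath_rts_rth_set() as referenced above:

	struct vxge_hw_rth_hash_types hash_types = {
		.hash_type_tcpipv4_en	= 1,
		.hash_type_ipv4_en	= 1,
		.hash_type_tcpipv6_en	= 0,
		.hash_type_ipv6_en	= 0,
		.hash_type_tcpipv6ex_en	= 0,
		.hash_type_ipv6ex_en	= 0,
	};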
1424void vxge_hw_device_debug_set( 1448void vxge_hw_device_debug_set(
@@ -1893,6 +1917,15 @@ out:
1893 return vaddr; 1917 return vaddr;
1894} 1918}
1895 1919
1920static inline void vxge_os_dma_free(struct pci_dev *pdev, const void *vaddr,
1921 struct pci_dev **p_dma_acch)
1922{
1923 unsigned long misaligned = *(unsigned long *)p_dma_acch;
1924 u8 *tmp = (u8 *)vaddr;
1925 tmp -= misaligned;
1926 kfree((void *)tmp);
1927}
1928
1896/* 1929/*
1897 * __vxge_hw_mempool_item_priv - will return pointer on per item private space 1930 * __vxge_hw_mempool_item_priv - will return pointer on per item private space
1898 */ 1931 */
@@ -1962,7 +1995,6 @@ enum vxge_hw_status vxge_hw_vpath_mtu_set(
1962void 1995void
1963vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp); 1996vxge_hw_vpath_rx_doorbell_init(struct __vxge_hw_vpath_handle *vp);
1964 1997
1965
1966#ifndef readq 1998#ifndef readq
1967static inline u64 readq(void __iomem *addr) 1999static inline u64 readq(void __iomem *addr)
1968{ 2000{
@@ -2000,7 +2032,7 @@ enum vxge_hw_status
2000vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask); 2032vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2001 2033
2002/** 2034/**
2003 * vxge_debug 2035 * vxge_debug_ll
2004 * @level: level of debug verbosity. 2036 * @level: level of debug verbosity.
2005 * @mask: mask for the debug 2037 * @mask: mask for the debug
2006 * @buf: Circular buffer for tracing 2038 * @buf: Circular buffer for tracing
@@ -2012,26 +2044,13 @@ vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2012 * may be compiled out if DEBUG macro was never defined. 2044 * may be compiled out if DEBUG macro was never defined.
2013 * See also: enum vxge_debug_level{}. 2045 * See also: enum vxge_debug_level{}.
2014 */ 2046 */
2015
2016#define vxge_trace_aux(level, mask, fmt, ...) \
2017{\
2018 vxge_os_vaprintf(level, mask, fmt, __VA_ARGS__);\
2019}
2020
2021#define vxge_debug(module, level, mask, fmt, ...) { \
2022if ((level >= VXGE_TRACE && ((module & VXGE_DEBUG_TRACE_MASK) == module)) || \
2023 (level >= VXGE_ERR && ((module & VXGE_DEBUG_ERR_MASK) == module))) {\
2024 if ((mask & VXGE_DEBUG_MASK) == mask)\
2025 vxge_trace_aux(level, mask, fmt, __VA_ARGS__); \
2026} \
2027}
2028
2029#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK) 2047#if (VXGE_COMPONENT_LL & VXGE_DEBUG_MODULE_MASK)
2030#define vxge_debug_ll(level, mask, fmt, ...) \ 2048#define vxge_debug_ll(level, mask, fmt, ...) do { \
2031{\ 2049 if ((level >= VXGE_ERR && VXGE_COMPONENT_LL & VXGE_DEBUG_ERR_MASK) || \
2032 vxge_debug(VXGE_COMPONENT_LL, level, mask, fmt, __VA_ARGS__);\ 2050 (level >= VXGE_TRACE && VXGE_COMPONENT_LL & VXGE_DEBUG_TRACE_MASK))\
2033} 2051 if ((mask & VXGE_DEBUG_MASK) == mask) \
2034 2052 printk(fmt "\n", __VA_ARGS__); \
2053} while (0)
2035#else 2054#else
2036#define vxge_debug_ll(level, mask, fmt, ...) 2055#define vxge_debug_ll(level, mask, fmt, ...)
2037#endif 2056#endif
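The rewritten macro collapses the old vxge_debug()/vxge_trace_aux()/vxge_os_vaprintf() chain into a single printk guarded by the compile-time component and level masks. Call sites are unchanged; a usage sketch (the mask values below are illustrative, any mask contained in VXGE_DEBUG_MASK gets printed):

	vxge_debug_ll(VXGE_TRACE, VXGE_DEBUG_TRACE_MASK,
		"%s: vpath %d opened", __func__, vp_id);

	vxge_debug_ll(VXGE_ERR, VXGE_DEBUG_ERR_MASK,
		"%s: vpath reset failed, status %d", __func__, status);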
@@ -2051,4 +2070,26 @@ enum vxge_hw_status vxge_hw_vpath_rts_rth_set(
2051 2070
2052enum vxge_hw_status 2071enum vxge_hw_status
2053__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id); 2072__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id);
2073
2074#define VXGE_HW_MIN_SUCCESSIVE_IDLE_COUNT 5
2075#define VXGE_HW_MAX_POLLING_COUNT 100
2076
2077void
2078vxge_hw_device_wait_receive_idle(struct __vxge_hw_device *hldev);
2079
2080enum vxge_hw_status
2081vxge_hw_upgrade_read_version(struct __vxge_hw_device *hldev, u32 *major,
2082 u32 *minor, u32 *build);
2083
2084enum vxge_hw_status vxge_hw_flash_fw(struct __vxge_hw_device *hldev);
2085
2086enum vxge_hw_status
2087vxge_update_fw_image(struct __vxge_hw_device *hldev, const u8 *filebuf,
2088 int size);
2089
2090enum vxge_hw_status
2091vxge_hw_vpath_eprom_img_ver_get(struct __vxge_hw_device *hldev,
2092 struct eprom_image *eprom_image_data);
2093
2094int vxge_hw_vpath_wait_receive_idle(struct __vxge_hw_device *hldev, u32 vp_id);
2054#endif 2095#endif
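
The vxge_debug_ll rework above folds the old vxge_trace_aux/vxge_debug helpers into one macro that prints through printk only when the component, level and mask checks all pass, and wraps the body in do { ... } while (0) so the expansion behaves as a single statement inside an unbraced if/else. A minimal user-space sketch of the same pattern (the LOG name and DEBUG_MASK knob are illustrative, not part of the driver):

    #include <stdio.h>

    #define DEBUG_MASK 0x3

    /* do/while(0) keeps the macro usable as one statement, e.g. after a bare if */
    #define LOG(mask, fmt, ...) do { \
            if (((mask) & DEBUG_MASK) == (mask)) \
                    fprintf(stderr, fmt "\n", __VA_ARGS__); \
    } while (0)

    int main(void)
    {
            int link_up = 0;

            if (!link_up)
                    LOG(0x1, "link down on %s", "eth0");
            else
                    LOG(0x2, "link up on %s", "eth0");
            return 0;
    }

Without the do/while wrapper, the two-statement body would break the if/else pairing in exactly the kind of call sites the driver uses.
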
diff --git a/drivers/net/vxge/vxge-ethtool.c b/drivers/net/vxge/vxge-ethtool.c
index b67746eef923..1dd3a21b3a43 100644
--- a/drivers/net/vxge/vxge-ethtool.c
+++ b/drivers/net/vxge/vxge-ethtool.c
@@ -11,7 +11,7 @@
11 * Virtualized Server Adapter. 11 * Virtualized Server Adapter.
12 * Copyright(c) 2002-2010 Exar Corp. 12 * Copyright(c) 2002-2010 Exar Corp.
13 ******************************************************************************/ 13 ******************************************************************************/
14#include<linux/ethtool.h> 14#include <linux/ethtool.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16#include <linux/pci.h> 16#include <linux/pci.h>
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
@@ -29,7 +29,6 @@
29 * Return value: 29 * Return value:
30 * 0 on success. 30 * 0 on success.
31 */ 31 */
32
33static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info) 32static int vxge_ethtool_sset(struct net_device *dev, struct ethtool_cmd *info)
34{ 33{
35 /* We currently only support 10Gb/FULL */ 34 /* We currently only support 10Gb/FULL */
@@ -79,10 +78,9 @@ static int vxge_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
79 * Returns driver specefic information like name, version etc.. to ethtool. 78 * Returns driver specefic information like name, version etc.. to ethtool.
80 */ 79 */
81static void vxge_ethtool_gdrvinfo(struct net_device *dev, 80static void vxge_ethtool_gdrvinfo(struct net_device *dev,
82 struct ethtool_drvinfo *info) 81 struct ethtool_drvinfo *info)
83{ 82{
84 struct vxgedev *vdev; 83 struct vxgedev *vdev = netdev_priv(dev);
85 vdev = (struct vxgedev *)netdev_priv(dev);
86 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME)); 84 strlcpy(info->driver, VXGE_DRIVER_NAME, sizeof(VXGE_DRIVER_NAME));
87 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION)); 85 strlcpy(info->version, DRV_VERSION, sizeof(DRV_VERSION));
88 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN); 86 strlcpy(info->fw_version, vdev->fw_version, VXGE_HW_FW_STRLEN);
@@ -104,15 +102,14 @@ static void vxge_ethtool_gdrvinfo(struct net_device *dev,
104 * buffer area. 102 * buffer area.
105 */ 103 */
106static void vxge_ethtool_gregs(struct net_device *dev, 104static void vxge_ethtool_gregs(struct net_device *dev,
107 struct ethtool_regs *regs, void *space) 105 struct ethtool_regs *regs, void *space)
108{ 106{
109 int index, offset; 107 int index, offset;
110 enum vxge_hw_status status; 108 enum vxge_hw_status status;
111 u64 reg; 109 u64 reg;
112 u64 *reg_space = (u64 *) space; 110 u64 *reg_space = (u64 *)space;
113 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 111 struct vxgedev *vdev = netdev_priv(dev);
114 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 112 struct __vxge_hw_device *hldev = vdev->devh;
115 pci_get_drvdata(vdev->pdev);
116 113
117 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 114 regs->len = sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
118 regs->version = vdev->pdev->subsystem_device; 115 regs->version = vdev->pdev->subsystem_device;
@@ -147,9 +144,8 @@ static void vxge_ethtool_gregs(struct net_device *dev,
147 */ 144 */
148static int vxge_ethtool_idnic(struct net_device *dev, u32 data) 145static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
149{ 146{
150 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 147 struct vxgedev *vdev = netdev_priv(dev);
151 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 148 struct __vxge_hw_device *hldev = vdev->devh;
152 pci_get_drvdata(vdev->pdev);
153 149
154 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON); 150 vxge_hw_device_flick_link_led(hldev, VXGE_FLICKER_ON);
155 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME); 151 msleep_interruptible(data ? (data * HZ) : VXGE_MAX_FLICKER_TIME);
@@ -168,11 +164,10 @@ static int vxge_ethtool_idnic(struct net_device *dev, u32 data)
168 * void 164 * void
169 */ 165 */
170static void vxge_ethtool_getpause_data(struct net_device *dev, 166static void vxge_ethtool_getpause_data(struct net_device *dev,
171 struct ethtool_pauseparam *ep) 167 struct ethtool_pauseparam *ep)
172{ 168{
173 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 169 struct vxgedev *vdev = netdev_priv(dev);
174 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 170 struct __vxge_hw_device *hldev = vdev->devh;
175 pci_get_drvdata(vdev->pdev);
176 171
177 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause); 172 vxge_hw_device_getpause_data(hldev, 0, &ep->tx_pause, &ep->rx_pause);
178} 173}
@@ -188,11 +183,10 @@ static void vxge_ethtool_getpause_data(struct net_device *dev,
188 * int, returns 0 on Success 183 * int, returns 0 on Success
189 */ 184 */
190static int vxge_ethtool_setpause_data(struct net_device *dev, 185static int vxge_ethtool_setpause_data(struct net_device *dev,
191 struct ethtool_pauseparam *ep) 186 struct ethtool_pauseparam *ep)
192{ 187{
193 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 188 struct vxgedev *vdev = netdev_priv(dev);
194 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 189 struct __vxge_hw_device *hldev = vdev->devh;
195 pci_get_drvdata(vdev->pdev);
196 190
197 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause); 191 vxge_hw_device_setpause_data(hldev, 0, ep->tx_pause, ep->rx_pause);
198 192
@@ -209,9 +203,8 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
209 enum vxge_hw_status status; 203 enum vxge_hw_status status;
210 enum vxge_hw_status swstatus; 204 enum vxge_hw_status swstatus;
211 struct vxge_vpath *vpath = NULL; 205 struct vxge_vpath *vpath = NULL;
212 206 struct vxgedev *vdev = netdev_priv(dev);
213 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 207 struct __vxge_hw_device *hldev = vdev->devh;
214 struct __vxge_hw_device *hldev = vdev->devh;
215 struct vxge_hw_xmac_stats *xmac_stats; 208 struct vxge_hw_xmac_stats *xmac_stats;
216 struct vxge_hw_device_stats_sw_info *sw_stats; 209 struct vxge_hw_device_stats_sw_info *sw_stats;
217 struct vxge_hw_device_stats_hw_info *hw_stats; 210 struct vxge_hw_device_stats_hw_info *hw_stats;
@@ -574,12 +567,12 @@ static void vxge_get_ethtool_stats(struct net_device *dev,
574 kfree(hw_stats); 567 kfree(hw_stats);
575} 568}
576 569
577static void vxge_ethtool_get_strings(struct net_device *dev, 570static void vxge_ethtool_get_strings(struct net_device *dev, u32 stringset,
578 u32 stringset, u8 *data) 571 u8 *data)
579{ 572{
580 int stat_size = 0; 573 int stat_size = 0;
581 int i, j; 574 int i, j;
582 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 575 struct vxgedev *vdev = netdev_priv(dev);
583 switch (stringset) { 576 switch (stringset) {
584 case ETH_SS_STATS: 577 case ETH_SS_STATS:
585 vxge_add_string("VPATH STATISTICS%s\t\t\t", 578 vxge_add_string("VPATH STATISTICS%s\t\t\t",
@@ -1066,21 +1059,21 @@ static void vxge_ethtool_get_strings(struct net_device *dev,
1066 1059
1067static int vxge_ethtool_get_regs_len(struct net_device *dev) 1060static int vxge_ethtool_get_regs_len(struct net_device *dev)
1068{ 1061{
1069 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1062 struct vxgedev *vdev = netdev_priv(dev);
1070 1063
1071 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath; 1064 return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
1072} 1065}
1073 1066
1074static u32 vxge_get_rx_csum(struct net_device *dev) 1067static u32 vxge_get_rx_csum(struct net_device *dev)
1075{ 1068{
1076 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1069 struct vxgedev *vdev = netdev_priv(dev);
1077 1070
1078 return vdev->rx_csum; 1071 return vdev->rx_csum;
1079} 1072}
1080 1073
1081static int vxge_set_rx_csum(struct net_device *dev, u32 data) 1074static int vxge_set_rx_csum(struct net_device *dev, u32 data)
1082{ 1075{
1083 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1076 struct vxgedev *vdev = netdev_priv(dev);
1084 1077
1085 if (data) 1078 if (data)
1086 vdev->rx_csum = 1; 1079 vdev->rx_csum = 1;
@@ -1102,7 +1095,7 @@ static int vxge_ethtool_op_set_tso(struct net_device *dev, u32 data)
1102 1095
1103static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset) 1096static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1104{ 1097{
1105 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 1098 struct vxgedev *vdev = netdev_priv(dev);
1106 1099
1107 switch (sset) { 1100 switch (sset) {
1108 case ETH_SS_STATS: 1101 case ETH_SS_STATS:
@@ -1119,6 +1112,59 @@ static int vxge_ethtool_get_sset_count(struct net_device *dev, int sset)
1119 } 1112 }
1120} 1113}
1121 1114
1115static int vxge_set_flags(struct net_device *dev, u32 data)
1116{
1117 struct vxgedev *vdev = netdev_priv(dev);
1118 enum vxge_hw_status status;
1119
1120 if (data & ~ETH_FLAG_RXHASH)
1121 return -EOPNOTSUPP;
1122
1123 if (!!(data & ETH_FLAG_RXHASH) == vdev->devh->config.rth_en)
1124 return 0;
1125
1126 if (netif_running(dev) || (vdev->config.rth_steering == NO_STEERING))
1127 return -EINVAL;
1128
1129 vdev->devh->config.rth_en = !!(data & ETH_FLAG_RXHASH);
1130
1131 /* Enabling RTH requires some of the logic in vxge_device_register and a
1132 * vpath reset. Due to these restrictions, only allow modification
1133 * while the interface is down.
1134 */
1135 status = vxge_reset_all_vpaths(vdev);
1136 if (status != VXGE_HW_OK) {
1137 vdev->devh->config.rth_en = !vdev->devh->config.rth_en;
1138 return -EFAULT;
1139 }
1140
1141 if (vdev->devh->config.rth_en)
1142 dev->features |= NETIF_F_RXHASH;
1143 else
1144 dev->features &= ~NETIF_F_RXHASH;
1145
1146 return 0;
1147}
1148
1149static int vxge_fw_flash(struct net_device *dev, struct ethtool_flash *parms)
1150{
1151 struct vxgedev *vdev = netdev_priv(dev);
1152
1153 if (vdev->max_vpath_supported != VXGE_HW_MAX_VIRTUAL_PATHS) {
1154 printk(KERN_INFO "Single Function Mode is required to flash the"
1155 " firmware\n");
1156 return -EINVAL;
1157 }
1158
1159 if (netif_running(dev)) {
1160 printk(KERN_INFO "Interface %s must be down to flash the "
1161 "firmware\n", dev->name);
1162 return -EBUSY;
1163 }
1164
1165 return vxge_fw_upgrade(vdev, parms->data, 1);
1166}
1167
1122static const struct ethtool_ops vxge_ethtool_ops = { 1168static const struct ethtool_ops vxge_ethtool_ops = {
1123 .get_settings = vxge_ethtool_gset, 1169 .get_settings = vxge_ethtool_gset,
1124 .set_settings = vxge_ethtool_sset, 1170 .set_settings = vxge_ethtool_sset,
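
vxge_set_flags above is the driver end of the ETHTOOL_SFLAGS ioctl: only the ETH_FLAG_RXHASH bit is accepted, and because enabling RTH needs a vpath reset the flag may only be toggled while the interface is down. A user-space sketch of flipping that flag (the interface name is an example and CAP_NET_ADMIN is required; this is the caller side, not driver code):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_value eval = { .cmd = ETHTOOL_GFLAGS };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);    /* example interface */
            ifr.ifr_data = (char *)&eval;

            if (ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
                    eval.cmd = ETHTOOL_SFLAGS;
                    eval.data |= ETH_FLAG_RXHASH;           /* request receive hashing */
                    if (ioctl(fd, SIOCETHTOOL, &ifr) < 0)
                            perror("ETHTOOL_SFLAGS");       /* vxge returns -EINVAL if the device is up */
            }
            close(fd);
            return 0;
    }
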
@@ -1131,7 +1177,7 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1131 .get_rx_csum = vxge_get_rx_csum, 1177 .get_rx_csum = vxge_get_rx_csum,
1132 .set_rx_csum = vxge_set_rx_csum, 1178 .set_rx_csum = vxge_set_rx_csum,
1133 .get_tx_csum = ethtool_op_get_tx_csum, 1179 .get_tx_csum = ethtool_op_get_tx_csum,
1134 .set_tx_csum = ethtool_op_set_tx_hw_csum, 1180 .set_tx_csum = ethtool_op_set_tx_ipv6_csum,
1135 .get_sg = ethtool_op_get_sg, 1181 .get_sg = ethtool_op_get_sg,
1136 .set_sg = ethtool_op_set_sg, 1182 .set_sg = ethtool_op_set_sg,
1137 .get_tso = ethtool_op_get_tso, 1183 .get_tso = ethtool_op_get_tso,
@@ -1140,6 +1186,8 @@ static const struct ethtool_ops vxge_ethtool_ops = {
1140 .phys_id = vxge_ethtool_idnic, 1186 .phys_id = vxge_ethtool_idnic,
1141 .get_sset_count = vxge_ethtool_get_sset_count, 1187 .get_sset_count = vxge_ethtool_get_sset_count,
1142 .get_ethtool_stats = vxge_get_ethtool_stats, 1188 .get_ethtool_stats = vxge_get_ethtool_stats,
1189 .set_flags = vxge_set_flags,
1190 .flash_device = vxge_fw_flash,
1143}; 1191};
1144 1192
1145void vxge_initialize_ethtool_ops(struct net_device *ndev) 1193void vxge_initialize_ethtool_ops(struct net_device *ndev)
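
The new .flash_device hook (vxge_fw_flash) is driven by the ETHTOOL_FLASHDEV ioctl, i.e. ethtool -f; the firmware file name and region arrive in struct ethtool_flash, and per the checks above the driver refuses to flash unless the adapter is in single-function mode and the interface is down. A sketch of the user-space request (interface and file names are examples only; the driver is expected to load the named file itself, typically via request_firmware):

    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    static int flash_fw(const char *ifname, const char *fw_file)
    {
            struct ethtool_flash efl;
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0), ret;

            memset(&efl, 0, sizeof(efl));
            efl.cmd = ETHTOOL_FLASHDEV;
            efl.region = ETHTOOL_FLASH_ALL_REGIONS;
            strncpy(efl.data, fw_file, ETHTOOL_FLASH_MAX_FILENAME - 1);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&efl;

            ret = ioctl(fd, SIOCETHTOOL, &ifr);     /* hands the request to ethtool_ops->flash_device */
            close(fd);
            return ret;
    }
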
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 813829f3d024..1ac9b568f1b0 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -50,6 +50,8 @@
50#include <net/ip.h> 50#include <net/ip.h>
51#include <linux/netdevice.h> 51#include <linux/netdevice.h>
52#include <linux/etherdevice.h> 52#include <linux/etherdevice.h>
53#include <linux/firmware.h>
54#include <linux/net_tstamp.h>
53#include "vxge-main.h" 55#include "vxge-main.h"
54#include "vxge-reg.h" 56#include "vxge-reg.h"
55 57
@@ -82,16 +84,6 @@ module_param_array(bw_percentage, uint, NULL, 0);
82 84
83static struct vxge_drv_config *driver_config; 85static struct vxge_drv_config *driver_config;
84 86
85static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
86 struct macInfo *mac);
87static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
88 struct macInfo *mac);
89static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac);
90static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac);
91static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath);
92static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath);
93static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
94
95static inline int is_vxge_card_up(struct vxgedev *vdev) 87static inline int is_vxge_card_up(struct vxgedev *vdev)
96{ 88{
97 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state); 89 return test_bit(__VXGE_STATE_CARD_UP, &vdev->state);
@@ -148,11 +140,10 @@ static inline void VXGE_COMPLETE_ALL_RX(struct vxgedev *vdev)
148 * This function is called during interrupt context to notify link up state 140 * This function is called during interrupt context to notify link up state
149 * change. 141 * change.
150 */ 142 */
151static void 143static void vxge_callback_link_up(struct __vxge_hw_device *hldev)
152vxge_callback_link_up(struct __vxge_hw_device *hldev)
153{ 144{
154 struct net_device *dev = hldev->ndev; 145 struct net_device *dev = hldev->ndev;
155 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 146 struct vxgedev *vdev = netdev_priv(dev);
156 147
157 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 148 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
158 vdev->ndev->name, __func__, __LINE__); 149 vdev->ndev->name, __func__, __LINE__);
@@ -172,11 +163,10 @@ vxge_callback_link_up(struct __vxge_hw_device *hldev)
172 * This function is called during interrupt context to notify link down state 163 * This function is called during interrupt context to notify link down state
173 * change. 164 * change.
174 */ 165 */
175static void 166static void vxge_callback_link_down(struct __vxge_hw_device *hldev)
176vxge_callback_link_down(struct __vxge_hw_device *hldev)
177{ 167{
178 struct net_device *dev = hldev->ndev; 168 struct net_device *dev = hldev->ndev;
179 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 169 struct vxgedev *vdev = netdev_priv(dev);
180 170
181 vxge_debug_entryexit(VXGE_TRACE, 171 vxge_debug_entryexit(VXGE_TRACE,
182 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__); 172 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
@@ -195,7 +185,7 @@ vxge_callback_link_down(struct __vxge_hw_device *hldev)
195 * 185 *
196 * Allocate SKB. 186 * Allocate SKB.
197 */ 187 */
198static struct sk_buff* 188static struct sk_buff *
199vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size) 189vxge_rx_alloc(void *dtrh, struct vxge_ring *ring, const int skb_size)
200{ 190{
201 struct net_device *dev; 191 struct net_device *dev;
@@ -369,7 +359,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
369 u8 t_code, void *userdata) 359 u8 t_code, void *userdata)
370{ 360{
371 struct vxge_ring *ring = (struct vxge_ring *)userdata; 361 struct vxge_ring *ring = (struct vxge_ring *)userdata;
372 struct net_device *dev = ring->ndev; 362 struct net_device *dev = ring->ndev;
373 unsigned int dma_sizes; 363 unsigned int dma_sizes;
374 void *first_dtr = NULL; 364 void *first_dtr = NULL;
375 int dtr_cnt = 0; 365 int dtr_cnt = 0;
@@ -413,7 +403,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
413 403
414 prefetch((char *)skb + L1_CACHE_BYTES); 404 prefetch((char *)skb + L1_CACHE_BYTES);
415 if (unlikely(t_code)) { 405 if (unlikely(t_code)) {
416
417 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) != 406 if (vxge_hw_ring_handle_tcode(ringh, dtr, t_code) !=
418 VXGE_HW_OK) { 407 VXGE_HW_OK) {
419 408
@@ -436,9 +425,7 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
436 } 425 }
437 426
438 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) { 427 if (pkt_length > VXGE_LL_RX_COPY_THRESHOLD) {
439
440 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) { 428 if (vxge_rx_alloc(dtr, ring, data_size) != NULL) {
441
442 if (!vxge_rx_map(dtr, ring)) { 429 if (!vxge_rx_map(dtr, ring)) {
443 skb_put(skb, pkt_length); 430 skb_put(skb, pkt_length);
444 431
@@ -513,6 +500,23 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
513 else 500 else
514 skb_checksum_none_assert(skb); 501 skb_checksum_none_assert(skb);
515 502
503
504 if (ring->rx_hwts) {
505 struct skb_shared_hwtstamps *skb_hwts;
506 u32 ns = *(u32 *)(skb->head + pkt_length);
507
508 skb_hwts = skb_hwtstamps(skb);
509 skb_hwts->hwtstamp = ns_to_ktime(ns);
510 skb_hwts->syststamp.tv64 = 0;
511 }
512
513 /* rth_hash_type and rth_it_hit are non-zero regardless of
514 * whether rss is enabled. Only the rth_value is zero/non-zero
515 * if rss is disabled/enabled, so key off of that.
516 */
517 if (ext_info.rth_value)
518 skb->rxhash = ext_info.rth_value;
519
516 vxge_rx_complete(ring, skb, ext_info.vlan, 520 vxge_rx_complete(ring, skb, ext_info.vlan,
517 pkt_length, &ext_info); 521 pkt_length, &ext_info);
518 522
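
The receive-completion changes above attach the adapter's nanosecond timestamp to the skb when ring->rx_hwts is set and copy the RTH hash into skb->rxhash. Hardware receive timestamps stored this way reach applications through the SO_TIMESTAMPING socket option; a rough user-space sketch, assuming timestamping was already enabled with SIOCSHWTSTAMP (the port number, fallback define and error handling are illustrative):

    #include <stdio.h>
    #include <string.h>
    #include <time.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <arpa/inet.h>
    #include <linux/net_tstamp.h>

    #ifndef SO_TIMESTAMPING
    #define SO_TIMESTAMPING 37      /* value on most architectures; older libcs lack it */
    #endif

    int main(void)
    {
            int val = SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE;
            int sock = socket(AF_INET, SOCK_DGRAM, 0);
            char data[2048], ctrl[512];
            struct sockaddr_in addr = { .sin_family = AF_INET,
                                        .sin_port = htons(12345) };
            struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
            struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
                                  .msg_control = ctrl, .msg_controllen = sizeof(ctrl) };
            struct cmsghdr *cm;

            setsockopt(sock, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val));
            bind(sock, (struct sockaddr *)&addr, sizeof(addr));

            if (recvmsg(sock, &msg, 0) >= 0)
                    for (cm = CMSG_FIRSTHDR(&msg); cm; cm = CMSG_NXTHDR(&msg, cm))
                            if (cm->cmsg_level == SOL_SOCKET &&
                                cm->cmsg_type == SO_TIMESTAMPING) {
                                    struct timespec *ts = (struct timespec *)CMSG_DATA(cm);
                                    /* ts[2] carries the raw hardware timestamp */
                                    printf("hw raw: %ld.%09ld\n",
                                           (long)ts[2].tv_sec, ts[2].tv_nsec);
                            }
            return 0;
    }
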
@@ -660,6 +664,65 @@ static enum vxge_hw_status vxge_search_mac_addr_in_list(
660 return FALSE; 664 return FALSE;
661} 665}
662 666
667static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
668{
669 struct vxge_mac_addrs *new_mac_entry;
670 u8 *mac_address = NULL;
671
672 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
673 return TRUE;
674
675 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
676 if (!new_mac_entry) {
677 vxge_debug_mem(VXGE_ERR,
678 "%s: memory allocation failed",
679 VXGE_DRIVER_NAME);
680 return FALSE;
681 }
682
683 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
684
685 /* Copy the new mac address to the list */
686 mac_address = (u8 *)&new_mac_entry->macaddr;
687 memcpy(mac_address, mac->macaddr, ETH_ALEN);
688
689 new_mac_entry->state = mac->state;
690 vpath->mac_addr_cnt++;
691
692 /* Is this a multicast address */
693 if (0x01 & mac->macaddr[0])
694 vpath->mcast_addr_cnt++;
695
696 return TRUE;
697}
698
699/* Add a mac address to DA table */
700static enum vxge_hw_status
701vxge_add_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
702{
703 enum vxge_hw_status status = VXGE_HW_OK;
704 struct vxge_vpath *vpath;
705 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
706
707 if (0x01 & mac->macaddr[0]) /* multicast address */
708 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
709 else
710 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
711
712 vpath = &vdev->vpaths[mac->vpath_no];
713 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
714 mac->macmask, duplicate_mode);
715 if (status != VXGE_HW_OK) {
716 vxge_debug_init(VXGE_ERR,
717 "DA config add entry failed for vpath:%d",
718 vpath->device_id);
719 } else
720 if (FALSE == vxge_mac_list_add(vpath, mac))
721 status = -EPERM;
722
723 return status;
724}
725
663static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header) 726static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
664{ 727{
665 struct macInfo mac_info; 728 struct macInfo mac_info;
@@ -670,7 +733,7 @@ static int vxge_learn_mac(struct vxgedev *vdev, u8 *mac_header)
670 struct vxge_vpath *vpath = NULL; 733 struct vxge_vpath *vpath = NULL;
671 struct __vxge_hw_device *hldev; 734 struct __vxge_hw_device *hldev;
672 735
673 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 736 hldev = pci_get_drvdata(vdev->pdev);
674 737
675 mac_address = (u8 *)&mac_addr; 738 mac_address = (u8 *)&mac_addr;
676 memcpy(mac_address, mac_header, ETH_ALEN); 739 memcpy(mac_address, mac_header, ETH_ALEN);
@@ -769,7 +832,7 @@ vxge_xmit(struct sk_buff *skb, struct net_device *dev)
769 return NETDEV_TX_OK; 832 return NETDEV_TX_OK;
770 } 833 }
771 834
772 vdev = (struct vxgedev *)netdev_priv(dev); 835 vdev = netdev_priv(dev);
773 836
774 if (unlikely(!is_vxge_card_up(vdev))) { 837 if (unlikely(!is_vxge_card_up(vdev))) {
775 vxge_debug_tx(VXGE_ERR, 838 vxge_debug_tx(VXGE_ERR,
@@ -1005,6 +1068,50 @@ vxge_tx_term(void *dtrh, enum vxge_hw_txdl_state state, void *userdata)
1005 "%s:%d Exiting...", __func__, __LINE__); 1068 "%s:%d Exiting...", __func__, __LINE__);
1006} 1069}
1007 1070
1071static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1072{
1073 struct list_head *entry, *next;
1074 u64 del_mac = 0;
1075 u8 *mac_address = (u8 *) (&del_mac);
1076
1077 /* Copy the mac address to delete from the list */
1078 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1079
1080 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1081 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1082 list_del(entry);
1083 kfree((struct vxge_mac_addrs *)entry);
1084 vpath->mac_addr_cnt--;
1085
1086 /* Is this a multicast address */
1087 if (0x01 & mac->macaddr[0])
1088 vpath->mcast_addr_cnt--;
1089 return TRUE;
1090 }
1091 }
1092
1093 return FALSE;
1094}
1095
1096/* delete a mac address from DA table */
1097static enum vxge_hw_status
1098vxge_del_mac_addr(struct vxgedev *vdev, struct macInfo *mac)
1099{
1100 enum vxge_hw_status status = VXGE_HW_OK;
1101 struct vxge_vpath *vpath;
1102
1103 vpath = &vdev->vpaths[mac->vpath_no];
1104 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1105 mac->macmask);
1106 if (status != VXGE_HW_OK) {
1107 vxge_debug_init(VXGE_ERR,
1108 "DA config delete entry failed for vpath:%d",
1109 vpath->device_id);
1110 } else
1111 vxge_mac_list_del(vpath, mac);
1112 return status;
1113}
1114
1008/** 1115/**
1009 * vxge_set_multicast 1116 * vxge_set_multicast
1010 * @dev: pointer to the device structure 1117 * @dev: pointer to the device structure
@@ -1034,7 +1141,7 @@ static void vxge_set_multicast(struct net_device *dev)
1034 vxge_debug_entryexit(VXGE_TRACE, 1141 vxge_debug_entryexit(VXGE_TRACE,
1035 "%s:%d", __func__, __LINE__); 1142 "%s:%d", __func__, __LINE__);
1036 1143
1037 vdev = (struct vxgedev *)netdev_priv(dev); 1144 vdev = netdev_priv(dev);
1038 hldev = (struct __vxge_hw_device *)vdev->devh; 1145 hldev = (struct __vxge_hw_device *)vdev->devh;
1039 1146
1040 if (unlikely(!is_vxge_card_up(vdev))) 1147 if (unlikely(!is_vxge_card_up(vdev)))
@@ -1094,7 +1201,7 @@ static void vxge_set_multicast(struct net_device *dev)
1094 /* Delete previous MC's */ 1201 /* Delete previous MC's */
1095 for (i = 0; i < mcast_cnt; i++) { 1202 for (i = 0; i < mcast_cnt; i++) {
1096 list_for_each_safe(entry, next, list_head) { 1203 list_for_each_safe(entry, next, list_head) {
1097 mac_entry = (struct vxge_mac_addrs *) entry; 1204 mac_entry = (struct vxge_mac_addrs *)entry;
1098 /* Copy the mac address to delete */ 1205 /* Copy the mac address to delete */
1099 mac_address = (u8 *)&mac_entry->macaddr; 1206 mac_address = (u8 *)&mac_entry->macaddr;
1100 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1207 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1137,7 +1244,7 @@ _set_all_mcast:
1137 /* Delete previous MC's */ 1244 /* Delete previous MC's */
1138 for (i = 0; i < mcast_cnt; i++) { 1245 for (i = 0; i < mcast_cnt; i++) {
1139 list_for_each_safe(entry, next, list_head) { 1246 list_for_each_safe(entry, next, list_head) {
1140 mac_entry = (struct vxge_mac_addrs *) entry; 1247 mac_entry = (struct vxge_mac_addrs *)entry;
1141 /* Copy the mac address to delete */ 1248 /* Copy the mac address to delete */
1142 mac_address = (u8 *)&mac_entry->macaddr; 1249 mac_address = (u8 *)&mac_entry->macaddr;
1143 memcpy(mac_info.macaddr, mac_address, ETH_ALEN); 1250 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
@@ -1184,14 +1291,14 @@ static int vxge_set_mac_addr(struct net_device *dev, void *p)
1184{ 1291{
1185 struct sockaddr *addr = p; 1292 struct sockaddr *addr = p;
1186 struct vxgedev *vdev; 1293 struct vxgedev *vdev;
1187 struct __vxge_hw_device *hldev; 1294 struct __vxge_hw_device *hldev;
1188 enum vxge_hw_status status = VXGE_HW_OK; 1295 enum vxge_hw_status status = VXGE_HW_OK;
1189 struct macInfo mac_info_new, mac_info_old; 1296 struct macInfo mac_info_new, mac_info_old;
1190 int vpath_idx = 0; 1297 int vpath_idx = 0;
1191 1298
1192 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1299 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1193 1300
1194 vdev = (struct vxgedev *)netdev_priv(dev); 1301 vdev = netdev_priv(dev);
1195 hldev = vdev->devh; 1302 hldev = vdev->devh;
1196 1303
1197 if (!is_valid_ether_addr(addr->sa_data)) 1304 if (!is_valid_ether_addr(addr->sa_data))
@@ -1292,8 +1399,13 @@ static void vxge_vpath_intr_enable(struct vxgedev *vdev, int vp_id)
1292static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id) 1399static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1293{ 1400{
1294 struct vxge_vpath *vpath = &vdev->vpaths[vp_id]; 1401 struct vxge_vpath *vpath = &vdev->vpaths[vp_id];
1402 struct __vxge_hw_device *hldev;
1295 int msix_id; 1403 int msix_id;
1296 1404
1405 hldev = pci_get_drvdata(vdev->pdev);
1406
1407 vxge_hw_vpath_wait_receive_idle(hldev, vpath->device_id);
1408
1297 vxge_hw_vpath_intr_disable(vpath->handle); 1409 vxge_hw_vpath_intr_disable(vpath->handle);
1298 1410
1299 if (vdev->config.intr_type == INTA) 1411 if (vdev->config.intr_type == INTA)
@@ -1310,6 +1422,95 @@ static void vxge_vpath_intr_disable(struct vxgedev *vdev, int vp_id)
1310 } 1422 }
1311} 1423}
1312 1424
1425/* list all mac addresses from DA table */
1426static enum vxge_hw_status
1427vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath, struct macInfo *mac)
1428{
1429 enum vxge_hw_status status = VXGE_HW_OK;
1430 unsigned char macmask[ETH_ALEN];
1431 unsigned char macaddr[ETH_ALEN];
1432
1433 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1434 macaddr, macmask);
1435 if (status != VXGE_HW_OK) {
1436 vxge_debug_init(VXGE_ERR,
1437 "DA config list entry failed for vpath:%d",
1438 vpath->device_id);
1439 return status;
1440 }
1441
1442 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1443 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1444 macaddr, macmask);
1445 if (status != VXGE_HW_OK)
1446 break;
1447 }
1448
1449 return status;
1450}
1451
1452/* Store all mac addresses from the list to the DA table */
1453static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1454{
1455 enum vxge_hw_status status = VXGE_HW_OK;
1456 struct macInfo mac_info;
1457 u8 *mac_address = NULL;
1458 struct list_head *entry, *next;
1459
1460 memset(&mac_info, 0, sizeof(struct macInfo));
1461
1462 if (vpath->is_open) {
1463 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1464 mac_address =
1465 (u8 *)&
1466 ((struct vxge_mac_addrs *)entry)->macaddr;
1467 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1468 ((struct vxge_mac_addrs *)entry)->state =
1469 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1470 /* does this mac address already exist in da table? */
1471 status = vxge_search_mac_addr_in_da_table(vpath,
1472 &mac_info);
1473 if (status != VXGE_HW_OK) {
1474 /* Add this mac address to the DA table */
1475 status = vxge_hw_vpath_mac_addr_add(
1476 vpath->handle, mac_info.macaddr,
1477 mac_info.macmask,
1478 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1479 if (status != VXGE_HW_OK) {
1480 vxge_debug_init(VXGE_ERR,
1481 "DA add entry failed for vpath:%d",
1482 vpath->device_id);
1483 ((struct vxge_mac_addrs *)entry)->state
1484 = VXGE_LL_MAC_ADDR_IN_LIST;
1485 }
1486 }
1487 }
1488 }
1489
1490 return status;
1491}
1492
1493/* Store all vlan ids from the list to the vid table */
1494static enum vxge_hw_status
1495vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1496{
1497 enum vxge_hw_status status = VXGE_HW_OK;
1498 struct vxgedev *vdev = vpath->vdev;
1499 u16 vid;
1500
1501 if (vdev->vlgrp && vpath->is_open) {
1502
1503 for (vid = 0; vid < VLAN_N_VID; vid++) {
1504 if (!vlan_group_get_device(vdev->vlgrp, vid))
1505 continue;
1506 /* Add these vlan to the vid table */
1507 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1508 }
1509 }
1510
1511 return status;
1512}
1513
1313/* 1514/*
1314 * vxge_reset_vpath 1515 * vxge_reset_vpath
1315 * @vdev: pointer to vdev 1516 * @vdev: pointer to vdev
@@ -1405,12 +1606,16 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1405 } 1606 }
1406 1607
1407 if (event == VXGE_LL_FULL_RESET) { 1608 if (event == VXGE_LL_FULL_RESET) {
1609 netif_carrier_off(vdev->ndev);
1610
1408 /* wait for all the vpath reset to complete */ 1611 /* wait for all the vpath reset to complete */
1409 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 1612 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
1410 while (test_bit(vp_id, &vdev->vp_reset)) 1613 while (test_bit(vp_id, &vdev->vp_reset))
1411 msleep(50); 1614 msleep(50);
1412 } 1615 }
1413 1616
1617 netif_carrier_on(vdev->ndev);
1618
1414 /* if execution mode is set to debug, don't reset the adapter */ 1619 /* if execution mode is set to debug, don't reset the adapter */
1415 if (unlikely(vdev->exec_mode)) { 1620 if (unlikely(vdev->exec_mode)) {
1416 vxge_debug_init(VXGE_ERR, 1621 vxge_debug_init(VXGE_ERR,
@@ -1423,6 +1628,7 @@ static int do_vxge_reset(struct vxgedev *vdev, int event)
1423 } 1628 }
1424 1629
1425 if (event == VXGE_LL_FULL_RESET) { 1630 if (event == VXGE_LL_FULL_RESET) {
1631 vxge_hw_device_wait_receive_idle(vdev->devh);
1426 vxge_hw_device_intr_disable(vdev->devh); 1632 vxge_hw_device_intr_disable(vdev->devh);
1427 1633
1428 switch (vdev->cric_err_event) { 1634 switch (vdev->cric_err_event) {
@@ -1563,9 +1769,14 @@ out:
1563 * 1769 *
1564 * driver may reset the chip on events of serr, eccerr, etc 1770 * driver may reset the chip on events of serr, eccerr, etc
1565 */ 1771 */
1566static int vxge_reset(struct vxgedev *vdev) 1772static void vxge_reset(struct work_struct *work)
1567{ 1773{
1568 return do_vxge_reset(vdev, VXGE_LL_FULL_RESET); 1774 struct vxgedev *vdev = container_of(work, struct vxgedev, reset_task);
1775
1776 if (!netif_running(vdev->ndev))
1777 return;
1778
1779 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
1569} 1780}
1570 1781
1571/** 1782/**
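
vxge_reset is reworked above from a directly called function into a work item: the tx watchdog later in this patch queues it with schedule_work, so the full-chip reset runs in process context rather than inside the watchdog path. A minimal kernel-style sketch of that pattern, using made-up demo_* names rather than the driver's own:

    #include <linux/workqueue.h>
    #include <linux/netdevice.h>

    struct demo_adapter {
            struct net_device *ndev;
            struct work_struct reset_task;
    };

    /* Runs later in process context, not in the caller's (softirq/timer) context. */
    static void demo_reset_task(struct work_struct *work)
    {
            struct demo_adapter *ap = container_of(work, struct demo_adapter, reset_task);

            if (!netif_running(ap->ndev))
                    return;
            /* ... heavy re-initialisation would go here ... */
    }

    static void demo_tx_timeout(struct net_device *dev)
    {
            struct demo_adapter *ap = netdev_priv(dev);

            schedule_work(&ap->reset_task);         /* defer; never reset inline here */
    }

    static void demo_setup(struct demo_adapter *ap)
    {
            INIT_WORK(&ap->reset_task, demo_reset_task);
    }
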
@@ -1608,8 +1819,7 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1608 int budget_org = budget; 1819 int budget_org = budget;
1609 struct vxge_ring *ring; 1820 struct vxge_ring *ring;
1610 1821
1611 struct __vxge_hw_device *hldev = (struct __vxge_hw_device *) 1822 struct __vxge_hw_device *hldev = pci_get_drvdata(vdev->pdev);
1612 pci_get_drvdata(vdev->pdev);
1613 1823
1614 for (i = 0; i < vdev->no_of_vpath; i++) { 1824 for (i = 0; i < vdev->no_of_vpath; i++) {
1615 ring = &vdev->vpaths[i].ring; 1825 ring = &vdev->vpaths[i].ring;
@@ -1645,11 +1855,11 @@ static int vxge_poll_inta(struct napi_struct *napi, int budget)
1645 */ 1855 */
1646static void vxge_netpoll(struct net_device *dev) 1856static void vxge_netpoll(struct net_device *dev)
1647{ 1857{
1648 struct __vxge_hw_device *hldev; 1858 struct __vxge_hw_device *hldev;
1649 struct vxgedev *vdev; 1859 struct vxgedev *vdev;
1650 1860
1651 vdev = (struct vxgedev *)netdev_priv(dev); 1861 vdev = netdev_priv(dev);
1652 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 1862 hldev = pci_get_drvdata(vdev->pdev);
1653 1863
1654 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 1864 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
1655 1865
@@ -1689,15 +1899,6 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1689 mtable[index] = index % vdev->no_of_vpath; 1899 mtable[index] = index % vdev->no_of_vpath;
1690 } 1900 }
1691 1901
1692 /* Fill RTH hash types */
1693 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1694 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1695 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1696 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1697 hash_types.hash_type_tcpipv6ex_en =
1698 vdev->config.rth_hash_type_tcpipv6ex;
1699 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1700
1701 /* set indirection table, bucket-to-vpath mapping */ 1902 /* set indirection table, bucket-to-vpath mapping */
1702 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles, 1903 status = vxge_hw_vpath_rts_rth_itable_set(vdev->vp_handles,
1703 vdev->no_of_vpath, 1904 vdev->no_of_vpath,
@@ -1710,19 +1911,27 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1710 return status; 1911 return status;
1711 } 1912 }
1712 1913
1914 /* Fill RTH hash types */
1915 hash_types.hash_type_tcpipv4_en = vdev->config.rth_hash_type_tcpipv4;
1916 hash_types.hash_type_ipv4_en = vdev->config.rth_hash_type_ipv4;
1917 hash_types.hash_type_tcpipv6_en = vdev->config.rth_hash_type_tcpipv6;
1918 hash_types.hash_type_ipv6_en = vdev->config.rth_hash_type_ipv6;
1919 hash_types.hash_type_tcpipv6ex_en =
1920 vdev->config.rth_hash_type_tcpipv6ex;
1921 hash_types.hash_type_ipv6ex_en = vdev->config.rth_hash_type_ipv6ex;
1922
1713 /* 1923 /*
1714 * Because the itable_set() method uses the active_table field 1924 * Because the itable_set() method uses the active_table field
1715 * for the target virtual path the RTH config should be updated 1925 * for the target virtual path the RTH config should be updated
1716 * for all VPATHs. The h/w only uses the lowest numbered VPATH 1926 * for all VPATHs. The h/w only uses the lowest numbered VPATH
1717 * when steering frames. 1927 * when steering frames.
1718 */ 1928 */
1719 for (index = 0; index < vdev->no_of_vpath; index++) { 1929 for (index = 0; index < vdev->no_of_vpath; index++) {
1720 status = vxge_hw_vpath_rts_rth_set( 1930 status = vxge_hw_vpath_rts_rth_set(
1721 vdev->vpaths[index].handle, 1931 vdev->vpaths[index].handle,
1722 vdev->config.rth_algorithm, 1932 vdev->config.rth_algorithm,
1723 &hash_types, 1933 &hash_types,
1724 vdev->config.rth_bkt_sz); 1934 vdev->config.rth_bkt_sz);
1725
1726 if (status != VXGE_HW_OK) { 1935 if (status != VXGE_HW_OK) {
1727 vxge_debug_init(VXGE_ERR, 1936 vxge_debug_init(VXGE_ERR,
1728 "RTH configuration failed for vpath:%d", 1937 "RTH configuration failed for vpath:%d",
@@ -1734,201 +1943,8 @@ static enum vxge_hw_status vxge_rth_configure(struct vxgedev *vdev)
1734 return status; 1943 return status;
1735} 1944}
1736 1945
1737static int vxge_mac_list_add(struct vxge_vpath *vpath, struct macInfo *mac)
1738{
1739 struct vxge_mac_addrs *new_mac_entry;
1740 u8 *mac_address = NULL;
1741
1742 if (vpath->mac_addr_cnt >= VXGE_MAX_LEARN_MAC_ADDR_CNT)
1743 return TRUE;
1744
1745 new_mac_entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_ATOMIC);
1746 if (!new_mac_entry) {
1747 vxge_debug_mem(VXGE_ERR,
1748 "%s: memory allocation failed",
1749 VXGE_DRIVER_NAME);
1750 return FALSE;
1751 }
1752
1753 list_add(&new_mac_entry->item, &vpath->mac_addr_list);
1754
1755 /* Copy the new mac address to the list */
1756 mac_address = (u8 *)&new_mac_entry->macaddr;
1757 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1758
1759 new_mac_entry->state = mac->state;
1760 vpath->mac_addr_cnt++;
1761
1762 /* Is this a multicast address */
1763 if (0x01 & mac->macaddr[0])
1764 vpath->mcast_addr_cnt++;
1765
1766 return TRUE;
1767}
1768
1769/* Add a mac address to DA table */
1770static enum vxge_hw_status vxge_add_mac_addr(struct vxgedev *vdev,
1771 struct macInfo *mac)
1772{
1773 enum vxge_hw_status status = VXGE_HW_OK;
1774 struct vxge_vpath *vpath;
1775 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode;
1776
1777 if (0x01 & mac->macaddr[0]) /* multicast address */
1778 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE;
1779 else
1780 duplicate_mode = VXGE_HW_VPATH_MAC_ADDR_REPLACE_DUPLICATE;
1781
1782 vpath = &vdev->vpaths[mac->vpath_no];
1783 status = vxge_hw_vpath_mac_addr_add(vpath->handle, mac->macaddr,
1784 mac->macmask, duplicate_mode);
1785 if (status != VXGE_HW_OK) {
1786 vxge_debug_init(VXGE_ERR,
1787 "DA config add entry failed for vpath:%d",
1788 vpath->device_id);
1789 } else
1790 if (FALSE == vxge_mac_list_add(vpath, mac))
1791 status = -EPERM;
1792
1793 return status;
1794}
1795
1796static int vxge_mac_list_del(struct vxge_vpath *vpath, struct macInfo *mac)
1797{
1798 struct list_head *entry, *next;
1799 u64 del_mac = 0;
1800 u8 *mac_address = (u8 *) (&del_mac);
1801
1802 /* Copy the mac address to delete from the list */
1803 memcpy(mac_address, mac->macaddr, ETH_ALEN);
1804
1805 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1806 if (((struct vxge_mac_addrs *)entry)->macaddr == del_mac) {
1807 list_del(entry);
1808 kfree((struct vxge_mac_addrs *)entry);
1809 vpath->mac_addr_cnt--;
1810
1811 /* Is this a multicast address */
1812 if (0x01 & mac->macaddr[0])
1813 vpath->mcast_addr_cnt--;
1814 return TRUE;
1815 }
1816 }
1817
1818 return FALSE;
1819}
1820/* delete a mac address from DA table */
1821static enum vxge_hw_status vxge_del_mac_addr(struct vxgedev *vdev,
1822 struct macInfo *mac)
1823{
1824 enum vxge_hw_status status = VXGE_HW_OK;
1825 struct vxge_vpath *vpath;
1826
1827 vpath = &vdev->vpaths[mac->vpath_no];
1828 status = vxge_hw_vpath_mac_addr_delete(vpath->handle, mac->macaddr,
1829 mac->macmask);
1830 if (status != VXGE_HW_OK) {
1831 vxge_debug_init(VXGE_ERR,
1832 "DA config delete entry failed for vpath:%d",
1833 vpath->device_id);
1834 } else
1835 vxge_mac_list_del(vpath, mac);
1836 return status;
1837}
1838
1839/* list all mac addresses from DA table */
1840enum vxge_hw_status
1841static vxge_search_mac_addr_in_da_table(struct vxge_vpath *vpath,
1842 struct macInfo *mac)
1843{
1844 enum vxge_hw_status status = VXGE_HW_OK;
1845 unsigned char macmask[ETH_ALEN];
1846 unsigned char macaddr[ETH_ALEN];
1847
1848 status = vxge_hw_vpath_mac_addr_get(vpath->handle,
1849 macaddr, macmask);
1850 if (status != VXGE_HW_OK) {
1851 vxge_debug_init(VXGE_ERR,
1852 "DA config list entry failed for vpath:%d",
1853 vpath->device_id);
1854 return status;
1855 }
1856
1857 while (memcmp(mac->macaddr, macaddr, ETH_ALEN)) {
1858
1859 status = vxge_hw_vpath_mac_addr_get_next(vpath->handle,
1860 macaddr, macmask);
1861 if (status != VXGE_HW_OK)
1862 break;
1863 }
1864
1865 return status;
1866}
1867
1868/* Store all vlan ids from the list to the vid table */
1869static enum vxge_hw_status vxge_restore_vpath_vid_table(struct vxge_vpath *vpath)
1870{
1871 enum vxge_hw_status status = VXGE_HW_OK;
1872 struct vxgedev *vdev = vpath->vdev;
1873 u16 vid;
1874
1875 if (vdev->vlgrp && vpath->is_open) {
1876
1877 for (vid = 0; vid < VLAN_N_VID; vid++) {
1878 if (!vlan_group_get_device(vdev->vlgrp, vid))
1879 continue;
1880 /* Add these vlan to the vid table */
1881 status = vxge_hw_vpath_vid_add(vpath->handle, vid);
1882 }
1883 }
1884
1885 return status;
1886}
1887
1888/* Store all mac addresses from the list to the DA table */
1889static enum vxge_hw_status vxge_restore_vpath_mac_addr(struct vxge_vpath *vpath)
1890{
1891 enum vxge_hw_status status = VXGE_HW_OK;
1892 struct macInfo mac_info;
1893 u8 *mac_address = NULL;
1894 struct list_head *entry, *next;
1895
1896 memset(&mac_info, 0, sizeof(struct macInfo));
1897
1898 if (vpath->is_open) {
1899
1900 list_for_each_safe(entry, next, &vpath->mac_addr_list) {
1901 mac_address =
1902 (u8 *)&
1903 ((struct vxge_mac_addrs *)entry)->macaddr;
1904 memcpy(mac_info.macaddr, mac_address, ETH_ALEN);
1905 ((struct vxge_mac_addrs *)entry)->state =
1906 VXGE_LL_MAC_ADDR_IN_DA_TABLE;
1907 /* does this mac address already exist in da table? */
1908 status = vxge_search_mac_addr_in_da_table(vpath,
1909 &mac_info);
1910 if (status != VXGE_HW_OK) {
1911 /* Add this mac address to the DA table */
1912 status = vxge_hw_vpath_mac_addr_add(
1913 vpath->handle, mac_info.macaddr,
1914 mac_info.macmask,
1915 VXGE_HW_VPATH_MAC_ADDR_ADD_DUPLICATE);
1916 if (status != VXGE_HW_OK) {
1917 vxge_debug_init(VXGE_ERR,
1918 "DA add entry failed for vpath:%d",
1919 vpath->device_id);
1920 ((struct vxge_mac_addrs *)entry)->state
1921 = VXGE_LL_MAC_ADDR_IN_LIST;
1922 }
1923 }
1924 }
1925 }
1926
1927 return status;
1928}
1929
1930/* reset vpaths */ 1946/* reset vpaths */
1931static enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev) 1947enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev)
1932{ 1948{
1933 enum vxge_hw_status status = VXGE_HW_OK; 1949 enum vxge_hw_status status = VXGE_HW_OK;
1934 struct vxge_vpath *vpath; 1950 struct vxge_vpath *vpath;
@@ -1988,8 +2004,23 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
1988 2004
1989 for (i = 0; i < vdev->no_of_vpath; i++) { 2005 for (i = 0; i < vdev->no_of_vpath; i++) {
1990 vpath = &vdev->vpaths[i]; 2006 vpath = &vdev->vpaths[i];
1991
1992 vxge_assert(vpath->is_configured); 2007 vxge_assert(vpath->is_configured);
2008
2009 if (!vdev->titan1) {
2010 struct vxge_hw_vp_config *vcfg;
2011 vcfg = &vdev->devh->config.vp_config[vpath->device_id];
2012
2013 vcfg->rti.urange_a = RTI_T1A_RX_URANGE_A;
2014 vcfg->rti.urange_b = RTI_T1A_RX_URANGE_B;
2015 vcfg->rti.urange_c = RTI_T1A_RX_URANGE_C;
2016 vcfg->tti.uec_a = TTI_T1A_TX_UFC_A;
2017 vcfg->tti.uec_b = TTI_T1A_TX_UFC_B;
2018 vcfg->tti.uec_c = TTI_T1A_TX_UFC_C(vdev->mtu);
2019 vcfg->tti.uec_d = TTI_T1A_TX_UFC_D(vdev->mtu);
2020 vcfg->tti.ltimer_val = VXGE_T1A_TTI_LTIMER_VAL;
2021 vcfg->tti.rtimer_val = VXGE_T1A_TTI_RTIMER_VAL;
2022 }
2023
1993 attr.vp_id = vpath->device_id; 2024 attr.vp_id = vpath->device_id;
1994 attr.fifo_attr.callback = vxge_xmit_compl; 2025 attr.fifo_attr.callback = vxge_xmit_compl;
1995 attr.fifo_attr.txdl_term = vxge_tx_term; 2026 attr.fifo_attr.txdl_term = vxge_tx_term;
@@ -2004,6 +2035,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2004 2035
2005 vpath->ring.ndev = vdev->ndev; 2036 vpath->ring.ndev = vdev->ndev;
2006 vpath->ring.pdev = vdev->pdev; 2037 vpath->ring.pdev = vdev->pdev;
2038
2007 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle); 2039 status = vxge_hw_vpath_open(vdev->devh, &attr, &vpath->handle);
2008 if (status == VXGE_HW_OK) { 2040 if (status == VXGE_HW_OK) {
2009 vpath->fifo.handle = 2041 vpath->fifo.handle =
@@ -2024,6 +2056,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2024 vdev->config.fifo_indicate_max_pkts; 2056 vdev->config.fifo_indicate_max_pkts;
2025 vpath->ring.rx_vector_no = 0; 2057 vpath->ring.rx_vector_no = 0;
2026 vpath->ring.rx_csum = vdev->rx_csum; 2058 vpath->ring.rx_csum = vdev->rx_csum;
2059 vpath->ring.rx_hwts = vdev->rx_hwts;
2027 vpath->is_open = 1; 2060 vpath->is_open = 1;
2028 vdev->vp_handles[i] = vpath->handle; 2061 vdev->vp_handles[i] = vpath->handle;
2029 vpath->ring.gro_enable = vdev->config.gro_enable; 2062 vpath->ring.gro_enable = vdev->config.gro_enable;
@@ -2031,11 +2064,10 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2031 vdev->stats.vpaths_open++; 2064 vdev->stats.vpaths_open++;
2032 } else { 2065 } else {
2033 vdev->stats.vpath_open_fail++; 2066 vdev->stats.vpath_open_fail++;
2034 vxge_debug_init(VXGE_ERR, 2067 vxge_debug_init(VXGE_ERR, "%s: vpath: %d failed to "
2035 "%s: vpath: %d failed to open " 2068 "open with status: %d",
2036 "with status: %d", 2069 vdev->ndev->name, vpath->device_id,
2037 vdev->ndev->name, vpath->device_id, 2070 status);
2038 status);
2039 vxge_close_vpaths(vdev, 0); 2071 vxge_close_vpaths(vdev, 0);
2040 return -EPERM; 2072 return -EPERM;
2041 } 2073 }
@@ -2043,6 +2075,7 @@ static int vxge_open_vpaths(struct vxgedev *vdev)
2043 vp_id = vpath->handle->vpath->vp_id; 2075 vp_id = vpath->handle->vpath->vp_id;
2044 vdev->vpaths_deployed |= vxge_mBIT(vp_id); 2076 vdev->vpaths_deployed |= vxge_mBIT(vp_id);
2045 } 2077 }
2078
2046 return VXGE_HW_OK; 2079 return VXGE_HW_OK;
2047} 2080}
2048 2081
@@ -2062,21 +2095,20 @@ static irqreturn_t vxge_isr_napi(int irq, void *dev_id)
2062 struct __vxge_hw_device *hldev; 2095 struct __vxge_hw_device *hldev;
2063 u64 reason; 2096 u64 reason;
2064 enum vxge_hw_status status; 2097 enum vxge_hw_status status;
2065 struct vxgedev *vdev = (struct vxgedev *) dev_id;; 2098 struct vxgedev *vdev = (struct vxgedev *)dev_id;
2066 2099
2067 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__); 2100 vxge_debug_intr(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2068 2101
2069 dev = vdev->ndev; 2102 dev = vdev->ndev;
2070 hldev = (struct __vxge_hw_device *)pci_get_drvdata(vdev->pdev); 2103 hldev = pci_get_drvdata(vdev->pdev);
2071 2104
2072 if (pci_channel_offline(vdev->pdev)) 2105 if (pci_channel_offline(vdev->pdev))
2073 return IRQ_NONE; 2106 return IRQ_NONE;
2074 2107
2075 if (unlikely(!is_vxge_card_up(vdev))) 2108 if (unlikely(!is_vxge_card_up(vdev)))
2076 return IRQ_NONE; 2109 return IRQ_HANDLED;
2077 2110
2078 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, 2111 status = vxge_hw_device_begin_irq(hldev, vdev->exec_mode, &reason);
2079 &reason);
2080 if (status == VXGE_HW_OK) { 2112 if (status == VXGE_HW_OK) {
2081 vxge_hw_device_mask_all(hldev); 2113 vxge_hw_device_mask_all(hldev);
2082 2114
@@ -2301,8 +2333,8 @@ static void vxge_rem_msix_isr(struct vxgedev *vdev)
2301 2333
2302static void vxge_rem_isr(struct vxgedev *vdev) 2334static void vxge_rem_isr(struct vxgedev *vdev)
2303{ 2335{
2304 struct __vxge_hw_device *hldev; 2336 struct __vxge_hw_device *hldev;
2305 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2337 hldev = pci_get_drvdata(vdev->pdev);
2306 2338
2307#ifdef CONFIG_PCI_MSI 2339#ifdef CONFIG_PCI_MSI
2308 if (vdev->config.intr_type == MSI_X) { 2340 if (vdev->config.intr_type == MSI_X) {
@@ -2529,8 +2561,7 @@ static void vxge_poll_vp_lockup(unsigned long data)
2529 * Return value: '0' on success and an appropriate (-)ve integer as 2561 * Return value: '0' on success and an appropriate (-)ve integer as
2530 * defined in errno.h file on failure. 2562 * defined in errno.h file on failure.
2531 */ 2563 */
2532static int 2564static int vxge_open(struct net_device *dev)
2533vxge_open(struct net_device *dev)
2534{ 2565{
2535 enum vxge_hw_status status; 2566 enum vxge_hw_status status;
2536 struct vxgedev *vdev; 2567 struct vxgedev *vdev;
@@ -2539,11 +2570,12 @@ vxge_open(struct net_device *dev)
2539 int ret = 0; 2570 int ret = 0;
2540 int i; 2571 int i;
2541 u64 val64, function_mode; 2572 u64 val64, function_mode;
2573
2542 vxge_debug_entryexit(VXGE_TRACE, 2574 vxge_debug_entryexit(VXGE_TRACE,
2543 "%s: %s:%d", dev->name, __func__, __LINE__); 2575 "%s: %s:%d", dev->name, __func__, __LINE__);
2544 2576
2545 vdev = (struct vxgedev *)netdev_priv(dev); 2577 vdev = netdev_priv(dev);
2546 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2578 hldev = pci_get_drvdata(vdev->pdev);
2547 function_mode = vdev->config.device_hw_info.function_mode; 2579 function_mode = vdev->config.device_hw_info.function_mode;
2548 2580
2549 /* make sure you have link off by default every time Nic is 2581 /* make sure you have link off by default every time Nic is
@@ -2598,6 +2630,8 @@ vxge_open(struct net_device *dev)
2598 goto out2; 2630 goto out2;
2599 } 2631 }
2600 } 2632 }
2633 printk(KERN_INFO "%s: Receive Hashing Offload %s\n", dev->name,
2634 hldev->config.rth_en ? "enabled" : "disabled");
2601 2635
2602 for (i = 0; i < vdev->no_of_vpath; i++) { 2636 for (i = 0; i < vdev->no_of_vpath; i++) {
2603 vpath = &vdev->vpaths[i]; 2637 vpath = &vdev->vpaths[i];
@@ -2683,9 +2717,10 @@ vxge_open(struct net_device *dev)
2683 vxge_os_timer(vdev->vp_reset_timer, 2717 vxge_os_timer(vdev->vp_reset_timer,
2684 vxge_poll_vp_reset, vdev, (HZ/2)); 2718 vxge_poll_vp_reset, vdev, (HZ/2));
2685 2719
2686 if (vdev->vp_lockup_timer.function == NULL) 2720 /* There is no need to check for RxD leak and RxD lookup on Titan1A */
2687 vxge_os_timer(vdev->vp_lockup_timer, 2721 if (vdev->titan1 && vdev->vp_lockup_timer.function == NULL)
2688 vxge_poll_vp_lockup, vdev, (HZ/2)); 2722 vxge_os_timer(vdev->vp_lockup_timer, vxge_poll_vp_lockup, vdev,
2723 HZ / 2);
2689 2724
2690 set_bit(__VXGE_STATE_CARD_UP, &vdev->state); 2725 set_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2691 2726
@@ -2767,8 +2802,8 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2767 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d", 2802 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d",
2768 dev->name, __func__, __LINE__); 2803 dev->name, __func__, __LINE__);
2769 2804
2770 vdev = (struct vxgedev *)netdev_priv(dev); 2805 vdev = netdev_priv(dev);
2771 hldev = (struct __vxge_hw_device *) pci_get_drvdata(vdev->pdev); 2806 hldev = pci_get_drvdata(vdev->pdev);
2772 2807
2773 if (unlikely(!is_vxge_card_up(vdev))) 2808 if (unlikely(!is_vxge_card_up(vdev)))
2774 return 0; 2809 return 0;
@@ -2778,7 +2813,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2778 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state)) 2813 while (test_and_set_bit(__VXGE_STATE_RESET_CARD, &vdev->state))
2779 msleep(50); 2814 msleep(50);
2780 2815
2781 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2782 if (do_io) { 2816 if (do_io) {
2783 /* Put the vpath back in normal mode */ 2817 /* Put the vpath back in normal mode */
2784 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id); 2818 vpath_vector = vxge_mBIT(vdev->vpaths[0].device_id);
@@ -2789,7 +2823,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2789 struct vxge_hw_mrpcim_reg, 2823 struct vxge_hw_mrpcim_reg,
2790 rts_mgr_cbasin_cfg), 2824 rts_mgr_cbasin_cfg),
2791 &val64); 2825 &val64);
2792
2793 if (status == VXGE_HW_OK) { 2826 if (status == VXGE_HW_OK) {
2794 val64 &= ~vpath_vector; 2827 val64 &= ~vpath_vector;
2795 status = vxge_hw_mgmt_reg_write(vdev->devh, 2828 status = vxge_hw_mgmt_reg_write(vdev->devh,
@@ -2818,10 +2851,17 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2818 2851
2819 smp_wmb(); 2852 smp_wmb();
2820 } 2853 }
2821 del_timer_sync(&vdev->vp_lockup_timer); 2854
2855 if (vdev->titan1)
2856 del_timer_sync(&vdev->vp_lockup_timer);
2822 2857
2823 del_timer_sync(&vdev->vp_reset_timer); 2858 del_timer_sync(&vdev->vp_reset_timer);
2824 2859
2860 if (do_io)
2861 vxge_hw_device_wait_receive_idle(hldev);
2862
2863 clear_bit(__VXGE_STATE_CARD_UP, &vdev->state);
2864
2825 /* Disable napi */ 2865 /* Disable napi */
2826 if (vdev->config.intr_type != MSI_X) 2866 if (vdev->config.intr_type != MSI_X)
2827 napi_disable(&vdev->napi); 2867 napi_disable(&vdev->napi);
@@ -2838,8 +2878,6 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2838 if (do_io) 2878 if (do_io)
2839 vxge_hw_device_intr_disable(vdev->devh); 2879 vxge_hw_device_intr_disable(vdev->devh);
2840 2880
2841 mdelay(1000);
2842
2843 vxge_rem_isr(vdev); 2881 vxge_rem_isr(vdev);
2844 2882
2845 vxge_napi_del_all(vdev); 2883 vxge_napi_del_all(vdev);
@@ -2868,8 +2906,7 @@ static int do_vxge_close(struct net_device *dev, int do_io)
2868 * Return value: '0' on success and an appropriate (-)ve integer as 2906 * Return value: '0' on success and an appropriate (-)ve integer as
2869 * defined in errno.h file on failure. 2907 * defined in errno.h file on failure.
2870 */ 2908 */
2871static int 2909static int vxge_close(struct net_device *dev)
2872vxge_close(struct net_device *dev)
2873{ 2910{
2874 do_vxge_close(dev, 1); 2911 do_vxge_close(dev, 1);
2875 return 0; 2912 return 0;
@@ -2943,9 +2980,7 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2943 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes; 2980 net_stats->rx_bytes += vdev->vpaths[k].ring.stats.rx_bytes;
2944 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors; 2981 net_stats->rx_errors += vdev->vpaths[k].ring.stats.rx_errors;
2945 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast; 2982 net_stats->multicast += vdev->vpaths[k].ring.stats.rx_mcast;
2946 net_stats->rx_dropped += 2983 net_stats->rx_dropped += vdev->vpaths[k].ring.stats.rx_dropped;
2947 vdev->vpaths[k].ring.stats.rx_dropped;
2948
2949 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms; 2984 net_stats->tx_packets += vdev->vpaths[k].fifo.stats.tx_frms;
2950 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes; 2985 net_stats->tx_bytes += vdev->vpaths[k].fifo.stats.tx_bytes;
2951 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors; 2986 net_stats->tx_errors += vdev->vpaths[k].fifo.stats.tx_errors;
@@ -2954,6 +2989,101 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2954 return net_stats; 2989 return net_stats;
2955} 2990}
2956 2991
2992static enum vxge_hw_status vxge_timestamp_config(struct vxgedev *vdev,
2993 int enable)
2994{
2995 enum vxge_hw_status status;
2996 u64 val64;
2997
2998 /* Timestamp is passed to the driver via the FCS, therefore we
2999 * must disable the FCS stripping by the adapter. Since this is
3000 * required for the driver to load (due to a hardware bug),
3001 * there is no need to do anything special here.
3002 */
3003 if (enable)
3004 val64 = VXGE_HW_XMAC_TIMESTAMP_EN |
3005 VXGE_HW_XMAC_TIMESTAMP_USE_LINK_ID(0) |
3006 VXGE_HW_XMAC_TIMESTAMP_INTERVAL(0);
3007 else
3008 val64 = 0;
3009
3010 status = vxge_hw_mgmt_reg_write(vdev->devh,
3011 vxge_hw_mgmt_reg_type_mrpcim,
3012 0,
3013 offsetof(struct vxge_hw_mrpcim_reg,
3014 xmac_timestamp),
3015 val64);
3016 vxge_hw_device_flush_io(vdev->devh);
3017 return status;
3018}
3019
3020static int vxge_hwtstamp_ioctl(struct vxgedev *vdev, void __user *data)
3021{
3022 struct hwtstamp_config config;
3023 enum vxge_hw_status status;
3024 int i;
3025
3026 if (copy_from_user(&config, data, sizeof(config)))
3027 return -EFAULT;
3028
3029 /* reserved for future extensions */
3030 if (config.flags)
3031 return -EINVAL;
3032
3033 /* Transmit HW Timestamp not supported */
3034 switch (config.tx_type) {
3035 case HWTSTAMP_TX_OFF:
3036 break;
3037 case HWTSTAMP_TX_ON:
3038 default:
3039 return -ERANGE;
3040 }
3041
3042 switch (config.rx_filter) {
3043 case HWTSTAMP_FILTER_NONE:
3044 status = vxge_timestamp_config(vdev, 0);
3045 if (status != VXGE_HW_OK)
3046 return -EFAULT;
3047
3048 vdev->rx_hwts = 0;
3049 config.rx_filter = HWTSTAMP_FILTER_NONE;
3050 break;
3051
3052 case HWTSTAMP_FILTER_ALL:
3053 case HWTSTAMP_FILTER_SOME:
3054 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
3055 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
3056 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
3057 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
3058 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
3059 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
3060 case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
3061 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
3062 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
3063 case HWTSTAMP_FILTER_PTP_V2_EVENT:
3064 case HWTSTAMP_FILTER_PTP_V2_SYNC:
3065 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
3066 status = vxge_timestamp_config(vdev, 1);
3067 if (status != VXGE_HW_OK)
3068 return -EFAULT;
3069
3070 vdev->rx_hwts = 1;
3071 config.rx_filter = HWTSTAMP_FILTER_ALL;
3072 break;
3073
3074 default:
3075 return -ERANGE;
3076 }
3077
3078 for (i = 0; i < vdev->no_of_vpath; i++)
3079 vdev->vpaths[i].ring.rx_hwts = vdev->rx_hwts;
3080
3081 if (copy_to_user(data, &config, sizeof(config)))
3082 return -EFAULT;
3083
3084 return 0;
3085}
3086
2957/** 3087/**
2958 * vxge_ioctl 3088 * vxge_ioctl
2959 * @dev: Device pointer. 3089 * @dev: Device pointer.
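
vxge_hwtstamp_ioctl above services SIOCSHWTSTAMP: transmit timestamping is refused (only HWTSTAMP_TX_OFF is accepted), any PTP receive filter is widened to HWTSTAMP_FILTER_ALL, and the per-vpath rx_hwts flag plus the XMAC timestamp register are updated. A sketch of the matching user-space request (the interface name is an example):

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct hwtstamp_config cfg;
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&cfg, 0, sizeof(cfg));
            cfg.tx_type = HWTSTAMP_TX_OFF;          /* vxge only accepts TX_OFF */
            cfg.rx_filter = HWTSTAMP_FILTER_ALL;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;

            if (ioctl(fd, SIOCSHWTSTAMP, &ifr) < 0)
                    perror("SIOCSHWTSTAMP");
            else
                    printf("rx_filter granted: %d\n", cfg.rx_filter);
            close(fd);
            return 0;
    }
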
@@ -2966,7 +3096,20 @@ vxge_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
2966 */ 3096 */
2967static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) 3097static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2968{ 3098{
2969 return -EOPNOTSUPP; 3099 struct vxgedev *vdev = netdev_priv(dev);
3100 int ret;
3101
3102 switch (cmd) {
3103 case SIOCSHWTSTAMP:
3104 ret = vxge_hwtstamp_ioctl(vdev, rq->ifr_data);
3105 if (ret)
3106 return ret;
3107 break;
3108 default:
3109 return -EOPNOTSUPP;
3110 }
3111
3112 return 0;
2970} 3113}
2971 3114
2972/** 3115/**
@@ -2977,18 +3120,17 @@ static int vxge_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2977 * This function is triggered if the Tx Queue is stopped 3120 * This function is triggered if the Tx Queue is stopped
2978 * for a pre-defined amount of time when the Interface is still up. 3121 * for a pre-defined amount of time when the Interface is still up.
2979 */ 3122 */
2980static void 3123static void vxge_tx_watchdog(struct net_device *dev)
2981vxge_tx_watchdog(struct net_device *dev)
2982{ 3124{
2983 struct vxgedev *vdev; 3125 struct vxgedev *vdev;
2984 3126
2985 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3127 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
2986 3128
2987 vdev = (struct vxgedev *)netdev_priv(dev); 3129 vdev = netdev_priv(dev);
2988 3130
2989 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START; 3131 vdev->cric_err_event = VXGE_HW_EVENT_RESET_START;
2990 3132
2991 vxge_reset(vdev); 3133 schedule_work(&vdev->reset_task);
2992 vxge_debug_entryexit(VXGE_TRACE, 3134 vxge_debug_entryexit(VXGE_TRACE,
2993 "%s:%d Exiting...", __func__, __LINE__); 3135 "%s:%d Exiting...", __func__, __LINE__);
2994} 3136}
@@ -3012,7 +3154,7 @@ vxge_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
3012 3154
3013 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3155 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3014 3156
3015 vdev = (struct vxgedev *)netdev_priv(dev); 3157 vdev = netdev_priv(dev);
3016 3158
3017 vpath = &vdev->vpaths[0]; 3159 vpath = &vdev->vpaths[0];
3018 if ((NULL == grp) && (vpath->is_open)) { 3160 if ((NULL == grp) && (vpath->is_open)) {
@@ -3061,7 +3203,7 @@ vxge_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
3061 struct vxge_vpath *vpath; 3203 struct vxge_vpath *vpath;
3062 int vp_id; 3204 int vp_id;
3063 3205
3064 vdev = (struct vxgedev *)netdev_priv(dev); 3206 vdev = netdev_priv(dev);
3065 3207
3066 /* Add these vlan to the vid table */ 3208 /* Add these vlan to the vid table */
3067 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) { 3209 for (vp_id = 0; vp_id < vdev->no_of_vpath; vp_id++) {
@@ -3088,7 +3230,7 @@ vxge_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
3088 3230
3089 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__); 3231 vxge_debug_entryexit(VXGE_TRACE, "%s:%d", __func__, __LINE__);
3090 3232
3091 vdev = (struct vxgedev *)netdev_priv(dev); 3233 vdev = netdev_priv(dev);
3092 3234
3093 vlan_group_set_device(vdev->vlgrp, vid, NULL); 3235 vlan_group_set_device(vdev->vlgrp, vid, NULL);
3094 3236
@@ -3110,21 +3252,31 @@ static const struct net_device_ops vxge_netdev_ops = {
3110 .ndo_start_xmit = vxge_xmit, 3252 .ndo_start_xmit = vxge_xmit,
3111 .ndo_validate_addr = eth_validate_addr, 3253 .ndo_validate_addr = eth_validate_addr,
3112 .ndo_set_multicast_list = vxge_set_multicast, 3254 .ndo_set_multicast_list = vxge_set_multicast,
3113
3114 .ndo_do_ioctl = vxge_ioctl, 3255 .ndo_do_ioctl = vxge_ioctl,
3115
3116 .ndo_set_mac_address = vxge_set_mac_addr, 3256 .ndo_set_mac_address = vxge_set_mac_addr,
3117 .ndo_change_mtu = vxge_change_mtu, 3257 .ndo_change_mtu = vxge_change_mtu,
3118 .ndo_vlan_rx_register = vxge_vlan_rx_register, 3258 .ndo_vlan_rx_register = vxge_vlan_rx_register,
3119 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid, 3259 .ndo_vlan_rx_kill_vid = vxge_vlan_rx_kill_vid,
3120 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid, 3260 .ndo_vlan_rx_add_vid = vxge_vlan_rx_add_vid,
3121
3122 .ndo_tx_timeout = vxge_tx_watchdog, 3261 .ndo_tx_timeout = vxge_tx_watchdog,
3123#ifdef CONFIG_NET_POLL_CONTROLLER 3262#ifdef CONFIG_NET_POLL_CONTROLLER
3124 .ndo_poll_controller = vxge_netpoll, 3263 .ndo_poll_controller = vxge_netpoll,
3125#endif 3264#endif
3126}; 3265};
3127 3266
3267static int __devinit vxge_device_revision(struct vxgedev *vdev)
3268{
3269 int ret;
3270 u8 revision;
3271
3272 ret = pci_read_config_byte(vdev->pdev, PCI_REVISION_ID, &revision);
3273 if (ret)
3274 return -EIO;
3275
3276 vdev->titan1 = (revision == VXGE_HW_TITAN1_PCI_REVISION);
3277 return 0;
3278}
3279
3128static int __devinit vxge_device_register(struct __vxge_hw_device *hldev, 3280static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3129 struct vxge_config *config, 3281 struct vxge_config *config,
3130 int high_dma, int no_of_vpath, 3282 int high_dma, int no_of_vpath,
@@ -3163,6 +3315,11 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3163 vdev->pdev = hldev->pdev; 3315 vdev->pdev = hldev->pdev;
3164 memcpy(&vdev->config, config, sizeof(struct vxge_config)); 3316 memcpy(&vdev->config, config, sizeof(struct vxge_config));
3165 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */ 3317 vdev->rx_csum = 1; /* Enable Rx CSUM by default. */
3318 vdev->rx_hwts = 0;
3319
3320 ret = vxge_device_revision(vdev);
3321 if (ret < 0)
3322 goto _out1;
3166 3323
3167 SET_NETDEV_DEV(ndev, &vdev->pdev->dev); 3324 SET_NETDEV_DEV(ndev, &vdev->pdev->dev);
3168 3325
@@ -3175,9 +3332,15 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3175 ndev->netdev_ops = &vxge_netdev_ops; 3332 ndev->netdev_ops = &vxge_netdev_ops;
3176 3333
3177 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT; 3334 ndev->watchdog_timeo = VXGE_LL_WATCH_DOG_TIMEOUT;
3335 INIT_WORK(&vdev->reset_task, vxge_reset);
3178 3336
3179 vxge_initialize_ethtool_ops(ndev); 3337 vxge_initialize_ethtool_ops(ndev);
3180 3338
3339 if (vdev->config.rth_steering != NO_STEERING) {
3340 ndev->features |= NETIF_F_RXHASH;
3341 hldev->config.rth_en = VXGE_HW_RTH_ENABLE;
3342 }
3343
3181 /* Allocate memory for vpath */ 3344 /* Allocate memory for vpath */
3182 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) * 3345 vdev->vpaths = kzalloc((sizeof(struct vxge_vpath)) *
3183 no_of_vpath, GFP_KERNEL); 3346 no_of_vpath, GFP_KERNEL);
@@ -3191,7 +3354,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3191 3354
3192 ndev->features |= NETIF_F_SG; 3355 ndev->features |= NETIF_F_SG;
3193 3356
3194 ndev->features |= NETIF_F_HW_CSUM; 3357 ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3195 vxge_debug_init(vxge_hw_device_trace_level_get(hldev), 3358 vxge_debug_init(vxge_hw_device_trace_level_get(hldev),
3196 "%s : checksuming enabled", __func__); 3359 "%s : checksuming enabled", __func__);
3197 3360
@@ -3227,6 +3390,7 @@ static int __devinit vxge_device_register(struct __vxge_hw_device *hldev,
3227 "%s: Ethernet device registered", 3390 "%s: Ethernet device registered",
3228 ndev->name); 3391 ndev->name);
3229 3392
3393 hldev->ndev = ndev;
3230 *vdev_out = vdev; 3394 *vdev_out = vdev;
3231 3395
3232 /* Resetting the Device stats */ 3396 /* Resetting the Device stats */
@@ -3261,36 +3425,29 @@ _out0:
3261 * 3425 *
3262 * This function will unregister and free network device 3426 * This function will unregister and free network device
3263 */ 3427 */
3264static void 3428static void vxge_device_unregister(struct __vxge_hw_device *hldev)
3265vxge_device_unregister(struct __vxge_hw_device *hldev)
3266{ 3429{
3267 struct vxgedev *vdev; 3430 struct vxgedev *vdev;
3268 struct net_device *dev; 3431 struct net_device *dev;
3269 char buf[IFNAMSIZ]; 3432 char buf[IFNAMSIZ];
3270#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3271 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3272 u32 level_trace;
3273#endif
3274 3433
3275 dev = hldev->ndev; 3434 dev = hldev->ndev;
3276 vdev = netdev_priv(dev); 3435 vdev = netdev_priv(dev);
3277#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
3278 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
3279 level_trace = vdev->level_trace;
3280#endif
3281 vxge_debug_entryexit(level_trace,
3282 "%s: %s:%d", vdev->ndev->name, __func__, __LINE__);
3283 3436
3284 memcpy(buf, vdev->ndev->name, IFNAMSIZ); 3437 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d", vdev->ndev->name,
3438 __func__, __LINE__);
3439
3440 strncpy(buf, dev->name, IFNAMSIZ);
3441
3442 flush_work_sync(&vdev->reset_task);
3285 3443
3286 /* in 2.6 will call stop() if device is up */ 3444 /* in 2.6 will call stop() if device is up */
3287 unregister_netdev(dev); 3445 unregister_netdev(dev);
3288 3446
3289 flush_scheduled_work(); 3447 vxge_debug_init(vdev->level_trace, "%s: ethernet device unregistered",
3290 3448 buf);
3291 vxge_debug_init(level_trace, "%s: ethernet device unregistered", buf); 3449 vxge_debug_entryexit(vdev->level_trace, "%s: %s:%d Exiting...", buf,
3292 vxge_debug_entryexit(level_trace, 3450 __func__, __LINE__);
3293 "%s: %s:%d Exiting...", buf, __func__, __LINE__);
3294} 3451}
3295 3452
3296/* 3453/*
@@ -3304,7 +3461,7 @@ vxge_callback_crit_err(struct __vxge_hw_device *hldev,
3304 enum vxge_hw_event type, u64 vp_id) 3461 enum vxge_hw_event type, u64 vp_id)
3305{ 3462{
3306 struct net_device *dev = hldev->ndev; 3463 struct net_device *dev = hldev->ndev;
3307 struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev); 3464 struct vxgedev *vdev = netdev_priv(dev);
3308 struct vxge_vpath *vpath = NULL; 3465 struct vxge_vpath *vpath = NULL;
3309 int vpath_idx; 3466 int vpath_idx;
3310 3467
@@ -3527,9 +3684,9 @@ static int __devinit vxge_config_vpaths(
3527 device_config->vp_config[i].tti.timer_ac_en = 3684 device_config->vp_config[i].tti.timer_ac_en =
3528 VXGE_HW_TIM_TIMER_AC_ENABLE; 3685 VXGE_HW_TIM_TIMER_AC_ENABLE;
3529 3686
3530 /* For msi-x with napi (each vector 3687 /* For msi-x with napi (each vector has a handler of its own) -
3531 has a handler of its own) - 3688 * Set CI to OFF for all vpaths
3532 Set CI to OFF for all vpaths */ 3689 */
3533 device_config->vp_config[i].tti.timer_ci_en = 3690 device_config->vp_config[i].tti.timer_ci_en =
3534 VXGE_HW_TIM_TIMER_CI_DISABLE; 3691 VXGE_HW_TIM_TIMER_CI_DISABLE;
3535 3692
@@ -3559,10 +3716,13 @@ static int __devinit vxge_config_vpaths(
3559 3716
3560 device_config->vp_config[i].ring.ring_blocks = 3717 device_config->vp_config[i].ring.ring_blocks =
3561 VXGE_HW_DEF_RING_BLOCKS; 3718 VXGE_HW_DEF_RING_BLOCKS;
3719
3562 device_config->vp_config[i].ring.buffer_mode = 3720 device_config->vp_config[i].ring.buffer_mode =
3563 VXGE_HW_RING_RXD_BUFFER_MODE_1; 3721 VXGE_HW_RING_RXD_BUFFER_MODE_1;
3722
3564 device_config->vp_config[i].ring.rxds_limit = 3723 device_config->vp_config[i].ring.rxds_limit =
3565 VXGE_HW_DEF_RING_RXDS_LIMIT; 3724 VXGE_HW_DEF_RING_RXDS_LIMIT;
3725
3566 device_config->vp_config[i].ring.scatter_mode = 3726 device_config->vp_config[i].ring.scatter_mode =
3567 VXGE_HW_RING_SCATTER_MODE_A; 3727 VXGE_HW_RING_SCATTER_MODE_A;
3568 3728
@@ -3642,6 +3802,7 @@ static void __devinit vxge_device_config_init(
3642 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX; 3802 device_config->intr_mode = VXGE_HW_INTR_MODE_MSIX;
3643 break; 3803 break;
3644 } 3804 }
3805
3645 /* Timer period between device poll */ 3806 /* Timer period between device poll */
3646 device_config->device_poll_millis = VXGE_TIMER_DELAY; 3807 device_config->device_poll_millis = VXGE_TIMER_DELAY;
3647 3808
@@ -3653,16 +3814,10 @@ static void __devinit vxge_device_config_init(
3653 3814
3654 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ", 3815 vxge_debug_ll_config(VXGE_TRACE, "%s : Device Config Params ",
3655 __func__); 3816 __func__);
3656 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_initial : %d",
3657 device_config->dma_blockpool_initial);
3658 vxge_debug_ll_config(VXGE_TRACE, "dma_blockpool_max : %d",
3659 device_config->dma_blockpool_max);
3660 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d", 3817 vxge_debug_ll_config(VXGE_TRACE, "intr_mode : %d",
3661 device_config->intr_mode); 3818 device_config->intr_mode);
3662 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d", 3819 vxge_debug_ll_config(VXGE_TRACE, "device_poll_millis : %d",
3663 device_config->device_poll_millis); 3820 device_config->device_poll_millis);
3664 vxge_debug_ll_config(VXGE_TRACE, "rts_mac_en : %d",
3665 device_config->rts_mac_en);
3666 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d", 3821 vxge_debug_ll_config(VXGE_TRACE, "rth_en : %d",
3667 device_config->rth_en); 3822 device_config->rth_en);
3668 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d", 3823 vxge_debug_ll_config(VXGE_TRACE, "rth_it_type : %d",
@@ -3751,9 +3906,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3751 vxge_debug_init(VXGE_TRACE, 3906 vxge_debug_init(VXGE_TRACE,
3752 "%s: MAC Address learning enabled", vdev->ndev->name); 3907 "%s: MAC Address learning enabled", vdev->ndev->name);
3753 3908
3754 vxge_debug_init(VXGE_TRACE,
3755 "%s: Rx doorbell mode enabled", vdev->ndev->name);
3756
3757 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 3909 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
3758 if (!vxge_bVALn(vpath_mask, i, 1)) 3910 if (!vxge_bVALn(vpath_mask, i, 1))
3759 continue; 3911 continue;
@@ -3766,14 +3918,6 @@ static void __devinit vxge_print_parm(struct vxgedev *vdev, u64 vpath_mask)
3766 ((struct __vxge_hw_device *)(vdev->devh))-> 3918 ((struct __vxge_hw_device *)(vdev->devh))->
3767 config.vp_config[i].rpa_strip_vlan_tag 3919 config.vp_config[i].rpa_strip_vlan_tag
3768 ? "Enabled" : "Disabled"); 3920 ? "Enabled" : "Disabled");
3769 vxge_debug_init(VXGE_TRACE,
3770 "%s: Ring blocks : %d", vdev->ndev->name,
3771 ((struct __vxge_hw_device *)(vdev->devh))->
3772 config.vp_config[i].ring.ring_blocks);
3773 vxge_debug_init(VXGE_TRACE,
3774 "%s: Fifo blocks : %d", vdev->ndev->name,
3775 ((struct __vxge_hw_device *)(vdev->devh))->
3776 config.vp_config[i].fifo.fifo_blocks);
3777 vxge_debug_ll_config(VXGE_TRACE, 3921 vxge_debug_ll_config(VXGE_TRACE,
3778 "%s: Max frags : %d", vdev->ndev->name, 3922 "%s: Max frags : %d", vdev->ndev->name,
3779 ((struct __vxge_hw_device *)(vdev->devh))-> 3923 ((struct __vxge_hw_device *)(vdev->devh))->
@@ -3813,8 +3957,7 @@ static int vxge_pm_resume(struct pci_dev *pdev)
3813static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev, 3957static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3814 pci_channel_state_t state) 3958 pci_channel_state_t state)
3815{ 3959{
3816 struct __vxge_hw_device *hldev = 3960 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3817 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3818 struct net_device *netdev = hldev->ndev; 3961 struct net_device *netdev = hldev->ndev;
3819 3962
3820 netif_device_detach(netdev); 3963 netif_device_detach(netdev);
@@ -3843,8 +3986,7 @@ static pci_ers_result_t vxge_io_error_detected(struct pci_dev *pdev,
3843 */ 3986 */
3844static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev) 3987static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3845{ 3988{
3846 struct __vxge_hw_device *hldev = 3989 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3847 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3848 struct net_device *netdev = hldev->ndev; 3990 struct net_device *netdev = hldev->ndev;
3849 3991
3850 struct vxgedev *vdev = netdev_priv(netdev); 3992 struct vxgedev *vdev = netdev_priv(netdev);
@@ -3855,7 +3997,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3855 } 3997 }
3856 3998
3857 pci_set_master(pdev); 3999 pci_set_master(pdev);
3858 vxge_reset(vdev); 4000 do_vxge_reset(vdev, VXGE_LL_FULL_RESET);
3859 4001
3860 return PCI_ERS_RESULT_RECOVERED; 4002 return PCI_ERS_RESULT_RECOVERED;
3861} 4003}
@@ -3869,8 +4011,7 @@ static pci_ers_result_t vxge_io_slot_reset(struct pci_dev *pdev)
3869 */ 4011 */
3870static void vxge_io_resume(struct pci_dev *pdev) 4012static void vxge_io_resume(struct pci_dev *pdev)
3871{ 4013{
3872 struct __vxge_hw_device *hldev = 4014 struct __vxge_hw_device *hldev = pci_get_drvdata(pdev);
3873 (struct __vxge_hw_device *) pci_get_drvdata(pdev);
3874 struct net_device *netdev = hldev->ndev; 4015 struct net_device *netdev = hldev->ndev;
3875 4016
3876 if (netif_running(netdev)) { 4017 if (netif_running(netdev)) {
@@ -3914,6 +4055,156 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3914 return num_functions; 4055 return num_functions;
3915} 4056}
3916 4057
4058int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override)
4059{
4060 struct __vxge_hw_device *hldev = vdev->devh;
4061 u32 maj, min, bld, cmaj, cmin, cbld;
4062 enum vxge_hw_status status;
4063 const struct firmware *fw;
4064 int ret;
4065
4066 ret = request_firmware(&fw, fw_name, &vdev->pdev->dev);
4067 if (ret) {
4068 vxge_debug_init(VXGE_ERR, "%s: Firmware file '%s' not found",
4069 VXGE_DRIVER_NAME, fw_name);
4070 goto out;
4071 }
4072
4073 /* Load the new firmware onto the adapter */
4074 status = vxge_update_fw_image(hldev, fw->data, fw->size);
4075 if (status != VXGE_HW_OK) {
4076 vxge_debug_init(VXGE_ERR,
4077 "%s: FW image download to adapter failed '%s'.",
4078 VXGE_DRIVER_NAME, fw_name);
4079 ret = -EIO;
4080 goto out;
4081 }
4082
4083 /* Read the version of the new firmware */
4084 status = vxge_hw_upgrade_read_version(hldev, &maj, &min, &bld);
4085 if (status != VXGE_HW_OK) {
4086 vxge_debug_init(VXGE_ERR,
4087 "%s: Upgrade read version failed '%s'.",
4088 VXGE_DRIVER_NAME, fw_name);
4089 ret = -EIO;
4090 goto out;
4091 }
4092
4093 cmaj = vdev->config.device_hw_info.fw_version.major;
4094 cmin = vdev->config.device_hw_info.fw_version.minor;
4095 cbld = vdev->config.device_hw_info.fw_version.build;
4096 /* It's possible the version in /lib/firmware is not the latest version.
4097 * If so, we could get into a loop of trying to upgrade to the latest
4098 * and flashing the older version.
4099 */
4100 if (VXGE_FW_VER(maj, min, bld) == VXGE_FW_VER(cmaj, cmin, cbld) &&
4101 !override) {
4102 ret = -EINVAL;
4103 goto out;
4104 }
4105
4106 printk(KERN_NOTICE "Upgrade to firmware version %d.%d.%d commencing\n",
4107 maj, min, bld);
4108
4109 /* Flash the adapter with the new firmware */
4110 status = vxge_hw_flash_fw(hldev);
4111 if (status != VXGE_HW_OK) {
4112 vxge_debug_init(VXGE_ERR, "%s: Upgrade commit failed '%s'.",
4113 VXGE_DRIVER_NAME, fw_name);
4114 ret = -EIO;
4115 goto out;
4116 }
4117
4118 printk(KERN_NOTICE "Upgrade of firmware successful! Adapter must be "
4119 "hard reset before using, thus requiring a system reboot or a "
4120 "hotplug event.\n");
4121
4122out:
4123 return ret;
4124}
4125
4126static int vxge_probe_fw_update(struct vxgedev *vdev)
4127{
4128 u32 maj, min, bld;
4129 int ret, gpxe = 0;
4130 char *fw_name;
4131
4132 maj = vdev->config.device_hw_info.fw_version.major;
4133 min = vdev->config.device_hw_info.fw_version.minor;
4134 bld = vdev->config.device_hw_info.fw_version.build;
4135
4136 if (VXGE_FW_VER(maj, min, bld) == VXGE_CERT_FW_VER)
4137 return 0;
4138
4139 /* Ignore the build number when determining if the current firmware is
4140 * "too new" to load the driver
4141 */
4142 if (VXGE_FW_VER(maj, min, 0) > VXGE_CERT_FW_VER) {
4143 vxge_debug_init(VXGE_ERR, "%s: Firmware newer than last known "
4144 "version, unable to load driver\n",
4145 VXGE_DRIVER_NAME);
4146 return -EINVAL;
4147 }
4148
4149 /* Firmware 1.4.4 and older cannot be upgraded, and is too ancient to
4150 * work with this driver.
4151 */
4152 if (VXGE_FW_VER(maj, min, bld) <= VXGE_FW_DEAD_VER) {
4153 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d cannot be "
4154 "upgraded\n", VXGE_DRIVER_NAME, maj, min, bld);
4155 return -EINVAL;
4156 }
4157
4158 /* If file not specified, determine gPXE or not */
4159 if (VXGE_FW_VER(maj, min, bld) >= VXGE_EPROM_FW_VER) {
4160 int i;
4161 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++)
4162 if (vdev->devh->eprom_versions[i]) {
4163 gpxe = 1;
4164 break;
4165 }
4166 }
4167 if (gpxe)
4168 fw_name = "vxge/X3fw-pxe.ncf";
4169 else
4170 fw_name = "vxge/X3fw.ncf";
4171
4172 ret = vxge_fw_upgrade(vdev, fw_name, 0);
4173 /* -EINVAL and -ENOENT are not fatal errors for flashing firmware on
4174 * probe, so ignore them
4175 */
4176 if (ret != -EINVAL && ret != -ENOENT)
4177 return -EIO;
4178 else
4179 ret = 0;
4180
4181 if (VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, VXGE_CERT_FW_VER_MINOR, 0) >
4182 VXGE_FW_VER(maj, min, 0)) {
4183 vxge_debug_init(VXGE_ERR, "%s: Firmware %d.%d.%d is too old to"
4184 " be used with this driver.\n"
4185 "Please get the latest version from "
4186 "ftp://ftp.s2io.com/pub/X3100-Drivers/FIRMWARE",
4187 VXGE_DRIVER_NAME, maj, min, bld);
4188 return -EINVAL;
4189 }
4190
4191 return ret;
4192}
4193
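A usage note on the firmware lookup above (not part of the patch): request_firmware() resolves these names against the kernel's firmware search path, so the images would normally be installed as /lib/firmware/vxge/X3fw.ncf, or /lib/firmware/vxge/X3fw-pxe.ncf on gPXE-capable boards, before the driver loads. If the driver also wants packaging tools to pull the files in automatically, the usual pattern is to declare them with MODULE_FIRMWARE() -- whether this patch does so elsewhere is not shown here:

	MODULE_FIRMWARE("vxge/X3fw.ncf");
	MODULE_FIRMWARE("vxge/X3fw-pxe.ncf");
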
4194static int __devinit is_sriov_initialized(struct pci_dev *pdev)
4195{
4196 int pos;
4197 u16 ctrl;
4198
4199 pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
4200 if (pos) {
4201 pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &ctrl);
4202 if (ctrl & PCI_SRIOV_CTRL_VFE)
4203 return 1;
4204 }
4205 return 0;
4206}
4207
3917/** 4208/**
3918 * vxge_probe 4209 * vxge_probe
3919 * @pdev : structure containing the PCI related information of the device. 4210 * @pdev : structure containing the PCI related information of the device.
@@ -3928,7 +4219,7 @@ static inline u32 vxge_get_num_vfs(u64 function_mode)
3928static int __devinit 4219static int __devinit
3929vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre) 4220vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3930{ 4221{
3931 struct __vxge_hw_device *hldev; 4222 struct __vxge_hw_device *hldev;
3932 enum vxge_hw_status status; 4223 enum vxge_hw_status status;
3933 int ret; 4224 int ret;
3934 int high_dma = 0; 4225 int high_dma = 0;
@@ -3951,9 +4242,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3951 attr.pdev = pdev; 4242 attr.pdev = pdev;
3952 4243
3953 /* In SRIOV-17 mode, functions of the same adapter 4244 /* In SRIOV-17 mode, functions of the same adapter
3954 * can be deployed on different buses */ 4245 * can be deployed on different buses
3955 if ((!pdev->is_virtfn) && ((bus != pdev->bus->number) || 4246 */
3956 (device != PCI_SLOT(pdev->devfn)))) 4247 if (((bus != pdev->bus->number) || (device != PCI_SLOT(pdev->devfn))) &&
4248 !pdev->is_virtfn)
3957 new_device = 1; 4249 new_device = 1;
3958 4250
3959 bus = pdev->bus->number; 4251 bus = pdev->bus->number;
@@ -3971,6 +4263,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3971 driver_config->config_dev_cnt = 0; 4263 driver_config->config_dev_cnt = 0;
3972 driver_config->total_dev_cnt = 0; 4264 driver_config->total_dev_cnt = 0;
3973 } 4265 }
4266
3974 /* Now making the CPU based no of vpath calculation 4267 /* Now making the CPU based no of vpath calculation
3975 * applicable for individual functions as well. 4268 * applicable for individual functions as well.
3976 */ 4269 */
@@ -3993,11 +4286,11 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
3993 goto _exit0; 4286 goto _exit0;
3994 } 4287 }
3995 4288
3996 ll_config = kzalloc(sizeof(*ll_config), GFP_KERNEL); 4289 ll_config = kzalloc(sizeof(struct vxge_config), GFP_KERNEL);
3997 if (!ll_config) { 4290 if (!ll_config) {
3998 ret = -ENOMEM; 4291 ret = -ENOMEM;
3999 vxge_debug_init(VXGE_ERR, 4292 vxge_debug_init(VXGE_ERR,
4000 "ll_config : malloc failed %s %d", 4293 "device_config : malloc failed %s %d",
4001 __FILE__, __LINE__); 4294 __FILE__, __LINE__);
4002 goto _exit0; 4295 goto _exit0;
4003 } 4296 }
@@ -4041,7 +4334,7 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4041 goto _exit1; 4334 goto _exit1;
4042 } 4335 }
4043 4336
4044 if (pci_request_regions(pdev, VXGE_DRIVER_NAME)) { 4337 if (pci_request_region(pdev, 0, VXGE_DRIVER_NAME)) {
4045 vxge_debug_init(VXGE_ERR, 4338 vxge_debug_init(VXGE_ERR,
4046 "%s : request regions failed", __func__); 4339 "%s : request regions failed", __func__);
4047 ret = -ENODEV; 4340 ret = -ENODEV;
@@ -4072,16 +4365,6 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4072 goto _exit3; 4365 goto _exit3;
4073 } 4366 }
4074 4367
4075 if (ll_config->device_hw_info.fw_version.major !=
4076 VXGE_DRIVER_FW_VERSION_MAJOR) {
4077 vxge_debug_init(VXGE_ERR,
4078 "%s: Incorrect firmware version."
4079 "Please upgrade the firmware to version 1.x.x",
4080 VXGE_DRIVER_NAME);
4081 ret = -EINVAL;
4082 goto _exit3;
4083 }
4084
4085 vpath_mask = ll_config->device_hw_info.vpath_mask; 4368 vpath_mask = ll_config->device_hw_info.vpath_mask;
4086 if (vpath_mask == 0) { 4369 if (vpath_mask == 0) {
4087 vxge_debug_ll_config(VXGE_TRACE, 4370 vxge_debug_ll_config(VXGE_TRACE,
@@ -4110,14 +4393,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4110 num_vfs = vxge_get_num_vfs(function_mode) - 1; 4393 num_vfs = vxge_get_num_vfs(function_mode) - 1;
4111 4394
4112 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */ 4395 /* Enable SRIOV mode, if firmware has SRIOV support and if it is a PF */
4113 if (is_sriov(function_mode) && (max_config_dev > 1) && 4396 if (is_sriov(function_mode) && !is_sriov_initialized(pdev) &&
4114 (ll_config->intr_type != INTA) && 4397 (ll_config->intr_type != INTA)) {
4115 (is_privileged == VXGE_HW_OK)) { 4398 ret = pci_enable_sriov(pdev, num_vfs);
4116 ret = pci_enable_sriov(pdev, ((max_config_dev - 1) < num_vfs)
4117 ? (max_config_dev - 1) : num_vfs);
4118 if (ret) 4399 if (ret)
4119 vxge_debug_ll_config(VXGE_ERR, 4400 vxge_debug_ll_config(VXGE_ERR,
4120 "Failed in enabling SRIOV mode: %d\n", ret); 4401 "Failed in enabling SRIOV mode: %d\n", ret);
4402 /* No need to fail out, as an error here is non-fatal */
4121 } 4403 }
4122 4404
4123 /* 4405 /*
@@ -4145,11 +4427,37 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4145 goto _exit3; 4427 goto _exit3;
4146 } 4428 }
4147 4429
4430 if (VXGE_FW_VER(ll_config->device_hw_info.fw_version.major,
4431 ll_config->device_hw_info.fw_version.minor,
4432 ll_config->device_hw_info.fw_version.build) >=
4433 VXGE_EPROM_FW_VER) {
4434 struct eprom_image img[VXGE_HW_MAX_ROM_IMAGES];
4435
4436 status = vxge_hw_vpath_eprom_img_ver_get(hldev, img);
4437 if (status != VXGE_HW_OK) {
4438 vxge_debug_init(VXGE_ERR, "%s: Reading of EPROM failed",
4439 VXGE_DRIVER_NAME);
4440 /* This is a non-fatal error, continue */
4441 }
4442
4443 for (i = 0; i < VXGE_HW_MAX_ROM_IMAGES; i++) {
4444 hldev->eprom_versions[i] = img[i].version;
4445 if (!img[i].is_valid)
4446 break;
4447 vxge_debug_init(VXGE_TRACE, "%s: EPROM %d, version "
4448 "%d.%d.%d.%d\n", VXGE_DRIVER_NAME, i,
4449 VXGE_EPROM_IMG_MAJOR(img[i].version),
4450 VXGE_EPROM_IMG_MINOR(img[i].version),
4451 VXGE_EPROM_IMG_FIX(img[i].version),
4452 VXGE_EPROM_IMG_BUILD(img[i].version));
4453 }
4454 }
4455
4148 /* if FCS stripping is not disabled in MAC fail driver load */ 4456 /* if FCS stripping is not disabled in MAC fail driver load */
4149 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) { 4457 status = vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask);
4150 vxge_debug_init(VXGE_ERR, 4458 if (status != VXGE_HW_OK) {
4151 "%s: FCS stripping is not disabled in MAC" 4459 vxge_debug_init(VXGE_ERR, "%s: FCS stripping is enabled in MAC"
4152 " failing driver load", VXGE_DRIVER_NAME); 4460 " failing driver load", VXGE_DRIVER_NAME);
4153 ret = -EINVAL; 4461 ret = -EINVAL;
4154 goto _exit4; 4462 goto _exit4;
4155 } 4463 }
@@ -4163,28 +4471,32 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4163 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS; 4471 ll_config->fifo_indicate_max_pkts = VXGE_FIFO_INDICATE_MAX_PKTS;
4164 ll_config->addr_learn_en = addr_learn_en; 4472 ll_config->addr_learn_en = addr_learn_en;
4165 ll_config->rth_algorithm = RTH_ALG_JENKINS; 4473 ll_config->rth_algorithm = RTH_ALG_JENKINS;
4166 ll_config->rth_hash_type_tcpipv4 = VXGE_HW_RING_HASH_TYPE_TCP_IPV4; 4474 ll_config->rth_hash_type_tcpipv4 = 1;
4167 ll_config->rth_hash_type_ipv4 = VXGE_HW_RING_HASH_TYPE_NONE; 4475 ll_config->rth_hash_type_ipv4 = 0;
4168 ll_config->rth_hash_type_tcpipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4476 ll_config->rth_hash_type_tcpipv6 = 0;
4169 ll_config->rth_hash_type_ipv6 = VXGE_HW_RING_HASH_TYPE_NONE; 4477 ll_config->rth_hash_type_ipv6 = 0;
4170 ll_config->rth_hash_type_tcpipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4478 ll_config->rth_hash_type_tcpipv6ex = 0;
4171 ll_config->rth_hash_type_ipv6ex = VXGE_HW_RING_HASH_TYPE_NONE; 4479 ll_config->rth_hash_type_ipv6ex = 0;
4172 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE; 4480 ll_config->rth_bkt_sz = RTH_BUCKET_SIZE;
4173 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4481 ll_config->tx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4174 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE; 4482 ll_config->rx_pause_enable = VXGE_PAUSE_CTRL_ENABLE;
4175 4483
4176 if (vxge_device_register(hldev, ll_config, high_dma, no_of_vpath, 4484 ret = vxge_device_register(hldev, ll_config, high_dma, no_of_vpath,
4177 &vdev)) { 4485 &vdev);
4486 if (ret) {
4178 ret = -EINVAL; 4487 ret = -EINVAL;
4179 goto _exit4; 4488 goto _exit4;
4180 } 4489 }
4181 4490
4491 ret = vxge_probe_fw_update(vdev);
4492 if (ret)
4493 goto _exit5;
4494
4182 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL); 4495 vxge_hw_device_debug_set(hldev, VXGE_TRACE, VXGE_COMPONENT_LL);
4183 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev), 4496 VXGE_COPY_DEBUG_INFO_TO_LL(vdev, vxge_hw_device_error_level_get(hldev),
4184 vxge_hw_device_trace_level_get(hldev)); 4497 vxge_hw_device_trace_level_get(hldev));
4185 4498
4186 /* set private HW device info */ 4499 /* set private HW device info */
4187 hldev->ndev = vdev->ndev;
4188 vdev->mtu = VXGE_HW_DEFAULT_MTU; 4500 vdev->mtu = VXGE_HW_DEFAULT_MTU;
4189 vdev->bar0 = attr.bar0; 4501 vdev->bar0 = attr.bar0;
4190 vdev->max_vpath_supported = max_vpath_supported; 4502 vdev->max_vpath_supported = max_vpath_supported;
@@ -4278,15 +4590,13 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4278 4590
4279 /* Copy the station mac address to the list */ 4591 /* Copy the station mac address to the list */
4280 for (i = 0; i < vdev->no_of_vpath; i++) { 4592 for (i = 0; i < vdev->no_of_vpath; i++) {
4281 entry = (struct vxge_mac_addrs *) 4593 entry = kzalloc(sizeof(struct vxge_mac_addrs), GFP_KERNEL);
4282 kzalloc(sizeof(struct vxge_mac_addrs),
4283 GFP_KERNEL);
4284 if (NULL == entry) { 4594 if (NULL == entry) {
4285 vxge_debug_init(VXGE_ERR, 4595 vxge_debug_init(VXGE_ERR,
4286 "%s: mac_addr_list : memory allocation failed", 4596 "%s: mac_addr_list : memory allocation failed",
4287 vdev->ndev->name); 4597 vdev->ndev->name);
4288 ret = -EPERM; 4598 ret = -EPERM;
4289 goto _exit5; 4599 goto _exit6;
4290 } 4600 }
4291 macaddr = (u8 *)&entry->macaddr; 4601 macaddr = (u8 *)&entry->macaddr;
4292 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN); 4602 memcpy(macaddr, vdev->ndev->dev_addr, ETH_ALEN);
@@ -4326,10 +4636,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4326 kfree(ll_config); 4636 kfree(ll_config);
4327 return 0; 4637 return 0;
4328 4638
4329_exit5: 4639_exit6:
4330 for (i = 0; i < vdev->no_of_vpath; i++) 4640 for (i = 0; i < vdev->no_of_vpath; i++)
4331 vxge_free_mac_add_list(&vdev->vpaths[i]); 4641 vxge_free_mac_add_list(&vdev->vpaths[i]);
4332 4642_exit5:
4333 vxge_device_unregister(hldev); 4643 vxge_device_unregister(hldev);
4334_exit4: 4644_exit4:
4335 pci_disable_sriov(pdev); 4645 pci_disable_sriov(pdev);
@@ -4337,7 +4647,7 @@ _exit4:
4337_exit3: 4647_exit3:
4338 iounmap(attr.bar0); 4648 iounmap(attr.bar0);
4339_exit2: 4649_exit2:
4340 pci_release_regions(pdev); 4650 pci_release_region(pdev, 0);
4341_exit1: 4651_exit1:
4342 pci_disable_device(pdev); 4652 pci_disable_device(pdev);
4343_exit0: 4653_exit0:
@@ -4354,34 +4664,25 @@ _exit0:
4354 * Description: This function is called by the Pci subsystem to release a 4664 * Description: This function is called by the Pci subsystem to release a
4355 * PCI device and free up all resource held up by the device. 4665 * PCI device and free up all resource held up by the device.
4356 */ 4666 */
4357static void __devexit 4667static void __devexit vxge_remove(struct pci_dev *pdev)
4358vxge_remove(struct pci_dev *pdev)
4359{ 4668{
4360 struct __vxge_hw_device *hldev; 4669 struct __vxge_hw_device *hldev;
4361 struct vxgedev *vdev = NULL; 4670 struct vxgedev *vdev = NULL;
4362 struct net_device *dev; 4671 struct net_device *dev;
4363 int i = 0; 4672 int i = 0;
4364#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \
4365 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4366 u32 level_trace;
4367#endif
4368 4673
4369 hldev = (struct __vxge_hw_device *) pci_get_drvdata(pdev); 4674 hldev = pci_get_drvdata(pdev);
4370 4675
4371 if (hldev == NULL) 4676 if (hldev == NULL)
4372 return; 4677 return;
4678
4373 dev = hldev->ndev; 4679 dev = hldev->ndev;
4374 vdev = netdev_priv(dev); 4680 vdev = netdev_priv(dev);
4375 4681
4376#if ((VXGE_DEBUG_INIT & VXGE_DEBUG_MASK) || \ 4682 vxge_debug_entryexit(vdev->level_trace, "%s:%d", __func__, __LINE__);
4377 (VXGE_DEBUG_ENTRYEXIT & VXGE_DEBUG_MASK))
4378 level_trace = vdev->level_trace;
4379#endif
4380 vxge_debug_entryexit(level_trace,
4381 "%s:%d", __func__, __LINE__);
4382 4683
4383 vxge_debug_init(level_trace, 4684 vxge_debug_init(vdev->level_trace, "%s : removing PCI device...",
4384 "%s : removing PCI device...", __func__); 4685 __func__);
4385 vxge_device_unregister(hldev); 4686 vxge_device_unregister(hldev);
4386 4687
4387 for (i = 0; i < vdev->no_of_vpath; i++) { 4688 for (i = 0; i < vdev->no_of_vpath; i++) {
@@ -4394,21 +4695,19 @@ vxge_remove(struct pci_dev *pdev)
4394 4695
4395 iounmap(vdev->bar0); 4696 iounmap(vdev->bar0);
4396 4697
4397 pci_disable_sriov(pdev);
4398
4399 /* we are safe to free it now */ 4698 /* we are safe to free it now */
4400 free_netdev(dev); 4699 free_netdev(dev);
4401 4700
4402 vxge_debug_init(level_trace, 4701 vxge_debug_init(vdev->level_trace, "%s:%d Device unregistered",
4403 "%s:%d Device unregistered", __func__, __LINE__); 4702 __func__, __LINE__);
4404 4703
4405 vxge_hw_device_terminate(hldev); 4704 vxge_hw_device_terminate(hldev);
4406 4705
4407 pci_disable_device(pdev); 4706 pci_disable_device(pdev);
4408 pci_release_regions(pdev); 4707 pci_release_region(pdev, 0);
4409 pci_set_drvdata(pdev, NULL); 4708 pci_set_drvdata(pdev, NULL);
4410 vxge_debug_entryexit(level_trace, 4709 vxge_debug_entryexit(vdev->level_trace, "%s:%d Exiting...", __func__,
4411 "%s:%d Exiting...", __func__, __LINE__); 4710 __LINE__);
4412} 4711}
4413 4712
4414static struct pci_error_handlers vxge_err_handler = { 4713static struct pci_error_handlers vxge_err_handler = {
@@ -4444,6 +4743,10 @@ vxge_starter(void)
4444 return -ENOMEM; 4743 return -ENOMEM;
4445 4744
4446 ret = pci_register_driver(&vxge_driver); 4745 ret = pci_register_driver(&vxge_driver);
4746 if (ret) {
4747 kfree(driver_config);
4748 goto err;
4749 }
4447 4750
4448 if (driver_config->config_dev_cnt && 4751 if (driver_config->config_dev_cnt &&
4449 (driver_config->config_dev_cnt != driver_config->total_dev_cnt)) 4752 (driver_config->config_dev_cnt != driver_config->total_dev_cnt))
@@ -4451,10 +4754,7 @@ vxge_starter(void)
4451 "%s: Configured %d of %d devices", 4754 "%s: Configured %d of %d devices",
4452 VXGE_DRIVER_NAME, driver_config->config_dev_cnt, 4755 VXGE_DRIVER_NAME, driver_config->config_dev_cnt,
4453 driver_config->total_dev_cnt); 4756 driver_config->total_dev_cnt);
4454 4757err:
4455 if (ret)
4456 kfree(driver_config);
4457
4458 return ret; 4758 return ret;
4459} 4759}
4460 4760
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index de64536cb7d0..5746fedc356f 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -29,6 +29,9 @@
29 29
30#define PCI_DEVICE_ID_TITAN_WIN 0x5733 30#define PCI_DEVICE_ID_TITAN_WIN 0x5733
31#define PCI_DEVICE_ID_TITAN_UNI 0x5833 31#define PCI_DEVICE_ID_TITAN_UNI 0x5833
32#define VXGE_HW_TITAN1_PCI_REVISION 1
33#define VXGE_HW_TITAN1A_PCI_REVISION 2
34
32#define VXGE_USE_DEFAULT 0xffffffff 35#define VXGE_USE_DEFAULT 0xffffffff
33#define VXGE_HW_VPATH_MSIX_ACTIVE 4 36#define VXGE_HW_VPATH_MSIX_ACTIVE 4
34#define VXGE_ALARM_MSIX_ID 2 37#define VXGE_ALARM_MSIX_ID 2
@@ -53,11 +56,13 @@
53 56
54#define VXGE_TTI_BTIMER_VAL 250000 57#define VXGE_TTI_BTIMER_VAL 250000
55 58
56#define VXGE_TTI_LTIMER_VAL 1000 59#define VXGE_TTI_LTIMER_VAL 1000
57#define VXGE_TTI_RTIMER_VAL 0 60#define VXGE_T1A_TTI_LTIMER_VAL 80
58#define VXGE_RTI_BTIMER_VAL 250 61#define VXGE_TTI_RTIMER_VAL 0
59#define VXGE_RTI_LTIMER_VAL 100 62#define VXGE_T1A_TTI_RTIMER_VAL 400
60#define VXGE_RTI_RTIMER_VAL 0 63#define VXGE_RTI_BTIMER_VAL 250
64#define VXGE_RTI_LTIMER_VAL 100
65#define VXGE_RTI_RTIMER_VAL 0
61#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH 66#define VXGE_FIFO_INDICATE_MAX_PKTS VXGE_DEF_FIFO_LENGTH
62#define VXGE_ISR_POLLING_CNT 8 67#define VXGE_ISR_POLLING_CNT 8
63#define VXGE_MAX_CONFIG_DEV 0xFF 68#define VXGE_MAX_CONFIG_DEV 0xFF
@@ -76,14 +81,32 @@
76#define TTI_TX_UFC_B 40 81#define TTI_TX_UFC_B 40
77#define TTI_TX_UFC_C 60 82#define TTI_TX_UFC_C 60
78#define TTI_TX_UFC_D 100 83#define TTI_TX_UFC_D 100
84#define TTI_T1A_TX_UFC_A 30
85#define TTI_T1A_TX_UFC_B 80
86/* Slope - (max_mtu - min_mtu)/(max_mtu_ufc - min_mtu_ufc) */
87/* Slope - 93 */
88/* 60 - 9k Mtu, 140 - 1.5k mtu */
89#define TTI_T1A_TX_UFC_C(mtu) (60 + ((VXGE_HW_MAX_MTU - mtu) / 93))
90
91/* Slope - 37 */
92/* 100 - 9k Mtu, 300 - 1.5k mtu */
93#define TTI_T1A_TX_UFC_D(mtu) (100 + ((VXGE_HW_MAX_MTU - mtu) / 37))
94
95
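To make the slope comments above concrete, a worked example (assuming VXGE_HW_MAX_MTU is on the order of 9000 bytes; the exact constant lives in the HW headers):

	TTI_T1A_TX_UFC_C(9000) =  60 + (9000 - 9000) / 93  =  60
	TTI_T1A_TX_UFC_C(1500) =  60 + (9000 - 1500) / 93 ~= 140
	TTI_T1A_TX_UFC_D(9000) = 100 + (9000 - 9000) / 37  = 100
	TTI_T1A_TX_UFC_D(1500) = 100 + (9000 - 1500) / 37 ~= 302

so the utilization-based fire counts scale linearly from the jumbo-frame values (60/100) up to roughly the 1.5k-MTU values (140/300) quoted in the comments.
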
96#define RTI_RX_URANGE_A 5
97#define RTI_RX_URANGE_B 15
98#define RTI_RX_URANGE_C 40
99#define RTI_T1A_RX_URANGE_A 1
100#define RTI_T1A_RX_URANGE_B 20
101#define RTI_T1A_RX_URANGE_C 50
102#define RTI_RX_UFC_A 1
103#define RTI_RX_UFC_B 5
104#define RTI_RX_UFC_C 10
105#define RTI_RX_UFC_D 15
106#define RTI_T1A_RX_UFC_B 20
107#define RTI_T1A_RX_UFC_C 50
108#define RTI_T1A_RX_UFC_D 60
79 109
80#define RTI_RX_URANGE_A 5
81#define RTI_RX_URANGE_B 15
82#define RTI_RX_URANGE_C 40
83#define RTI_RX_UFC_A 1
84#define RTI_RX_UFC_B 5
85#define RTI_RX_UFC_C 10
86#define RTI_RX_UFC_D 15
87 110
88/* Milli secs timer period */ 111/* Milli secs timer period */
89#define VXGE_TIMER_DELAY 10000 112#define VXGE_TIMER_DELAY 10000
@@ -145,15 +168,15 @@ struct vxge_config {
145 168
146 int addr_learn_en; 169 int addr_learn_en;
147 170
148 int rth_steering; 171 u32 rth_steering:2,
149 int rth_algorithm; 172 rth_algorithm:2,
150 int rth_hash_type_tcpipv4; 173 rth_hash_type_tcpipv4:1,
151 int rth_hash_type_ipv4; 174 rth_hash_type_ipv4:1,
152 int rth_hash_type_tcpipv6; 175 rth_hash_type_tcpipv6:1,
153 int rth_hash_type_ipv6; 176 rth_hash_type_ipv6:1,
154 int rth_hash_type_tcpipv6ex; 177 rth_hash_type_tcpipv6ex:1,
155 int rth_hash_type_ipv6ex; 178 rth_hash_type_ipv6ex:1,
156 int rth_bkt_sz; 179 rth_bkt_sz:8;
157 int rth_jhash_golden_ratio; 180 int rth_jhash_golden_ratio;
158 int tx_steering_type; 181 int tx_steering_type;
159 int fifo_indicate_max_pkts; 182 int fifo_indicate_max_pkts;
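A short aside on the conversion above (illustrative arithmetic, not from the patch): the nine RTH fields previously took nine ints, typically 36 bytes of struct vxge_config, while the widths chosen here (2 + 2 + 1 + 1 + 1 + 1 + 1 + 1 + 8 = 18 bits) let them share a single u32. The values they hold -- small steering/algorithm selectors, on/off hash-type flags and a bucket size -- fit those widths; a compile-time guard along these lines (hypothetical, not in the patch) would document the assumption for the 8-bit field:

	BUILD_BUG_ON(RTH_BUCKET_SIZE >= (1 << 8));
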
@@ -248,8 +271,9 @@ struct vxge_ring {
248 */ 271 */
249 int driver_id; 272 int driver_id;
250 273
251 /* copy of the flag indicating whether rx_csum is to be used */ 274 /* copy of the flag indicating whether rx_csum is to be used */
252 u32 rx_csum; 275 u32 rx_csum:1,
276 rx_hwts:1;
253 277
254 int pkts_processed; 278 int pkts_processed;
255 int budget; 279 int budget;
@@ -281,8 +305,8 @@ struct vxge_vpath {
281 int is_configured; 305 int is_configured;
282 int is_open; 306 int is_open;
283 struct vxgedev *vdev; 307 struct vxgedev *vdev;
284 u8 (macaddr)[ETH_ALEN]; 308 u8 macaddr[ETH_ALEN];
285 u8 (macmask)[ETH_ALEN]; 309 u8 macmask[ETH_ALEN];
286 310
287#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048 311#define VXGE_MAX_LEARN_MAC_ADDR_CNT 2048
288 /* mac addresses currently programmed into NIC */ 312 /* mac addresses currently programmed into NIC */
@@ -327,7 +351,9 @@ struct vxgedev {
327 u16 all_multi_flg; 351 u16 all_multi_flg;
328 352
329 /* A flag indicating whether rx_csum is to be used or not. */ 353 /* A flag indicating whether rx_csum is to be used or not. */
330 u32 rx_csum; 354 u32 rx_csum:1,
355 rx_hwts:1,
356 titan1:1;
331 357
332 struct vxge_msix_entry *vxge_entries; 358 struct vxge_msix_entry *vxge_entries;
333 struct msix_entry *entries; 359 struct msix_entry *entries;
@@ -369,6 +395,7 @@ struct vxgedev {
369 u32 level_err; 395 u32 level_err;
370 u32 level_trace; 396 u32 level_trace;
371 char fw_version[VXGE_HW_FW_STRLEN]; 397 char fw_version[VXGE_HW_FW_STRLEN];
398 struct work_struct reset_task;
372}; 399};
373 400
374struct vxge_rx_priv { 401struct vxge_rx_priv {
@@ -387,8 +414,6 @@ struct vxge_tx_priv {
387 static int p = val; \ 414 static int p = val; \
388 module_param(p, int, 0) 415 module_param(p, int, 0)
389 416
390#define vxge_os_bug(fmt...) { printk(fmt); BUG(); }
391
392#define vxge_os_timer(timer, handle, arg, exp) do { \ 417#define vxge_os_timer(timer, handle, arg, exp) do { \
393 init_timer(&timer); \ 418 init_timer(&timer); \
394 timer.function = handle; \ 419 timer.function = handle; \
@@ -396,7 +421,10 @@ struct vxge_tx_priv {
396 mod_timer(&timer, (jiffies + exp)); \ 421 mod_timer(&timer, (jiffies + exp)); \
397 } while (0); 422 } while (0);
398 423
399extern void vxge_initialize_ethtool_ops(struct net_device *ndev); 424void vxge_initialize_ethtool_ops(struct net_device *ndev);
425enum vxge_hw_status vxge_reset_all_vpaths(struct vxgedev *vdev);
426int vxge_fw_upgrade(struct vxgedev *vdev, char *fw_name, int override);
427
400/** 428/**
401 * #define VXGE_DEBUG_INIT: debug for initialization functions 429 * #define VXGE_DEBUG_INIT: debug for initialization functions
402 * #define VXGE_DEBUG_TX : debug transmit related functions 430 * #define VXGE_DEBUG_TX : debug transmit related functions
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 3dd5c9615ef9..3e658b175947 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -49,6 +49,33 @@
49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17 49#define VXGE_HW_TITAN_VPMGMT_REG_SPACES 17
50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17 50#define VXGE_HW_TITAN_VPATH_REG_SPACES 17
51 51
52#define VXGE_HW_FW_API_GET_EPROM_REV 31
53
54#define VXGE_EPROM_IMG_MAJOR(val) (u32) vxge_bVALn(val, 48, 4)
55#define VXGE_EPROM_IMG_MINOR(val) (u32) vxge_bVALn(val, 52, 4)
56#define VXGE_EPROM_IMG_FIX(val) (u32) vxge_bVALn(val, 56, 4)
57#define VXGE_EPROM_IMG_BUILD(val) (u32) vxge_bVALn(val, 60, 4)
58
59#define VXGE_HW_GET_EPROM_IMAGE_INDEX(val) vxge_bVALn(val, 16, 8)
60#define VXGE_HW_GET_EPROM_IMAGE_VALID(val) vxge_bVALn(val, 31, 1)
61#define VXGE_HW_GET_EPROM_IMAGE_TYPE(val) vxge_bVALn(val, 40, 8)
62#define VXGE_HW_GET_EPROM_IMAGE_REV(val) vxge_bVALn(val, 48, 16)
63#define VXGE_HW_RTS_ACCESS_STEER_ROM_IMAGE_INDEX(val) vxge_vBIT(val, 16, 8)
64
65#define VXGE_HW_FW_API_GET_FUNC_MODE 29
66#define VXGE_HW_GET_FUNC_MODE_VAL(val) (val & 0xFF)
67
68#define VXGE_HW_FW_UPGRADE_MEMO 13
69#define VXGE_HW_FW_UPGRADE_ACTION 16
70#define VXGE_HW_FW_UPGRADE_OFFSET_START 2
71#define VXGE_HW_FW_UPGRADE_OFFSET_SEND 3
72#define VXGE_HW_FW_UPGRADE_OFFSET_COMMIT 4
73#define VXGE_HW_FW_UPGRADE_OFFSET_READ 5
74
75#define VXGE_HW_FW_UPGRADE_BLK_SIZE 16
76#define VXGE_HW_UPGRADE_GET_RET_ERR_CODE(val) (val & 0xff)
77#define VXGE_HW_UPGRADE_GET_SEC_ERR_CODE(val) ((val >> 8) & 0xff)
78
52#define VXGE_HW_ASIC_MODE_RESERVED 0 79#define VXGE_HW_ASIC_MODE_RESERVED 0
53#define VXGE_HW_ASIC_MODE_NO_IOV 1 80#define VXGE_HW_ASIC_MODE_NO_IOV 1
54#define VXGE_HW_ASIC_MODE_SR_IOV 2 81#define VXGE_HW_ASIC_MODE_SR_IOV 2
@@ -165,13 +192,13 @@
165#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2 192#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_ETYPE 2
166#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3 193#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_PN 3
167#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5 194#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_GEN_CFG 5
168#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6 195#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_SOLO_IT 6
169#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7 196#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_JHASH_CFG 7
170#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8 197#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MASK 8
171#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9 198#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_RTH_KEY 9
172#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10 199#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_QOS 10
173#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11 200#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_DS 11
174#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12 201#define VXGE_HW_RTS_ACS_STEER_CTRL_DATA_STRUCT_SEL_RTH_MULTI_IT 12
175#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13 202#define VXGE_HW_RTS_ACCESS_STEER_CTRL_DATA_STRUCT_SEL_FW_MEMO 13
176 203
177#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \ 204#define VXGE_HW_RTS_ACCESS_STEER_DATA0_GET_DA_MAC_ADDR(bits) \
@@ -437,6 +464,7 @@
437#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \ 464#define VXGE_HW_RTS_ACCESS_STEER_DATA1_GET_FLASH_VER_BUILD(bits) \
438 vxge_bVALn(bits, 48, 16) 465 vxge_bVALn(bits, 48, 16)
439#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16) 466#define VXGE_HW_RTS_ACCESS_STEER_DATA1_FLASH_VER_BUILD vxge_vBIT(val, 48, 16)
467#define VXGE_HW_RTS_ACCESS_STEER_CTRL_GET_ACTION(bits) vxge_bVALn(bits, 0, 8)
440 468
441#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\ 469#define VXGE_HW_SRPCIM_TO_VPATH_ALARM_REG_GET_PPIF_SRPCIM_TO_VPATH_ALARM(bits)\
442 vxge_bVALn(bits, 0, 18) 470 vxge_bVALn(bits, 0, 18)
@@ -3998,6 +4026,7 @@ struct vxge_hw_vpath_reg {
3998#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9) 4026#define VXGE_HW_PRC_CFG6_L4_CPC_TRSFR_CODE_EN vxge_mBIT(9)
3999#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9) 4027#define VXGE_HW_PRC_CFG6_RXD_CRXDT(val) vxge_vBIT(val, 23, 9)
4000#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9) 4028#define VXGE_HW_PRC_CFG6_RXD_SPAT(val) vxge_vBIT(val, 36, 9)
4029#define VXGE_HW_PRC_CFG6_GET_RXD_SPAT(val) vxge_bVALn(val, 36, 9)
4001/*0x00a78*/ u64 prc_cfg7; 4030/*0x00a78*/ u64 prc_cfg7;
4002#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2) 4031#define VXGE_HW_PRC_CFG7_SCATTER_MODE(val) vxge_vBIT(val, 6, 2)
4003#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11) 4032#define VXGE_HW_PRC_CFG7_SMART_SCAT_EN vxge_mBIT(11)
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index 4bdb611a6842..4c10d6c4075f 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -17,13 +17,6 @@
17#include "vxge-config.h" 17#include "vxge-config.h"
18#include "vxge-main.h" 18#include "vxge-main.h"
19 19
20static enum vxge_hw_status
21__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev,
22 u32 vp_id, enum vxge_hw_event type);
23static enum vxge_hw_status
24__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
25 u32 skip_alarms);
26
27/* 20/*
28 * vxge_hw_vpath_intr_enable - Enable vpath interrupts. 21 * vxge_hw_vpath_intr_enable - Enable vpath interrupts.
29 * @vp: Virtual Path handle. 22 * @vp: Virtual Path handle.
@@ -419,6 +412,384 @@ void vxge_hw_device_flush_io(struct __vxge_hw_device *hldev)
419} 412}
420 413
421/** 414/**
415 * __vxge_hw_device_handle_error - Handle error
416 * @hldev: HW device
417 * @vp_id: Vpath Id
418 * @type: Error type. Please see enum vxge_hw_event{}
419 *
420 * Handle error.
421 */
422static enum vxge_hw_status
423__vxge_hw_device_handle_error(struct __vxge_hw_device *hldev, u32 vp_id,
424 enum vxge_hw_event type)
425{
426 switch (type) {
427 case VXGE_HW_EVENT_UNKNOWN:
428 break;
429 case VXGE_HW_EVENT_RESET_START:
430 case VXGE_HW_EVENT_RESET_COMPLETE:
431 case VXGE_HW_EVENT_LINK_DOWN:
432 case VXGE_HW_EVENT_LINK_UP:
433 goto out;
434 case VXGE_HW_EVENT_ALARM_CLEARED:
435 goto out;
436 case VXGE_HW_EVENT_ECCERR:
437 case VXGE_HW_EVENT_MRPCIM_ECCERR:
438 goto out;
439 case VXGE_HW_EVENT_FIFO_ERR:
440 case VXGE_HW_EVENT_VPATH_ERR:
441 case VXGE_HW_EVENT_CRITICAL_ERR:
442 case VXGE_HW_EVENT_SERR:
443 break;
444 case VXGE_HW_EVENT_SRPCIM_SERR:
445 case VXGE_HW_EVENT_MRPCIM_SERR:
446 goto out;
447 case VXGE_HW_EVENT_SLOT_FREEZE:
448 break;
449 default:
450 vxge_assert(0);
451 goto out;
452 }
453
454 /* notify driver */
455 if (hldev->uld_callbacks.crit_err)
456 hldev->uld_callbacks.crit_err(
457 (struct __vxge_hw_device *)hldev,
458 type, vp_id);
459out:
460
461 return VXGE_HW_OK;
462}
463
464/*
465 * __vxge_hw_device_handle_link_down_ind
466 * @hldev: HW device handle.
467 *
468 * Link down indication handler. The function is invoked by HW when
469 * Titan indicates that the link is down.
470 */
471static enum vxge_hw_status
472__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
473{
474 /*
475 * If the previous link state is not down, return.
476 */
477 if (hldev->link_state == VXGE_HW_LINK_DOWN)
478 goto exit;
479
480 hldev->link_state = VXGE_HW_LINK_DOWN;
481
482 /* notify driver */
483 if (hldev->uld_callbacks.link_down)
484 hldev->uld_callbacks.link_down(hldev);
485exit:
486 return VXGE_HW_OK;
487}
488
489/*
490 * __vxge_hw_device_handle_link_up_ind
491 * @hldev: HW device handle.
492 *
493 * Link up indication handler. The function is invoked by HW when
494 * Titan indicates that the link is up for programmable amount of time.
495 */
496static enum vxge_hw_status
497__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
498{
499 /*
500 * If the previous link state is not down, return.
501 */
502 if (hldev->link_state == VXGE_HW_LINK_UP)
503 goto exit;
504
505 hldev->link_state = VXGE_HW_LINK_UP;
506
507 /* notify driver */
508 if (hldev->uld_callbacks.link_up)
509 hldev->uld_callbacks.link_up(hldev);
510exit:
511 return VXGE_HW_OK;
512}
513
514/*
515 * __vxge_hw_vpath_alarm_process - Process Alarms.
516 * @vpath: Virtual Path.
517 * @skip_alarms: Do not clear the alarms
518 *
519 * Process vpath alarms.
520 *
521 */
522static enum vxge_hw_status
523__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
524 u32 skip_alarms)
525{
526 u64 val64;
527 u64 alarm_status;
528 u64 pic_status;
529 struct __vxge_hw_device *hldev = NULL;
530 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
531 u64 mask64;
532 struct vxge_hw_vpath_stats_sw_info *sw_stats;
533 struct vxge_hw_vpath_reg __iomem *vp_reg;
534
535 if (vpath == NULL) {
536 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
537 alarm_event);
538 goto out2;
539 }
540
541 hldev = vpath->hldev;
542 vp_reg = vpath->vp_reg;
543 alarm_status = readq(&vp_reg->vpath_general_int_status);
544
545 if (alarm_status == VXGE_HW_ALL_FOXES) {
546 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
547 alarm_event);
548 goto out;
549 }
550
551 sw_stats = vpath->sw_stats;
552
553 if (alarm_status & ~(
554 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
555 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
556 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
557 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
558 sw_stats->error_stats.unknown_alarms++;
559
560 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
561 alarm_event);
562 goto out;
563 }
564
565 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
566
567 val64 = readq(&vp_reg->xgmac_vp_int_status);
568
569 if (val64 &
570 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
571
572 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
573
574 if (((val64 &
575 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
576 (!(val64 &
577 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
578 ((val64 &
579 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
580 (!(val64 &
581 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
582 ))) {
583 sw_stats->error_stats.network_sustained_fault++;
584
585 writeq(
586 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
587 &vp_reg->asic_ntwk_vp_err_mask);
588
589 __vxge_hw_device_handle_link_down_ind(hldev);
590 alarm_event = VXGE_HW_SET_LEVEL(
591 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
592 }
593
594 if (((val64 &
595 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
596 (!(val64 &
597 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
598 ((val64 &
599 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
600 (!(val64 &
601 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
602 ))) {
603
604 sw_stats->error_stats.network_sustained_ok++;
605
606 writeq(
607 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
608 &vp_reg->asic_ntwk_vp_err_mask);
609
610 __vxge_hw_device_handle_link_up_ind(hldev);
611 alarm_event = VXGE_HW_SET_LEVEL(
612 VXGE_HW_EVENT_LINK_UP, alarm_event);
613 }
614
615 writeq(VXGE_HW_INTR_MASK_ALL,
616 &vp_reg->asic_ntwk_vp_err_reg);
617
618 alarm_event = VXGE_HW_SET_LEVEL(
619 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
620
621 if (skip_alarms)
622 return VXGE_HW_OK;
623 }
624 }
625
626 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
627
628 pic_status = readq(&vp_reg->vpath_ppif_int_status);
629
630 if (pic_status &
631 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
632
633 val64 = readq(&vp_reg->general_errors_reg);
634 mask64 = readq(&vp_reg->general_errors_mask);
635
636 if ((val64 &
637 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
638 ~mask64) {
639 sw_stats->error_stats.ini_serr_det++;
640
641 alarm_event = VXGE_HW_SET_LEVEL(
642 VXGE_HW_EVENT_SERR, alarm_event);
643 }
644
645 if ((val64 &
646 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
647 ~mask64) {
648 sw_stats->error_stats.dblgen_fifo0_overflow++;
649
650 alarm_event = VXGE_HW_SET_LEVEL(
651 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
652 }
653
654 if ((val64 &
655 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
656 ~mask64)
657 sw_stats->error_stats.statsb_pif_chain_error++;
658
659 if ((val64 &
660 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
661 ~mask64)
662 sw_stats->error_stats.statsb_drop_timeout++;
663
664 if ((val64 &
665 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
666 ~mask64)
667 sw_stats->error_stats.target_illegal_access++;
668
669 if (!skip_alarms) {
670 writeq(VXGE_HW_INTR_MASK_ALL,
671 &vp_reg->general_errors_reg);
672 alarm_event = VXGE_HW_SET_LEVEL(
673 VXGE_HW_EVENT_ALARM_CLEARED,
674 alarm_event);
675 }
676 }
677
678 if (pic_status &
679 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
680
681 val64 = readq(&vp_reg->kdfcctl_errors_reg);
682 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
683
684 if ((val64 &
685 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
686 ~mask64) {
687 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
688
689 alarm_event = VXGE_HW_SET_LEVEL(
690 VXGE_HW_EVENT_FIFO_ERR,
691 alarm_event);
692 }
693
694 if ((val64 &
695 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
696 ~mask64) {
697 sw_stats->error_stats.kdfcctl_fifo0_poison++;
698
699 alarm_event = VXGE_HW_SET_LEVEL(
700 VXGE_HW_EVENT_FIFO_ERR,
701 alarm_event);
702 }
703
704 if ((val64 &
705 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
706 ~mask64) {
707 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
708
709 alarm_event = VXGE_HW_SET_LEVEL(
710 VXGE_HW_EVENT_FIFO_ERR,
711 alarm_event);
712 }
713
714 if (!skip_alarms) {
715 writeq(VXGE_HW_INTR_MASK_ALL,
716 &vp_reg->kdfcctl_errors_reg);
717 alarm_event = VXGE_HW_SET_LEVEL(
718 VXGE_HW_EVENT_ALARM_CLEARED,
719 alarm_event);
720 }
721 }
722
723 }
724
725 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
726
727 val64 = readq(&vp_reg->wrdma_alarm_status);
728
729 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
730
731 val64 = readq(&vp_reg->prc_alarm_reg);
732 mask64 = readq(&vp_reg->prc_alarm_mask);
733
734 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
735 ~mask64)
736 sw_stats->error_stats.prc_ring_bumps++;
737
738 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
739 ~mask64) {
740 sw_stats->error_stats.prc_rxdcm_sc_err++;
741
742 alarm_event = VXGE_HW_SET_LEVEL(
743 VXGE_HW_EVENT_VPATH_ERR,
744 alarm_event);
745 }
746
747 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
748 & ~mask64) {
749 sw_stats->error_stats.prc_rxdcm_sc_abort++;
750
751 alarm_event = VXGE_HW_SET_LEVEL(
752 VXGE_HW_EVENT_VPATH_ERR,
753 alarm_event);
754 }
755
756 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
757 & ~mask64) {
758 sw_stats->error_stats.prc_quanta_size_err++;
759
760 alarm_event = VXGE_HW_SET_LEVEL(
761 VXGE_HW_EVENT_VPATH_ERR,
762 alarm_event);
763 }
764
765 if (!skip_alarms) {
766 writeq(VXGE_HW_INTR_MASK_ALL,
767 &vp_reg->prc_alarm_reg);
768 alarm_event = VXGE_HW_SET_LEVEL(
769 VXGE_HW_EVENT_ALARM_CLEARED,
770 alarm_event);
771 }
772 }
773 }
774out:
775 hldev->stats.sw_dev_err_stats.vpath_alarms++;
776out2:
777 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
778 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
779 return VXGE_HW_OK;
780
781 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
782
783 if (alarm_event == VXGE_HW_EVENT_SERR)
784 return VXGE_HW_ERR_CRITICAL;
785
786 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
787 VXGE_HW_ERR_SLOT_FREEZE :
788 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
789 VXGE_HW_ERR_VPATH;
790}
791
792/**
422 * vxge_hw_device_begin_irq - Begin IRQ processing. 793 * vxge_hw_device_begin_irq - Begin IRQ processing.
423 * @hldev: HW device handle. 794 * @hldev: HW device handle.
424 * @skip_alarms: Do not clear the alarms 795 * @skip_alarms: Do not clear the alarms
@@ -513,108 +884,6 @@ exit:
513 return ret; 884 return ret;
514} 885}
515 886
516/*
517 * __vxge_hw_device_handle_link_up_ind
518 * @hldev: HW device handle.
519 *
520 * Link up indication handler. The function is invoked by HW when
521 * Titan indicates that the link is up for programmable amount of time.
522 */
523static enum vxge_hw_status
524__vxge_hw_device_handle_link_up_ind(struct __vxge_hw_device *hldev)
525{
526 /*
527 * If the previous link state is not down, return.
528 */
529 if (hldev->link_state == VXGE_HW_LINK_UP)
530 goto exit;
531
532 hldev->link_state = VXGE_HW_LINK_UP;
533
534 /* notify driver */
535 if (hldev->uld_callbacks.link_up)
536 hldev->uld_callbacks.link_up(hldev);
537exit:
538 return VXGE_HW_OK;
539}
540
541/*
542 * __vxge_hw_device_handle_link_down_ind
543 * @hldev: HW device handle.
544 *
545 * Link down indication handler. The function is invoked by HW when
546 * Titan indicates that the link is down.
547 */
548static enum vxge_hw_status
549__vxge_hw_device_handle_link_down_ind(struct __vxge_hw_device *hldev)
550{
551 /*
552 * If the previous link state is not down, return.
553 */
554 if (hldev->link_state == VXGE_HW_LINK_DOWN)
555 goto exit;
556
557 hldev->link_state = VXGE_HW_LINK_DOWN;
558
559 /* notify driver */
560 if (hldev->uld_callbacks.link_down)
561 hldev->uld_callbacks.link_down(hldev);
562exit:
563 return VXGE_HW_OK;
564}
565
566/**
567 * __vxge_hw_device_handle_error - Handle error
568 * @hldev: HW device
569 * @vp_id: Vpath Id
570 * @type: Error type. Please see enum vxge_hw_event{}
571 *
572 * Handle error.
573 */
574static enum vxge_hw_status
575__vxge_hw_device_handle_error(
576 struct __vxge_hw_device *hldev,
577 u32 vp_id,
578 enum vxge_hw_event type)
579{
580 switch (type) {
581 case VXGE_HW_EVENT_UNKNOWN:
582 break;
583 case VXGE_HW_EVENT_RESET_START:
584 case VXGE_HW_EVENT_RESET_COMPLETE:
585 case VXGE_HW_EVENT_LINK_DOWN:
586 case VXGE_HW_EVENT_LINK_UP:
587 goto out;
588 case VXGE_HW_EVENT_ALARM_CLEARED:
589 goto out;
590 case VXGE_HW_EVENT_ECCERR:
591 case VXGE_HW_EVENT_MRPCIM_ECCERR:
592 goto out;
593 case VXGE_HW_EVENT_FIFO_ERR:
594 case VXGE_HW_EVENT_VPATH_ERR:
595 case VXGE_HW_EVENT_CRITICAL_ERR:
596 case VXGE_HW_EVENT_SERR:
597 break;
598 case VXGE_HW_EVENT_SRPCIM_SERR:
599 case VXGE_HW_EVENT_MRPCIM_SERR:
600 goto out;
601 case VXGE_HW_EVENT_SLOT_FREEZE:
602 break;
603 default:
604 vxge_assert(0);
605 goto out;
606 }
607
608 /* notify driver */
609 if (hldev->uld_callbacks.crit_err)
610 hldev->uld_callbacks.crit_err(
611 (struct __vxge_hw_device *)hldev,
612 type, vp_id);
613out:
614
615 return VXGE_HW_OK;
616}
617
618/** 887/**
619 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the 888 * vxge_hw_device_clear_tx_rx - Acknowledge (that is, clear) the
620 * condition that has caused the Tx and RX interrupt. 889 * condition that has caused the Tx and RX interrupt.
@@ -699,8 +968,8 @@ _alloc_after_swap:
699 * Posts a dtr to work array. 968 * Posts a dtr to work array.
700 * 969 *
701 */ 970 */
702static void vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, 971static void
703 void *dtrh) 972vxge_hw_channel_dtr_post(struct __vxge_hw_channel *channel, void *dtrh)
704{ 973{
705 vxge_assert(channel->work_arr[channel->post_index] == NULL); 974 vxge_assert(channel->work_arr[channel->post_index] == NULL);
706 975
@@ -911,10 +1180,6 @@ void vxge_hw_ring_rxd_post(struct __vxge_hw_ring *ring, void *rxdh)
911 */ 1180 */
912void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh) 1181void vxge_hw_ring_rxd_post_post_wmb(struct __vxge_hw_ring *ring, void *rxdh)
913{ 1182{
914 struct __vxge_hw_channel *channel;
915
916 channel = &ring->channel;
917
918 wmb(); 1183 wmb();
919 vxge_hw_ring_rxd_post_post(ring, rxdh); 1184 vxge_hw_ring_rxd_post_post(ring, rxdh);
920} 1185}
@@ -975,7 +1240,7 @@ enum vxge_hw_status vxge_hw_ring_rxd_next_completed(
975 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0); 1240 *t_code = (u8)VXGE_HW_RING_RXD_T_CODE_GET(control_0);
976 1241
977 /* check whether it is not the end */ 1242 /* check whether it is not the end */
978 if (!own || ((*t_code == VXGE_HW_RING_T_CODE_FRM_DROP) && own)) { 1243 if (!own || *t_code == VXGE_HW_RING_T_CODE_FRM_DROP) {
979 1244
980 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control != 1245 vxge_assert(((struct vxge_hw_ring_rxd_1 *)rxdp)->host_control !=
981 0); 1246 0);
@@ -1868,284 +2133,6 @@ exit:
1868} 2133}
1869 2134
1870/* 2135/*
1871 * __vxge_hw_vpath_alarm_process - Process Alarms.
1872 * @vpath: Virtual Path.
1873 * @skip_alarms: Do not clear the alarms
1874 *
1875 * Process vpath alarms.
1876 *
1877 */
1878static enum vxge_hw_status
1879__vxge_hw_vpath_alarm_process(struct __vxge_hw_virtualpath *vpath,
1880 u32 skip_alarms)
1881{
1882 u64 val64;
1883 u64 alarm_status;
1884 u64 pic_status;
1885 struct __vxge_hw_device *hldev = NULL;
1886 enum vxge_hw_event alarm_event = VXGE_HW_EVENT_UNKNOWN;
1887 u64 mask64;
1888 struct vxge_hw_vpath_stats_sw_info *sw_stats;
1889 struct vxge_hw_vpath_reg __iomem *vp_reg;
1890
1891 if (vpath == NULL) {
1892 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1893 alarm_event);
1894 goto out2;
1895 }
1896
1897 hldev = vpath->hldev;
1898 vp_reg = vpath->vp_reg;
1899 alarm_status = readq(&vp_reg->vpath_general_int_status);
1900
1901 if (alarm_status == VXGE_HW_ALL_FOXES) {
1902 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_SLOT_FREEZE,
1903 alarm_event);
1904 goto out;
1905 }
1906
1907 sw_stats = vpath->sw_stats;
1908
1909 if (alarm_status & ~(
1910 VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT |
1911 VXGE_HW_VPATH_GENERAL_INT_STATUS_PCI_INT |
1912 VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT |
1913 VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT)) {
1914 sw_stats->error_stats.unknown_alarms++;
1915
1916 alarm_event = VXGE_HW_SET_LEVEL(VXGE_HW_EVENT_UNKNOWN,
1917 alarm_event);
1918 goto out;
1919 }
1920
1921 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_XMAC_INT) {
1922
1923 val64 = readq(&vp_reg->xgmac_vp_int_status);
1924
1925 if (val64 &
1926 VXGE_HW_XGMAC_VP_INT_STATUS_ASIC_NTWK_VP_ERR_ASIC_NTWK_VP_INT) {
1927
1928 val64 = readq(&vp_reg->asic_ntwk_vp_err_reg);
1929
1930 if (((val64 &
1931 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT) &&
1932 (!(val64 &
1933 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK))) ||
1934 ((val64 &
1935 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR) &&
1936 (!(val64 &
1937 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR)
1938 ))) {
1939 sw_stats->error_stats.network_sustained_fault++;
1940
1941 writeq(
1942 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT,
1943 &vp_reg->asic_ntwk_vp_err_mask);
1944
1945 __vxge_hw_device_handle_link_down_ind(hldev);
1946 alarm_event = VXGE_HW_SET_LEVEL(
1947 VXGE_HW_EVENT_LINK_DOWN, alarm_event);
1948 }
1949
1950 if (((val64 &
1951 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK) &&
1952 (!(val64 &
1953 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT))) ||
1954 ((val64 &
1955 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK_OCCURR) &&
1956 (!(val64 &
1957 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_FLT_OCCURR)
1958 ))) {
1959
1960 sw_stats->error_stats.network_sustained_ok++;
1961
1962 writeq(
1963 VXGE_HW_ASIC_NW_VP_ERR_REG_XMACJ_STN_OK,
1964 &vp_reg->asic_ntwk_vp_err_mask);
1965
1966 __vxge_hw_device_handle_link_up_ind(hldev);
1967 alarm_event = VXGE_HW_SET_LEVEL(
1968 VXGE_HW_EVENT_LINK_UP, alarm_event);
1969 }
1970
1971 writeq(VXGE_HW_INTR_MASK_ALL,
1972 &vp_reg->asic_ntwk_vp_err_reg);
1973
1974 alarm_event = VXGE_HW_SET_LEVEL(
1975 VXGE_HW_EVENT_ALARM_CLEARED, alarm_event);
1976
1977 if (skip_alarms)
1978 return VXGE_HW_OK;
1979 }
1980 }
1981
1982 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_PIC_INT) {
1983
1984 pic_status = readq(&vp_reg->vpath_ppif_int_status);
1985
1986 if (pic_status &
1987 VXGE_HW_VPATH_PPIF_INT_STATUS_GENERAL_ERRORS_GENERAL_INT) {
1988
1989 val64 = readq(&vp_reg->general_errors_reg);
1990 mask64 = readq(&vp_reg->general_errors_mask);
1991
1992 if ((val64 &
1993 VXGE_HW_GENERAL_ERRORS_REG_INI_SERR_DET) &
1994 ~mask64) {
1995 sw_stats->error_stats.ini_serr_det++;
1996
1997 alarm_event = VXGE_HW_SET_LEVEL(
1998 VXGE_HW_EVENT_SERR, alarm_event);
1999 }
2000
2001 if ((val64 &
2002 VXGE_HW_GENERAL_ERRORS_REG_DBLGEN_FIFO0_OVRFLOW) &
2003 ~mask64) {
2004 sw_stats->error_stats.dblgen_fifo0_overflow++;
2005
2006 alarm_event = VXGE_HW_SET_LEVEL(
2007 VXGE_HW_EVENT_FIFO_ERR, alarm_event);
2008 }
2009
2010 if ((val64 &
2011 VXGE_HW_GENERAL_ERRORS_REG_STATSB_PIF_CHAIN_ERR) &
2012 ~mask64)
2013 sw_stats->error_stats.statsb_pif_chain_error++;
2014
2015 if ((val64 &
2016 VXGE_HW_GENERAL_ERRORS_REG_STATSB_DROP_TIMEOUT_REQ) &
2017 ~mask64)
2018 sw_stats->error_stats.statsb_drop_timeout++;
2019
2020 if ((val64 &
2021 VXGE_HW_GENERAL_ERRORS_REG_TGT_ILLEGAL_ACCESS) &
2022 ~mask64)
2023 sw_stats->error_stats.target_illegal_access++;
2024
2025 if (!skip_alarms) {
2026 writeq(VXGE_HW_INTR_MASK_ALL,
2027 &vp_reg->general_errors_reg);
2028 alarm_event = VXGE_HW_SET_LEVEL(
2029 VXGE_HW_EVENT_ALARM_CLEARED,
2030 alarm_event);
2031 }
2032 }
2033
2034 if (pic_status &
2035 VXGE_HW_VPATH_PPIF_INT_STATUS_KDFCCTL_ERRORS_KDFCCTL_INT) {
2036
2037 val64 = readq(&vp_reg->kdfcctl_errors_reg);
2038 mask64 = readq(&vp_reg->kdfcctl_errors_mask);
2039
2040 if ((val64 &
2041 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_OVRWR) &
2042 ~mask64) {
2043 sw_stats->error_stats.kdfcctl_fifo0_overwrite++;
2044
2045 alarm_event = VXGE_HW_SET_LEVEL(
2046 VXGE_HW_EVENT_FIFO_ERR,
2047 alarm_event);
2048 }
2049
2050 if ((val64 &
2051 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_POISON) &
2052 ~mask64) {
2053 sw_stats->error_stats.kdfcctl_fifo0_poison++;
2054
2055 alarm_event = VXGE_HW_SET_LEVEL(
2056 VXGE_HW_EVENT_FIFO_ERR,
2057 alarm_event);
2058 }
2059
2060 if ((val64 &
2061 VXGE_HW_KDFCCTL_ERRORS_REG_KDFCCTL_FIFO0_DMA_ERR) &
2062 ~mask64) {
2063 sw_stats->error_stats.kdfcctl_fifo0_dma_error++;
2064
2065 alarm_event = VXGE_HW_SET_LEVEL(
2066 VXGE_HW_EVENT_FIFO_ERR,
2067 alarm_event);
2068 }
2069
2070 if (!skip_alarms) {
2071 writeq(VXGE_HW_INTR_MASK_ALL,
2072 &vp_reg->kdfcctl_errors_reg);
2073 alarm_event = VXGE_HW_SET_LEVEL(
2074 VXGE_HW_EVENT_ALARM_CLEARED,
2075 alarm_event);
2076 }
2077 }
2078
2079 }
2080
2081 if (alarm_status & VXGE_HW_VPATH_GENERAL_INT_STATUS_WRDMA_INT) {
2082
2083 val64 = readq(&vp_reg->wrdma_alarm_status);
2084
2085 if (val64 & VXGE_HW_WRDMA_ALARM_STATUS_PRC_ALARM_PRC_INT) {
2086
2087 val64 = readq(&vp_reg->prc_alarm_reg);
2088 mask64 = readq(&vp_reg->prc_alarm_mask);
2089
2090 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RING_BUMP)&
2091 ~mask64)
2092 sw_stats->error_stats.prc_ring_bumps++;
2093
2094 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ERR) &
2095 ~mask64) {
2096 sw_stats->error_stats.prc_rxdcm_sc_err++;
2097
2098 alarm_event = VXGE_HW_SET_LEVEL(
2099 VXGE_HW_EVENT_VPATH_ERR,
2100 alarm_event);
2101 }
2102
2103 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_RXDCM_SC_ABORT)
2104 & ~mask64) {
2105 sw_stats->error_stats.prc_rxdcm_sc_abort++;
2106
2107 alarm_event = VXGE_HW_SET_LEVEL(
2108 VXGE_HW_EVENT_VPATH_ERR,
2109 alarm_event);
2110 }
2111
2112 if ((val64 & VXGE_HW_PRC_ALARM_REG_PRC_QUANTA_SIZE_ERR)
2113 & ~mask64) {
2114 sw_stats->error_stats.prc_quanta_size_err++;
2115
2116 alarm_event = VXGE_HW_SET_LEVEL(
2117 VXGE_HW_EVENT_VPATH_ERR,
2118 alarm_event);
2119 }
2120
2121 if (!skip_alarms) {
2122 writeq(VXGE_HW_INTR_MASK_ALL,
2123 &vp_reg->prc_alarm_reg);
2124 alarm_event = VXGE_HW_SET_LEVEL(
2125 VXGE_HW_EVENT_ALARM_CLEARED,
2126 alarm_event);
2127 }
2128 }
2129 }
2130out:
2131 hldev->stats.sw_dev_err_stats.vpath_alarms++;
2132out2:
2133 if ((alarm_event == VXGE_HW_EVENT_ALARM_CLEARED) ||
2134 (alarm_event == VXGE_HW_EVENT_UNKNOWN))
2135 return VXGE_HW_OK;
2136
2137 __vxge_hw_device_handle_error(hldev, vpath->vp_id, alarm_event);
2138
2139 if (alarm_event == VXGE_HW_EVENT_SERR)
2140 return VXGE_HW_ERR_CRITICAL;
2141
2142 return (alarm_event == VXGE_HW_EVENT_SLOT_FREEZE) ?
2143 VXGE_HW_ERR_SLOT_FREEZE :
2144 (alarm_event == VXGE_HW_EVENT_FIFO_ERR) ? VXGE_HW_ERR_FIFO :
2145 VXGE_HW_ERR_VPATH;
2146}
2147
2148/*
2149 * vxge_hw_vpath_alarm_process - Process Alarms. 2136 * vxge_hw_vpath_alarm_process - Process Alarms.
2150 * @vpath: Virtual Path. 2137 * @vpath: Virtual Path.
2151 * @skip_alarms: Do not clear the alarms 2138 * @skip_alarms: Do not clear the alarms
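Throughout the relocated alarm handler the same test recurs: a latched error bit is acted on only when its mask bit is clear. A minimal, self-contained sketch of that check, with placeholder names standing in for the Titan register definitions:

#include <stdint.h>

/* Illustrative only: 'status' and 'mask' stand in for a
 * general_errors_reg/general_errors_mask register pair, 'bit' for one
 * VXGE_HW_*_REG_* error bit.  A set mask bit suppresses the alarm.
 */
static int alarm_fired(uint64_t status, uint64_t mask, uint64_t bit)
{
        return ((status & bit) & ~mask) != 0;
}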
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 654295816a39..d48486d6afa1 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -1904,34 +1904,6 @@ enum vxge_hw_ring_tcode {
1904 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF 1904 VXGE_HW_RING_T_CODE_MULTI_ERR = 0xF
1905}; 1905};
1906 1906
1907/**
1908 * enum enum vxge_hw_ring_hash_type - RTH hash types
1909 * @VXGE_HW_RING_HASH_TYPE_NONE: No Hash
1910 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV4: TCP IPv4
1911 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV4: UDP IPv4
1912 * @VXGE_HW_RING_HASH_TYPE_IPV4: IPv4
1913 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6: TCP IPv6
1914 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6: UDP IPv6
1915 * @VXGE_HW_RING_HASH_TYPE_IPV6: IPv6
1916 * @VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX: TCP IPv6 extension
1917 * @VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX: UDP IPv6 extension
1918 * @VXGE_HW_RING_HASH_TYPE_IPV6_EX: IPv6 extension
1919 *
1920 * RTH hash types
1921 */
1922enum vxge_hw_ring_hash_type {
1923 VXGE_HW_RING_HASH_TYPE_NONE = 0x0,
1924 VXGE_HW_RING_HASH_TYPE_TCP_IPV4 = 0x1,
1925 VXGE_HW_RING_HASH_TYPE_UDP_IPV4 = 0x2,
1926 VXGE_HW_RING_HASH_TYPE_IPV4 = 0x3,
1927 VXGE_HW_RING_HASH_TYPE_TCP_IPV6 = 0x4,
1928 VXGE_HW_RING_HASH_TYPE_UDP_IPV6 = 0x5,
1929 VXGE_HW_RING_HASH_TYPE_IPV6 = 0x6,
1930 VXGE_HW_RING_HASH_TYPE_TCP_IPV6_EX = 0x7,
1931 VXGE_HW_RING_HASH_TYPE_UDP_IPV6_EX = 0x8,
1932 VXGE_HW_RING_HASH_TYPE_IPV6_EX = 0x9
1933};
1934
1935enum vxge_hw_status vxge_hw_ring_rxd_reserve( 1907enum vxge_hw_status vxge_hw_ring_rxd_reserve(
1936 struct __vxge_hw_ring *ring_handle, 1908 struct __vxge_hw_ring *ring_handle,
1937 void **rxdh); 1909 void **rxdh);
@@ -2109,10 +2081,6 @@ struct __vxge_hw_ring_rxd_priv {
2109#endif 2081#endif
2110}; 2082};
2111 2083
2112/* ========================= FIFO PRIVATE API ============================= */
2113
2114struct vxge_hw_fifo_attr;
2115
2116struct vxge_hw_mempool_cbs { 2084struct vxge_hw_mempool_cbs {
2117 void (*item_func_alloc)( 2085 void (*item_func_alloc)(
2118 struct vxge_hw_mempool *mempoolh, 2086 struct vxge_hw_mempool *mempoolh,
@@ -2186,27 +2154,27 @@ enum vxge_hw_vpath_mac_addr_add_mode {
2186enum vxge_hw_status 2154enum vxge_hw_status
2187vxge_hw_vpath_mac_addr_add( 2155vxge_hw_vpath_mac_addr_add(
2188 struct __vxge_hw_vpath_handle *vpath_handle, 2156 struct __vxge_hw_vpath_handle *vpath_handle,
2189 u8 (macaddr)[ETH_ALEN], 2157 u8 *macaddr,
2190 u8 (macaddr_mask)[ETH_ALEN], 2158 u8 *macaddr_mask,
2191 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode); 2159 enum vxge_hw_vpath_mac_addr_add_mode duplicate_mode);
2192 2160
2193enum vxge_hw_status 2161enum vxge_hw_status
2194vxge_hw_vpath_mac_addr_get( 2162vxge_hw_vpath_mac_addr_get(
2195 struct __vxge_hw_vpath_handle *vpath_handle, 2163 struct __vxge_hw_vpath_handle *vpath_handle,
2196 u8 (macaddr)[ETH_ALEN], 2164 u8 *macaddr,
2197 u8 (macaddr_mask)[ETH_ALEN]); 2165 u8 *macaddr_mask);
2198 2166
2199enum vxge_hw_status 2167enum vxge_hw_status
2200vxge_hw_vpath_mac_addr_get_next( 2168vxge_hw_vpath_mac_addr_get_next(
2201 struct __vxge_hw_vpath_handle *vpath_handle, 2169 struct __vxge_hw_vpath_handle *vpath_handle,
2202 u8 (macaddr)[ETH_ALEN], 2170 u8 *macaddr,
2203 u8 (macaddr_mask)[ETH_ALEN]); 2171 u8 *macaddr_mask);
2204 2172
2205enum vxge_hw_status 2173enum vxge_hw_status
2206vxge_hw_vpath_mac_addr_delete( 2174vxge_hw_vpath_mac_addr_delete(
2207 struct __vxge_hw_vpath_handle *vpath_handle, 2175 struct __vxge_hw_vpath_handle *vpath_handle,
2208 u8 (macaddr)[ETH_ALEN], 2176 u8 *macaddr,
2209 u8 (macaddr_mask)[ETH_ALEN]); 2177 u8 *macaddr_mask);
2210 2178
2211enum vxge_hw_status 2179enum vxge_hw_status
2212vxge_hw_vpath_vid_add( 2180vxge_hw_vpath_vid_add(
@@ -2313,6 +2281,7 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2313 2281
2314int 2282int
2315vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); 2283vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2284
2316void 2285void
2317vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id); 2286vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
2318 2287
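The prototype change from u8 (macaddr)[ETH_ALEN] to u8 *macaddr is purely cosmetic: in a parameter list the array form decays to a pointer, so both spellings declare the same function. A small stand-alone illustration (the MAC value is arbitrary):

#include <stdio.h>

static void takes_array(const unsigned char mac[6])
{
        printf("%02x\n", mac[0]);
}

static void takes_pointer(const unsigned char *mac)
{
        printf("%02x\n", mac[0]);
}

int main(void)
{
        unsigned char mac[6] = { 0x00, 0x1b, 0x21, 0x00, 0x00, 0x01 };

        takes_array(mac);    /* both accept the same argument...        */
        takes_pointer(mac);  /* ...because the array form decays anyway */
        return 0;
}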
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 53fefe137368..ad2f99b9bcf3 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -15,8 +15,35 @@
15#define VXGE_VERSION_H 15#define VXGE_VERSION_H
16 16
17#define VXGE_VERSION_MAJOR "2" 17#define VXGE_VERSION_MAJOR "2"
18#define VXGE_VERSION_MINOR "0" 18#define VXGE_VERSION_MINOR "5"
19#define VXGE_VERSION_FIX "9" 19#define VXGE_VERSION_FIX "1"
20#define VXGE_VERSION_BUILD "20840" 20#define VXGE_VERSION_BUILD "22082"
21#define VXGE_VERSION_FOR "k" 21#define VXGE_VERSION_FOR "k"
22
23#define VXGE_FW_VER(maj, min, bld) (((maj) << 16) + ((min) << 8) + (bld))
24
25#define VXGE_DEAD_FW_VER_MAJOR 1
26#define VXGE_DEAD_FW_VER_MINOR 4
27#define VXGE_DEAD_FW_VER_BUILD 4
28
29#define VXGE_FW_DEAD_VER VXGE_FW_VER(VXGE_DEAD_FW_VER_MAJOR, \
30 VXGE_DEAD_FW_VER_MINOR, \
31 VXGE_DEAD_FW_VER_BUILD)
32
33#define VXGE_EPROM_FW_VER_MAJOR 1
34#define VXGE_EPROM_FW_VER_MINOR 6
35#define VXGE_EPROM_FW_VER_BUILD 1
36
37#define VXGE_EPROM_FW_VER VXGE_FW_VER(VXGE_EPROM_FW_VER_MAJOR, \
38 VXGE_EPROM_FW_VER_MINOR, \
39 VXGE_EPROM_FW_VER_BUILD)
40
41#define VXGE_CERT_FW_VER_MAJOR 1
42#define VXGE_CERT_FW_VER_MINOR 8
43#define VXGE_CERT_FW_VER_BUILD 1
44
45#define VXGE_CERT_FW_VER VXGE_FW_VER(VXGE_CERT_FW_VER_MAJOR, \
46 VXGE_CERT_FW_VER_MINOR, \
47 VXGE_CERT_FW_VER_BUILD)
48
22#endif 49#endif
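VXGE_FW_VER packs major, minor and build into one integer (8 bits each for minor and build), so firmware versions can be compared with ordinary integer operators against the DEAD/EPROM/CERT thresholds above. A hedged sketch of such a comparison; the helper and its policy are illustrative, not taken from the vxge driver:

/* Illustrative only: a packed version compares correctly as long as
 * minor and build fit in 8 bits.
 */
#define FW_VER(maj, min, bld)   (((maj) << 16) + ((min) << 8) + (bld))

static int fw_needs_upgrade(unsigned int maj, unsigned int min, unsigned int bld)
{
        /* anything below the certified 1.8.1 level would need an upgrade */
        return FW_VER(maj, min, bld) < FW_VER(1, 8, 1);
}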
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 94ff9b02e28e..4578e5b4b411 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1358,7 +1358,7 @@ static int dscc4_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1358 return ret; 1358 return ret;
1359} 1359}
1360 1360
1361static int dscc4_match(struct thingie *p, int value) 1361static int dscc4_match(const struct thingie *p, int value)
1362{ 1362{
1363 int i; 1363 int i;
1364 1364
@@ -1403,7 +1403,7 @@ done:
1403static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv, 1403static int dscc4_encoding_setting(struct dscc4_dev_priv *dpriv,
1404 struct net_device *dev) 1404 struct net_device *dev)
1405{ 1405{
1406 struct thingie encoding[] = { 1406 static const struct thingie encoding[] = {
1407 { ENCODING_NRZ, 0x00000000 }, 1407 { ENCODING_NRZ, 0x00000000 },
1408 { ENCODING_NRZI, 0x00200000 }, 1408 { ENCODING_NRZI, 0x00200000 },
1409 { ENCODING_FM_MARK, 0x00400000 }, 1409 { ENCODING_FM_MARK, 0x00400000 },
@@ -1442,7 +1442,7 @@ static int dscc4_loopback_setting(struct dscc4_dev_priv *dpriv,
1442static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv, 1442static int dscc4_crc_setting(struct dscc4_dev_priv *dpriv,
1443 struct net_device *dev) 1443 struct net_device *dev)
1444{ 1444{
1445 struct thingie crc[] = { 1445 static const struct thingie crc[] = {
1446 { PARITY_CRC16_PR0_CCITT, 0x00000010 }, 1446 { PARITY_CRC16_PR0_CCITT, 0x00000010 },
1447 { PARITY_CRC16_PR1_CCITT, 0x00000000 }, 1447 { PARITY_CRC16_PR1_CCITT, 0x00000000 },
1448 { PARITY_CRC32_PR0_CCITT, 0x00000011 }, 1448 { PARITY_CRC32_PR0_CCITT, 0x00000011 },
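Marking the dscc4 lookup tables static const (and making dscc4_match() const-correct) moves them out of the function's stack frame into read-only data, so they are no longer rebuilt on every call. The same pattern in isolation, with made-up table contents rather than the real struct thingie layout:

#include <stddef.h>

struct pair {
        int key;
        unsigned int bits;
};

/* Emitted once into .rodata; nothing is copied onto the stack per call. */
static const struct pair encoding[] = {
        { 0, 0x00000000 },
        { 1, 0x00200000 },
        { 2, 0x00400000 },
};

static int lookup(const struct pair *p, size_t n, int key)
{
        size_t i;

        for (i = 0; i < n; i++)
                if (p[i].key == key)
                        return (int)i;
        return -1;
}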
diff --git a/drivers/net/wd.c b/drivers/net/wd.c
index f1549fff0edc..8831a3393ecf 100644
--- a/drivers/net/wd.c
+++ b/drivers/net/wd.c
@@ -275,7 +275,7 @@ static int __init wd_probe1(struct net_device *dev, int ioaddr)
275 dev->base_addr = ioaddr+WD_NIC_OFFSET; 275 dev->base_addr = ioaddr+WD_NIC_OFFSET;
276 276
277 if (dev->irq < 2) { 277 if (dev->irq < 2) {
278 int irqmap[] = {9,3,5,7,10,11,15,4}; 278 static const int irqmap[] = {9, 3, 5, 7, 10, 11, 15, 4};
279 int reg1 = inb(ioaddr+1); 279 int reg1 = inb(ioaddr+1);
280 int reg4 = inb(ioaddr+4); 280 int reg4 = inb(ioaddr+4);
281 if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */ 281 if (ancient || reg1 == 0xff) { /* Ack!! No way to read the IRQ! */
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index bcb483fdc4d9..65bc334ed57b 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -92,54 +92,6 @@ MODULE_PARM_DESC(barkers,
92 "signal; values are appended to a list--setting one value " 92 "signal; values are appended to a list--setting one value "
93 "as zero cleans the existing list and starts a new one."); 93 "as zero cleans the existing list and starts a new one.");
94 94
95static
96struct i2400m_work *__i2400m_work_setup(
97 struct i2400m *i2400m, void (*fn)(struct work_struct *),
98 gfp_t gfp_flags, const void *pl, size_t pl_size)
99{
100 struct i2400m_work *iw;
101
102 iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
103 if (iw == NULL)
104 return NULL;
105 iw->i2400m = i2400m_get(i2400m);
106 iw->pl_size = pl_size;
107 memcpy(iw->pl, pl, pl_size);
108 INIT_WORK(&iw->ws, fn);
109 return iw;
110}
111
112
113/*
114 * Schedule i2400m's specific work on the system's queue.
115 *
116 * Used for a few cases where we really need it; otherwise, identical
117 * to i2400m_queue_work().
118 *
119 * Returns < 0 errno code on error, 1 if ok.
120 *
121 * If it returns zero, something really bad happened, as it means the
122 * works struct was already queued, but we have just allocated it, so
123 * it should not happen.
124 */
125static int i2400m_schedule_work(struct i2400m *i2400m,
126 void (*fn)(struct work_struct *), gfp_t gfp_flags,
127 const void *pl, size_t pl_size)
128{
129 int result;
130 struct i2400m_work *iw;
131
132 result = -ENOMEM;
133 iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
134 if (iw != NULL) {
135 result = schedule_work(&iw->ws);
136 if (WARN_ON(result == 0))
137 result = -ENXIO;
138 }
139 return result;
140}
141
142
143/* 95/*
144 * WiMAX stack operation: relay a message from user space 96 * WiMAX stack operation: relay a message from user space
145 * 97 *
@@ -648,17 +600,11 @@ EXPORT_SYMBOL_GPL(i2400m_post_reset);
648static 600static
649void __i2400m_dev_reset_handle(struct work_struct *ws) 601void __i2400m_dev_reset_handle(struct work_struct *ws)
650{ 602{
651 int result; 603 struct i2400m *i2400m = container_of(ws, struct i2400m, reset_ws);
652 struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws); 604 const char *reason = i2400m->reset_reason;
653 const char *reason;
654 struct i2400m *i2400m = iw->i2400m;
655 struct device *dev = i2400m_dev(i2400m); 605 struct device *dev = i2400m_dev(i2400m);
656 struct i2400m_reset_ctx *ctx = i2400m->reset_ctx; 606 struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
657 607 int result;
658 if (WARN_ON(iw->pl_size != sizeof(reason)))
659 reason = "SW BUG: reason n/a";
660 else
661 memcpy(&reason, iw->pl, sizeof(reason));
662 608
663 d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason); 609 d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
664 610
@@ -733,8 +679,6 @@ void __i2400m_dev_reset_handle(struct work_struct *ws)
733 } 679 }
734 } 680 }
735out: 681out:
736 i2400m_put(i2400m);
737 kfree(iw);
738 d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n", 682 d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
739 ws, i2400m, reason); 683 ws, i2400m, reason);
740} 684}
@@ -754,8 +698,8 @@ out:
754 */ 698 */
755int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason) 699int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
756{ 700{
757 return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle, 701 i2400m->reset_reason = reason;
758 GFP_ATOMIC, &reason, sizeof(reason)); 702 return schedule_work(&i2400m->reset_ws);
759} 703}
760EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle); 704EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
761 705
@@ -768,14 +712,9 @@ EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
768static 712static
769void __i2400m_error_recovery(struct work_struct *ws) 713void __i2400m_error_recovery(struct work_struct *ws)
770{ 714{
771 struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws); 715 struct i2400m *i2400m = container_of(ws, struct i2400m, recovery_ws);
772 struct i2400m *i2400m = iw->i2400m;
773 716
774 i2400m_reset(i2400m, I2400M_RT_BUS); 717 i2400m_reset(i2400m, I2400M_RT_BUS);
775
776 i2400m_put(i2400m);
777 kfree(iw);
778 return;
779} 718}
780 719
781/* 720/*
@@ -805,18 +744,10 @@ void __i2400m_error_recovery(struct work_struct *ws)
805 */ 744 */
806void i2400m_error_recovery(struct i2400m *i2400m) 745void i2400m_error_recovery(struct i2400m *i2400m)
807{ 746{
808 struct device *dev = i2400m_dev(i2400m); 747 if (atomic_add_return(1, &i2400m->error_recovery) == 1)
809 748 schedule_work(&i2400m->recovery_ws);
810 if (atomic_add_return(1, &i2400m->error_recovery) == 1) { 749 else
811 if (i2400m_schedule_work(i2400m, __i2400m_error_recovery,
812 GFP_ATOMIC, NULL, 0) < 0) {
813 dev_err(dev, "run out of memory for "
814 "scheduling an error recovery ?\n");
815 atomic_dec(&i2400m->error_recovery);
816 }
817 } else
818 atomic_dec(&i2400m->error_recovery); 750 atomic_dec(&i2400m->error_recovery);
819 return;
820} 751}
821EXPORT_SYMBOL_GPL(i2400m_error_recovery); 752EXPORT_SYMBOL_GPL(i2400m_error_recovery);
822 753
@@ -886,6 +817,10 @@ void i2400m_init(struct i2400m *i2400m)
886 817
887 mutex_init(&i2400m->init_mutex); 818 mutex_init(&i2400m->init_mutex);
888 /* wake_tx_ws is initialized in i2400m_tx_setup() */ 819 /* wake_tx_ws is initialized in i2400m_tx_setup() */
820
821 INIT_WORK(&i2400m->reset_ws, __i2400m_dev_reset_handle);
822 INIT_WORK(&i2400m->recovery_ws, __i2400m_error_recovery);
823
889 atomic_set(&i2400m->bus_reset_retries, 0); 824 atomic_set(&i2400m->bus_reset_retries, 0);
890 825
891 i2400m->alive = 0; 826 i2400m->alive = 0;
@@ -1040,6 +975,9 @@ void i2400m_release(struct i2400m *i2400m)
1040 975
1041 i2400m_dev_stop(i2400m); 976 i2400m_dev_stop(i2400m);
1042 977
978 cancel_work_sync(&i2400m->reset_ws);
979 cancel_work_sync(&i2400m->recovery_ws);
980
1043 i2400m_debugfs_rm(i2400m); 981 i2400m_debugfs_rm(i2400m);
1044 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, 982 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
1045 &i2400m_dev_attr_group); 983 &i2400m_dev_attr_group);
@@ -1083,8 +1021,6 @@ module_init(i2400m_driver_init);
1083static 1021static
1084void __exit i2400m_driver_exit(void) 1022void __exit i2400m_driver_exit(void)
1085{ 1023{
1086 /* for scheds i2400m_dev_reset_handle() */
1087 flush_scheduled_work();
1088 i2400m_barker_db_exit(); 1024 i2400m_barker_db_exit();
1089} 1025}
1090module_exit(i2400m_driver_exit); 1026module_exit(i2400m_driver_exit);
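The driver.c rework drops the kmalloc'd struct i2400m_work wrapper: the work items are now embedded in struct i2400m, initialized once in i2400m_init(), recovered in the handlers with container_of(), and cancelled in i2400m_release(), so requesting a reset can no longer fail for lack of memory. A stripped-down sketch of the same pattern for a hypothetical device object (names invented):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

/* Hypothetical device: the work item lives inside the object it acts on. */
struct mydev {
        struct work_struct reset_ws;
        const char *reset_reason;
};

static void mydev_reset_handler(struct work_struct *ws)
{
        struct mydev *dev = container_of(ws, struct mydev, reset_ws);

        pr_info("resetting: %s\n", dev->reset_reason);
}

static struct mydev *mydev_create(void)
{
        struct mydev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (dev)
                INIT_WORK(&dev->reset_ws, mydev_reset_handler);
        return dev;
}

static void mydev_request_reset(struct mydev *dev, const char *reason)
{
        dev->reset_reason = reason;          /* no allocation, cannot fail */
        schedule_work(&dev->reset_ws);
}

static void mydev_destroy(struct mydev *dev)
{
        cancel_work_sync(&dev->reset_ws);    /* replaces flush_scheduled_work() */
        kfree(dev);
}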
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index a2dbc94f6232..030cbfd31704 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -632,6 +632,11 @@ struct i2400m {
632 struct work_struct wake_tx_ws; 632 struct work_struct wake_tx_ws;
633 struct sk_buff *wake_tx_skb; 633 struct sk_buff *wake_tx_skb;
634 634
635 struct work_struct reset_ws;
636 const char *reset_reason;
637
638 struct work_struct recovery_ws;
639
635 struct dentry *debugfs_dentry; 640 struct dentry *debugfs_dentry;
636 const char *fw_name; /* name of the current firmware image */ 641 const char *fw_name; /* name of the current firmware image */
637 unsigned long fw_version; /* version of the firmware interface */ 642 unsigned long fw_version; /* version of the firmware interface */
@@ -896,20 +901,6 @@ struct device *i2400m_dev(struct i2400m *i2400m)
896 return i2400m->wimax_dev.net_dev->dev.parent; 901 return i2400m->wimax_dev.net_dev->dev.parent;
897} 902}
898 903
899/*
900 * Helper for scheduling simple work functions
901 *
902 * This struct can get any kind of payload attached (normally in the
903 * form of a struct where you pack the stuff you want to pass to the
904 * _work function).
905 */
906struct i2400m_work {
907 struct work_struct ws;
908 struct i2400m *i2400m;
909 size_t pl_size;
910 u8 pl[0];
911};
912
913extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, 904extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
914 char *, size_t); 905 char *, size_t);
915extern int i2400m_msg_size_check(struct i2400m *, 906extern int i2400m_msg_size_check(struct i2400m *,
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 9bfc26e1bc6b..be428cae28d8 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -590,7 +590,6 @@ module_init(i2400ms_driver_init);
590static 590static
591void __exit i2400ms_driver_exit(void) 591void __exit i2400ms_driver_exit(void)
592{ 592{
593 flush_scheduled_work(); /* for the stuff we schedule */
594 sdio_unregister_driver(&i2400m_sdio_driver); 593 sdio_unregister_driver(&i2400m_sdio_driver);
595} 594}
596module_exit(i2400ms_driver_exit); 595module_exit(i2400ms_driver_exit);
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index d3365ac85dde..298f2b0b6311 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -514,7 +514,7 @@ int i2400mu_probe(struct usb_interface *iface,
514#ifdef CONFIG_PM 514#ifdef CONFIG_PM
515 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */ 515 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
516 device_init_wakeup(dev, 1); 516 device_init_wakeup(dev, 1);
517 usb_dev->autosuspend_delay = 15 * HZ; 517 pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000);
518 usb_enable_autosuspend(usb_dev); 518 usb_enable_autosuspend(usb_dev);
519#endif 519#endif
520 520
@@ -780,7 +780,6 @@ module_init(i2400mu_driver_init);
780static 780static
781void __exit i2400mu_driver_exit(void) 781void __exit i2400mu_driver_exit(void)
782{ 782{
783 flush_scheduled_work(); /* for the stuff we schedule from sysfs.c */
784 usb_deregister(&i2400mu_driver); 783 usb_deregister(&i2400mu_driver);
785} 784}
786module_exit(i2400mu_driver_exit); 785module_exit(i2400mu_driver_exit);
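The usb.c hunk tracks a USB core API change: the old autosuspend_delay field was expressed in jiffies (15 * HZ), while pm_runtime_set_autosuspend_delay() takes milliseconds, hence the literal 15000. A short sketch of the replacement call in context (the helper name is made up; usb_dev is assumed to be a valid struct usb_device):

#include <linux/pm_runtime.h>
#include <linux/usb.h>

static void enable_15s_autosuspend(struct usb_device *usb_dev)
{
        pm_runtime_set_autosuspend_delay(&usb_dev->dev, 15000);   /* 15 s */
        usb_enable_autosuspend(usb_dev);
}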
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index 4de4410cd38e..b4338f389394 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -279,6 +279,7 @@ source "drivers/net/wireless/libertas/Kconfig"
279source "drivers/net/wireless/orinoco/Kconfig" 279source "drivers/net/wireless/orinoco/Kconfig"
280source "drivers/net/wireless/p54/Kconfig" 280source "drivers/net/wireless/p54/Kconfig"
281source "drivers/net/wireless/rt2x00/Kconfig" 281source "drivers/net/wireless/rt2x00/Kconfig"
282source "drivers/net/wireless/rtlwifi/Kconfig"
282source "drivers/net/wireless/wl1251/Kconfig" 283source "drivers/net/wireless/wl1251/Kconfig"
283source "drivers/net/wireless/wl12xx/Kconfig" 284source "drivers/net/wireless/wl12xx/Kconfig"
284source "drivers/net/wireless/zd1211rw/Kconfig" 285source "drivers/net/wireless/zd1211rw/Kconfig"
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 06f8ca26c5c1..9760561a27a5 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -24,6 +24,7 @@ obj-$(CONFIG_B43LEGACY) += b43legacy/
24obj-$(CONFIG_ZD1211RW) += zd1211rw/ 24obj-$(CONFIG_ZD1211RW) += zd1211rw/
25obj-$(CONFIG_RTL8180) += rtl818x/ 25obj-$(CONFIG_RTL8180) += rtl818x/
26obj-$(CONFIG_RTL8187) += rtl818x/ 26obj-$(CONFIG_RTL8187) += rtl818x/
27obj-$(CONFIG_RTL8192CE) += rtlwifi/
27 28
28# 16-bit wireless PCMCIA client drivers 29# 16-bit wireless PCMCIA client drivers
29obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o 30obj-$(CONFIG_PCMCIA_RAYCS) += ray_cs.o
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index a36e7870b03e..57a79b0475f6 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -4652,24 +4652,18 @@ static ssize_t proc_write( struct file *file,
4652 size_t len, 4652 size_t len,
4653 loff_t *offset ) 4653 loff_t *offset )
4654{ 4654{
4655 loff_t pos = *offset; 4655 ssize_t ret;
4656 struct proc_data *priv = file->private_data; 4656 struct proc_data *priv = file->private_data;
4657 4657
4658 if (!priv->wbuffer) 4658 if (!priv->wbuffer)
4659 return -EINVAL; 4659 return -EINVAL;
4660 4660
4661 if (pos < 0) 4661 ret = simple_write_to_buffer(priv->wbuffer, priv->maxwritelen, offset,
4662 return -EINVAL; 4662 buffer, len);
4663 if (pos >= priv->maxwritelen) 4663 if (ret > 0)
4664 return 0; 4664 priv->writelen = max_t(int, priv->writelen, *offset);
4665 if (len > priv->maxwritelen - pos) 4665
4666 len = priv->maxwritelen - pos; 4666 return ret;
4667 if (copy_from_user(priv->wbuffer + pos, buffer, len))
4668 return -EFAULT;
4669 if ( pos + len > priv->writelen )
4670 priv->writelen = len + file->f_pos;
4671 *offset = pos + len;
4672 return len;
4673} 4667}
4674 4668
4675static int proc_status_open(struct inode *inode, struct file *file) 4669static int proc_status_open(struct inode *inode, struct file *file)
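The airo proc_write() rewrite leans on simple_write_to_buffer(), which performs the offset bounds checks and the copy_from_user() internally, advances *ppos, and returns the number of bytes copied or a negative errno. A minimal sketch of a write handler built on it; the static buffer is only for illustration:

#include <linux/fs.h>
#include <linux/uaccess.h>

static ssize_t demo_write(struct file *file, const char __user *ubuf,
                          size_t len, loff_t *ppos)
{
        static char buf[128];
        ssize_t ret;

        ret = simple_write_to_buffer(buf, sizeof(buf), ppos, ubuf, len);
        /* ret > 0: bytes copied and *ppos advanced; ret < 0: errno */
        return ret;
}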
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
index 4604de09a8b2..6452c5055a63 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ b/drivers/net/wireless/ath/ar9170/cmd.c
@@ -54,7 +54,7 @@ int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len)
54 54
55int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val) 55int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
56{ 56{
57 __le32 buf[2] = { 57 const __le32 buf[2] = {
58 cpu_to_le32(reg), 58 cpu_to_le32(reg),
59 cpu_to_le32(val), 59 cpu_to_le32(val),
60 }; 60 };
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 5dbb5361fd51..d3be6f9816b5 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -161,8 +161,7 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
161static void ar9170_usb_tx_urb_complete_frame(struct urb *urb) 161static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
162{ 162{
163 struct sk_buff *skb = urb->context; 163 struct sk_buff *skb = urb->context;
164 struct ar9170_usb *aru = (struct ar9170_usb *) 164 struct ar9170_usb *aru = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
165 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
166 165
167 if (unlikely(!aru)) { 166 if (unlikely(!aru)) {
168 dev_kfree_skb_irq(skb); 167 dev_kfree_skb_irq(skb);
@@ -219,8 +218,7 @@ free:
219static void ar9170_usb_rx_completed(struct urb *urb) 218static void ar9170_usb_rx_completed(struct urb *urb)
220{ 219{
221 struct sk_buff *skb = urb->context; 220 struct sk_buff *skb = urb->context;
222 struct ar9170_usb *aru = (struct ar9170_usb *) 221 struct ar9170_usb *aru = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
223 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
224 int err; 222 int err;
225 223
226 if (!aru) 224 if (!aru)
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index 501050c0296f..e43210c8585c 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -126,6 +126,7 @@ struct ath_bus_ops {
126 void (*read_cachesize)(struct ath_common *common, int *csz); 126 void (*read_cachesize)(struct ath_common *common, int *csz);
127 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data); 127 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
128 void (*bt_coex_prep)(struct ath_common *common); 128 void (*bt_coex_prep)(struct ath_common *common);
129 void (*extn_synch_en)(struct ath_common *common);
129}; 130};
130 131
131struct ath_common { 132struct ath_common {
@@ -162,6 +163,8 @@ struct ath_common {
162 struct ath_regulatory regulatory; 163 struct ath_regulatory regulatory;
163 const struct ath_ops *ops; 164 const struct ath_ops *ops;
164 const struct ath_bus_ops *bus_ops; 165 const struct ath_bus_ops *bus_ops;
166
167 bool btcoex_enabled;
165}; 168};
166 169
167struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, 170struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
@@ -178,4 +181,112 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry);
178void ath_hw_cycle_counters_update(struct ath_common *common); 181void ath_hw_cycle_counters_update(struct ath_common *common);
179int32_t ath_hw_get_listen_time(struct ath_common *common); 182int32_t ath_hw_get_listen_time(struct ath_common *common);
180 183
184extern __attribute__ ((format (printf, 3, 4))) int
185ath_printk(const char *level, struct ath_common *common, const char *fmt, ...);
186
187#define ath_emerg(common, fmt, ...) \
188 ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
189#define ath_alert(common, fmt, ...) \
190 ath_printk(KERN_ALERT, common, fmt, ##__VA_ARGS__)
191#define ath_crit(common, fmt, ...) \
192 ath_printk(KERN_CRIT, common, fmt, ##__VA_ARGS__)
193#define ath_err(common, fmt, ...) \
194 ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
195#define ath_warn(common, fmt, ...) \
196 ath_printk(KERN_WARNING, common, fmt, ##__VA_ARGS__)
197#define ath_notice(common, fmt, ...) \
198 ath_printk(KERN_NOTICE, common, fmt, ##__VA_ARGS__)
199#define ath_info(common, fmt, ...) \
200 ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)
201
202/**
203 * enum ath_debug_level - atheros wireless debug level
204 *
205 * @ATH_DBG_RESET: reset processing
206 * @ATH_DBG_QUEUE: hardware queue management
207 * @ATH_DBG_EEPROM: eeprom processing
208 * @ATH_DBG_CALIBRATE: periodic calibration
209 * @ATH_DBG_INTERRUPT: interrupt processing
210 * @ATH_DBG_REGULATORY: regulatory processing
211 * @ATH_DBG_ANI: adaptive noise immunity processing
212 * @ATH_DBG_XMIT: basic xmit operation
213 * @ATH_DBG_BEACON: beacon handling
214 * @ATH_DBG_CONFIG: configuration of the hardware
215 * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
216 * @ATH_DBG_PS: power save processing
217 * @ATH_DBG_HWTIMER: hardware timer handling
218 * @ATH_DBG_BTCOEX: bluetooth coexistence
219 * @ATH_DBG_BSTUCK: stuck beacons
220 * @ATH_DBG_ANY: enable all debugging
221 *
222 * The debug level is used to control the amount and type of debugging output
223 * we want to see. Each driver has its own method for enabling debugging and
224 * modifying debug level states -- but this is typically done through a
225 * module parameter 'debug' along with a respective 'debug' debugfs file
226 * entry.
227 */
228enum ATH_DEBUG {
229 ATH_DBG_RESET = 0x00000001,
230 ATH_DBG_QUEUE = 0x00000002,
231 ATH_DBG_EEPROM = 0x00000004,
232 ATH_DBG_CALIBRATE = 0x00000008,
233 ATH_DBG_INTERRUPT = 0x00000010,
234 ATH_DBG_REGULATORY = 0x00000020,
235 ATH_DBG_ANI = 0x00000040,
236 ATH_DBG_XMIT = 0x00000080,
237 ATH_DBG_BEACON = 0x00000100,
238 ATH_DBG_CONFIG = 0x00000200,
239 ATH_DBG_FATAL = 0x00000400,
240 ATH_DBG_PS = 0x00000800,
241 ATH_DBG_HWTIMER = 0x00001000,
242 ATH_DBG_BTCOEX = 0x00002000,
243 ATH_DBG_WMI = 0x00004000,
244 ATH_DBG_BSTUCK = 0x00008000,
245 ATH_DBG_ANY = 0xffffffff
246};
247
248#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
249
250#ifdef CONFIG_ATH_DEBUG
251
252#define ath_dbg(common, dbg_mask, fmt, ...) \
253({ \
254 int rtn; \
255 if ((common)->debug_mask & dbg_mask) \
256 rtn = ath_printk(KERN_DEBUG, common, fmt, \
257 ##__VA_ARGS__); \
258 else \
259 rtn = 0; \
260 \
261 rtn; \
262})
263#define ATH_DBG_WARN(foo, arg...) WARN(foo, arg)
264#define ATH_DBG_WARN_ON_ONCE(foo) WARN_ON_ONCE(foo)
265
266#else
267
268static inline __attribute__ ((format (printf, 3, 4))) int
269ath_dbg(struct ath_common *common, enum ATH_DEBUG dbg_mask,
270 const char *fmt, ...)
271{
272 return 0;
273}
274#define ATH_DBG_WARN(foo, arg...) do {} while (0)
275#define ATH_DBG_WARN_ON_ONCE(foo) ({ \
276 int __ret_warn_once = !!(foo); \
277 unlikely(__ret_warn_once); \
278})
279
280#endif /* CONFIG_ATH_DEBUG */
281
282/** Returns string describing opmode, or NULL if unknown mode. */
283#ifdef CONFIG_ATH_DEBUG
284const char *ath_opmode_to_string(enum nl80211_iftype opmode);
285#else
286static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
287{
288 return "UNKNOWN";
289}
290#endif
291
181#endif /* ATH_H */ 292#endif /* ATH_H */
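The new ath_printk()/ath_dbg() helpers give all Atheros drivers a single message path; ath_dbg() compiles to a no-op unless CONFIG_ATH_DEBUG is set and the matching ATH_DBG_* bit is enabled in the common debug mask. A hedged example of the calling convention (the function and message are invented; it assumes ath.h is included):

/* Assumes #include "ath.h"; demo_report() is not a real driver function. */
static void demo_report(struct ath_common *common, int err)
{
        if (err)
                ath_err(common, "chip reset failed: %d\n", err);
        else
                ath_dbg(common, ATH_DBG_RESET, "reset complete, opmode %s\n",
                        ath_opmode_to_string(NL80211_IFTYPE_STATION));
}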
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index eb83b7b4d0e3..e0793319389d 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,9 +1,12 @@
1config ATH5K 1config ATH5K
2 tristate "Atheros 5xxx wireless cards support" 2 tristate "Atheros 5xxx wireless cards support"
3 depends on PCI && MAC80211 3 depends on (PCI || ATHEROS_AR231X) && MAC80211
4 select MAC80211_LEDS 4 select MAC80211_LEDS
5 select LEDS_CLASS 5 select LEDS_CLASS
6 select NEW_LEDS 6 select NEW_LEDS
7 select AVERAGE
8 select ATH5K_AHB if (ATHEROS_AR231X && !PCI)
9 select ATH5K_PCI if (!ATHEROS_AR231X && PCI)
7 ---help--- 10 ---help---
8 This module adds support for wireless adapters based on 11 This module adds support for wireless adapters based on
9 Atheros 5xxx chipset. 12 Atheros 5xxx chipset.
@@ -37,3 +40,16 @@ config ATH5K_DEBUG
37 40
38 modprobe ath5k debug=0x00000400 41 modprobe ath5k debug=0x00000400
39 42
43config ATH5K_AHB
44 bool "Atheros 5xxx AHB bus support"
45 depends on (ATHEROS_AR231X && !PCI)
46 ---help---
47 This adds support for WiSoC type chipsets of the 5xxx Atheros
48 family.
49
50config ATH5K_PCI
51 bool "Atheros 5xxx PCI bus support"
52 depends on (!ATHEROS_AR231X && PCI)
53 ---help---
54 This adds support for PCI type chipsets of the 5xxx Atheros
55 family.
diff --git a/drivers/net/wireless/ath/ath5k/Makefile b/drivers/net/wireless/ath/ath5k/Makefile
index 2242a140e4fe..f60b3899afc4 100644
--- a/drivers/net/wireless/ath/ath5k/Makefile
+++ b/drivers/net/wireless/ath/ath5k/Makefile
@@ -14,5 +14,8 @@ ath5k-y += led.o
14ath5k-y += rfkill.o 14ath5k-y += rfkill.o
15ath5k-y += ani.o 15ath5k-y += ani.o
16ath5k-y += sysfs.o 16ath5k-y += sysfs.o
17ath5k-y += mac80211-ops.o
17ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o 18ath5k-$(CONFIG_ATH5K_DEBUG) += debug.o
19ath5k-$(CONFIG_ATH5K_AHB) += ahb.o
20ath5k-$(CONFIG_ATH5K_PCI) += pci.o
18obj-$(CONFIG_ATH5K) += ath5k.o 21obj-$(CONFIG_ATH5K) += ath5k.o
diff --git a/drivers/net/wireless/ath/ath5k/ahb.c b/drivers/net/wireless/ath/ath5k/ahb.c
new file mode 100644
index 000000000000..707cde149248
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/ahb.c
@@ -0,0 +1,219 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 * Copyright (c) 2009 Gabor Juhos <juhosg@openwrt.org>
4 * Copyright (c) 2009 Imre Kaloz <kaloz@openwrt.org>
5 *
6 * Permission to use, copy, modify, and/or distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <linux/nl80211.h>
20#include <linux/platform_device.h>
21#include <ar231x_platform.h>
22#include "ath5k.h"
23#include "debug.h"
24#include "base.h"
25#include "reg.h"
26#include "debug.h"
27
28/* return bus cachesize in 4B word units */
29static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
30{
31 *csz = L1_CACHE_BYTES >> 2;
32}
33
34bool ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
35{
36 struct ath5k_softc *sc = common->priv;
37 struct platform_device *pdev = to_platform_device(sc->dev);
38 struct ar231x_board_config *bcfg = pdev->dev.platform_data;
39 u16 *eeprom, *eeprom_end;
40
41
42
43 bcfg = pdev->dev.platform_data;
44 eeprom = (u16 *) bcfg->radio;
45 eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
46
47 eeprom += off;
48 if (eeprom > eeprom_end)
49 return -EINVAL;
50
51 *data = *eeprom;
52 return 0;
53}
54
55int ath5k_hw_read_srev(struct ath5k_hw *ah)
56{
57 struct ath5k_softc *sc = ah->ah_sc;
58 struct platform_device *pdev = to_platform_device(sc->dev);
59 struct ar231x_board_config *bcfg = pdev->dev.platform_data;
60 ah->ah_mac_srev = bcfg->devid;
61 return 0;
62}
63
64static const struct ath_bus_ops ath_ahb_bus_ops = {
65 .ath_bus_type = ATH_AHB,
66 .read_cachesize = ath5k_ahb_read_cachesize,
67 .eeprom_read = ath5k_ahb_eeprom_read,
68};
69
70/*Initialization*/
71static int ath_ahb_probe(struct platform_device *pdev)
72{
73 struct ar231x_board_config *bcfg = pdev->dev.platform_data;
74 struct ath5k_softc *sc;
75 struct ieee80211_hw *hw;
76 struct resource *res;
77 void __iomem *mem;
78 int irq;
79 int ret = 0;
80 u32 reg;
81
82 if (!pdev->dev.platform_data) {
83 dev_err(&pdev->dev, "no platform data specified\n");
84 ret = -EINVAL;
85 goto err_out;
86 }
87
88 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
89 if (res == NULL) {
90 dev_err(&pdev->dev, "no memory resource found\n");
91 ret = -ENXIO;
92 goto err_out;
93 }
94
95 mem = ioremap_nocache(res->start, res->end - res->start + 1);
96 if (mem == NULL) {
97 dev_err(&pdev->dev, "ioremap failed\n");
98 ret = -ENOMEM;
99 goto err_out;
100 }
101
102 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
103 if (res == NULL) {
104 dev_err(&pdev->dev, "no IRQ resource found\n");
105 ret = -ENXIO;
106 goto err_out;
107 }
108
109 irq = res->start;
110
111 hw = ieee80211_alloc_hw(sizeof(struct ath5k_softc), &ath5k_hw_ops);
112 if (hw == NULL) {
113 dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
114 ret = -ENOMEM;
115 goto err_out;
116 }
117
118 sc = hw->priv;
119 sc->hw = hw;
120 sc->dev = &pdev->dev;
121 sc->iobase = mem;
122 sc->irq = irq;
123 sc->devid = bcfg->devid;
124
125 if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
126 /* Enable WMAC AHB arbitration */
127 reg = __raw_readl((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
128 reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN;
129 __raw_writel(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
130
131 /* Enable global WMAC swapping */
132 reg = __raw_readl((void __iomem *) AR5K_AR2315_BYTESWAP);
133 reg |= AR5K_AR2315_BYTESWAP_WMAC;
134 __raw_writel(reg, (void __iomem *) AR5K_AR2315_BYTESWAP);
135 } else {
136 /* Enable WMAC DMA access (assuming 5312 or 231x) */
137 /* TODO: check other platforms */
138 reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
139 if (to_platform_device(sc->dev)->id == 0)
140 reg |= AR5K_AR5312_ENABLE_WLAN0;
141 else
142 reg |= AR5K_AR5312_ENABLE_WLAN1;
143 __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
144 }
145
146 ret = ath5k_init_softc(sc, &ath_ahb_bus_ops);
147 if (ret != 0) {
148 dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
149 ret = -ENODEV;
150 goto err_free_hw;
151 }
152
153 platform_set_drvdata(pdev, hw);
154
155 return 0;
156
157 err_free_hw:
158 ieee80211_free_hw(hw);
159 platform_set_drvdata(pdev, NULL);
160 err_out:
161 return ret;
162}
163
164static int ath_ahb_remove(struct platform_device *pdev)
165{
166 struct ar231x_board_config *bcfg = pdev->dev.platform_data;
167 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
168 struct ath5k_softc *sc;
169 u32 reg;
170
171 if (!hw)
172 return 0;
173
174 sc = hw->priv;
175
176 if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
177 /* Disable WMAC AHB arbitration */
178 reg = __raw_readl((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
179 reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN;
180 __raw_writel(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
181 } else {
182 /*Stop DMA access */
183 reg = __raw_readl((void __iomem *) AR5K_AR5312_ENABLE);
184 if (to_platform_device(sc->dev)->id == 0)
185 reg &= ~AR5K_AR5312_ENABLE_WLAN0;
186 else
187 reg &= ~AR5K_AR5312_ENABLE_WLAN1;
188 __raw_writel(reg, (void __iomem *) AR5K_AR5312_ENABLE);
189 }
190
191 ath5k_deinit_softc(sc);
192 platform_set_drvdata(pdev, NULL);
193
194 return 0;
195}
196
197static struct platform_driver ath_ahb_driver = {
198 .probe = ath_ahb_probe,
199 .remove = ath_ahb_remove,
200 .driver = {
201 .name = "ar231x-wmac",
202 .owner = THIS_MODULE,
203 },
204};
205
206static int __init
207ath5k_ahb_init(void)
208{
209 return platform_driver_register(&ath_ahb_driver);
210}
211
212static void __exit
213ath5k_ahb_exit(void)
214{
215 platform_driver_unregister(&ath_ahb_driver);
216}
217
218module_init(ath5k_ahb_init);
219module_exit(ath5k_ahb_exit);
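The new AHB glue binds by platform device name, so a board file has to register an "ar231x-wmac" device carrying a struct ar231x_board_config as platform data plus MEM and IRQ resources. A hypothetical registration sketch; the addresses, IRQ number and the omitted platform data are placeholders, not real board values:

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static struct resource wmac0_res[] = {
        { .start = 0xb8500000, .end = 0xb850ffff, .flags = IORESOURCE_MEM },
        { .start = 3,          .end = 3,          .flags = IORESOURCE_IRQ },
};

static struct platform_device wmac0_dev = {
        .name           = "ar231x-wmac",        /* must match the driver name above */
        .id             = 0,
        .resource       = wmac0_res,
        .num_resources  = ARRAY_SIZE(wmac0_res),
        /* .dev.platform_data should point at a struct ar231x_board_config */
};

static int __init board_wmac_init(void)
{
        return platform_device_register(&wmac0_dev);
}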
diff --git a/drivers/net/wireless/ath/ath5k/ani.c b/drivers/net/wireless/ath/ath5k/ani.c
index f1419198a479..f915f404302d 100644
--- a/drivers/net/wireless/ath/ath5k/ani.c
+++ b/drivers/net/wireless/ath/ath5k/ani.c
@@ -58,20 +58,20 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
58{ 58{
59 /* TODO: 59 /* TODO:
60 * ANI documents suggest the following five levels to use, but the HAL 60 * ANI documents suggest the following five levels to use, but the HAL
61 * and ath9k use only use the last two levels, making this 61 * and ath9k use only the last two levels, making this
62 * essentially an on/off option. There *may* be a reason for this (???), 62 * essentially an on/off option. There *may* be a reason for this (???),
63 * so i stick with the HAL version for now... 63 * so i stick with the HAL version for now...
64 */ 64 */
65#if 0 65#if 0
66 const s8 hi[] = { -18, -18, -16, -14, -12 }; 66 static const s8 lo[] = { -52, -56, -60, -64, -70 };
67 const s8 lo[] = { -52, -56, -60, -64, -70 }; 67 static const s8 hi[] = { -18, -18, -16, -14, -12 };
68 const s8 sz[] = { -34, -41, -48, -55, -62 }; 68 static const s8 sz[] = { -34, -41, -48, -55, -62 };
69 const s8 fr[] = { -70, -72, -75, -78, -80 }; 69 static const s8 fr[] = { -70, -72, -75, -78, -80 };
70#else 70#else
71 const s8 sz[] = { -55, -62 }; 71 static const s8 lo[] = { -64, -70 };
72 const s8 lo[] = { -64, -70 }; 72 static const s8 hi[] = { -14, -12 };
73 const s8 hi[] = { -14, -12 }; 73 static const s8 sz[] = { -55, -62 };
74 const s8 fr[] = { -78, -80 }; 74 static const s8 fr[] = { -78, -80 };
75#endif 75#endif
76 if (level < 0 || level >= ARRAY_SIZE(sz)) { 76 if (level < 0 || level >= ARRAY_SIZE(sz)) {
77 ATH5K_ERR(ah->ah_sc, "noise immuniy level %d out of range", 77 ATH5K_ERR(ah->ah_sc, "noise immuniy level %d out of range",
@@ -102,7 +102,7 @@ ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
102void 102void
103ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level) 103ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
104{ 104{
105 const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 }; 105 static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
106 106
107 if (level < 0 || level >= ARRAY_SIZE(val) || 107 if (level < 0 || level >= ARRAY_SIZE(val) ||
108 level > ah->ah_sc->ani_state.max_spur_level) { 108 level > ah->ah_sc->ani_state.max_spur_level) {
@@ -127,7 +127,7 @@ ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
127void 127void
128ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level) 128ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
129{ 129{
130 const int val[] = { 0, 4, 8 }; 130 static const int val[] = { 0, 4, 8 };
131 131
132 if (level < 0 || level >= ARRAY_SIZE(val)) { 132 if (level < 0 || level >= ARRAY_SIZE(val)) {
133 ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level); 133 ATH5K_ERR(ah->ah_sc, "firstep level %d out of range", level);
@@ -151,12 +151,12 @@ ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
151void 151void
152ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on) 152ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
153{ 153{
154 const int m1l[] = { 127, 50 }; 154 static const int m1l[] = { 127, 50 };
155 const int m2l[] = { 127, 40 }; 155 static const int m2l[] = { 127, 40 };
156 const int m1[] = { 127, 0x4d }; 156 static const int m1[] = { 127, 0x4d };
157 const int m2[] = { 127, 0x40 }; 157 static const int m2[] = { 127, 0x40 };
158 const int m2cnt[] = { 31, 16 }; 158 static const int m2cnt[] = { 31, 16 };
159 const int m2lcnt[] = { 63, 48 }; 159 static const int m2lcnt[] = { 63, 48 };
160 160
161 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR, 161 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
162 AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]); 162 AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
@@ -192,7 +192,7 @@ ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
192void 192void
193ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on) 193ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
194{ 194{
195 const int val[] = { 8, 6 }; 195 static const int val[] = { 8, 6 };
196 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR, 196 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
197 AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]); 197 AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
198 ah->ah_sc->ani_state.cck_weak_sig = on; 198 ah->ah_sc->ani_state.cck_weak_sig = on;
@@ -216,7 +216,7 @@ static void
216ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as, 216ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
217 bool ofdm_trigger) 217 bool ofdm_trigger)
218{ 218{
219 int rssi = ah->ah_beacon_rssi_avg.avg; 219 int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
220 220
221 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)", 221 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "raise immunity (%s)",
222 ofdm_trigger ? "ODFM" : "CCK"); 222 ofdm_trigger ? "ODFM" : "CCK");
@@ -301,7 +301,7 @@ ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
301static void 301static void
302ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as) 302ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
303{ 303{
304 int rssi = ah->ah_beacon_rssi_avg.avg; 304 int rssi = ewma_read(&ah->ah_beacon_rssi_avg);
305 305
306 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity"); 306 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_ANI, "lower immunity");
307 307
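ani.c now reads the averaged beacon RSSI through the generic EWMA helpers from <linux/average.h> (the new "select AVERAGE" in Kconfig pulls that library in) instead of a driver-private running average. A minimal sketch of that API; the factor and weight values here are arbitrary, not necessarily the ones ath5k uses:

#include <linux/kernel.h>
#include <linux/average.h>

static struct ewma rssi_avg;

static void demo_ewma(void)
{
        ewma_init(&rssi_avg, 1024, 8);          /* scaling factor, weight */
        ewma_add(&rssi_avg, 40);
        ewma_add(&rssi_avg, 60);
        pr_info("averaged rssi: %lu\n", ewma_read(&rssi_avg));
}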
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 308b79e1ff08..407e39c2b10b 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -25,6 +25,7 @@
25 25
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/average.h>
28#include <net/mac80211.h> 29#include <net/mac80211.h>
29 30
30/* RX/TX descriptor hw structs 31/* RX/TX descriptor hw structs
@@ -153,19 +154,6 @@
153 udelay(1); \ 154 udelay(1); \
154} while (0) 155} while (0)
155 156
156/* Register dumps are done per operation mode */
157#define AR5K_INI_RFGAIN_5GHZ 0
158#define AR5K_INI_RFGAIN_2GHZ 1
159
160/* TODO: Clean this up */
161#define AR5K_INI_VAL_11A 0
162#define AR5K_INI_VAL_11A_TURBO 1
163#define AR5K_INI_VAL_11B 2
164#define AR5K_INI_VAL_11G 3
165#define AR5K_INI_VAL_11G_TURBO 4
166#define AR5K_INI_VAL_XR 0
167#define AR5K_INI_VAL_MAX 5
168
169/* 157/*
170 * Some tuneable values (these should be changeable by the user) 158 * Some tuneable values (these should be changeable by the user)
171 * TODO: Make use of them and add more options OR use debug/configfs 159 * TODO: Make use of them and add more options OR use debug/configfs
@@ -221,42 +209,66 @@
221 209
222/* Initial values */ 210/* Initial values */
223#define AR5K_INIT_CYCRSSI_THR1 2 211#define AR5K_INIT_CYCRSSI_THR1 2
224#define AR5K_INIT_TX_LATENCY 502 212
225#define AR5K_INIT_USEC 39 213/* Tx retry limits */
226#define AR5K_INIT_USEC_TURBO 79
227#define AR5K_INIT_USEC_32 31
228#define AR5K_INIT_SLOT_TIME 396
229#define AR5K_INIT_SLOT_TIME_TURBO 480
230#define AR5K_INIT_ACK_CTS_TIMEOUT 1024
231#define AR5K_INIT_ACK_CTS_TIMEOUT_TURBO 0x08000800
232#define AR5K_INIT_PROG_IFS 920
233#define AR5K_INIT_PROG_IFS_TURBO 960
234#define AR5K_INIT_EIFS 3440
235#define AR5K_INIT_EIFS_TURBO 6880
236#define AR5K_INIT_SIFS 560
237#define AR5K_INIT_SIFS_TURBO 480
238#define AR5K_INIT_SH_RETRY 10 214#define AR5K_INIT_SH_RETRY 10
239#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY 215#define AR5K_INIT_LG_RETRY AR5K_INIT_SH_RETRY
216/* For station mode */
240#define AR5K_INIT_SSH_RETRY 32 217#define AR5K_INIT_SSH_RETRY 32
241#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY 218#define AR5K_INIT_SLG_RETRY AR5K_INIT_SSH_RETRY
242#define AR5K_INIT_TX_RETRY 10 219#define AR5K_INIT_TX_RETRY 10
243 220
244#define AR5K_INIT_TRANSMIT_LATENCY ( \ 221
245 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \ 222/* Slot time */
246 (AR5K_INIT_USEC) \ 223#define AR5K_INIT_SLOT_TIME_TURBO 6
247) 224#define AR5K_INIT_SLOT_TIME_DEFAULT 9
248#define AR5K_INIT_TRANSMIT_LATENCY_TURBO ( \ 225#define AR5K_INIT_SLOT_TIME_HALF_RATE 13
249 (AR5K_INIT_TX_LATENCY << 14) | (AR5K_INIT_USEC_32 << 7) | \ 226#define AR5K_INIT_SLOT_TIME_QUARTER_RATE 21
250 (AR5K_INIT_USEC_TURBO) \ 227#define AR5K_INIT_SLOT_TIME_B 20
251) 228#define AR5K_SLOT_TIME_MAX 0xffff
252#define AR5K_INIT_PROTO_TIME_CNTRL ( \ 229
253 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS << 12) | \ 230/* SIFS */
254 (AR5K_INIT_PROG_IFS) \ 231#define AR5K_INIT_SIFS_TURBO 6
255) 232/* XXX: 8 from initvals 10 from standard */
256#define AR5K_INIT_PROTO_TIME_CNTRL_TURBO ( \ 233#define AR5K_INIT_SIFS_DEFAULT_BG 8
257 (AR5K_INIT_CARR_SENSE_EN << 26) | (AR5K_INIT_EIFS_TURBO << 12) | \ 234#define AR5K_INIT_SIFS_DEFAULT_A 16
258 (AR5K_INIT_PROG_IFS_TURBO) \ 235#define AR5K_INIT_SIFS_HALF_RATE 32
259) 236#define AR5K_INIT_SIFS_QUARTER_RATE 64
237
238/* Used to calculate tx time for non 5/10/40MHz
239 * operation */
240/* It's preamble time + signal time (16 + 4) */
241#define AR5K_INIT_OFDM_PREAMPLE_TIME 20
242/* Preamble time for 40MHz (turbo) operation (min ?) */
243#define AR5K_INIT_OFDM_PREAMBLE_TIME_MIN 14
244#define AR5K_INIT_OFDM_SYMBOL_TIME 4
245#define AR5K_INIT_OFDM_PLCP_BITS 22
246
247/* Rx latency for 5 and 10MHz operation (max ?) */
248#define AR5K_INIT_RX_LAT_MAX 63
249/* Tx latencies from initvals (5212 only but no problem
250 * because we only tweak them on 5212) */
251#define AR5K_INIT_TX_LAT_A 54
252#define AR5K_INIT_TX_LAT_BG 384
253/* Tx latency for 40MHz (turbo) operation (min ?) */
254#define AR5K_INIT_TX_LAT_MIN 32
255/* Default Tx/Rx latencies (same for 5211)*/
256#define AR5K_INIT_TX_LATENCY_5210 54
257#define AR5K_INIT_RX_LATENCY_5210 29
258
259/* Tx frame to Tx data start delay */
260#define AR5K_INIT_TXF2TXD_START_DEFAULT 14
261#define AR5K_INIT_TXF2TXD_START_DELAY_10MHZ 12
262#define AR5K_INIT_TXF2TXD_START_DELAY_5MHZ 13
263
264/* We need to increase PHY switch and agc settling time
265 * on turbo mode */
266#define AR5K_SWITCH_SETTLING 5760
267#define AR5K_SWITCH_SETTLING_TURBO 7168
268
269#define AR5K_AGC_SETTLING 28
270/* 38 on 5210 but shouldn't matter */
271#define AR5K_AGC_SETTLING_TURBO 37
260 272
261 273
262/* GENERIC CHIPSET DEFINITIONS */ 274/* GENERIC CHIPSET DEFINITIONS */
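The OFDM timing constants introduced above (preamble time, symbol time, PLCP bits) plug into the standard 802.11a/g airtime calculation: preamble plus one 4 us symbol per block of data bits. A minimal sketch of that formula, assuming a hypothetical helper and a bitrate given in 100 kbps units as mac80211 reports it; this is an illustration, not code from the patch:

	/* Hypothetical helper illustrating the 802.11a/g duration formula
	 * built from the constants above; not part of this patch. */
	static unsigned int example_ofdm_duration_us(unsigned int len_bytes,
						     unsigned int bitrate_100kbps)
	{
		/* data bits carried per 4us OFDM symbol, e.g. 216 at 54 Mbps */
		unsigned int bits_per_symbol =
			(bitrate_100kbps * AR5K_INIT_OFDM_SYMBOL_TIME) / 10;
		unsigned int num_bits = AR5K_INIT_OFDM_PLCP_BITS + (len_bytes << 3);
		unsigned int num_symbols = DIV_ROUND_UP(num_bits, bits_per_symbol);

		return AR5K_INIT_OFDM_PREAMPLE_TIME +
		       num_symbols * AR5K_INIT_OFDM_SYMBOL_TIME;
	}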
@@ -303,12 +315,19 @@ struct ath5k_srev_name {
303#define AR5K_SREV_AR5311B 0x30 /* Spirit */ 315#define AR5K_SREV_AR5311B 0x30 /* Spirit */
304#define AR5K_SREV_AR5211 0x40 /* Oahu */ 316#define AR5K_SREV_AR5211 0x40 /* Oahu */
305#define AR5K_SREV_AR5212 0x50 /* Venice */ 317#define AR5K_SREV_AR5212 0x50 /* Venice */
318#define AR5K_SREV_AR5312_R2 0x52 /* AP31 */
306#define AR5K_SREV_AR5212_V4 0x54 /* ??? */ 319#define AR5K_SREV_AR5212_V4 0x54 /* ??? */
307#define AR5K_SREV_AR5213 0x55 /* ??? */ 320#define AR5K_SREV_AR5213 0x55 /* ??? */
321#define AR5K_SREV_AR5312_R7 0x57 /* AP30 */
322#define AR5K_SREV_AR2313_R8 0x58 /* AP43 */
308#define AR5K_SREV_AR5213A 0x59 /* Hainan */ 323#define AR5K_SREV_AR5213A 0x59 /* Hainan */
309#define AR5K_SREV_AR2413 0x78 /* Griffin lite */ 324#define AR5K_SREV_AR2413 0x78 /* Griffin lite */
310#define AR5K_SREV_AR2414 0x70 /* Griffin */ 325#define AR5K_SREV_AR2414 0x70 /* Griffin */
326#define AR5K_SREV_AR2315_R6 0x86 /* AP51-Light */
327#define AR5K_SREV_AR2315_R7 0x87 /* AP51-Full */
311#define AR5K_SREV_AR5424 0x90 /* Condor */ 328#define AR5K_SREV_AR5424 0x90 /* Condor */
329#define AR5K_SREV_AR2317_R1 0x90 /* AP61-Light */
330#define AR5K_SREV_AR2317_R2 0x91 /* AP61-Full */
312#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */ 331#define AR5K_SREV_AR5413 0xa4 /* Eagle lite */
313#define AR5K_SREV_AR5414 0xa0 /* Eagle */ 332#define AR5K_SREV_AR5414 0xa0 /* Eagle */
314#define AR5K_SREV_AR2415 0xb0 /* Talon */ 333#define AR5K_SREV_AR2415 0xb0 /* Talon */
@@ -404,12 +423,10 @@ struct ath5k_srev_name {
404 423
405enum ath5k_driver_mode { 424enum ath5k_driver_mode {
406 AR5K_MODE_11A = 0, 425 AR5K_MODE_11A = 0,
407 AR5K_MODE_11A_TURBO = 1, 426 AR5K_MODE_11B = 1,
408 AR5K_MODE_11B = 2, 427 AR5K_MODE_11G = 2,
409 AR5K_MODE_11G = 3,
410 AR5K_MODE_11G_TURBO = 4,
411 AR5K_MODE_XR = 0, 428 AR5K_MODE_XR = 0,
412 AR5K_MODE_MAX = 5 429 AR5K_MODE_MAX = 3
413}; 430};
414 431
415enum ath5k_ant_mode { 432enum ath5k_ant_mode {
@@ -423,6 +440,12 @@ enum ath5k_ant_mode {
423 AR5K_ANTMODE_MAX, 440 AR5K_ANTMODE_MAX,
424}; 441};
425 442
443enum ath5k_bw_mode {
444 AR5K_BWMODE_DEFAULT = 0, /* 20MHz, default operation */
445 AR5K_BWMODE_5MHZ = 1, /* Quarter rate */
446 AR5K_BWMODE_10MHZ = 2, /* Half rate */
447 AR5K_BWMODE_40MHZ = 3 /* Turbo */
448};
426 449
427/****************\ 450/****************\
428 TX DEFINITIONS 451 TX DEFINITIONS
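The new ath5k_bw_mode enum replaces the removed turbo channel flags with explicit bandwidth modes. The practical effect is on timing conversions: the baseband clock roughly doubles in 40MHz turbo operation and is halved or quartered for the 10MHz and 5MHz modes. A hypothetical sketch of that scaling, assuming a 40 MHz baseline for 20MHz OFDM operation purely for illustration; this is not the driver's ath5k_hw_htoclock implementation:

	/* Hypothetical illustration of usec-to-clock scaling per bandwidth
	 * mode; the baseline clock value is an assumption, not driver code. */
	static unsigned int example_usec_to_clock(u8 bwmode, unsigned int usec)
	{
		unsigned int clock = 40;	/* assumed MHz for 20MHz OFDM */

		switch (bwmode) {
		case AR5K_BWMODE_40MHZ:
			clock *= 2;		/* turbo */
			break;
		case AR5K_BWMODE_10MHZ:
			clock /= 2;		/* half rate */
			break;
		case AR5K_BWMODE_5MHZ:
			clock /= 4;		/* quarter rate */
			break;
		default:
			break;			/* 20MHz default */
		}

		return usec * clock;
	}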
@@ -655,7 +678,6 @@ struct ath5k_gain {
655 678
656/* channel_flags */ 679/* channel_flags */
657#define CHANNEL_CW_INT 0x0008 /* Contention Window interference detected */ 680#define CHANNEL_CW_INT 0x0008 /* Contention Window interference detected */
658#define CHANNEL_TURBO 0x0010 /* Turbo Channel */
659#define CHANNEL_CCK 0x0020 /* CCK channel */ 681#define CHANNEL_CCK 0x0020 /* CCK channel */
660#define CHANNEL_OFDM 0x0040 /* OFDM channel */ 682#define CHANNEL_OFDM 0x0040 /* OFDM channel */
661#define CHANNEL_2GHZ 0x0080 /* 2GHz channel. */ 683#define CHANNEL_2GHZ 0x0080 /* 2GHz channel. */
@@ -667,16 +689,10 @@ struct ath5k_gain {
667#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM) 689#define CHANNEL_A (CHANNEL_5GHZ|CHANNEL_OFDM)
668#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK) 690#define CHANNEL_B (CHANNEL_2GHZ|CHANNEL_CCK)
669#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM) 691#define CHANNEL_G (CHANNEL_2GHZ|CHANNEL_OFDM)
670#define CHANNEL_T (CHANNEL_5GHZ|CHANNEL_OFDM|CHANNEL_TURBO)
671#define CHANNEL_TG (CHANNEL_2GHZ|CHANNEL_OFDM|CHANNEL_TURBO)
672#define CHANNEL_108A CHANNEL_T
673#define CHANNEL_108G CHANNEL_TG
674#define CHANNEL_X (CHANNEL_5GHZ|CHANNEL_OFDM|CHANNEL_XR) 692#define CHANNEL_X (CHANNEL_5GHZ|CHANNEL_OFDM|CHANNEL_XR)
675 693
676#define CHANNEL_ALL (CHANNEL_OFDM|CHANNEL_CCK|CHANNEL_2GHZ|CHANNEL_5GHZ| \ 694#define CHANNEL_ALL (CHANNEL_OFDM|CHANNEL_CCK|CHANNEL_2GHZ|CHANNEL_5GHZ)
677 CHANNEL_TURBO)
678 695
679#define CHANNEL_ALL_NOTURBO (CHANNEL_ALL & ~CHANNEL_TURBO)
680#define CHANNEL_MODES CHANNEL_ALL 696#define CHANNEL_MODES CHANNEL_ALL
681 697
682/* 698/*
@@ -1025,7 +1041,6 @@ struct ath5k_hw {
1025 enum ath5k_int ah_imr; 1041 enum ath5k_int ah_imr;
1026 1042
1027 struct ieee80211_channel *ah_current_channel; 1043 struct ieee80211_channel *ah_current_channel;
1028 bool ah_turbo;
1029 bool ah_calibration; 1044 bool ah_calibration;
1030 bool ah_single_chip; 1045 bool ah_single_chip;
1031 1046
@@ -1034,6 +1049,7 @@ struct ath5k_hw {
1034 u32 ah_phy; 1049 u32 ah_phy;
1035 u32 ah_mac_srev; 1050 u32 ah_mac_srev;
1036 u16 ah_mac_version; 1051 u16 ah_mac_version;
1052 u16 ah_mac_revision;
1037 u16 ah_phy_revision; 1053 u16 ah_phy_revision;
1038 u16 ah_radio_5ghz_revision; 1054 u16 ah_radio_5ghz_revision;
1039 u16 ah_radio_2ghz_revision; 1055 u16 ah_radio_2ghz_revision;
@@ -1043,6 +1059,8 @@ struct ath5k_hw {
1043 1059
1044 u32 ah_limit_tx_retries; 1060 u32 ah_limit_tx_retries;
1045 u8 ah_coverage_class; 1061 u8 ah_coverage_class;
1062 bool ah_ack_bitrate_high;
1063 u8 ah_bwmode;
1046 1064
1047 /* Antenna Control */ 1065 /* Antenna Control */
1048 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; 1066 u32 ah_ant_ctl[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
@@ -1085,12 +1103,14 @@ struct ath5k_hw {
1085 /* Values in 0.25dB units */ 1103 /* Values in 0.25dB units */
1086 s16 txp_min_pwr; 1104 s16 txp_min_pwr;
1087 s16 txp_max_pwr; 1105 s16 txp_max_pwr;
1106 s16 txp_cur_pwr;
1088 /* Values in 0.5dB units */ 1107 /* Values in 0.5dB units */
1089 s16 txp_offset; 1108 s16 txp_offset;
1090 s16 txp_ofdm; 1109 s16 txp_ofdm;
1091 s16 txp_cck_ofdm_gainf_delta; 1110 s16 txp_cck_ofdm_gainf_delta;
1092 /* Value in dB units */ 1111 /* Value in dB units */
1093 s16 txp_cck_ofdm_pwr_delta; 1112 s16 txp_cck_ofdm_pwr_delta;
1113 bool txp_setup;
1094 } ah_txpower; 1114 } ah_txpower;
1095 1115
1096 struct { 1116 struct {
@@ -1102,7 +1122,7 @@ struct ath5k_hw {
1102 struct ath5k_nfcal_hist ah_nfcal_hist; 1122 struct ath5k_nfcal_hist ah_nfcal_hist;
1103 1123
1104 /* average beacon RSSI in our BSS (used by ANI) */ 1124 /* average beacon RSSI in our BSS (used by ANI) */
1105 struct ath5k_avg_val ah_beacon_rssi_avg; 1125 struct ewma ah_beacon_rssi_avg;
1106 1126
1107 /* noise floor from last periodic calibration */ 1127 /* noise floor from last periodic calibration */
1108 s32 ah_noise_floor; 1128 s32 ah_noise_floor;
@@ -1131,36 +1151,50 @@ struct ath5k_hw {
1131/* 1151/*
1132 * Prototypes 1152 * Prototypes
1133 */ 1153 */
1154extern const struct ieee80211_ops ath5k_hw_ops;
1134 1155
1135/* Attach/Detach Functions */ 1156/* Initialization and detach functions */
1136int ath5k_hw_attach(struct ath5k_softc *sc); 1157int ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops);
1137void ath5k_hw_detach(struct ath5k_hw *ah); 1158void ath5k_deinit_softc(struct ath5k_softc *sc);
1159int ath5k_hw_init(struct ath5k_softc *sc);
1160void ath5k_hw_deinit(struct ath5k_hw *ah);
1138 1161
1139int ath5k_sysfs_register(struct ath5k_softc *sc); 1162int ath5k_sysfs_register(struct ath5k_softc *sc);
1140void ath5k_sysfs_unregister(struct ath5k_softc *sc); 1163void ath5k_sysfs_unregister(struct ath5k_softc *sc);
1141 1164
1165/*Chip id helper functions */
1166const char *ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val);
1167int ath5k_hw_read_srev(struct ath5k_hw *ah);
1168
1142/* LED functions */ 1169/* LED functions */
1143int ath5k_init_leds(struct ath5k_softc *sc); 1170int ath5k_init_leds(struct ath5k_softc *sc);
1144void ath5k_led_enable(struct ath5k_softc *sc); 1171void ath5k_led_enable(struct ath5k_softc *sc);
1145void ath5k_led_off(struct ath5k_softc *sc); 1172void ath5k_led_off(struct ath5k_softc *sc);
1146void ath5k_unregister_leds(struct ath5k_softc *sc); 1173void ath5k_unregister_leds(struct ath5k_softc *sc);
1147 1174
1175
1148/* Reset Functions */ 1176/* Reset Functions */
1149int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial); 1177int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial);
1150int ath5k_hw_on_hold(struct ath5k_hw *ah); 1178int ath5k_hw_on_hold(struct ath5k_hw *ah);
1151int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, 1179int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1152 struct ieee80211_channel *channel, bool change_channel); 1180 struct ieee80211_channel *channel, bool fast, bool skip_pcu);
1153int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val, 1181int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
1154 bool is_set); 1182 bool is_set);
1155/* Power management functions */ 1183/* Power management functions */
1156 1184
1185
1186/* Clock rate related functions */
1187unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1188unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1189void ath5k_hw_set_clockrate(struct ath5k_hw *ah);
1190
1191
1157/* DMA Related Functions */ 1192/* DMA Related Functions */
1158void ath5k_hw_start_rx_dma(struct ath5k_hw *ah); 1193void ath5k_hw_start_rx_dma(struct ath5k_hw *ah);
1159int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah);
1160u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah); 1194u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah);
1161void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr); 1195int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr);
1162int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue); 1196int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue);
1163int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue); 1197int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue);
1164u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue); 1198u32 ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue);
1165int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, 1199int ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue,
1166 u32 phys_addr); 1200 u32 phys_addr);
@@ -1170,38 +1204,43 @@ bool ath5k_hw_is_intr_pending(struct ath5k_hw *ah);
1170int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask); 1204int ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask);
1171enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask); 1205enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask);
1172void ath5k_hw_update_mib_counters(struct ath5k_hw *ah); 1206void ath5k_hw_update_mib_counters(struct ath5k_hw *ah);
1207/* Init/Stop functions */
1208void ath5k_hw_dma_init(struct ath5k_hw *ah);
1209int ath5k_hw_dma_stop(struct ath5k_hw *ah);
1173 1210
1174/* EEPROM access functions */ 1211/* EEPROM access functions */
1175int ath5k_eeprom_init(struct ath5k_hw *ah); 1212int ath5k_eeprom_init(struct ath5k_hw *ah);
1176void ath5k_eeprom_detach(struct ath5k_hw *ah); 1213void ath5k_eeprom_detach(struct ath5k_hw *ah);
1177int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac); 1214int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac);
1178 1215
1216
1179/* Protocol Control Unit Functions */ 1217/* Protocol Control Unit Functions */
1218/* Helpers */
1219int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
1220 int len, struct ieee80211_rate *rate);
1221unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah);
1222unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah);
1180extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode); 1223extern int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype opmode);
1181void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class); 1224void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class);
1182/* BSSID Functions */ 1225/* RX filter control*/
1183int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1226int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1184void ath5k_hw_set_bssid(struct ath5k_hw *ah); 1227void ath5k_hw_set_bssid(struct ath5k_hw *ah);
1185void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1228void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1186/* Receive start/stop functions */
1187void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
1188void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
1189/* RX Filter functions */
1190void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1); 1229void ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1);
1191u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah); 1230u32 ath5k_hw_get_rx_filter(struct ath5k_hw *ah);
1192void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter); 1231void ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter);
1232/* Receive (DRU) start/stop functions */
1233void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
1234void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
1193/* Beacon control functions */ 1235/* Beacon control functions */
1194u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah); 1236u64 ath5k_hw_get_tsf64(struct ath5k_hw *ah);
1195void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64); 1237void ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64);
1196void ath5k_hw_reset_tsf(struct ath5k_hw *ah); 1238void ath5k_hw_reset_tsf(struct ath5k_hw *ah);
1197void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval); 1239void ath5k_hw_init_beacon(struct ath5k_hw *ah, u32 next_beacon, u32 interval);
1198bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval); 1240bool ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval);
1199/* ACK bit rate */ 1241/* Init function */
1200void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high); 1242void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1201/* Clock rate related functions */ 1243 u8 mode);
1202unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec);
1203unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock);
1204void ath5k_hw_set_clockrate(struct ath5k_hw *ah);
1205 1244
1206/* Queue Control Unit, DFS Control Unit Functions */ 1245/* Queue Control Unit, DFS Control Unit Functions */
1207int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, 1246int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
@@ -1214,7 +1253,9 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah,
1214u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue); 1253u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue);
1215void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1254void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1216int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue); 1255int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue);
1217int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time); 1256int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time);
1257/* Init function */
1258int ath5k_hw_init_queues(struct ath5k_hw *ah);
1218 1259
1219/* Hardware Descriptor Functions */ 1260/* Hardware Descriptor Functions */
1220int ath5k_hw_init_desc_functions(struct ath5k_hw *ah); 1261int ath5k_hw_init_desc_functions(struct ath5k_hw *ah);
@@ -1224,6 +1265,7 @@ int ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
1224 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2, 1265 unsigned int tx_rate1, u_int tx_tries1, u_int tx_rate2,
1225 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3); 1266 u_int tx_tries2, unsigned int tx_rate3, u_int tx_tries3);
1226 1267
1268
1227/* GPIO Functions */ 1269/* GPIO Functions */
1228void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state); 1270void ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state);
1229int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio); 1271int ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio);
@@ -1233,11 +1275,13 @@ int ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val);
1233void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio, 1275void ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
1234 u32 interrupt_level); 1276 u32 interrupt_level);
1235 1277
1236/* rfkill Functions */ 1278
1279/* RFkill Functions */
1237void ath5k_rfkill_hw_start(struct ath5k_hw *ah); 1280void ath5k_rfkill_hw_start(struct ath5k_hw *ah);
1238void ath5k_rfkill_hw_stop(struct ath5k_hw *ah); 1281void ath5k_rfkill_hw_stop(struct ath5k_hw *ah);
1239 1282
1240/* Misc functions */ 1283
1284/* Misc functions TODO: Cleanup */
1241int ath5k_hw_set_capabilities(struct ath5k_hw *ah); 1285int ath5k_hw_set_capabilities(struct ath5k_hw *ah);
1242int ath5k_hw_get_capability(struct ath5k_hw *ah, 1286int ath5k_hw_get_capability(struct ath5k_hw *ah,
1243 enum ath5k_capability_type cap_type, u32 capability, 1287 enum ath5k_capability_type cap_type, u32 capability,
@@ -1245,19 +1289,20 @@ int ath5k_hw_get_capability(struct ath5k_hw *ah,
1245int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id); 1289int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid, u16 assoc_id);
1246int ath5k_hw_disable_pspoll(struct ath5k_hw *ah); 1290int ath5k_hw_disable_pspoll(struct ath5k_hw *ah);
1247 1291
1292
1248/* Initial register settings functions */ 1293/* Initial register settings functions */
1249int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel); 1294int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel);
1250 1295
1251/* Initialize RF */ 1296
1252int ath5k_hw_rfregs_init(struct ath5k_hw *ah, 1297/* PHY functions */
1253 struct ieee80211_channel *channel, 1298/* Misc PHY functions */
1254 unsigned int mode); 1299u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
1255int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq); 1300int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1301/* Gain_F optimization */
1256enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah); 1302enum ath5k_rfgain ath5k_hw_gainf_calibrate(struct ath5k_hw *ah);
1257int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah); 1303int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
1258/* PHY/RF channel functions */ 1304/* PHY/RF channel functions */
1259bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1305bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1260int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1261/* PHY calibration */ 1306/* PHY calibration */
1262void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah); 1307void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
1263int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, 1308int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
@@ -1266,18 +1311,14 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah);
1266/* Spur mitigation */ 1311/* Spur mitigation */
1267bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 1312bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
1268 struct ieee80211_channel *channel); 1313 struct ieee80211_channel *channel);
1269void ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1270 struct ieee80211_channel *channel);
1271/* Misc PHY functions */
1272u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan);
1273int ath5k_hw_phy_disable(struct ath5k_hw *ah);
1274/* Antenna control */ 1314/* Antenna control */
1275void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode); 1315void ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode);
1276void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode); 1316void ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode);
1277/* TX power setup */ 1317/* TX power setup */
1278int ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1279 u8 ee_mode, u8 txpower);
1280int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower); 1318int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower);
1319/* Init function */
1320int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
1321 u8 mode, bool fast);
1281 1322
1282/* 1323/*
1283 * Functions used internally 1324 * Functions used internally
@@ -1293,6 +1334,32 @@ static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
1293 return &(ath5k_hw_common(ah)->regulatory); 1334 return &(ath5k_hw_common(ah)->regulatory);
1294} 1335}
1295 1336
1337#ifdef CONFIG_ATHEROS_AR231X
1338#define AR5K_AR2315_PCI_BASE ((void __iomem *)0xb0100000)
1339
1340static inline void __iomem *ath5k_ahb_reg(struct ath5k_hw *ah, u16 reg)
1341{
1342 /* On AR2315 and AR2317 the PCI clock domain registers
1343 * are outside of the WMAC register space */
1344 if (unlikely((reg >= 0x4000) && (reg < 0x5000) &&
1345 (ah->ah_mac_srev >= AR5K_SREV_AR2315_R6)))
1346 return AR5K_AR2315_PCI_BASE + reg;
1347
1348 return ah->ah_iobase + reg;
1349}
1350
1351static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
1352{
1353 return __raw_readl(ath5k_ahb_reg(ah, reg));
1354}
1355
1356static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1357{
1358 __raw_writel(val, ath5k_ahb_reg(ah, reg));
1359}
1360
1361#else
1362
1296static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) 1363static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
1297{ 1364{
1298 return ioread32(ah->ah_iobase + reg); 1365 return ioread32(ah->ah_iobase + reg);
@@ -1303,6 +1370,24 @@ static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1303 iowrite32(val, ah->ah_iobase + reg); 1370 iowrite32(val, ah->ah_iobase + reg);
1304} 1371}
1305 1372
1373#endif
1374
1375static inline enum ath_bus_type ath5k_get_bus_type(struct ath5k_hw *ah)
1376{
1377 return ath5k_hw_common(ah)->bus_ops->ath_bus_type;
1378}
1379
1380static inline void ath5k_read_cachesize(struct ath_common *common, int *csz)
1381{
1382 common->bus_ops->read_cachesize(common, csz);
1383}
1384
1385static inline bool ath5k_hw_nvram_read(struct ath5k_hw *ah, u32 off, u16 *data)
1386{
1387 struct ath_common *common = ath5k_hw_common(ah);
1388 return common->bus_ops->eeprom_read(common, off, data);
1389}
1390
1306static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits) 1391static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1307{ 1392{
1308 u32 retval = 0, bit, i; 1393 u32 retval = 0, bit, i;
@@ -1315,27 +1400,4 @@ static inline u32 ath5k_hw_bitswap(u32 val, unsigned int bits)
1315 return retval; 1400 return retval;
1316} 1401}
1317 1402
1318#define AVG_SAMPLES 8
1319#define AVG_FACTOR 1000
1320
1321/**
1322 * ath5k_moving_average - Exponentially weighted moving average
1323 * @avg: average structure
1324 * @val: current value
1325 *
1326 * This implementation make use of a struct ath5k_avg_val to prevent rounding
1327 * errors.
1328 */
1329static inline struct ath5k_avg_val
1330ath5k_moving_average(const struct ath5k_avg_val avg, const int val)
1331{
1332 struct ath5k_avg_val new;
1333 new.avg_weight = avg.avg_weight ?
1334 (((avg.avg_weight * ((AVG_SAMPLES) - 1)) +
1335 (val * (AVG_FACTOR))) / (AVG_SAMPLES)) :
1336 (val * (AVG_FACTOR));
1337 new.avg = new.avg_weight / (AVG_FACTOR);
1338 return new;
1339}
1340
1341#endif 1403#endif
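The ath5k_moving_average helper and its AVG_SAMPLES/AVG_FACTOR fixed-point workaround, removed above, are superseded by the generic EWMA helpers from <linux/average.h> (ewma_init/ewma_add/ewma_read), which ani.c and base.c now use for the beacon RSSI average. A minimal usage sketch of that API; the factor and weight values here are assumptions for illustration, the driver picks its own when it initializes ah_beacon_rssi_avg:

	/* Illustrative sketch of the <linux/average.h> EWMA API that replaces
	 * ath5k_moving_average; factor/weight chosen here only as an example. */
	#include <linux/average.h>

	static unsigned long example_ewma_usage(void)
	{
		struct ewma avg;

		/* internal scaling factor 1024, weight of 8 samples */
		ewma_init(&avg, 1024, 8);

		/* feed samples as they arrive, e.g. one per received beacon */
		ewma_add(&avg, 42);
		ewma_add(&avg, 40);

		/* read back the current weighted average */
		return ewma_read(&avg);
	}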
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index fbe8aca975d8..cdac5cff0177 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -93,16 +93,16 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
93} 93}
94 94
95/** 95/**
96 * ath5k_hw_attach - Check if hw is supported and init the needed structs 96 * ath5k_hw_init - Check if hw is supported and init the needed structs
97 * 97 *
98 * @sc: The &struct ath5k_softc we got from the driver's attach function 98 * @sc: The &struct ath5k_softc we got from the driver's init_softc function
99 * 99 *
100 * Check if the device is supported, perform a POST and initialize the needed 100 * Check if the device is supported, perform a POST and initialize the needed
101 * structs. Returns -ENOMEM if we don't have memory for the needed structs, 101 * structs. Returns -ENOMEM if we don't have memory for the needed structs,
102 * -ENODEV if the device is not supported or prints an error msg if something 102 * -ENODEV if the device is not supported or prints an error msg if something
103 * else went wrong. 103 * else went wrong.
104 */ 104 */
105int ath5k_hw_attach(struct ath5k_softc *sc) 105int ath5k_hw_init(struct ath5k_softc *sc)
106{ 106{
107 struct ath5k_hw *ah = sc->ah; 107 struct ath5k_hw *ah = sc->ah;
108 struct ath_common *common = ath5k_hw_common(ah); 108 struct ath_common *common = ath5k_hw_common(ah);
@@ -115,7 +115,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
115 * HW information 115 * HW information
116 */ 116 */
117 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT; 117 ah->ah_radar.r_enabled = AR5K_TUNE_RADAR_ALERT;
118 ah->ah_turbo = false; 118 ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 119 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
120 ah->ah_imr = 0; 120 ah->ah_imr = 0;
121 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY; 121 ah->ah_limit_tx_retries = AR5K_INIT_TX_RETRY;
@@ -128,7 +128,8 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
128 /* 128 /*
129 * Find the mac version 129 * Find the mac version
130 */ 130 */
131 srev = ath5k_hw_reg_read(ah, AR5K_SREV); 131 ath5k_hw_read_srev(ah);
132 srev = ah->ah_mac_srev;
132 if (srev < AR5K_SREV_AR5311) 133 if (srev < AR5K_SREV_AR5311)
133 ah->ah_version = AR5K_AR5210; 134 ah->ah_version = AR5K_AR5210;
134 else if (srev < AR5K_SREV_AR5212) 135 else if (srev < AR5K_SREV_AR5212)
@@ -136,6 +137,10 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
136 else 137 else
137 ah->ah_version = AR5K_AR5212; 138 ah->ah_version = AR5K_AR5212;
138 139
140 /* Get the MAC revision */
141 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
142 ah->ah_mac_revision = AR5K_REG_MS(srev, AR5K_SREV_REV);
143
139 /* Fill the ath5k_hw struct with the needed functions */ 144 /* Fill the ath5k_hw struct with the needed functions */
140 ret = ath5k_hw_init_desc_functions(ah); 145 ret = ath5k_hw_init_desc_functions(ah);
141 if (ret) 146 if (ret)
@@ -146,9 +151,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
146 if (ret) 151 if (ret)
147 goto err; 152 goto err;
148 153
149 /* Get MAC, PHY and RADIO revisions */ 154 /* Get PHY and RADIO revisions */
150 ah->ah_mac_srev = srev;
151 ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
152 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) & 155 ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
153 0xffffffff; 156 0xffffffff;
154 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah, 157 ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
@@ -273,7 +276,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
273 /* 276 /*
274 * Write PCI-E power save settings 277 * Write PCI-E power save settings
275 */ 278 */
276 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) { 279 if ((ah->ah_version == AR5K_AR5212) && pdev && (pci_is_pcie(pdev))) {
277 ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES); 280 ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
278 ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES); 281 ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
279 282
@@ -305,8 +308,7 @@ int ath5k_hw_attach(struct ath5k_softc *sc)
305 /* Get misc capabilities */ 308 /* Get misc capabilities */
306 ret = ath5k_hw_set_capabilities(ah); 309 ret = ath5k_hw_set_capabilities(ah);
307 if (ret) { 310 if (ret) {
308 ATH5K_ERR(sc, "unable to get device capabilities: 0x%04x\n", 311 ATH5K_ERR(sc, "unable to get device capabilities\n");
309 sc->pdev->device);
310 goto err; 312 goto err;
311 } 313 }
312 314
@@ -346,11 +348,11 @@ err:
346} 348}
347 349
348/** 350/**
349 * ath5k_hw_detach - Free the ath5k_hw struct 351 * ath5k_hw_deinit - Free the ath5k_hw struct
350 * 352 *
351 * @ah: The &struct ath5k_hw 353 * @ah: The &struct ath5k_hw
352 */ 354 */
353void ath5k_hw_detach(struct ath5k_hw *ah) 355void ath5k_hw_deinit(struct ath5k_hw *ah)
354{ 356{
355 __set_bit(ATH_STAT_INVALID, ah->ah_sc->status); 357 __set_bit(ATH_STAT_INVALID, ah->ah_sc->status);
356 358
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 42ed923cdb1a..019a74d533a6 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -47,8 +47,6 @@
47#include <linux/io.h> 47#include <linux/io.h>
48#include <linux/netdevice.h> 48#include <linux/netdevice.h>
49#include <linux/cache.h> 49#include <linux/cache.h>
50#include <linux/pci.h>
51#include <linux/pci-aspm.h>
52#include <linux/ethtool.h> 50#include <linux/ethtool.h>
53#include <linux/uaccess.h> 51#include <linux/uaccess.h>
54#include <linux/slab.h> 52#include <linux/slab.h>
@@ -62,10 +60,9 @@
62#include "reg.h" 60#include "reg.h"
63#include "debug.h" 61#include "debug.h"
64#include "ani.h" 62#include "ani.h"
65#include "../debug.h"
66 63
67static int modparam_nohwcrypt; 64int ath5k_modparam_nohwcrypt;
68module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 65module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, S_IRUGO);
69MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 66MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
70 67
71static int modparam_all_channels; 68static int modparam_all_channels;
@@ -78,39 +75,24 @@ MODULE_AUTHOR("Nick Kossifidis");
78MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards."); 75MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
79MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards"); 76MODULE_SUPPORTED_DEVICE("Atheros 5xxx WLAN cards");
80MODULE_LICENSE("Dual BSD/GPL"); 77MODULE_LICENSE("Dual BSD/GPL");
81MODULE_VERSION("0.6.0 (EXPERIMENTAL)"); 78
82 79static int ath5k_init(struct ieee80211_hw *hw);
83static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan); 80static int ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
84static int ath5k_beacon_update(struct ieee80211_hw *hw, 81 bool skip_pcu);
85 struct ieee80211_vif *vif); 82int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
86static void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf); 83void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
87
88/* Known PCI ids */
89static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
90 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
91 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
92 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
93 { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
94 { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
95 { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
96 { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
97 { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
98 { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 combatible */
99 { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 combatible */
100 { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 combatible */
101 { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 combatible */
102 { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 combatible */
103 { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 combatible */
104 { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
105 { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
106 { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
107 { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
108 { 0 }
109};
110MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
111 84
112/* Known SREVs */ 85/* Known SREVs */
113static const struct ath5k_srev_name srev_names[] = { 86static const struct ath5k_srev_name srev_names[] = {
87#ifdef CONFIG_ATHEROS_AR231X
88 { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
89 { "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
90 { "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
91 { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
92 { "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
93 { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
94 { "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
95#else
114 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 }, 96 { "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
115 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 }, 97 { "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
116 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A }, 98 { "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
@@ -129,6 +111,7 @@ static const struct ath5k_srev_name srev_names[] = {
129 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 }, 111 { "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
130 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 }, 112 { "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
131 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 }, 113 { "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
114#endif
132 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN }, 115 { "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
133 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 }, 116 { "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
134 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 }, 117 { "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
@@ -142,10 +125,12 @@ static const struct ath5k_srev_name srev_names[] = {
142 { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B }, 125 { "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
143 { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 }, 126 { "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
144 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 }, 127 { "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
145 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
146 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
147 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 }, 128 { "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
148 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 }, 129 { "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
130#ifdef CONFIG_ATHEROS_AR231X
131 { "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
132 { "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
133#endif
149 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN }, 134 { "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
150}; 135};
151 136
@@ -191,38 +176,6 @@ static const struct ieee80211_rate ath5k_rates[] = {
191 /* XR missing */ 176 /* XR missing */
192}; 177};
193 178
194static inline void ath5k_txbuf_free_skb(struct ath5k_softc *sc,
195 struct ath5k_buf *bf)
196{
197 BUG_ON(!bf);
198 if (!bf->skb)
199 return;
200 pci_unmap_single(sc->pdev, bf->skbaddr, bf->skb->len,
201 PCI_DMA_TODEVICE);
202 dev_kfree_skb_any(bf->skb);
203 bf->skb = NULL;
204 bf->skbaddr = 0;
205 bf->desc->ds_data = 0;
206}
207
208static inline void ath5k_rxbuf_free_skb(struct ath5k_softc *sc,
209 struct ath5k_buf *bf)
210{
211 struct ath5k_hw *ah = sc->ah;
212 struct ath_common *common = ath5k_hw_common(ah);
213
214 BUG_ON(!bf);
215 if (!bf->skb)
216 return;
217 pci_unmap_single(sc->pdev, bf->skbaddr, common->rx_bufsize,
218 PCI_DMA_FROMDEVICE);
219 dev_kfree_skb_any(bf->skb);
220 bf->skb = NULL;
221 bf->skbaddr = 0;
222 bf->desc->ds_data = 0;
223}
224
225
226static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp) 179static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
227{ 180{
228 u64 tsf = ath5k_hw_get_tsf64(ah); 181 u64 tsf = ath5k_hw_get_tsf64(ah);
@@ -233,7 +186,7 @@ static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
233 return (tsf & ~0x7fff) | rstamp; 186 return (tsf & ~0x7fff) | rstamp;
234} 187}
235 188
236static const char * 189const char *
237ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val) 190ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
238{ 191{
239 const char *name = "xxxxx"; 192 const char *name = "xxxxx";
@@ -327,14 +280,12 @@ ath5k_copy_channels(struct ath5k_hw *ah,
327 280
328 switch (mode) { 281 switch (mode) {
329 case AR5K_MODE_11A: 282 case AR5K_MODE_11A:
330 case AR5K_MODE_11A_TURBO:
331 /* 1..220, but 2GHz frequencies are filtered by check_channel */ 283 /* 1..220, but 2GHz frequencies are filtered by check_channel */
332 size = 220 ; 284 size = 220 ;
333 chfreq = CHANNEL_5GHZ; 285 chfreq = CHANNEL_5GHZ;
334 break; 286 break;
335 case AR5K_MODE_11B: 287 case AR5K_MODE_11B:
336 case AR5K_MODE_11G: 288 case AR5K_MODE_11G:
337 case AR5K_MODE_11G_TURBO:
338 size = 26; 289 size = 26;
339 chfreq = CHANNEL_2GHZ; 290 chfreq = CHANNEL_2GHZ;
340 break; 291 break;
@@ -363,11 +314,6 @@ ath5k_copy_channels(struct ath5k_hw *ah,
363 case AR5K_MODE_11G: 314 case AR5K_MODE_11G:
364 channels[count].hw_value = chfreq | CHANNEL_OFDM; 315 channels[count].hw_value = chfreq | CHANNEL_OFDM;
365 break; 316 break;
366 case AR5K_MODE_11A_TURBO:
367 case AR5K_MODE_11G_TURBO:
368 channels[count].hw_value = chfreq |
369 CHANNEL_OFDM | CHANNEL_TURBO;
370 break;
371 case AR5K_MODE_11B: 317 case AR5K_MODE_11B:
372 channels[count].hw_value = CHANNEL_B; 318 channels[count].hw_value = CHANNEL_B;
373 } 319 }
@@ -483,7 +429,7 @@ ath5k_setup_bands(struct ieee80211_hw *hw)
483 * 429 *
484 * Called with sc->lock. 430 * Called with sc->lock.
485 */ 431 */
486static int 432int
487ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan) 433ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
488{ 434{
489 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, 435 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
@@ -496,7 +442,7 @@ ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan)
496 * hardware at the new frequency, and then re-enable 442 * hardware at the new frequency, and then re-enable
497 * the relevant bits of the h/w. 443 * the relevant bits of the h/w.
498 */ 444 */
499 return ath5k_reset(sc, chan); 445 return ath5k_reset(sc, chan, true);
500} 446}
501 447
502static void 448static void
@@ -549,7 +495,7 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
549 /* Calculate combined mode - when APs are active, operate in AP mode. 495 /* Calculate combined mode - when APs are active, operate in AP mode.
550 * Otherwise use the mode of the new interface. This can currently 496 * Otherwise use the mode of the new interface. This can currently
551 * only deal with combinations of APs and STAs. Only one ad-hoc 497 * only deal with combinations of APs and STAs. Only one ad-hoc
552 * interfaces is allowed above. 498 * interfaces is allowed.
553 */ 499 */
554 if (avf->opmode == NL80211_IFTYPE_AP) 500 if (avf->opmode == NL80211_IFTYPE_AP)
555 iter_data->opmode = NL80211_IFTYPE_AP; 501 iter_data->opmode = NL80211_IFTYPE_AP;
@@ -558,16 +504,9 @@ static void ath_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
558 iter_data->opmode = avf->opmode; 504 iter_data->opmode = avf->opmode;
559} 505}
560 506
561static void ath_do_set_opmode(struct ath5k_softc *sc) 507void
562{ 508ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
563 struct ath5k_hw *ah = sc->ah; 509 struct ieee80211_vif *vif)
564 ath5k_hw_set_opmode(ah, sc->opmode);
565 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
566 sc->opmode, ath_opmode_to_string(sc->opmode));
567}
568
569void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
570 struct ieee80211_vif *vif)
571{ 510{
572 struct ath_common *common = ath5k_hw_common(sc->ah); 511 struct ath_common *common = ath5k_hw_common(sc->ah);
573 struct ath_vif_iter_data iter_data; 512 struct ath_vif_iter_data iter_data;
@@ -595,7 +534,9 @@ void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
595 /* Nothing active, default to station mode */ 534 /* Nothing active, default to station mode */
596 sc->opmode = NL80211_IFTYPE_STATION; 535 sc->opmode = NL80211_IFTYPE_STATION;
597 536
598 ath_do_set_opmode(sc); 537 ath5k_hw_set_opmode(sc->ah, sc->opmode);
538 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
539 sc->opmode, ath_opmode_to_string(sc->opmode));
599 540
600 if (iter_data.need_set_hw_addr && iter_data.found_active) 541 if (iter_data.need_set_hw_addr && iter_data.found_active)
601 ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac); 542 ath5k_hw_set_lladdr(sc->ah, iter_data.active_mac);
@@ -604,7 +545,7 @@ void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
604 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); 545 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
605} 546}
606 547
607static void 548void
608ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif) 549ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif)
609{ 550{
610 struct ath5k_hw *ah = sc->ah; 551 struct ath5k_hw *ah = sc->ah;
@@ -659,10 +600,11 @@ struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
659 return NULL; 600 return NULL;
660 } 601 }
661 602
662 *skb_addr = pci_map_single(sc->pdev, 603 *skb_addr = dma_map_single(sc->dev,
663 skb->data, common->rx_bufsize, 604 skb->data, common->rx_bufsize,
664 PCI_DMA_FROMDEVICE); 605 DMA_FROM_DEVICE);
665 if (unlikely(pci_dma_mapping_error(sc->pdev, *skb_addr))) { 606
607 if (unlikely(dma_mapping_error(sc->dev, *skb_addr))) {
666 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__); 608 ATH5K_ERR(sc, "%s: DMA mapping failed\n", __func__);
667 dev_kfree_skb(skb); 609 dev_kfree_skb(skb);
668 return NULL; 610 return NULL;
@@ -758,8 +700,8 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
758 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK; 700 flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
759 701
760 /* XXX endianness */ 702 /* XXX endianness */
761 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, 703 bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
762 PCI_DMA_TODEVICE); 704 DMA_TO_DEVICE);
763 705
764 rate = ieee80211_get_tx_rate(sc->hw, info); 706 rate = ieee80211_get_tx_rate(sc->hw, info);
765 if (!rate) { 707 if (!rate) {
@@ -839,7 +781,7 @@ ath5k_txbuf_setup(struct ath5k_softc *sc, struct ath5k_buf *bf,
839 781
840 return 0; 782 return 0;
841err_unmap: 783err_unmap:
842 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); 784 dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
843 return ret; 785 return ret;
844} 786}
845 787
@@ -848,7 +790,7 @@ err_unmap:
848\*******************/ 790\*******************/
849 791
850static int 792static int
851ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev) 793ath5k_desc_alloc(struct ath5k_softc *sc)
852{ 794{
853 struct ath5k_desc *ds; 795 struct ath5k_desc *ds;
854 struct ath5k_buf *bf; 796 struct ath5k_buf *bf;
@@ -859,7 +801,9 @@ ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
859 /* allocate descriptors */ 801 /* allocate descriptors */
860 sc->desc_len = sizeof(struct ath5k_desc) * 802 sc->desc_len = sizeof(struct ath5k_desc) *
861 (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1); 803 (ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
862 sc->desc = pci_alloc_consistent(pdev, sc->desc_len, &sc->desc_daddr); 804
805 sc->desc = dma_alloc_coherent(sc->dev, sc->desc_len,
806 &sc->desc_daddr, GFP_KERNEL);
863 if (sc->desc == NULL) { 807 if (sc->desc == NULL) {
864 ATH5K_ERR(sc, "can't allocate descriptors\n"); 808 ATH5K_ERR(sc, "can't allocate descriptors\n");
865 ret = -ENOMEM; 809 ret = -ENOMEM;
@@ -905,14 +849,45 @@ ath5k_desc_alloc(struct ath5k_softc *sc, struct pci_dev *pdev)
905 849
906 return 0; 850 return 0;
907err_free: 851err_free:
908 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); 852 dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
909err: 853err:
910 sc->desc = NULL; 854 sc->desc = NULL;
911 return ret; 855 return ret;
912} 856}
913 857
858void
859ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
860{
861 BUG_ON(!bf);
862 if (!bf->skb)
863 return;
864 dma_unmap_single(sc->dev, bf->skbaddr, bf->skb->len,
865 DMA_TO_DEVICE);
866 dev_kfree_skb_any(bf->skb);
867 bf->skb = NULL;
868 bf->skbaddr = 0;
869 bf->desc->ds_data = 0;
870}
871
872void
873ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf)
874{
875 struct ath5k_hw *ah = sc->ah;
876 struct ath_common *common = ath5k_hw_common(ah);
877
878 BUG_ON(!bf);
879 if (!bf->skb)
880 return;
881 dma_unmap_single(sc->dev, bf->skbaddr, common->rx_bufsize,
882 DMA_FROM_DEVICE);
883 dev_kfree_skb_any(bf->skb);
884 bf->skb = NULL;
885 bf->skbaddr = 0;
886 bf->desc->ds_data = 0;
887}
888
914static void 889static void
915ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev) 890ath5k_desc_free(struct ath5k_softc *sc)
916{ 891{
917 struct ath5k_buf *bf; 892 struct ath5k_buf *bf;
918 893
@@ -924,7 +899,7 @@ ath5k_desc_free(struct ath5k_softc *sc, struct pci_dev *pdev)
924 ath5k_txbuf_free_skb(sc, bf); 899 ath5k_txbuf_free_skb(sc, bf);
925 900
926 /* Free memory associated with all descriptors */ 901 /* Free memory associated with all descriptors */
927 pci_free_consistent(pdev, sc->desc_len, sc->desc, sc->desc_daddr); 902 dma_free_coherent(sc->dev, sc->desc_len, sc->desc, sc->desc_daddr);
928 sc->desc = NULL; 903 sc->desc = NULL;
929 sc->desc_daddr = 0; 904 sc->desc_daddr = 0;
930 905
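The buffer and descriptor handling above moves from the PCI-only wrappers (pci_map_single, pci_alloc_consistent, ...) to the generic struct device based DMA API, so the same code can serve both the PCI and AHB variants of these chips. A minimal sketch of the streaming-mapping pattern the converted paths follow; the device pointer and buffer are placeholders, not code from the patch:

	/* Illustrative sketch of the generic DMA API pattern used above;
	 * 'dev', 'buf' and 'len' are placeholders for illustration. */
	#include <linux/dma-mapping.h>

	static int example_map_tx_buffer(struct device *dev, void *buf, size_t len)
	{
		dma_addr_t addr;

		addr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, addr))
			return -EIO;	/* mapping failed, nothing to undo */

		/* ... hand 'addr' to a hardware descriptor, wait for completion ... */

		dma_unmap_single(dev, addr, len, DMA_TO_DEVICE);
		return 0;
	}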
@@ -1069,62 +1044,44 @@ err:
1069 return ret; 1044 return ret;
1070} 1045}
1071 1046
1047/**
1048 * ath5k_drain_tx_buffs - Empty tx buffers
1049 *
1050 * @sc The &struct ath5k_softc
1051 *
1052 * Empty tx buffers from all queues in preparation
1053 * of a reset or during shutdown.
1054 *
1055 * NB: this assumes output has been stopped and
1056 * we do not need to block ath5k_tx_tasklet
1057 */
1072static void 1058static void
1073ath5k_txq_drainq(struct ath5k_softc *sc, struct ath5k_txq *txq) 1059ath5k_drain_tx_buffs(struct ath5k_softc *sc)
1074{ 1060{
1061 struct ath5k_txq *txq;
1075 struct ath5k_buf *bf, *bf0; 1062 struct ath5k_buf *bf, *bf0;
1063 int i;
1076 1064
1077 /* 1065 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++) {
1078 * NB: this assumes output has been stopped and 1066 if (sc->txqs[i].setup) {
1079 * we do not need to block ath5k_tx_tasklet 1067 txq = &sc->txqs[i];
1080 */ 1068 spin_lock_bh(&txq->lock);
1081 spin_lock_bh(&txq->lock); 1069 list_for_each_entry_safe(bf, bf0, &txq->q, list) {
1082 list_for_each_entry_safe(bf, bf0, &txq->q, list) { 1070 ath5k_debug_printtxbuf(sc, bf);
1083 ath5k_debug_printtxbuf(sc, bf);
1084
1085 ath5k_txbuf_free_skb(sc, bf);
1086
1087 spin_lock_bh(&sc->txbuflock);
1088 list_move_tail(&bf->list, &sc->txbuf);
1089 sc->txbuf_len++;
1090 txq->txq_len--;
1091 spin_unlock_bh(&sc->txbuflock);
1092 }
1093 txq->link = NULL;
1094 txq->txq_poll_mark = false;
1095 spin_unlock_bh(&txq->lock);
1096}
1097 1071
1098/* 1072 ath5k_txbuf_free_skb(sc, bf);
1099 * Drain the transmit queues and reclaim resources.
1100 */
1101static void
1102ath5k_txq_cleanup(struct ath5k_softc *sc)
1103{
1104 struct ath5k_hw *ah = sc->ah;
1105 unsigned int i;
1106 1073
1107 /* XXX return value */ 1074 spin_lock_bh(&sc->txbuflock);
1108 if (likely(!test_bit(ATH_STAT_INVALID, sc->status))) { 1075 list_move_tail(&bf->list, &sc->txbuf);
1109 /* don't touch the hardware if marked invalid */ 1076 sc->txbuf_len++;
1110 ath5k_hw_stop_tx_dma(ah, sc->bhalq); 1077 txq->txq_len--;
1111 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "beacon queue %x\n", 1078 spin_unlock_bh(&sc->txbuflock);
1112 ath5k_hw_get_txdp(ah, sc->bhalq));
1113 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1114 if (sc->txqs[i].setup) {
1115 ath5k_hw_stop_tx_dma(ah, sc->txqs[i].qnum);
1116 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "txq [%u] %x, "
1117 "link %p\n",
1118 sc->txqs[i].qnum,
1119 ath5k_hw_get_txdp(ah,
1120 sc->txqs[i].qnum),
1121 sc->txqs[i].link);
1122 } 1079 }
1080 txq->link = NULL;
1081 txq->txq_poll_mark = false;
1082 spin_unlock_bh(&txq->lock);
1083 }
1123 } 1084 }
1124
1125 for (i = 0; i < ARRAY_SIZE(sc->txqs); i++)
1126 if (sc->txqs[i].setup)
1127 ath5k_txq_drainq(sc, &sc->txqs[i]);
1128} 1085}
1129 1086
1130static void 1087static void
@@ -1184,16 +1141,19 @@ err:
1184} 1141}
1185 1142
1186/* 1143/*
1187 * Disable the receive h/w in preparation for a reset. 1144 * Disable the receive logic on PCU (DRU)
1145 * In preparation for a shutdown.
1146 *
1147 * Note: Doesn't stop rx DMA, ath5k_hw_dma_stop
1148 * does.
1188 */ 1149 */
1189static void 1150static void
1190ath5k_rx_stop(struct ath5k_softc *sc) 1151ath5k_rx_stop(struct ath5k_softc *sc)
1191{ 1152{
1192 struct ath5k_hw *ah = sc->ah; 1153 struct ath5k_hw *ah = sc->ah;
1193 1154
1194 ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
1195 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */ 1155 ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
1196 ath5k_hw_stop_rx_dma(ah); /* disable DMA engine */ 1156 ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
1197 1157
1198 ath5k_debug_printrxbuffs(sc, ah); 1158 ath5k_debug_printrxbuffs(sc, ah);
1199} 1159}
@@ -1307,8 +1267,7 @@ ath5k_update_beacon_rssi(struct ath5k_softc *sc, struct sk_buff *skb, int rssi)
1307 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0) 1267 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) != 0)
1308 return; 1268 return;
1309 1269
1310 ah->ah_beacon_rssi_avg = ath5k_moving_average(ah->ah_beacon_rssi_avg, 1270 ewma_add(&ah->ah_beacon_rssi_avg, rssi);
1311 rssi);
1312 1271
1313 /* in IBSS mode we should keep RSSI statistics per neighbour */ 1272 /* in IBSS mode we should keep RSSI statistics per neighbour */
1314 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */ 1273 /* le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS */
@@ -1551,9 +1510,9 @@ ath5k_tasklet_rx(unsigned long data)
1551 if (!next_skb) 1510 if (!next_skb)
1552 goto next; 1511 goto next;
1553 1512
1554 pci_unmap_single(sc->pdev, bf->skbaddr, 1513 dma_unmap_single(sc->dev, bf->skbaddr,
1555 common->rx_bufsize, 1514 common->rx_bufsize,
1556 PCI_DMA_FROMDEVICE); 1515 DMA_FROM_DEVICE);
1557 1516
1558 skb_put(skb, rs.rs_datalen); 1517 skb_put(skb, rs.rs_datalen);
1559 1518
@@ -1574,8 +1533,9 @@ unlock:
1574* TX Handling * 1533* TX Handling *
1575\*************/ 1534\*************/
1576 1535
1577static int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb, 1536int
1578 struct ath5k_txq *txq) 1537ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
1538 struct ath5k_txq *txq)
1579{ 1539{
1580 struct ath5k_softc *sc = hw->priv; 1540 struct ath5k_softc *sc = hw->priv;
1581 struct ath5k_buf *bf; 1541 struct ath5k_buf *bf;
@@ -1716,8 +1676,9 @@ ath5k_tx_processq(struct ath5k_softc *sc, struct ath5k_txq *txq)
1716 1676
1717 skb = bf->skb; 1677 skb = bf->skb;
1718 bf->skb = NULL; 1678 bf->skb = NULL;
1719 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, 1679
1720 PCI_DMA_TODEVICE); 1680 dma_unmap_single(sc->dev, bf->skbaddr, skb->len,
1681 DMA_TO_DEVICE);
1721 ath5k_tx_frame_completed(sc, skb, &ts); 1682 ath5k_tx_frame_completed(sc, skb, &ts);
1722 } 1683 }
1723 1684
@@ -1771,12 +1732,13 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1771 u32 flags; 1732 u32 flags;
1772 const int padsize = 0; 1733 const int padsize = 0;
1773 1734
1774 bf->skbaddr = pci_map_single(sc->pdev, skb->data, skb->len, 1735 bf->skbaddr = dma_map_single(sc->dev, skb->data, skb->len,
1775 PCI_DMA_TODEVICE); 1736 DMA_TO_DEVICE);
1776 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] " 1737 ATH5K_DBG(sc, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
1777 "skbaddr %llx\n", skb, skb->data, skb->len, 1738 "skbaddr %llx\n", skb, skb->data, skb->len,
1778 (unsigned long long)bf->skbaddr); 1739 (unsigned long long)bf->skbaddr);
1779 if (pci_dma_mapping_error(sc->pdev, bf->skbaddr)) { 1740
1741 if (dma_mapping_error(sc->dev, bf->skbaddr)) {
1780 ATH5K_ERR(sc, "beacon DMA mapping failed\n"); 1742 ATH5K_ERR(sc, "beacon DMA mapping failed\n");
1781 return -EIO; 1743 return -EIO;
1782 } 1744 }
@@ -1828,7 +1790,7 @@ ath5k_beacon_setup(struct ath5k_softc *sc, struct ath5k_buf *bf)
1828 1790
1829 return 0; 1791 return 0;
1830err_unmap: 1792err_unmap:
1831 pci_unmap_single(sc->pdev, bf->skbaddr, skb->len, PCI_DMA_TODEVICE); 1793 dma_unmap_single(sc->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
1832 return ret; 1794 return ret;
1833} 1795}
1834 1796
@@ -1839,7 +1801,7 @@ err_unmap:
1839 * 1801 *
1840 * Called with the beacon lock. 1802 * Called with the beacon lock.
1841 */ 1803 */
1842static int 1804int
1843ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 1805ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1844{ 1806{
1845 int ret; 1807 int ret;
@@ -1945,7 +1907,7 @@ ath5k_beacon_send(struct ath5k_softc *sc)
1945 * This should never fail since we check above that no frames 1907 * This should never fail since we check above that no frames
1946 * are still pending on the queue. 1908 * are still pending on the queue.
1947 */ 1909 */
1948 if (unlikely(ath5k_hw_stop_tx_dma(ah, sc->bhalq))) { 1910 if (unlikely(ath5k_hw_stop_beacon_queue(ah, sc->bhalq))) {
1949 ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq); 1911 ATH5K_WARN(sc, "beacon queue %u didn't start/stop ?\n", sc->bhalq);
1950 /* NB: hw still stops DMA, so proceed */ 1912 /* NB: hw still stops DMA, so proceed */
1951 } 1913 }
@@ -1985,7 +1947,7 @@ ath5k_beacon_send(struct ath5k_softc *sc)
1985 * when we otherwise know we have to update the timers, but we keep it in this 1947 * when we otherwise know we have to update the timers, but we keep it in this
1986 * function to have it all together in one place. 1948 * function to have it all together in one place.
1987 */ 1949 */
1988static void 1950void
1989ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf) 1951ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
1990{ 1952{
1991 struct ath5k_hw *ah = sc->ah; 1953 struct ath5k_hw *ah = sc->ah;
@@ -2087,7 +2049,7 @@ ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf)
2087 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA 2049 * In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
2088 * interrupts to detect TSF updates only. 2050 * interrupts to detect TSF updates only.
2089 */ 2051 */
2090static void 2052void
2091ath5k_beacon_config(struct ath5k_softc *sc) 2053ath5k_beacon_config(struct ath5k_softc *sc)
2092{ 2054{
2093 struct ath5k_hw *ah = sc->ah; 2055 struct ath5k_hw *ah = sc->ah;
@@ -2115,7 +2077,7 @@ ath5k_beacon_config(struct ath5k_softc *sc)
2115 } else 2077 } else
2116 ath5k_beacon_update_timers(sc, -1); 2078 ath5k_beacon_update_timers(sc, -1);
2117 } else { 2079 } else {
2118 ath5k_hw_stop_tx_dma(sc->ah, sc->bhalq); 2080 ath5k_hw_stop_beacon_queue(sc->ah, sc->bhalq);
2119 } 2081 }
2120 2082
2121 ath5k_hw_set_imr(ah, sc->imask); 2083 ath5k_hw_set_imr(ah, sc->imask);
@@ -2177,7 +2139,7 @@ ath5k_intr_calibration_poll(struct ath5k_hw *ah)
2177 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */ 2139 * AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
2178} 2140}
2179 2141
2180static irqreturn_t 2142irqreturn_t
2181ath5k_intr(int irq, void *dev_id) 2143ath5k_intr(int irq, void *dev_id)
2182{ 2144{
2183 struct ath5k_softc *sc = dev_id; 2145 struct ath5k_softc *sc = dev_id;
@@ -2186,7 +2148,8 @@ ath5k_intr(int irq, void *dev_id)
2186 unsigned int counter = 1000; 2148 unsigned int counter = 1000;
2187 2149
2188 if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) || 2150 if (unlikely(test_bit(ATH_STAT_INVALID, sc->status) ||
2189 !ath5k_hw_is_intr_pending(ah))) 2151 ((ath5k_get_bus_type(ah) != ATH_AHB) &&
2152 !ath5k_hw_is_intr_pending(ah))))
2190 return IRQ_NONE; 2153 return IRQ_NONE;
2191 2154
2192 do { 2155 do {
@@ -2252,6 +2215,10 @@ ath5k_intr(int irq, void *dev_id)
2252 tasklet_schedule(&sc->rf_kill.toggleq); 2215 tasklet_schedule(&sc->rf_kill.toggleq);
2253 2216
2254 } 2217 }
2218
2219 if (ath5k_get_bus_type(ah) == ATH_AHB)
2220 break;
2221
2255 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0); 2222 } while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
2256 2223
2257 if (unlikely(!counter)) 2224 if (unlikely(!counter))
@@ -2351,7 +2318,7 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2351 if (needreset) { 2318 if (needreset) {
2352 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, 2319 ATH5K_DBG(sc, ATH5K_DEBUG_RESET,
2353 "TX queues stuck, resetting\n"); 2320 "TX queues stuck, resetting\n");
2354 ath5k_reset(sc, sc->curchan); 2321 ath5k_reset(sc, NULL, true);
2355 } 2322 }
2356 2323
2357 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2324 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
@@ -2363,6 +2330,163 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2363* Initialization routines * 2330* Initialization routines *
2364\*************************/ 2331\*************************/
2365 2332
2333int
2334ath5k_init_softc(struct ath5k_softc *sc, const struct ath_bus_ops *bus_ops)
2335{
2336 struct ieee80211_hw *hw = sc->hw;
2337 struct ath_common *common;
2338 int ret;
2339 int csz;
2340
2341 /* Initialize driver private data */
2342 SET_IEEE80211_DEV(hw, sc->dev);
2343 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
2344 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
2345 IEEE80211_HW_SIGNAL_DBM |
2346 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
2347
2348 hw->wiphy->interface_modes =
2349 BIT(NL80211_IFTYPE_AP) |
2350 BIT(NL80211_IFTYPE_STATION) |
2351 BIT(NL80211_IFTYPE_ADHOC) |
2352 BIT(NL80211_IFTYPE_MESH_POINT);
2353
2354 /* both antennas can be configured as RX or TX */
2355 hw->wiphy->available_antennas_tx = 0x3;
2356 hw->wiphy->available_antennas_rx = 0x3;
2357
2358 hw->extra_tx_headroom = 2;
2359 hw->channel_change_time = 5000;
2360
2361 /*
2362 * Mark the device as detached to avoid processing
2363 * interrupts until setup is complete.
2364 */
2365 __set_bit(ATH_STAT_INVALID, sc->status);
2366
2367 sc->opmode = NL80211_IFTYPE_STATION;
2368 sc->bintval = 1000;
2369 mutex_init(&sc->lock);
2370 spin_lock_init(&sc->rxbuflock);
2371 spin_lock_init(&sc->txbuflock);
2372 spin_lock_init(&sc->block);
2373
2374
2375 /* Setup interrupt handler */
2376 ret = request_irq(sc->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
2377 if (ret) {
2378 ATH5K_ERR(sc, "request_irq failed\n");
2379 goto err;
2380 }
2381
2382 /* If we passed the test, malloc an ath5k_hw struct */
2383 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
2384 if (!sc->ah) {
2385 ret = -ENOMEM;
2386 ATH5K_ERR(sc, "out of memory\n");
2387 goto err_irq;
2388 }
2389
2390 sc->ah->ah_sc = sc;
2391 sc->ah->ah_iobase = sc->iobase;
2392 common = ath5k_hw_common(sc->ah);
2393 common->ops = &ath5k_common_ops;
2394 common->bus_ops = bus_ops;
2395 common->ah = sc->ah;
2396 common->hw = hw;
2397 common->priv = sc;
2398
2399 /*
2400 * Cache line size is used to size and align various
2401 * structures used to communicate with the hardware.
2402 */
2403 ath5k_read_cachesize(common, &csz);
2404 common->cachelsz = csz << 2; /* convert to bytes */
2405
2406 spin_lock_init(&common->cc_lock);
2407
2408 /* Initialize device */
2409 ret = ath5k_hw_init(sc);
2410 if (ret)
2411 goto err_free_ah;
2412
2413 /* set up multi-rate retry capabilities */
2414 if (sc->ah->ah_version == AR5K_AR5212) {
2415 hw->max_rates = 4;
2416 hw->max_rate_tries = 11;
2417 }
2418
2419 hw->vif_data_size = sizeof(struct ath5k_vif);
2420
2421 /* Finish private driver data initialization */
2422 ret = ath5k_init(hw);
2423 if (ret)
2424 goto err_ah;
2425
2426 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
2427 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
2428 sc->ah->ah_mac_srev,
2429 sc->ah->ah_phy_revision);
2430
2431 if (!sc->ah->ah_single_chip) {
2432 /* Single chip radio (!RF5111) */
2433 if (sc->ah->ah_radio_5ghz_revision &&
2434 !sc->ah->ah_radio_2ghz_revision) {
2435 /* No 5GHz support -> report 2GHz radio */
2436 if (!test_bit(AR5K_MODE_11A,
2437 sc->ah->ah_capabilities.cap_mode)) {
2438 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
2439 ath5k_chip_name(AR5K_VERSION_RAD,
2440 sc->ah->ah_radio_5ghz_revision),
2441 sc->ah->ah_radio_5ghz_revision);
2442 /* No 2GHz support (5110 and some
2443 * 5GHz only cards) -> report 5GHz radio */
2444 } else if (!test_bit(AR5K_MODE_11B,
2445 sc->ah->ah_capabilities.cap_mode)) {
2446 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
2447 ath5k_chip_name(AR5K_VERSION_RAD,
2448 sc->ah->ah_radio_5ghz_revision),
2449 sc->ah->ah_radio_5ghz_revision);
2450 /* Multiband radio */
2451 } else {
2452 ATH5K_INFO(sc, "RF%s multiband radio found"
2453 " (0x%x)\n",
2454 ath5k_chip_name(AR5K_VERSION_RAD,
2455 sc->ah->ah_radio_5ghz_revision),
2456 sc->ah->ah_radio_5ghz_revision);
2457 }
2458 }
2459 /* Multi chip radio (RF5111 - RF2111) ->
2460 * report both 2GHz/5GHz radios */
2461 else if (sc->ah->ah_radio_5ghz_revision &&
2462 sc->ah->ah_radio_2ghz_revision){
2463 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
2464 ath5k_chip_name(AR5K_VERSION_RAD,
2465 sc->ah->ah_radio_5ghz_revision),
2466 sc->ah->ah_radio_5ghz_revision);
2467 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
2468 ath5k_chip_name(AR5K_VERSION_RAD,
2469 sc->ah->ah_radio_2ghz_revision),
2470 sc->ah->ah_radio_2ghz_revision);
2471 }
2472 }
2473
2474 ath5k_debug_init_device(sc);
2475
2476 /* ready to process interrupts */
2477 __clear_bit(ATH_STAT_INVALID, sc->status);
2478
2479 return 0;
2480err_ah:
2481 ath5k_hw_deinit(sc->ah);
2482err_free_ah:
2483 kfree(sc->ah);
2484err_irq:
2485 free_irq(sc->irq, sc);
2486err:
2487 return ret;
2488}
2489
2366static int 2490static int
2367ath5k_stop_locked(struct ath5k_softc *sc) 2491ath5k_stop_locked(struct ath5k_softc *sc)
2368{ 2492{
@@ -2391,19 +2515,18 @@ ath5k_stop_locked(struct ath5k_softc *sc)
2391 if (!test_bit(ATH_STAT_INVALID, sc->status)) { 2515 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2392 ath5k_led_off(sc); 2516 ath5k_led_off(sc);
2393 ath5k_hw_set_imr(ah, 0); 2517 ath5k_hw_set_imr(ah, 0);
2394 synchronize_irq(sc->pdev->irq); 2518 synchronize_irq(sc->irq);
2395 }
2396 ath5k_txq_cleanup(sc);
2397 if (!test_bit(ATH_STAT_INVALID, sc->status)) {
2398 ath5k_rx_stop(sc); 2519 ath5k_rx_stop(sc);
2520 ath5k_hw_dma_stop(ah);
2521 ath5k_drain_tx_buffs(sc);
2399 ath5k_hw_phy_disable(ah); 2522 ath5k_hw_phy_disable(ah);
2400 } 2523 }
2401 2524
2402 return 0; 2525 return 0;
2403} 2526}
2404 2527
2405static int 2528int
2406ath5k_init(struct ath5k_softc *sc) 2529ath5k_init_hw(struct ath5k_softc *sc)
2407{ 2530{
2408 struct ath5k_hw *ah = sc->ah; 2531 struct ath5k_hw *ah = sc->ah;
2409 struct ath_common *common = ath5k_hw_common(ah); 2532 struct ath_common *common = ath5k_hw_common(ah);
@@ -2432,7 +2555,7 @@ ath5k_init(struct ath5k_softc *sc)
2432 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL | 2555 AR5K_INT_RXORN | AR5K_INT_TXDESC | AR5K_INT_TXEOL |
2433 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB; 2556 AR5K_INT_FATAL | AR5K_INT_GLOBAL | AR5K_INT_MIB;
2434 2557
2435 ret = ath5k_reset(sc, NULL); 2558 ret = ath5k_reset(sc, NULL, false);
2436 if (ret) 2559 if (ret)
2437 goto done; 2560 goto done;
2438 2561
@@ -2445,7 +2568,9 @@ ath5k_init(struct ath5k_softc *sc)
2445 for (i = 0; i < common->keymax; i++) 2568 for (i = 0; i < common->keymax; i++)
2446 ath_hw_keyreset(common, (u16) i); 2569 ath_hw_keyreset(common, (u16) i);
2447 2570
2448 ath5k_hw_set_ack_bitrate_high(ah, true); 2571 /* Use higher rates for acks instead of base
2572 * rate */
2573 ah->ah_ack_bitrate_high = true;
2449 2574
2450 for (i = 0; i < ARRAY_SIZE(sc->bslot); i++) 2575 for (i = 0; i < ARRAY_SIZE(sc->bslot); i++)
2451 sc->bslot[i] = NULL; 2576 sc->bslot[i] = NULL;
@@ -2476,7 +2601,7 @@ static void stop_tasklets(struct ath5k_softc *sc)
2476 * if another thread does a system call and the thread doing the 2601 * if another thread does a system call and the thread doing the
2477 * stop is preempted). 2602 * stop is preempted).
2478 */ 2603 */
2479static int 2604int
2480ath5k_stop_hw(struct ath5k_softc *sc) 2605ath5k_stop_hw(struct ath5k_softc *sc)
2481{ 2606{
2482 int ret; 2607 int ret;
@@ -2529,25 +2654,35 @@ ath5k_stop_hw(struct ath5k_softc *sc)
2529 * This should be called with sc->lock. 2654 * This should be called with sc->lock.
2530 */ 2655 */
2531static int 2656static int
2532ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan) 2657ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan,
2658 bool skip_pcu)
2533{ 2659{
2534 struct ath5k_hw *ah = sc->ah; 2660 struct ath5k_hw *ah = sc->ah;
2535 int ret; 2661 struct ath_common *common = ath5k_hw_common(ah);
2662 int ret, ani_mode;
2536 2663
2537 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n"); 2664 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "resetting\n");
2538 2665
2539 ath5k_hw_set_imr(ah, 0); 2666 ath5k_hw_set_imr(ah, 0);
2540 synchronize_irq(sc->pdev->irq); 2667 synchronize_irq(sc->irq);
2541 stop_tasklets(sc); 2668 stop_tasklets(sc);
2542 2669
2543 if (chan) { 2670 /* Save ani mode and disable ANI during
2544 ath5k_txq_cleanup(sc); 2671 * reset. If we don't, we might get false
2545 ath5k_rx_stop(sc); 2672 * PHY error interrupts. */
2673 ani_mode = ah->ah_sc->ani_state.ani_mode;
2674 ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
2546 2675
2676 /* We are going to empty hw queues
2677 * so we should also free any remaining
2678 * tx buffers */
2679 ath5k_drain_tx_buffs(sc);
2680 if (chan) {
2547 sc->curchan = chan; 2681 sc->curchan = chan;
2548 sc->curband = &sc->sbands[chan->band]; 2682 sc->curband = &sc->sbands[chan->band];
2549 } 2683 }
2550 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL); 2684 ret = ath5k_hw_reset(ah, sc->opmode, sc->curchan, chan != NULL,
2685 skip_pcu);
2551 if (ret) { 2686 if (ret) {
2552 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret); 2687 ATH5K_ERR(sc, "can't reset hardware (%d)\n", ret);
2553 goto err; 2688 goto err;
@@ -2559,11 +2694,20 @@ ath5k_reset(struct ath5k_softc *sc, struct ieee80211_channel *chan)
2559 goto err; 2694 goto err;
2560 } 2695 }
2561 2696
2562 ath5k_ani_init(ah, ah->ah_sc->ani_state.ani_mode); 2697 ath5k_ani_init(ah, ani_mode);
2563 2698
2564 ah->ah_cal_next_full = jiffies; 2699 ah->ah_cal_next_full = jiffies;
2565 ah->ah_cal_next_ani = jiffies; 2700 ah->ah_cal_next_ani = jiffies;
2566 ah->ah_cal_next_nf = jiffies; 2701 ah->ah_cal_next_nf = jiffies;
2702 ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8);
2703
2704 /* clear survey data and cycle counters */
2705 memset(&sc->survey, 0, sizeof(sc->survey));
2706 spin_lock_bh(&common->cc_lock);
2707 ath_hw_cycle_counters_update(common);
2708 memset(&common->cc_survey, 0, sizeof(common->cc_survey));
2709 memset(&common->cc_ani, 0, sizeof(common->cc_ani));
2710 spin_unlock_bh(&common->cc_lock);
2567 2711
2568 /* 2712 /*
2569 * Change channels and update the h/w rate map if we're switching; 2713 * Change channels and update the h/w rate map if we're switching;
@@ -2592,13 +2736,14 @@ static void ath5k_reset_work(struct work_struct *work)
2592 reset_work); 2736 reset_work);
2593 2737
2594 mutex_lock(&sc->lock); 2738 mutex_lock(&sc->lock);
2595 ath5k_reset(sc, sc->curchan); 2739 ath5k_reset(sc, NULL, true);
2596 mutex_unlock(&sc->lock); 2740 mutex_unlock(&sc->lock);
2597} 2741}
2598 2742
2599static int 2743static int
2600ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw) 2744ath5k_init(struct ieee80211_hw *hw)
2601{ 2745{
2746
2602 struct ath5k_softc *sc = hw->priv; 2747 struct ath5k_softc *sc = hw->priv;
2603 struct ath5k_hw *ah = sc->ah; 2748 struct ath5k_hw *ah = sc->ah;
2604 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah); 2749 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
@@ -2606,7 +2751,6 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2606 u8 mac[ETH_ALEN] = {}; 2751 u8 mac[ETH_ALEN] = {};
2607 int ret; 2752 int ret;
2608 2753
2609 ATH5K_DBG(sc, ATH5K_DEBUG_ANY, "devid 0x%x\n", pdev->device);
2610 2754
2611 /* 2755 /*
2612 * Check if the MAC has multi-rate retry support. 2756 * Check if the MAC has multi-rate retry support.
@@ -2643,7 +2787,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2643 /* 2787 /*
2644 * Allocate tx+rx descriptors and populate the lists. 2788 * Allocate tx+rx descriptors and populate the lists.
2645 */ 2789 */
2646 ret = ath5k_desc_alloc(sc, pdev); 2790 ret = ath5k_desc_alloc(sc);
2647 if (ret) { 2791 if (ret) {
2648 ATH5K_ERR(sc, "can't allocate descriptors\n"); 2792 ATH5K_ERR(sc, "can't allocate descriptors\n");
2649 goto err; 2793 goto err;
@@ -2668,33 +2812,46 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2668 goto err_bhal; 2812 goto err_bhal;
2669 } 2813 }
2670 2814
2671 /* This order matches mac80211's queue priority, so we can 2815 /* 5211 and 5212 usually support 10 queues but we better rely on the
2672 * directly use the mac80211 queue number without any mapping */ 2816 * capability information */
2673 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO); 2817 if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
2674 if (IS_ERR(txq)) { 2818 /* This order matches mac80211's queue priority, so we can
2675 ATH5K_ERR(sc, "can't setup xmit queue\n"); 2819 * directly use the mac80211 queue number without any mapping */
2676 ret = PTR_ERR(txq); 2820 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
2677 goto err_queues; 2821 if (IS_ERR(txq)) {
2678 } 2822 ATH5K_ERR(sc, "can't setup xmit queue\n");
2679 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI); 2823 ret = PTR_ERR(txq);
2680 if (IS_ERR(txq)) { 2824 goto err_queues;
2681 ATH5K_ERR(sc, "can't setup xmit queue\n"); 2825 }
2682 ret = PTR_ERR(txq); 2826 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
2683 goto err_queues; 2827 if (IS_ERR(txq)) {
2684 } 2828 ATH5K_ERR(sc, "can't setup xmit queue\n");
2685 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE); 2829 ret = PTR_ERR(txq);
2686 if (IS_ERR(txq)) { 2830 goto err_queues;
2687 ATH5K_ERR(sc, "can't setup xmit queue\n"); 2831 }
2688 ret = PTR_ERR(txq); 2832 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
2689 goto err_queues; 2833 if (IS_ERR(txq)) {
2690 } 2834 ATH5K_ERR(sc, "can't setup xmit queue\n");
2691 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK); 2835 ret = PTR_ERR(txq);
2692 if (IS_ERR(txq)) { 2836 goto err_queues;
2693 ATH5K_ERR(sc, "can't setup xmit queue\n"); 2837 }
2694 ret = PTR_ERR(txq); 2838 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
2695 goto err_queues; 2839 if (IS_ERR(txq)) {
2840 ATH5K_ERR(sc, "can't setup xmit queue\n");
2841 ret = PTR_ERR(txq);
2842 goto err_queues;
2843 }
2844 hw->queues = 4;
2845 } else {
2846 /* older hardware (5210) can only support one data queue */
2847 txq = ath5k_txq_setup(sc, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
2848 if (IS_ERR(txq)) {
2849 ATH5K_ERR(sc, "can't setup xmit queue\n");
2850 ret = PTR_ERR(txq);
2851 goto err_queues;
2852 }
2853 hw->queues = 1;
2696 } 2854 }
2697 hw->queues = 4;
2698 2855
2699 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc); 2856 tasklet_init(&sc->rxtq, ath5k_tasklet_rx, (unsigned long)sc);
2700 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc); 2857 tasklet_init(&sc->txtq, ath5k_tasklet_tx, (unsigned long)sc);
@@ -2707,8 +2864,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2707 2864
2708 ret = ath5k_eeprom_read_mac(ah, mac); 2865 ret = ath5k_eeprom_read_mac(ah, mac);
2709 if (ret) { 2866 if (ret) {
2710 ATH5K_ERR(sc, "unable to read address from EEPROM: 0x%04x\n", 2867 ATH5K_ERR(sc, "unable to read address from EEPROM\n");
2711 sc->pdev->device);
2712 goto err_queues; 2868 goto err_queues;
2713 } 2869 }
2714 2870
@@ -2743,15 +2899,15 @@ err_queues:
2743err_bhal: 2899err_bhal:
2744 ath5k_hw_release_tx_queue(ah, sc->bhalq); 2900 ath5k_hw_release_tx_queue(ah, sc->bhalq);
2745err_desc: 2901err_desc:
2746 ath5k_desc_free(sc, pdev); 2902 ath5k_desc_free(sc);
2747err: 2903err:
2748 return ret; 2904 return ret;
2749} 2905}
2750 2906
2751static void 2907void
2752ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw) 2908ath5k_deinit_softc(struct ath5k_softc *sc)
2753{ 2909{
2754 struct ath5k_softc *sc = hw->priv; 2910 struct ieee80211_hw *hw = sc->hw;
2755 2911
2756 /* 2912 /*
2757 * NB: the order of these is important: 2913 * NB: the order of these is important:
@@ -2766,8 +2922,9 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2766 * XXX: ??? detach ath5k_hw ??? 2922 * XXX: ??? detach ath5k_hw ???
2767 * Other than that, it's straightforward... 2923 * Other than that, it's straightforward...
2768 */ 2924 */
2925 ath5k_debug_finish_device(sc);
2769 ieee80211_unregister_hw(hw); 2926 ieee80211_unregister_hw(hw);
2770 ath5k_desc_free(sc, pdev); 2927 ath5k_desc_free(sc);
2771 ath5k_txq_release(sc); 2928 ath5k_txq_release(sc);
2772 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq); 2929 ath5k_hw_release_tx_queue(sc->ah, sc->bhalq);
2773 ath5k_unregister_leds(sc); 2930 ath5k_unregister_leds(sc);
@@ -2778,232 +2935,12 @@ ath5k_detach(struct pci_dev *pdev, struct ieee80211_hw *hw)
2778 * returns because we'll get called back to reclaim node 2935 * returns because we'll get called back to reclaim node
2779 * state and potentially want to use them. 2936 * state and potentially want to use them.
2780 */ 2937 */
2938 ath5k_hw_deinit(sc->ah);
2939 free_irq(sc->irq, sc);
2781} 2940}
2782 2941
2783/********************\ 2942bool
2784* Mac80211 functions * 2943ath_any_vif_assoc(struct ath5k_softc *sc)
2785\********************/
2786
2787static int
2788ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2789{
2790 struct ath5k_softc *sc = hw->priv;
2791 u16 qnum = skb_get_queue_mapping(skb);
2792
2793 if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
2794 dev_kfree_skb_any(skb);
2795 return 0;
2796 }
2797
2798 return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
2799}
2800
2801static int ath5k_start(struct ieee80211_hw *hw)
2802{
2803 return ath5k_init(hw->priv);
2804}
2805
2806static void ath5k_stop(struct ieee80211_hw *hw)
2807{
2808 ath5k_stop_hw(hw->priv);
2809}
2810
2811static int ath5k_add_interface(struct ieee80211_hw *hw,
2812 struct ieee80211_vif *vif)
2813{
2814 struct ath5k_softc *sc = hw->priv;
2815 int ret;
2816 struct ath5k_vif *avf = (void *)vif->drv_priv;
2817
2818 mutex_lock(&sc->lock);
2819
2820 if ((vif->type == NL80211_IFTYPE_AP ||
2821 vif->type == NL80211_IFTYPE_ADHOC)
2822 && (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
2823 ret = -ELNRNG;
2824 goto end;
2825 }
2826
2827 /* Don't allow other interfaces if one ad-hoc is configured.
2828 * TODO: Fix the problems with ad-hoc and multiple other interfaces.
2829 * We would need to operate the HW in ad-hoc mode to allow TSF updates
2830 * for the IBSS, but this breaks with additional AP or STA interfaces
2831 * at the moment. */
2832 if (sc->num_adhoc_vifs ||
2833 (sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
2834 ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
2835 ret = -ELNRNG;
2836 goto end;
2837 }
2838
2839 switch (vif->type) {
2840 case NL80211_IFTYPE_AP:
2841 case NL80211_IFTYPE_STATION:
2842 case NL80211_IFTYPE_ADHOC:
2843 case NL80211_IFTYPE_MESH_POINT:
2844 avf->opmode = vif->type;
2845 break;
2846 default:
2847 ret = -EOPNOTSUPP;
2848 goto end;
2849 }
2850
2851 sc->nvifs++;
2852 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
2853
2854 /* Assign the vap/adhoc to a beacon xmit slot. */
2855 if ((avf->opmode == NL80211_IFTYPE_AP) ||
2856 (avf->opmode == NL80211_IFTYPE_ADHOC) ||
2857 (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
2858 int slot;
2859
2860 WARN_ON(list_empty(&sc->bcbuf));
2861 avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
2862 list);
2863 list_del(&avf->bbuf->list);
2864
2865 avf->bslot = 0;
2866 for (slot = 0; slot < ATH_BCBUF; slot++) {
2867 if (!sc->bslot[slot]) {
2868 avf->bslot = slot;
2869 break;
2870 }
2871 }
2872 BUG_ON(sc->bslot[avf->bslot] != NULL);
2873 sc->bslot[avf->bslot] = vif;
2874 if (avf->opmode == NL80211_IFTYPE_AP)
2875 sc->num_ap_vifs++;
2876 else if (avf->opmode == NL80211_IFTYPE_ADHOC)
2877 sc->num_adhoc_vifs++;
2878 }
2879
2880 /* Any MAC address is fine, all others are included through the
2881 * filter.
2882 */
2883 memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
2884 ath5k_hw_set_lladdr(sc->ah, vif->addr);
2885
2886 memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
2887
2888 ath5k_mode_setup(sc, vif);
2889
2890 ret = 0;
2891end:
2892 mutex_unlock(&sc->lock);
2893 return ret;
2894}
2895
2896static void
2897ath5k_remove_interface(struct ieee80211_hw *hw,
2898 struct ieee80211_vif *vif)
2899{
2900 struct ath5k_softc *sc = hw->priv;
2901 struct ath5k_vif *avf = (void *)vif->drv_priv;
2902 unsigned int i;
2903
2904 mutex_lock(&sc->lock);
2905 sc->nvifs--;
2906
2907 if (avf->bbuf) {
2908 ath5k_txbuf_free_skb(sc, avf->bbuf);
2909 list_add_tail(&avf->bbuf->list, &sc->bcbuf);
2910 for (i = 0; i < ATH_BCBUF; i++) {
2911 if (sc->bslot[i] == vif) {
2912 sc->bslot[i] = NULL;
2913 break;
2914 }
2915 }
2916 avf->bbuf = NULL;
2917 }
2918 if (avf->opmode == NL80211_IFTYPE_AP)
2919 sc->num_ap_vifs--;
2920 else if (avf->opmode == NL80211_IFTYPE_ADHOC)
2921 sc->num_adhoc_vifs--;
2922
2923 ath5k_update_bssid_mask_and_opmode(sc, NULL);
2924 mutex_unlock(&sc->lock);
2925}
2926
2927/*
2928 * TODO: Phy disable/diversity etc
2929 */
2930static int
2931ath5k_config(struct ieee80211_hw *hw, u32 changed)
2932{
2933 struct ath5k_softc *sc = hw->priv;
2934 struct ath5k_hw *ah = sc->ah;
2935 struct ieee80211_conf *conf = &hw->conf;
2936 int ret = 0;
2937
2938 mutex_lock(&sc->lock);
2939
2940 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
2941 ret = ath5k_chan_set(sc, conf->channel);
2942 if (ret < 0)
2943 goto unlock;
2944 }
2945
2946 if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
2947 (sc->power_level != conf->power_level)) {
2948 sc->power_level = conf->power_level;
2949
2950 /* Half dB steps */
2951 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
2952 }
2953
2954 /* TODO:
2955 * 1) Move this on config_interface and handle each case
2956 * separately eg. when we have only one STA vif, use
2957 * AR5K_ANTMODE_SINGLE_AP
2958 *
2959 * 2) Allow the user to change antenna mode eg. when only
2960 * one antenna is present
2961 *
2962 * 3) Allow the user to set default/tx antenna when possible
2963 *
2964 * 4) Default mode should handle 90% of the cases, together
2965 * with fixed a/b and single AP modes we should be able to
2966 * handle 99%. Sectored modes are extreme cases and i still
2967 * haven't found a usage for them. If we decide to support them,
2968 * then we must allow the user to set how many tx antennas we
2969 * have available
2970 */
2971 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
2972
2973unlock:
2974 mutex_unlock(&sc->lock);
2975 return ret;
2976}
2977
2978static u64 ath5k_prepare_multicast(struct ieee80211_hw *hw,
2979 struct netdev_hw_addr_list *mc_list)
2980{
2981 u32 mfilt[2], val;
2982 u8 pos;
2983 struct netdev_hw_addr *ha;
2984
2985 mfilt[0] = 0;
2986 mfilt[1] = 1;
2987
2988 netdev_hw_addr_list_for_each(ha, mc_list) {
2989 /* calculate XOR of eight 6-bit values */
2990 val = get_unaligned_le32(ha->addr + 0);
2991 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2992 val = get_unaligned_le32(ha->addr + 3);
2993 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
2994 pos &= 0x3f;
2995 mfilt[pos / 32] |= (1 << (pos % 32));
2996 /* XXX: we might be able to just do this instead,
2997 * but not sure, needs testing, if we do use this we'd
2998 * need to inform below to not reset the mcast */
2999 /* ath5k_hw_set_mcast_filterindex(ah,
3000 * ha->addr[5]); */
3001 }
3002
3003 return ((u64)(mfilt[1]) << 32) | mfilt[0];
3004}
3005
3006static bool ath_any_vif_assoc(struct ath5k_softc *sc)
3007{ 2944{
3008 struct ath_vif_iter_data iter_data; 2945 struct ath_vif_iter_data iter_data;
3009 iter_data.hw_macaddr = NULL; 2946 iter_data.hw_macaddr = NULL;
@@ -3016,242 +2953,7 @@ static bool ath_any_vif_assoc(struct ath5k_softc *sc)
3016 return iter_data.any_assoc; 2953 return iter_data.any_assoc;
3017} 2954}
3018 2955
3019#define SUPPORTED_FIF_FLAGS \ 2956void
3020 FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
3021 FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
3022 FIF_BCN_PRBRESP_PROMISC
3023/*
3024 * o always accept unicast, broadcast, and multicast traffic
3025 * o multicast traffic for all BSSIDs will be enabled if mac80211
3026 * says it should be
3027 * o maintain current state of phy ofdm or phy cck error reception.
3028 * If the hardware detects any of these type of errors then
3029 * ath5k_hw_get_rx_filter() will pass to us the respective
3030 * hardware filters to be able to receive these type of frames.
3031 * o probe request frames are accepted only when operating in
3032 * hostap, adhoc, or monitor modes
3033 * o enable promiscuous mode according to the interface state
3034 * o accept beacons:
3035 * - when operating in adhoc mode so the 802.11 layer creates
3036 * node table entries for peers,
3037 * - when operating in station mode for collecting rssi data when
3038 * the station is otherwise quiet, or
3039 * - when scanning
3040 */
3041static void ath5k_configure_filter(struct ieee80211_hw *hw,
3042 unsigned int changed_flags,
3043 unsigned int *new_flags,
3044 u64 multicast)
3045{
3046 struct ath5k_softc *sc = hw->priv;
3047 struct ath5k_hw *ah = sc->ah;
3048 u32 mfilt[2], rfilt;
3049
3050 mutex_lock(&sc->lock);
3051
3052 mfilt[0] = multicast;
3053 mfilt[1] = multicast >> 32;
3054
3055 /* Only deal with supported flags */
3056 changed_flags &= SUPPORTED_FIF_FLAGS;
3057 *new_flags &= SUPPORTED_FIF_FLAGS;
3058
3059 /* If HW detects any phy or radar errors, leave those filters on.
3060 * Also, always enable Unicast, Broadcasts and Multicast
3061 * XXX: move unicast, bssid broadcasts and multicast to mac80211 */
3062 rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
3063 (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
3064 AR5K_RX_FILTER_MCAST);
3065
3066 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
3067 if (*new_flags & FIF_PROMISC_IN_BSS) {
3068 __set_bit(ATH_STAT_PROMISC, sc->status);
3069 } else {
3070 __clear_bit(ATH_STAT_PROMISC, sc->status);
3071 }
3072 }
3073
3074 if (test_bit(ATH_STAT_PROMISC, sc->status))
3075 rfilt |= AR5K_RX_FILTER_PROM;
3076
3077 /* Note, AR5K_RX_FILTER_MCAST is already enabled */
3078 if (*new_flags & FIF_ALLMULTI) {
3079 mfilt[0] = ~0;
3080 mfilt[1] = ~0;
3081 }
3082
3083 /* This is the best we can do */
3084 if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
3085 rfilt |= AR5K_RX_FILTER_PHYERR;
3086
3087 /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
3088 * and probes for any BSSID */
3089 if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
3090 rfilt |= AR5K_RX_FILTER_BEACON;
3091
3092 /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
3093 * set we should only pass on control frames for this
3094 * station. This needs testing. I believe right now this
3096 * enables *all* control frames, which is OK, but
3097 * we should see if we can improve on granularity */
3097 if (*new_flags & FIF_CONTROL)
3098 rfilt |= AR5K_RX_FILTER_CONTROL;
3099
3100 /* Additional settings per mode -- this is per ath5k */
3101
3102 /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
3103
3104 switch (sc->opmode) {
3105 case NL80211_IFTYPE_MESH_POINT:
3106 rfilt |= AR5K_RX_FILTER_CONTROL |
3107 AR5K_RX_FILTER_BEACON |
3108 AR5K_RX_FILTER_PROBEREQ |
3109 AR5K_RX_FILTER_PROM;
3110 break;
3111 case NL80211_IFTYPE_AP:
3112 case NL80211_IFTYPE_ADHOC:
3113 rfilt |= AR5K_RX_FILTER_PROBEREQ |
3114 AR5K_RX_FILTER_BEACON;
3115 break;
3116 case NL80211_IFTYPE_STATION:
3117 if (sc->assoc)
3118 rfilt |= AR5K_RX_FILTER_BEACON;
3119 default:
3120 break;
3121 }
3122
3123 /* Set filters */
3124 ath5k_hw_set_rx_filter(ah, rfilt);
3125
3126 /* Set multicast bits */
3127 ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
3128 /* Set the cached hw filter flags, this will later actually
3129 * be set in HW */
3130 sc->filter_flags = rfilt;
3131
3132 mutex_unlock(&sc->lock);
3133}
3134
3135static int
3136ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3137 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
3138 struct ieee80211_key_conf *key)
3139{
3140 struct ath5k_softc *sc = hw->priv;
3141 struct ath5k_hw *ah = sc->ah;
3142 struct ath_common *common = ath5k_hw_common(ah);
3143 int ret = 0;
3144
3145 if (modparam_nohwcrypt)
3146 return -EOPNOTSUPP;
3147
3148 switch (key->cipher) {
3149 case WLAN_CIPHER_SUITE_WEP40:
3150 case WLAN_CIPHER_SUITE_WEP104:
3151 case WLAN_CIPHER_SUITE_TKIP:
3152 break;
3153 case WLAN_CIPHER_SUITE_CCMP:
3154 if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
3155 break;
3156 return -EOPNOTSUPP;
3157 default:
3158 WARN_ON(1);
3159 return -EINVAL;
3160 }
3161
3162 mutex_lock(&sc->lock);
3163
3164 switch (cmd) {
3165 case SET_KEY:
3166 ret = ath_key_config(common, vif, sta, key);
3167 if (ret >= 0) {
3168 key->hw_key_idx = ret;
3169 /* push IV and Michael MIC generation to stack */
3170 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
3171 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3172 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
3173 if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
3174 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
3175 ret = 0;
3176 }
3177 break;
3178 case DISABLE_KEY:
3179 ath_key_delete(common, key);
3180 break;
3181 default:
3182 ret = -EINVAL;
3183 }
3184
3185 mmiowb();
3186 mutex_unlock(&sc->lock);
3187 return ret;
3188}
3189
3190static int
3191ath5k_get_stats(struct ieee80211_hw *hw,
3192 struct ieee80211_low_level_stats *stats)
3193{
3194 struct ath5k_softc *sc = hw->priv;
3195
3196 /* Force update */
3197 ath5k_hw_update_mib_counters(sc->ah);
3198
3199 stats->dot11ACKFailureCount = sc->stats.ack_fail;
3200 stats->dot11RTSFailureCount = sc->stats.rts_fail;
3201 stats->dot11RTSSuccessCount = sc->stats.rts_ok;
3202 stats->dot11FCSErrorCount = sc->stats.fcs_error;
3203
3204 return 0;
3205}
3206
3207static int ath5k_get_survey(struct ieee80211_hw *hw, int idx,
3208 struct survey_info *survey)
3209{
3210 struct ath5k_softc *sc = hw->priv;
3211 struct ieee80211_conf *conf = &hw->conf;
3212
3213 if (idx != 0)
3214 return -ENOENT;
3215
3216 survey->channel = conf->channel;
3217 survey->filled = SURVEY_INFO_NOISE_DBM;
3218 survey->noise = sc->ah->ah_noise_floor;
3219
3220 return 0;
3221}
3222
3223static u64
3224ath5k_get_tsf(struct ieee80211_hw *hw)
3225{
3226 struct ath5k_softc *sc = hw->priv;
3227
3228 return ath5k_hw_get_tsf64(sc->ah);
3229}
3230
3231static void
3232ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
3233{
3234 struct ath5k_softc *sc = hw->priv;
3235
3236 ath5k_hw_set_tsf64(sc->ah, tsf);
3237}
3238
3239static void
3240ath5k_reset_tsf(struct ieee80211_hw *hw)
3241{
3242 struct ath5k_softc *sc = hw->priv;
3243
3244 /*
3245 * in IBSS mode we need to update the beacon timers too.
3246 * this will also reset the TSF if we call it with 0
3247 */
3248 if (sc->opmode == NL80211_IFTYPE_ADHOC)
3249 ath5k_beacon_update_timers(sc, 0);
3250 else
3251 ath5k_hw_reset_tsf(sc->ah);
3252}
3253
3254static void
3255set_beacon_filter(struct ieee80211_hw *hw, bool enable) 2957set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3256{ 2958{
3257 struct ath5k_softc *sc = hw->priv; 2959 struct ath5k_softc *sc = hw->priv;
@@ -3265,494 +2967,3 @@ set_beacon_filter(struct ieee80211_hw *hw, bool enable)
3265 ath5k_hw_set_rx_filter(ah, rfilt); 2967 ath5k_hw_set_rx_filter(ah, rfilt);
3266 sc->filter_flags = rfilt; 2968 sc->filter_flags = rfilt;
3267} 2969}
3268
3269static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3270 struct ieee80211_vif *vif,
3271 struct ieee80211_bss_conf *bss_conf,
3272 u32 changes)
3273{
3274 struct ath5k_vif *avf = (void *)vif->drv_priv;
3275 struct ath5k_softc *sc = hw->priv;
3276 struct ath5k_hw *ah = sc->ah;
3277 struct ath_common *common = ath5k_hw_common(ah);
3278 unsigned long flags;
3279
3280 mutex_lock(&sc->lock);
3281
3282 if (changes & BSS_CHANGED_BSSID) {
3283 /* Cache for later use during resets */
3284 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
3285 common->curaid = 0;
3286 ath5k_hw_set_bssid(ah);
3287 mmiowb();
3288 }
3289
3290 if (changes & BSS_CHANGED_BEACON_INT)
3291 sc->bintval = bss_conf->beacon_int;
3292
3293 if (changes & BSS_CHANGED_ASSOC) {
3294 avf->assoc = bss_conf->assoc;
3295 if (bss_conf->assoc)
3296 sc->assoc = bss_conf->assoc;
3297 else
3298 sc->assoc = ath_any_vif_assoc(sc);
3299
3300 if (sc->opmode == NL80211_IFTYPE_STATION)
3301 set_beacon_filter(hw, sc->assoc);
3302 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3303 AR5K_LED_ASSOC : AR5K_LED_INIT);
3304 if (bss_conf->assoc) {
3305 ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
3306 "Bss Info ASSOC %d, bssid: %pM\n",
3307 bss_conf->aid, common->curbssid);
3308 common->curaid = bss_conf->aid;
3309 ath5k_hw_set_bssid(ah);
3310 /* Once ANI is available you would start it here */
3311 }
3312 }
3313
3314 if (changes & BSS_CHANGED_BEACON) {
3315 spin_lock_irqsave(&sc->block, flags);
3316 ath5k_beacon_update(hw, vif);
3317 spin_unlock_irqrestore(&sc->block, flags);
3318 }
3319
3320 if (changes & BSS_CHANGED_BEACON_ENABLED)
3321 sc->enable_beacon = bss_conf->enable_beacon;
3322
3323 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
3324 BSS_CHANGED_BEACON_INT))
3325 ath5k_beacon_config(sc);
3326
3327 mutex_unlock(&sc->lock);
3328}
3329
3330static void ath5k_sw_scan_start(struct ieee80211_hw *hw)
3331{
3332 struct ath5k_softc *sc = hw->priv;
3333 if (!sc->assoc)
3334 ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
3335}
3336
3337static void ath5k_sw_scan_complete(struct ieee80211_hw *hw)
3338{
3339 struct ath5k_softc *sc = hw->priv;
3340 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3341 AR5K_LED_ASSOC : AR5K_LED_INIT);
3342}
3343
3344/**
3345 * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
3346 *
3347 * @hw: struct ieee80211_hw pointer
3348 * @coverage_class: IEEE 802.11 coverage class number
3349 *
3350 * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
3351 * coverage class. The values are persistent, they are restored after device
3352 * reset.
3353 */
3354static void ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
3355{
3356 struct ath5k_softc *sc = hw->priv;
3357
3358 mutex_lock(&sc->lock);
3359 ath5k_hw_set_coverage_class(sc->ah, coverage_class);
3360 mutex_unlock(&sc->lock);
3361}
3362
3363static int ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3364 const struct ieee80211_tx_queue_params *params)
3365{
3366 struct ath5k_softc *sc = hw->priv;
3367 struct ath5k_hw *ah = sc->ah;
3368 struct ath5k_txq_info qi;
3369 int ret = 0;
3370
3371 if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
3372 return 0;
3373
3374 mutex_lock(&sc->lock);
3375
3376 ath5k_hw_get_tx_queueprops(ah, queue, &qi);
3377
3378 qi.tqi_aifs = params->aifs;
3379 qi.tqi_cw_min = params->cw_min;
3380 qi.tqi_cw_max = params->cw_max;
3381 qi.tqi_burst_time = params->txop;
3382
3383 ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
3384 "Configure tx [queue %d], "
3385 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
3386 queue, params->aifs, params->cw_min,
3387 params->cw_max, params->txop);
3388
3389 if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
3390 ATH5K_ERR(sc,
3391 "Unable to update hardware queue %u!\n", queue);
3392 ret = -EIO;
3393 } else
3394 ath5k_hw_reset_tx_queue(ah, queue);
3395
3396 mutex_unlock(&sc->lock);
3397
3398 return ret;
3399}
3400
3401static const struct ieee80211_ops ath5k_hw_ops = {
3402 .tx = ath5k_tx,
3403 .start = ath5k_start,
3404 .stop = ath5k_stop,
3405 .add_interface = ath5k_add_interface,
3406 .remove_interface = ath5k_remove_interface,
3407 .config = ath5k_config,
3408 .prepare_multicast = ath5k_prepare_multicast,
3409 .configure_filter = ath5k_configure_filter,
3410 .set_key = ath5k_set_key,
3411 .get_stats = ath5k_get_stats,
3412 .get_survey = ath5k_get_survey,
3413 .conf_tx = ath5k_conf_tx,
3414 .get_tsf = ath5k_get_tsf,
3415 .set_tsf = ath5k_set_tsf,
3416 .reset_tsf = ath5k_reset_tsf,
3417 .bss_info_changed = ath5k_bss_info_changed,
3418 .sw_scan_start = ath5k_sw_scan_start,
3419 .sw_scan_complete = ath5k_sw_scan_complete,
3420 .set_coverage_class = ath5k_set_coverage_class,
3421};
3422
3423/********************\
3424* PCI Initialization *
3425\********************/
3426
3427static int __devinit
3428ath5k_pci_probe(struct pci_dev *pdev,
3429 const struct pci_device_id *id)
3430{
3431 void __iomem *mem;
3432 struct ath5k_softc *sc;
3433 struct ath_common *common;
3434 struct ieee80211_hw *hw;
3435 int ret;
3436 u8 csz;
3437
3438 /*
3439 * L0s needs to be disabled on all ath5k cards.
3440 *
3441 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
3442 * by default in the future in 2.6.36) this will also mean both L1 and
3443 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
3444 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
3445 * though, but cannot currently undo the effect of a blacklist, for
3446 * details you can read pcie_aspm_sanity_check() and see how it adjusts
3447 * the device link capability.
3448 *
3449 * It may be possible in the future to implement some PCI API to allow
3450 * drivers to override blacklists for pre 1.1 PCIe but for now it is
3451 * best to accept that both L0s and L1 will be disabled completely for
3452 * distributions shipping with CONFIG_PCIEASPM rather than having this
3453 * issue present. Motivation for adding this new API will be to help
3454 * with power consumption for some of these devices.
3455 */
3456 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
3457
3458 ret = pci_enable_device(pdev);
3459 if (ret) {
3460 dev_err(&pdev->dev, "can't enable device\n");
3461 goto err;
3462 }
3463
3464 /* XXX 32-bit addressing only */
3465 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3466 if (ret) {
3467 dev_err(&pdev->dev, "32-bit DMA not available\n");
3468 goto err_dis;
3469 }
3470
3471 /*
3472 * Cache line size is used to size and align various
3473 * structures used to communicate with the hardware.
3474 */
3475 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
3476 if (csz == 0) {
3477 /*
3478 * Linux 2.4.18 (at least) writes the cache line size
3479 * register as a 16-bit wide register which is wrong.
3480 * We must have this setup properly for rx buffer
3481 * DMA to work so force a reasonable value here if it
3482 * comes up zero.
3483 */
3484 csz = L1_CACHE_BYTES >> 2;
3485 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
3486 }
3487 /*
3488 * The default setting of latency timer yields poor results,
3489 * set it to the value used by other systems. It may be worth
3490 * tweaking this setting more.
3491 */
3492 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
3493
3494 /* Enable bus mastering */
3495 pci_set_master(pdev);
3496
3497 /*
3498 * Disable the RETRY_TIMEOUT register (0x41) to keep
3499 * PCI Tx retries from interfering with C3 CPU state.
3500 */
3501 pci_write_config_byte(pdev, 0x41, 0);
3502
3503 ret = pci_request_region(pdev, 0, "ath5k");
3504 if (ret) {
3505 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
3506 goto err_dis;
3507 }
3508
3509 mem = pci_iomap(pdev, 0, 0);
3510 if (!mem) {
3511 dev_err(&pdev->dev, "cannot remap PCI memory region\n");
3512 ret = -EIO;
3513 goto err_reg;
3514 }
3515
3516 /*
3517 * Allocate hw (mac80211 main struct)
3518 * and hw->priv (driver private data)
3519 */
3520 hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
3521 if (hw == NULL) {
3522 dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
3523 ret = -ENOMEM;
3524 goto err_map;
3525 }
3526
3527 dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
3528
3529 /* Initialize driver private data */
3530 SET_IEEE80211_DEV(hw, &pdev->dev);
3531 hw->flags = IEEE80211_HW_RX_INCLUDES_FCS |
3532 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
3533 IEEE80211_HW_SIGNAL_DBM;
3534
3535 hw->wiphy->interface_modes =
3536 BIT(NL80211_IFTYPE_AP) |
3537 BIT(NL80211_IFTYPE_STATION) |
3538 BIT(NL80211_IFTYPE_ADHOC) |
3539 BIT(NL80211_IFTYPE_MESH_POINT);
3540
3541 hw->extra_tx_headroom = 2;
3542 hw->channel_change_time = 5000;
3543 sc = hw->priv;
3544 sc->hw = hw;
3545 sc->pdev = pdev;
3546
3547 /*
3548 * Mark the device as detached to avoid processing
3549 * interrupts until setup is complete.
3550 */
3551 __set_bit(ATH_STAT_INVALID, sc->status);
3552
3553 sc->iobase = mem; /* So we can unmap it on detach */
3554 sc->opmode = NL80211_IFTYPE_STATION;
3555 sc->bintval = 1000;
3556 mutex_init(&sc->lock);
3557 spin_lock_init(&sc->rxbuflock);
3558 spin_lock_init(&sc->txbuflock);
3559 spin_lock_init(&sc->block);
3560
3561 /* Set private data */
3562 pci_set_drvdata(pdev, sc);
3563
3564 /* Setup interrupt handler */
3565 ret = request_irq(pdev->irq, ath5k_intr, IRQF_SHARED, "ath", sc);
3566 if (ret) {
3567 ATH5K_ERR(sc, "request_irq failed\n");
3568 goto err_free;
3569 }
3570
3571 /* If we passed the test, malloc an ath5k_hw struct */
3572 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
3573 if (!sc->ah) {
3574 ret = -ENOMEM;
3575 ATH5K_ERR(sc, "out of memory\n");
3576 goto err_irq;
3577 }
3578
3579 sc->ah->ah_sc = sc;
3580 sc->ah->ah_iobase = sc->iobase;
3581 common = ath5k_hw_common(sc->ah);
3582 common->ops = &ath5k_common_ops;
3583 common->ah = sc->ah;
3584 common->hw = hw;
3585 common->cachelsz = csz << 2; /* convert to bytes */
3586 spin_lock_init(&common->cc_lock);
3587
3588 /* Initialize device */
3589 ret = ath5k_hw_attach(sc);
3590 if (ret) {
3591 goto err_free_ah;
3592 }
3593
3594 /* set up multi-rate retry capabilities */
3595 if (sc->ah->ah_version == AR5K_AR5212) {
3596 hw->max_rates = 4;
3597 hw->max_rate_tries = 11;
3598 }
3599
3600 hw->vif_data_size = sizeof(struct ath5k_vif);
3601
3602 /* Finish private driver data initialization */
3603 ret = ath5k_attach(pdev, hw);
3604 if (ret)
3605 goto err_ah;
3606
3607 ATH5K_INFO(sc, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
3608 ath5k_chip_name(AR5K_VERSION_MAC, sc->ah->ah_mac_srev),
3609 sc->ah->ah_mac_srev,
3610 sc->ah->ah_phy_revision);
3611
3612 if (!sc->ah->ah_single_chip) {
3613 /* Single chip radio (!RF5111) */
3614 if (sc->ah->ah_radio_5ghz_revision &&
3615 !sc->ah->ah_radio_2ghz_revision) {
3616 /* No 5GHz support -> report 2GHz radio */
3617 if (!test_bit(AR5K_MODE_11A,
3618 sc->ah->ah_capabilities.cap_mode)) {
3619 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
3620 ath5k_chip_name(AR5K_VERSION_RAD,
3621 sc->ah->ah_radio_5ghz_revision),
3622 sc->ah->ah_radio_5ghz_revision);
3623 /* No 2GHz support (5110 and some
3624 * 5GHz only cards) -> report 5GHz radio */
3625 } else if (!test_bit(AR5K_MODE_11B,
3626 sc->ah->ah_capabilities.cap_mode)) {
3627 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
3628 ath5k_chip_name(AR5K_VERSION_RAD,
3629 sc->ah->ah_radio_5ghz_revision),
3630 sc->ah->ah_radio_5ghz_revision);
3631 /* Multiband radio */
3632 } else {
3633 ATH5K_INFO(sc, "RF%s multiband radio found"
3634 " (0x%x)\n",
3635 ath5k_chip_name(AR5K_VERSION_RAD,
3636 sc->ah->ah_radio_5ghz_revision),
3637 sc->ah->ah_radio_5ghz_revision);
3638 }
3639 }
3640 /* Multi chip radio (RF5111 - RF2111) ->
3641 * report both 2GHz/5GHz radios */
3642 else if (sc->ah->ah_radio_5ghz_revision &&
3643 sc->ah->ah_radio_2ghz_revision){
3644 ATH5K_INFO(sc, "RF%s 5GHz radio found (0x%x)\n",
3645 ath5k_chip_name(AR5K_VERSION_RAD,
3646 sc->ah->ah_radio_5ghz_revision),
3647 sc->ah->ah_radio_5ghz_revision);
3648 ATH5K_INFO(sc, "RF%s 2GHz radio found (0x%x)\n",
3649 ath5k_chip_name(AR5K_VERSION_RAD,
3650 sc->ah->ah_radio_2ghz_revision),
3651 sc->ah->ah_radio_2ghz_revision);
3652 }
3653 }
3654
3655 ath5k_debug_init_device(sc);
3656
3657 /* ready to process interrupts */
3658 __clear_bit(ATH_STAT_INVALID, sc->status);
3659
3660 return 0;
3661err_ah:
3662 ath5k_hw_detach(sc->ah);
3663err_free_ah:
3664 kfree(sc->ah);
3665err_irq:
3666 free_irq(pdev->irq, sc);
3667err_free:
3668 ieee80211_free_hw(hw);
3669err_map:
3670 pci_iounmap(pdev, mem);
3671err_reg:
3672 pci_release_region(pdev, 0);
3673err_dis:
3674 pci_disable_device(pdev);
3675err:
3676 return ret;
3677}
3678
3679static void __devexit
3680ath5k_pci_remove(struct pci_dev *pdev)
3681{
3682 struct ath5k_softc *sc = pci_get_drvdata(pdev);
3683
3684 ath5k_debug_finish_device(sc);
3685 ath5k_detach(pdev, sc->hw);
3686 ath5k_hw_detach(sc->ah);
3687 kfree(sc->ah);
3688 free_irq(pdev->irq, sc);
3689 pci_iounmap(pdev, sc->iobase);
3690 pci_release_region(pdev, 0);
3691 pci_disable_device(pdev);
3692 ieee80211_free_hw(sc->hw);
3693}
3694
3695#ifdef CONFIG_PM_SLEEP
3696static int ath5k_pci_suspend(struct device *dev)
3697{
3698 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
3699
3700 ath5k_led_off(sc);
3701 return 0;
3702}
3703
3704static int ath5k_pci_resume(struct device *dev)
3705{
3706 struct pci_dev *pdev = to_pci_dev(dev);
3707 struct ath5k_softc *sc = pci_get_drvdata(pdev);
3708
3709 /*
3710 * Suspend/Resume resets the PCI configuration space, so we have to
3711 * re-disable the RETRY_TIMEOUT register (0x41) to keep
3712 * PCI Tx retries from interfering with C3 CPU state
3713 */
3714 pci_write_config_byte(pdev, 0x41, 0);
3715
3716 ath5k_led_enable(sc);
3717 return 0;
3718}
3719
3720static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
3721#define ATH5K_PM_OPS (&ath5k_pm_ops)
3722#else
3723#define ATH5K_PM_OPS NULL
3724#endif /* CONFIG_PM_SLEEP */
3725
3726static struct pci_driver ath5k_pci_driver = {
3727 .name = KBUILD_MODNAME,
3728 .id_table = ath5k_pci_id_table,
3729 .probe = ath5k_pci_probe,
3730 .remove = __devexit_p(ath5k_pci_remove),
3731 .driver.pm = ATH5K_PM_OPS,
3732};
3733
3734/*
3735 * Module init/exit functions
3736 */
3737static int __init
3738init_ath5k_pci(void)
3739{
3740 int ret;
3741
3742 ret = pci_register_driver(&ath5k_pci_driver);
3743 if (ret) {
3744 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
3745 return ret;
3746 }
3747
3748 return 0;
3749}
3750
3751static void __exit
3752exit_ath5k_pci(void)
3753{
3754 pci_unregister_driver(&ath5k_pci_driver);
3755}
3756
3757module_init(init_ath5k_pci);
3758module_exit(exit_ath5k_pci);
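
With the PCI probe/attach code above on its way out of base.c, registration splits in two: a bus glue maps the registers, fills in the dev/irq/iobase/devid fields added to struct ath5k_softc just below, and then hands over to the bus-agnostic ath5k_init_softc(). A rough sketch of such a glue; the probe signature, bus_ops argument and error handling are hypothetical, only ath5k_init_softc(), ath5k_hw_ops and the softc fields come from this diff:

static int example_bus_probe(struct device *dev, void __iomem *mem,
			     int irq, u16 devid,
			     const struct ath_bus_ops *bus_ops)
{
	struct ieee80211_hw *hw;
	struct ath5k_softc *sc;
	int ret;

	/* mac80211 allocates the driver private area for us */
	hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
	if (!hw)
		return -ENOMEM;

	sc = hw->priv;
	sc->hw = hw;
	sc->dev = dev;		/* used for DMA mapping from now on */
	sc->iobase = mem;	/* register window mapped by the bus glue */
	sc->irq = irq;
	sc->devid = devid;

	ret = ath5k_init_softc(sc, bus_ops);
	if (ret)
		ieee80211_free_hw(hw);
	return ret;
}
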
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index 9a79773cdc2a..6d511476e4d2 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -169,7 +169,10 @@ struct ath5k_vif {
169/* Software Carrier, keeps track of the driver state 169/* Software Carrier, keeps track of the driver state
170 * associated with an instance of a device */ 170 * associated with an instance of a device */
171struct ath5k_softc { 171struct ath5k_softc {
172 struct pci_dev *pdev; /* for dma mapping */ 172 struct pci_dev *pdev;
173 struct device *dev; /* for dma mapping */
174 int irq;
175 u16 devid;
173 void __iomem *iobase; /* address of the device */ 176 void __iomem *iobase; /* address of the device */
174 struct mutex lock; /* dev-level lock */ 177 struct mutex lock; /* dev-level lock */
175 struct ieee80211_hw *hw; /* IEEE 802.11 common */ 178 struct ieee80211_hw *hw; /* IEEE 802.11 common */
@@ -255,6 +258,8 @@ struct ath5k_softc {
255 struct tasklet_struct ani_tasklet; /* ANI calibration */ 258 struct tasklet_struct ani_tasklet; /* ANI calibration */
256 259
257 struct delayed_work tx_complete_work; 260 struct delayed_work tx_complete_work;
261
262 struct survey_info survey; /* collected survey info */
258}; 263};
259 264
260#define ath5k_hw_hasbssidmask(_ah) \ 265#define ath5k_hw_hasbssidmask(_ah) \
diff --git a/drivers/net/wireless/ath/ath5k/caps.c b/drivers/net/wireless/ath/ath5k/caps.c
index beae519aa735..31cad80e9b01 100644
--- a/drivers/net/wireless/ath/ath5k/caps.c
+++ b/drivers/net/wireless/ath/ath5k/caps.c
@@ -49,7 +49,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
49 49
50 /* Set supported modes */ 50 /* Set supported modes */
51 __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode); 51 __set_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode);
52 __set_bit(AR5K_MODE_11A_TURBO, ah->ah_capabilities.cap_mode);
53 } else { 52 } else {
54 /* 53 /*
55 * XXX The tranceiver supports frequencies from 4920 to 6100GHz 54 * XXX The tranceiver supports frequencies from 4920 to 6100GHz
@@ -74,11 +73,6 @@ int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
74 /* Set supported modes */ 73 /* Set supported modes */
75 __set_bit(AR5K_MODE_11A, 74 __set_bit(AR5K_MODE_11A,
76 ah->ah_capabilities.cap_mode); 75 ah->ah_capabilities.cap_mode);
77 __set_bit(AR5K_MODE_11A_TURBO,
78 ah->ah_capabilities.cap_mode);
79 if (ah->ah_version == AR5K_AR5212)
80 __set_bit(AR5K_MODE_11G_TURBO,
81 ah->ah_capabilities.cap_mode);
82 } 76 }
83 77
84 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is 78 /* Enable 802.11b if a 2GHz capable radio (2111/5112) is
diff --git a/drivers/net/wireless/ath/ath5k/debug.c b/drivers/net/wireless/ath/ath5k/debug.c
index acda56ee521b..d2f84d76bb07 100644
--- a/drivers/net/wireless/ath/ath5k/debug.c
+++ b/drivers/net/wireless/ath/ath5k/debug.c
@@ -60,7 +60,6 @@
60 60
61#include "base.h" 61#include "base.h"
62#include "debug.h" 62#include "debug.h"
63#include "../debug.h"
64 63
65static unsigned int ath5k_debug; 64static unsigned int ath5k_debug;
66module_param_named(debug, ath5k_debug, uint, 0); 65module_param_named(debug, ath5k_debug, uint, 0);
@@ -312,6 +311,7 @@ static const struct {
312 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" }, 311 { ATH5K_DEBUG_DUMP_RX, "dumprx", "print received skb content" },
313 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" }, 312 { ATH5K_DEBUG_DUMP_TX, "dumptx", "print transmit skb content" },
314 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" }, 313 { ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
314 { ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
315 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" }, 315 { ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
316 { ATH5K_DEBUG_DESC, "desc", "descriptor chains" }, 316 { ATH5K_DEBUG_DESC, "desc", "descriptor chains" },
317 { ATH5K_DEBUG_ANY, "all", "show all debug levels" }, 317 { ATH5K_DEBUG_ANY, "all", "show all debug levels" },
@@ -554,63 +554,63 @@ static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
554 554
555 len += snprintf(buf+len, sizeof(buf)-len, 555 len += snprintf(buf+len, sizeof(buf)-len,
556 "RX\n---------------------\n"); 556 "RX\n---------------------\n");
557 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%d\t(%d%%)\n", 557 len += snprintf(buf+len, sizeof(buf)-len, "CRC\t%u\t(%u%%)\n",
558 st->rxerr_crc, 558 st->rxerr_crc,
559 st->rx_all_count > 0 ? 559 st->rx_all_count > 0 ?
560 st->rxerr_crc*100/st->rx_all_count : 0); 560 st->rxerr_crc*100/st->rx_all_count : 0);
561 len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%d\t(%d%%)\n", 561 len += snprintf(buf+len, sizeof(buf)-len, "PHY\t%u\t(%u%%)\n",
562 st->rxerr_phy, 562 st->rxerr_phy,
563 st->rx_all_count > 0 ? 563 st->rx_all_count > 0 ?
564 st->rxerr_phy*100/st->rx_all_count : 0); 564 st->rxerr_phy*100/st->rx_all_count : 0);
565 for (i = 0; i < 32; i++) { 565 for (i = 0; i < 32; i++) {
566 if (st->rxerr_phy_code[i]) 566 if (st->rxerr_phy_code[i])
567 len += snprintf(buf+len, sizeof(buf)-len, 567 len += snprintf(buf+len, sizeof(buf)-len,
568 " phy_err[%d]\t%d\n", 568 " phy_err[%u]\t%u\n",
569 i, st->rxerr_phy_code[i]); 569 i, st->rxerr_phy_code[i]);
570 } 570 }
571 571
572 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n", 572 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
573 st->rxerr_fifo, 573 st->rxerr_fifo,
574 st->rx_all_count > 0 ? 574 st->rx_all_count > 0 ?
575 st->rxerr_fifo*100/st->rx_all_count : 0); 575 st->rxerr_fifo*100/st->rx_all_count : 0);
576 len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%d\t(%d%%)\n", 576 len += snprintf(buf+len, sizeof(buf)-len, "decrypt\t%u\t(%u%%)\n",
577 st->rxerr_decrypt, 577 st->rxerr_decrypt,
578 st->rx_all_count > 0 ? 578 st->rx_all_count > 0 ?
579 st->rxerr_decrypt*100/st->rx_all_count : 0); 579 st->rxerr_decrypt*100/st->rx_all_count : 0);
580 len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%d\t(%d%%)\n", 580 len += snprintf(buf+len, sizeof(buf)-len, "MIC\t%u\t(%u%%)\n",
581 st->rxerr_mic, 581 st->rxerr_mic,
582 st->rx_all_count > 0 ? 582 st->rx_all_count > 0 ?
583 st->rxerr_mic*100/st->rx_all_count : 0); 583 st->rxerr_mic*100/st->rx_all_count : 0);
584 len += snprintf(buf+len, sizeof(buf)-len, "process\t%d\t(%d%%)\n", 584 len += snprintf(buf+len, sizeof(buf)-len, "process\t%u\t(%u%%)\n",
585 st->rxerr_proc, 585 st->rxerr_proc,
586 st->rx_all_count > 0 ? 586 st->rx_all_count > 0 ?
587 st->rxerr_proc*100/st->rx_all_count : 0); 587 st->rxerr_proc*100/st->rx_all_count : 0);
588 len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%d\t(%d%%)\n", 588 len += snprintf(buf+len, sizeof(buf)-len, "jumbo\t%u\t(%u%%)\n",
589 st->rxerr_jumbo, 589 st->rxerr_jumbo,
590 st->rx_all_count > 0 ? 590 st->rx_all_count > 0 ?
591 st->rxerr_jumbo*100/st->rx_all_count : 0); 591 st->rxerr_jumbo*100/st->rx_all_count : 0);
592 len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%d]\n", 592 len += snprintf(buf+len, sizeof(buf)-len, "[RX all\t%u]\n",
593 st->rx_all_count); 593 st->rx_all_count);
594 len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%d\n", 594 len += snprintf(buf+len, sizeof(buf)-len, "RX-all-bytes\t%u\n",
595 st->rx_bytes_count); 595 st->rx_bytes_count);
596 596
597 len += snprintf(buf+len, sizeof(buf)-len, 597 len += snprintf(buf+len, sizeof(buf)-len,
598 "\nTX\n---------------------\n"); 598 "\nTX\n---------------------\n");
599 len += snprintf(buf+len, sizeof(buf)-len, "retry\t%d\t(%d%%)\n", 599 len += snprintf(buf+len, sizeof(buf)-len, "retry\t%u\t(%u%%)\n",
600 st->txerr_retry, 600 st->txerr_retry,
601 st->tx_all_count > 0 ? 601 st->tx_all_count > 0 ?
602 st->txerr_retry*100/st->tx_all_count : 0); 602 st->txerr_retry*100/st->tx_all_count : 0);
603 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%d\t(%d%%)\n", 603 len += snprintf(buf+len, sizeof(buf)-len, "FIFO\t%u\t(%u%%)\n",
604 st->txerr_fifo, 604 st->txerr_fifo,
605 st->tx_all_count > 0 ? 605 st->tx_all_count > 0 ?
606 st->txerr_fifo*100/st->tx_all_count : 0); 606 st->txerr_fifo*100/st->tx_all_count : 0);
607 len += snprintf(buf+len, sizeof(buf)-len, "filter\t%d\t(%d%%)\n", 607 len += snprintf(buf+len, sizeof(buf)-len, "filter\t%u\t(%u%%)\n",
608 st->txerr_filt, 608 st->txerr_filt,
609 st->tx_all_count > 0 ? 609 st->tx_all_count > 0 ?
610 st->txerr_filt*100/st->tx_all_count : 0); 610 st->txerr_filt*100/st->tx_all_count : 0);
611 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%d]\n", 611 len += snprintf(buf+len, sizeof(buf)-len, "[TX all\t%u]\n",
612 st->tx_all_count); 612 st->tx_all_count);
613 len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%d\n", 613 len += snprintf(buf+len, sizeof(buf)-len, "TX-all-bytes\t%u\n",
614 st->tx_bytes_count); 614 st->tx_bytes_count);
615 615
616 if (len > sizeof(buf)) 616 if (len > sizeof(buf))
@@ -719,7 +719,7 @@ static ssize_t read_file_ani(struct file *file, char __user *user_buf,
719 st->mib_intr); 719 st->mib_intr);
720 len += snprintf(buf+len, sizeof(buf)-len, 720 len += snprintf(buf+len, sizeof(buf)-len,
721 "beacon RSSI average:\t%d\n", 721 "beacon RSSI average:\t%d\n",
722 sc->ah->ah_beacon_rssi_avg.avg); 722 (int)ewma_read(&sc->ah->ah_beacon_rssi_avg));
723 723
724#define CC_PRINT(_struct, _field) \ 724#define CC_PRINT(_struct, _field) \
725 _struct._field, \ 725 _struct._field, \
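
The beacon RSSI average read above now comes from the generic EWMA helper in <linux/average.h> instead of a driver-private field; base.c seeds it with ewma_init(&ah->ah_beacon_rssi_avg, 1024, 8). A small sketch of that API with hypothetical sample values, assuming the usual <linux/average.h> semantics (factor scales the internal fixed-point value, weight controls how fast old samples decay):

#include <linux/average.h>
#include <linux/kernel.h>

static struct ewma rssi_avg;

static void example_ewma_usage(void)
{
	/* factor 1024, weight 8 -- mirrors the ewma_init() call in base.c */
	ewma_init(&rssi_avg, 1024, 8);

	ewma_add(&rssi_avg, 35);	/* fold in a (hypothetical) sample */
	ewma_add(&rssi_avg, 40);

	/* ewma_read() scales back down and returns the running average */
	pr_debug("avg rssi %lu\n", ewma_read(&rssi_avg));
}
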
diff --git a/drivers/net/wireless/ath/ath5k/debug.h b/drivers/net/wireless/ath/ath5k/debug.h
index 236edbd2507d..3e34428d5126 100644
--- a/drivers/net/wireless/ath/ath5k/debug.h
+++ b/drivers/net/wireless/ath/ath5k/debug.h
@@ -95,6 +95,7 @@ struct ath5k_dbg_info {
95 * @ATH5K_DEBUG_DUMP_RX: print received skb content 95 * @ATH5K_DEBUG_DUMP_RX: print received skb content
96 * @ATH5K_DEBUG_DUMP_TX: print transmit skb content 96 * @ATH5K_DEBUG_DUMP_TX: print transmit skb content
97 * @ATH5K_DEBUG_DUMPBANDS: dump bands 97 * @ATH5K_DEBUG_DUMPBANDS: dump bands
98 * @ATH5K_DEBUG_DMA: debug dma start/stop
98 * @ATH5K_DEBUG_TRACE: trace function calls 99 * @ATH5K_DEBUG_TRACE: trace function calls
99 * @ATH5K_DEBUG_DESC: descriptor setup 100 * @ATH5K_DEBUG_DESC: descriptor setup
100 * @ATH5K_DEBUG_ANY: show at any debug level 101 * @ATH5K_DEBUG_ANY: show at any debug level
@@ -118,6 +119,7 @@ enum ath5k_debug_level {
118 ATH5K_DEBUG_DUMP_RX = 0x00000100, 119 ATH5K_DEBUG_DUMP_RX = 0x00000100,
119 ATH5K_DEBUG_DUMP_TX = 0x00000200, 120 ATH5K_DEBUG_DUMP_TX = 0x00000200,
120 ATH5K_DEBUG_DUMPBANDS = 0x00000400, 121 ATH5K_DEBUG_DUMPBANDS = 0x00000400,
122 ATH5K_DEBUG_DMA = 0x00000800,
121 ATH5K_DEBUG_ANI = 0x00002000, 123 ATH5K_DEBUG_ANI = 0x00002000,
122 ATH5K_DEBUG_DESC = 0x00004000, 124 ATH5K_DEBUG_DESC = 0x00004000,
123 ATH5K_DEBUG_ANY = 0xffffffff 125 ATH5K_DEBUG_ANY = 0xffffffff
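The new ATH5K_DEBUG_DMA bit slots into the existing debug-level bitmask, so the DMA start/stop messages added elsewhere in this patch can be switched on without enabling every other category. A small userspace sketch of how such a mask gates output follows; the helper name and the way the level is configured are illustrative only, the driver's real gate is the ATH5K_DBG macro.

/*
 * Userspace sketch of bitmask-gated debug output, mirroring how
 * ATH5K_DEBUG_DMA (0x00000800) is tested against the configured
 * debug level before a message is emitted.
 */
#include <stdarg.h>
#include <stdio.h>

#define DBG_DMA 0x00000800	/* same bit value as ATH5K_DEBUG_DMA */

static unsigned int debug_level = DBG_DMA;	/* e.g. set via debugfs */

static void dbg(unsigned int category, const char *fmt, ...)
{
	va_list ap;

	if (!(debug_level & category))	/* category not enabled */
		return;

	va_start(ap, fmt);
	vprintf(fmt, ap);
	va_end(ap);
}

int main(void)
{
	dbg(DBG_DMA, "failed to stop RX DMA !\n");	/* printed */
	dbg(0x00000200, "dumping tx skb\n");		/* filtered out */
	return 0;
}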
diff --git a/drivers/net/wireless/ath/ath5k/desc.c b/drivers/net/wireless/ath/ath5k/desc.c
index 43244382f213..16b44ff7dd3e 100644
--- a/drivers/net/wireless/ath/ath5k/desc.c
+++ b/drivers/net/wireless/ath/ath5k/desc.c
@@ -26,9 +26,10 @@
26#include "debug.h" 26#include "debug.h"
27#include "base.h" 27#include "base.h"
28 28
29/* 29
30 * TX Descriptors 30/************************\
31 */ 31* TX Control descriptors *
32\************************/
32 33
33/* 34/*
34 * Initialize the 2-word tx control descriptor on 5210/5211 35 * Initialize the 2-word tx control descriptor on 5210/5211
@@ -335,6 +336,11 @@ ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah, struct ath5k_desc *desc,
335 return 0; 336 return 0;
336} 337}
337 338
339
340/***********************\
341* TX Status descriptors *
342\***********************/
343
338/* 344/*
339 * Proccess the tx status descriptor on 5210/5211 345 * Proccess the tx status descriptor on 5210/5211
340 */ 346 */
@@ -476,9 +482,10 @@ static int ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
476 return 0; 482 return 0;
477} 483}
478 484
479/* 485
480 * RX Descriptors 486/****************\
481 */ 487* RX Descriptors *
488\****************/
482 489
483/* 490/*
484 * Initialize an rx control descriptor 491 * Initialize an rx control descriptor
@@ -666,6 +673,11 @@ static int ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
666 return 0; 673 return 0;
667} 674}
668 675
676
677/********\
678* Attach *
679\********/
680
669/* 681/*
670 * Init function pointers inside ath5k_hw struct 682 * Init function pointers inside ath5k_hw struct
671 */ 683 */
diff --git a/drivers/net/wireless/ath/ath5k/desc.h b/drivers/net/wireless/ath/ath5k/desc.h
index b2adb2a281c2..2509d0bf037d 100644
--- a/drivers/net/wireless/ath/ath5k/desc.h
+++ b/drivers/net/wireless/ath/ath5k/desc.h
@@ -26,7 +26,7 @@
26struct ath5k_hw_rx_ctl { 26struct ath5k_hw_rx_ctl {
27 u32 rx_control_0; /* RX control word 0 */ 27 u32 rx_control_0; /* RX control word 0 */
28 u32 rx_control_1; /* RX control word 1 */ 28 u32 rx_control_1; /* RX control word 1 */
29} __packed; 29} __packed __aligned(4);
30 30
31/* RX control word 1 fields/flags */ 31/* RX control word 1 fields/flags */
32#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */ 32#define AR5K_DESC_RX_CTL1_BUF_LEN 0x00000fff /* data buffer length */
@@ -39,7 +39,7 @@ struct ath5k_hw_rx_ctl {
39struct ath5k_hw_rx_status { 39struct ath5k_hw_rx_status {
40 u32 rx_status_0; /* RX status word 0 */ 40 u32 rx_status_0; /* RX status word 0 */
41 u32 rx_status_1; /* RX status word 1 */ 41 u32 rx_status_1; /* RX status word 1 */
42} __packed; 42} __packed __aligned(4);
43 43
44/* 5210/5211 */ 44/* 5210/5211 */
45/* RX status word 0 fields/flags */ 45/* RX status word 0 fields/flags */
@@ -129,7 +129,7 @@ enum ath5k_phy_error_code {
129struct ath5k_hw_2w_tx_ctl { 129struct ath5k_hw_2w_tx_ctl {
130 u32 tx_control_0; /* TX control word 0 */ 130 u32 tx_control_0; /* TX control word 0 */
131 u32 tx_control_1; /* TX control word 1 */ 131 u32 tx_control_1; /* TX control word 1 */
132} __packed; 132} __packed __aligned(4);
133 133
134/* TX control word 0 fields/flags */ 134/* TX control word 0 fields/flags */
135#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */ 135#define AR5K_2W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
@@ -185,7 +185,7 @@ struct ath5k_hw_4w_tx_ctl {
185 u32 tx_control_1; /* TX control word 1 */ 185 u32 tx_control_1; /* TX control word 1 */
186 u32 tx_control_2; /* TX control word 2 */ 186 u32 tx_control_2; /* TX control word 2 */
187 u32 tx_control_3; /* TX control word 3 */ 187 u32 tx_control_3; /* TX control word 3 */
188} __packed; 188} __packed __aligned(4);
189 189
190/* TX control word 0 fields/flags */ 190/* TX control word 0 fields/flags */
191#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */ 191#define AR5K_4W_TX_DESC_CTL0_FRAME_LEN 0x00000fff /* frame length */
@@ -244,7 +244,7 @@ struct ath5k_hw_4w_tx_ctl {
244struct ath5k_hw_tx_status { 244struct ath5k_hw_tx_status {
245 u32 tx_status_0; /* TX status word 0 */ 245 u32 tx_status_0; /* TX status word 0 */
246 u32 tx_status_1; /* TX status word 1 */ 246 u32 tx_status_1; /* TX status word 1 */
247} __packed; 247} __packed __aligned(4);
248 248
249/* TX status word 0 fields/flags */ 249/* TX status word 0 fields/flags */
250#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */ 250#define AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK 0x00000001 /* TX success */
@@ -282,7 +282,7 @@ struct ath5k_hw_tx_status {
282struct ath5k_hw_5210_tx_desc { 282struct ath5k_hw_5210_tx_desc {
283 struct ath5k_hw_2w_tx_ctl tx_ctl; 283 struct ath5k_hw_2w_tx_ctl tx_ctl;
284 struct ath5k_hw_tx_status tx_stat; 284 struct ath5k_hw_tx_status tx_stat;
285} __packed; 285} __packed __aligned(4);
286 286
287/* 287/*
288 * 5212 hardware TX descriptor 288 * 5212 hardware TX descriptor
@@ -290,7 +290,7 @@ struct ath5k_hw_5210_tx_desc {
290struct ath5k_hw_5212_tx_desc { 290struct ath5k_hw_5212_tx_desc {
291 struct ath5k_hw_4w_tx_ctl tx_ctl; 291 struct ath5k_hw_4w_tx_ctl tx_ctl;
292 struct ath5k_hw_tx_status tx_stat; 292 struct ath5k_hw_tx_status tx_stat;
293} __packed; 293} __packed __aligned(4);
294 294
295/* 295/*
296 * Common hardware RX descriptor 296 * Common hardware RX descriptor
@@ -298,7 +298,7 @@ struct ath5k_hw_5212_tx_desc {
298struct ath5k_hw_all_rx_desc { 298struct ath5k_hw_all_rx_desc {
299 struct ath5k_hw_rx_ctl rx_ctl; 299 struct ath5k_hw_rx_ctl rx_ctl;
300 struct ath5k_hw_rx_status rx_stat; 300 struct ath5k_hw_rx_status rx_stat;
301} __packed; 301} __packed __aligned(4);
302 302
303/* 303/*
304 * Atheros hardware DMA descriptor 304 * Atheros hardware DMA descriptor
@@ -313,7 +313,7 @@ struct ath5k_desc {
313 struct ath5k_hw_5212_tx_desc ds_tx5212; 313 struct ath5k_hw_5212_tx_desc ds_tx5212;
314 struct ath5k_hw_all_rx_desc ds_rx; 314 struct ath5k_hw_all_rx_desc ds_rx;
315 } ud; 315 } ud;
316} __packed; 316} __packed __aligned(4);
317 317
318#define AR5K_RXDESC_INTREQ 0x0020 318#define AR5K_RXDESC_INTREQ 0x0020
319 319
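Every descriptor structure above now carries __aligned(4) next to __packed. __packed alone drops the structure's alignment to one byte, which forces byte-wise accesses on strict-alignment CPUs; adding __aligned(4) tells the compiler the descriptors are still placed on 32-bit boundaries, so normal word loads and stores remain safe. A small stand-alone sketch of the difference, using the plain GCC attributes the kernel macros expand to:

/*
 * Stand-alone illustration of __packed vs. __packed __aligned(4);
 * the kernel macros expand to the GCC attributes used here.
 */
#include <stdio.h>

struct rx_ctl_packed {
	unsigned int rx_control_0;
	unsigned int rx_control_1;
} __attribute__((packed));			/* alignment becomes 1 */

struct rx_ctl_packed_aligned {
	unsigned int rx_control_0;
	unsigned int rx_control_1;
} __attribute__((packed, aligned(4)));		/* packed, still word aligned */

int main(void)
{
	printf("packed only      : alignment %zu\n",
	       _Alignof(struct rx_ctl_packed));
	printf("packed + aligned4: alignment %zu\n",
	       _Alignof(struct rx_ctl_packed_aligned));
	return 0;
}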
diff --git a/drivers/net/wireless/ath/ath5k/dma.c b/drivers/net/wireless/ath/ath5k/dma.c
index 923c9ca5c4f0..0064be7ce5c9 100644
--- a/drivers/net/wireless/ath/ath5k/dma.c
+++ b/drivers/net/wireless/ath/ath5k/dma.c
@@ -37,6 +37,7 @@
37#include "debug.h" 37#include "debug.h"
38#include "base.h" 38#include "base.h"
39 39
40
40/*********\ 41/*********\
41* Receive * 42* Receive *
42\*********/ 43\*********/
@@ -57,7 +58,7 @@ void ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
57 * 58 *
58 * @ah: The &struct ath5k_hw 59 * @ah: The &struct ath5k_hw
59 */ 60 */
60int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah) 61static int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
61{ 62{
62 unsigned int i; 63 unsigned int i;
63 64
@@ -69,7 +70,11 @@ int ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
69 for (i = 1000; i > 0 && 70 for (i = 1000; i > 0 &&
70 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0; 71 (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
71 i--) 72 i--)
72 udelay(10); 73 udelay(100);
74
75 if (!i)
76 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
77 "failed to stop RX DMA !\n");
73 78
74 return i ? 0 : -EBUSY; 79 return i ? 0 : -EBUSY;
75} 80}
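The RX stop path above now waits up to 1000 × 100 µs for the RXE bit to clear and logs a DMA-category debug message on timeout instead of failing silently. The underlying pattern, poll a status bit a bounded number of times and report -EBUSY if it never clears, is sketched below with stub accessors standing in for ath5k_hw_reg_read() and udelay():

/*
 * Sketch of the bounded busy-wait used when stopping a DMA engine:
 * poll a status bit a fixed number of times, then give up with -EBUSY.
 * read_status() and delay_us() are stand-ins for the real register
 * read and udelay() calls.
 */
#include <errno.h>

static int wait_engine_stopped(unsigned int (*read_status)(void),
			       void (*delay_us)(unsigned int us),
			       unsigned int busy_bit)
{
	int i;

	for (i = 1000; i > 0 && (read_status() & busy_bit); i--)
		delay_us(100);		/* ~100 ms worst case in total */

	return i ? 0 : -EBUSY;
}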
@@ -90,11 +95,18 @@ u32 ath5k_hw_get_rxdp(struct ath5k_hw *ah)
90 * @ah: The &struct ath5k_hw 95 * @ah: The &struct ath5k_hw
91 * @phys_addr: RX descriptor address 96 * @phys_addr: RX descriptor address
92 * 97 *
93 * XXX: Should we check if rx is enabled before setting rxdp ? 98 * Returns -EIO if rx is active
94 */ 99 */
95void ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr) 100int ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
96{ 101{
102 if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
103 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
104 "tried to set RXDP while rx was active !\n");
105 return -EIO;
106 }
107
97 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP); 108 ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
109 return 0;
98} 110}
99 111
100 112
@@ -125,7 +137,7 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
125 137
126 /* Return if queue is declared inactive */ 138 /* Return if queue is declared inactive */
127 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) 139 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
128 return -EIO; 140 return -EINVAL;
129 141
130 if (ah->ah_version == AR5K_AR5210) { 142 if (ah->ah_version == AR5K_AR5210) {
131 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); 143 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
@@ -173,10 +185,10 @@ int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
173 * 185 *
174 * Stop DMA transmit on a specific hw queue and drain queue so we don't 186 * Stop DMA transmit on a specific hw queue and drain queue so we don't
175 * have any pending frames. Returns -EBUSY if we still have pending frames, 187 * have any pending frames. Returns -EBUSY if we still have pending frames,
176 * -EINVAL if queue number is out of range. 188 * -EINVAL if queue number is out of range or inactive.
177 * 189 *
178 */ 190 */
179int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue) 191static int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
180{ 192{
181 unsigned int i = 40; 193 unsigned int i = 40;
182 u32 tx_queue, pending; 194 u32 tx_queue, pending;
@@ -185,7 +197,7 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
185 197
186 /* Return if queue is declared inactive */ 198 /* Return if queue is declared inactive */
187 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE) 199 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
188 return -EIO; 200 return -EINVAL;
189 201
190 if (ah->ah_version == AR5K_AR5210) { 202 if (ah->ah_version == AR5K_AR5210) {
191 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR); 203 tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
@@ -211,12 +223,31 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
211 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR); 223 ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
212 ath5k_hw_reg_read(ah, AR5K_CR); 224 ath5k_hw_reg_read(ah, AR5K_CR);
213 } else { 225 } else {
226
227 /*
228 * Enable DCU early termination to quickly
229 * flush any pending frames from QCU
230 */
231 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
232 AR5K_QCU_MISC_DCU_EARLY);
233
214 /* 234 /*
215 * Schedule TX disable and wait until queue is empty 235 * Schedule TX disable and wait until queue is empty
216 */ 236 */
217 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue); 237 AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
218 238
219 /*Check for pending frames*/ 239 /* Wait for queue to stop */
240 for (i = 1000; i > 0 &&
241 (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
242 i--)
243 udelay(100);
244
245 if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
246 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
247 "queue %i didn't stop !\n", queue);
248
249 /* Check for pending frames */
250 i = 1000;
220 do { 251 do {
221 pending = ath5k_hw_reg_read(ah, 252 pending = ath5k_hw_reg_read(ah,
222 AR5K_QUEUE_STATUS(queue)) & 253 AR5K_QUEUE_STATUS(queue)) &
@@ -247,12 +278,12 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
247 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH); 278 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
248 279
249 /* Wait a while and disable mechanism */ 280 /* Wait a while and disable mechanism */
250 udelay(200); 281 udelay(400);
251 AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1, 282 AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
252 AR5K_QUIET_CTL1_QT_EN); 283 AR5K_QUIET_CTL1_QT_EN);
253 284
254 /* Re-check for pending frames */ 285 /* Re-check for pending frames */
255 i = 40; 286 i = 100;
256 do { 287 do {
257 pending = ath5k_hw_reg_read(ah, 288 pending = ath5k_hw_reg_read(ah,
258 AR5K_QUEUE_STATUS(queue)) & 289 AR5K_QUEUE_STATUS(queue)) &
@@ -262,12 +293,27 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
262 293
263 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211, 294 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
264 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH); 295 AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
296
297 if (pending)
298 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
299 "quiet mechanism didn't work q:%i !\n",
300 queue);
265 } 301 }
266 302
303 /*
304 * Disable DCU early termination
305 */
306 AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
307 AR5K_QCU_MISC_DCU_EARLY);
308
267 /* Clear register */ 309 /* Clear register */
268 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD); 310 ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
269 if (pending) 311 if (pending) {
312 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
313 "tx dma didn't stop (q:%i, frm:%i) !\n",
314 queue, pending);
270 return -EBUSY; 315 return -EBUSY;
316 }
271 } 317 }
272 318
273 /* TODO: Check for success on 5210 else return error */ 319 /* TODO: Check for success on 5210 else return error */
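The reworked TX stop above is a two-stage affair: after enabling DCU early termination and scheduling the disable it now explicitly waits for the QCU to report the queue stopped, and only if frames are still pending does it fall back to the quiet-time trick of forcing the channel idle so the DCU can drain, finally returning -EBUSY with a debug message if frames remain. A heavily condensed sketch of that control flow, with stub callbacks in place of the register accesses and illustrative delays:

/*
 * Condensed sketch of the two-stage TX queue stop: normal disable and
 * drain first, quiet-time fallback second.  All callbacks are stubs
 * for the register operations in dma.c; delays and retry counts are
 * illustrative.
 */
#include <errno.h>
#include <stdbool.h>

struct txq_ops {
	void (*schedule_disable)(unsigned int queue);	/* AR5K_QCU_TXD */
	bool (*frames_pending)(unsigned int queue);	/* queue status */
	void (*force_channel_idle)(bool on);		/* quiet mechanism */
	void (*delay_us)(unsigned int us);
};

static int stop_tx_queue(const struct txq_ops *ops, unsigned int queue)
{
	int i;

	ops->schedule_disable(queue);

	for (i = 1000; i > 0 && ops->frames_pending(queue); i--)
		ops->delay_us(100);

	if (ops->frames_pending(queue)) {
		/* Plan B: hold the channel idle so pending frames drain */
		ops->force_channel_idle(true);
		ops->delay_us(400);
		ops->force_channel_idle(false);
	}

	return ops->frames_pending(queue) ? -EBUSY : 0;
}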
@@ -275,6 +321,26 @@ int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
275} 321}
276 322
277/** 323/**
324 * ath5k_hw_stop_beacon_queue - Stop beacon queue
325 *
 326 * @ah: The &struct ath5k_hw
 327 * @queue: The queue number
328 *
329 * Returns -EIO if queue didn't stop
330 */
331int ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
332{
333 int ret;
334 ret = ath5k_hw_stop_tx_dma(ah, queue);
335 if (ret) {
336 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_DMA,
337 "beacon queue didn't stop !\n");
338 return -EIO;
339 }
340 return 0;
341}
342
343/**
278 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue 344 * ath5k_hw_get_txdp - Get TX Descriptor's address for a specific queue
279 * 345 *
280 * @ah: The &struct ath5k_hw 346 * @ah: The &struct ath5k_hw
@@ -427,6 +493,7 @@ done:
427 return ret; 493 return ret;
428} 494}
429 495
496
430/*******************\ 497/*******************\
431* Interrupt masking * 498* Interrupt masking *
432\*******************/ 499\*******************/
@@ -688,3 +755,92 @@ enum ath5k_int ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
688 return old_mask; 755 return old_mask;
689} 756}
690 757
758
759/********************\
760 Init/Stop functions
761\********************/
762
763/**
764 * ath5k_hw_dma_init - Initialize DMA unit
765 *
766 * @ah: The &struct ath5k_hw
767 *
768 * Set DMA size and pre-enable interrupts
769 * (driver handles tx/rx buffer setup and
770 * dma start/stop)
771 *
772 * XXX: Save/restore RXDP/TXDP registers ?
773 */
774void ath5k_hw_dma_init(struct ath5k_hw *ah)
775{
776 /*
777 * Set Rx/Tx DMA Configuration
778 *
779 * Set standard DMA size (128). Note that
780 * a DMA size of 512 causes rx overruns and tx errors
781 * on pci-e cards (tested on 5424 but since rx overruns
782 * also occur on 5416/5418 with madwifi we set 128
783 * for all PCI-E cards to be safe).
784 *
785 * XXX: need to check 5210 for this
 786 * TODO: Check out tx trigger level, it's always 64 on dumps but I
787 * guess we can tweak it and see how it goes ;-)
788 */
789 if (ah->ah_version != AR5K_AR5210) {
790 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
791 AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
792 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
793 AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
794 }
795
796 /* Pre-enable interrupts on 5211/5212*/
797 if (ah->ah_version != AR5K_AR5210)
798 ath5k_hw_set_imr(ah, ah->ah_imr);
799
800}
801
802/**
803 * ath5k_hw_dma_stop - stop DMA unit
804 *
805 * @ah: The &struct ath5k_hw
806 *
807 * Stop tx/rx DMA and interrupts. Returns
808 * -EBUSY if tx or rx dma failed to stop.
809 *
810 * XXX: Sometimes DMA unit hangs and we have
811 * stuck frames on tx queues, only a reset
812 * can fix that.
813 */
814int ath5k_hw_dma_stop(struct ath5k_hw *ah)
815{
816 int i, qmax, err;
817 err = 0;
818
819 /* Disable interrupts */
820 ath5k_hw_set_imr(ah, 0);
821
822 /* Stop rx dma */
823 err = ath5k_hw_stop_rx_dma(ah);
824 if (err)
825 return err;
826
827 /* Clear any pending interrupts
828 * and disable tx dma */
829 if (ah->ah_version != AR5K_AR5210) {
830 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
831 qmax = AR5K_NUM_TX_QUEUES;
832 } else {
833 /* PISR/SISR Not available on 5210 */
834 ath5k_hw_reg_read(ah, AR5K_ISR);
835 qmax = AR5K_NUM_TX_QUEUES_NOQCU;
836 }
837
838 for (i = 0; i < qmax; i++) {
839 err = ath5k_hw_stop_tx_dma(ah, i);
840 /* -EINVAL -> queue inactive */
841 if (err != -EINVAL)
842 return err;
843 }
844
845 return err;
846}
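The new ath5k_hw_dma_stop() above masks interrupts, stops RX DMA and then walks every TX queue; with ath5k_hw_stop_tx_dma() now returning -EINVAL for inactive queues, that value can simply be skipped while real failures are reported to the caller. A simplified reading of that queue loop, as a sketch of the intent rather than a copy of the function:

/*
 * Simplified reading of the intent of the queue loop in
 * ath5k_hw_dma_stop(): inactive queues (-EINVAL) are ignored, any
 * other failure stops the walk.
 */
#include <errno.h>

static int stop_all_tx_queues(unsigned int qmax,
			      int (*stop_queue)(unsigned int queue))
{
	unsigned int i;
	int err;

	for (i = 0; i < qmax; i++) {
		err = stop_queue(i);
		if (err && err != -EINVAL)	/* -EINVAL: queue inactive */
			return err;
	}
	return 0;
}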
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.c b/drivers/net/wireless/ath/ath5k/eeprom.c
index 39722dd73e43..80e625608bac 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.c
+++ b/drivers/net/wireless/ath/ath5k/eeprom.c
@@ -28,45 +28,16 @@
28#include "debug.h" 28#include "debug.h"
29#include "base.h" 29#include "base.h"
30 30
31/*
32 * Read from eeprom
33 */
34static int ath5k_hw_eeprom_read(struct ath5k_hw *ah, u32 offset, u16 *data)
35{
36 u32 status, timeout;
37
38 /*
39 * Initialize EEPROM access
40 */
41 if (ah->ah_version == AR5K_AR5210) {
42 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
43 (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
44 } else {
45 ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
46 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
47 AR5K_EEPROM_CMD_READ);
48 }
49
50 for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
51 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
52 if (status & AR5K_EEPROM_STAT_RDDONE) {
53 if (status & AR5K_EEPROM_STAT_RDERR)
54 return -EIO;
55 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
56 0xffff);
57 return 0;
58 }
59 udelay(15);
60 }
61 31
62 return -ETIMEDOUT; 32/******************\
63} 33* Helper functions *
34\******************/
64 35
65/* 36/*
66 * Translate binary channel representation in EEPROM to frequency 37 * Translate binary channel representation in EEPROM to frequency
67 */ 38 */
68static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin, 39static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin,
69 unsigned int mode) 40 unsigned int mode)
70{ 41{
71 u16 val; 42 u16 val;
72 43
@@ -89,6 +60,11 @@ static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin,
89 return val; 60 return val;
90} 61}
91 62
63
64/*********\
65* Parsers *
66\*********/
67
92/* 68/*
93 * Initialize eeprom & capabilities structs 69 * Initialize eeprom & capabilities structs
94 */ 70 */
@@ -198,7 +174,7 @@ ath5k_eeprom_init_header(struct ath5k_hw *ah)
198 * 174 *
199 * XXX: Serdes values seem to be fixed so 175 * XXX: Serdes values seem to be fixed so
200 * no need to read them here, we write them 176 * no need to read them here, we write them
201 * during ath5k_hw_attach */ 177 * during ath5k_hw_init */
202 AR5K_EEPROM_READ(AR5K_EEPROM_PCIE_OFFSET, val); 178 AR5K_EEPROM_READ(AR5K_EEPROM_PCIE_OFFSET, val);
203 ee->ee_serdes = (val == AR5K_EEPROM_PCIE_SERDES_SECTION) ? 179 ee->ee_serdes = (val == AR5K_EEPROM_PCIE_SERDES_SECTION) ?
204 true : false; 180 true : false;
@@ -647,6 +623,7 @@ ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
647 return 0; 623 return 0;
648} 624}
649 625
626
650/* 627/*
651 * Read power calibration for RF5111 chips 628 * Read power calibration for RF5111 chips
652 * 629 *
@@ -1514,6 +1491,7 @@ ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
1514 return 0; 1491 return 0;
1515} 1492}
1516 1493
1494
1517/* 1495/*
1518 * Read per channel calibration info from EEPROM 1496 * Read per channel calibration info from EEPROM
1519 * 1497 *
@@ -1607,15 +1585,6 @@ ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
1607 return 0; 1585 return 0;
1608} 1586}
1609 1587
1610void
1611ath5k_eeprom_detach(struct ath5k_hw *ah)
1612{
1613 u8 mode;
1614
1615 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
1616 ath5k_eeprom_free_pcal_info(ah, mode);
1617}
1618
1619/* Read conformance test limits used for regulatory control */ 1588/* Read conformance test limits used for regulatory control */
1620static int 1589static int
1621ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah) 1590ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
@@ -1757,6 +1726,44 @@ ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
1757} 1726}
1758 1727
1759/* 1728/*
1729 * Read the MAC address from eeprom
1730 */
1731int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
1732{
1733 u8 mac_d[ETH_ALEN] = {};
1734 u32 total, offset;
1735 u16 data;
1736 int octet, ret;
1737
1738 ret = ath5k_hw_nvram_read(ah, 0x20, &data);
1739 if (ret)
1740 return ret;
1741
1742 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
1743 ret = ath5k_hw_nvram_read(ah, offset, &data);
1744 if (ret)
1745 return ret;
1746
1747 total += data;
1748 mac_d[octet + 1] = data & 0xff;
1749 mac_d[octet] = data >> 8;
1750 octet += 2;
1751 }
1752
1753 if (!total || total == 3 * 0xffff)
1754 return -EINVAL;
1755
1756 memcpy(mac, mac_d, ETH_ALEN);
1757
1758 return 0;
1759}
1760
1761
1762/***********************\
1763* Init/Detach functions *
1764\***********************/
1765
1766/*
1760 * Initialize eeprom data structure 1767 * Initialize eeprom data structure
1761 */ 1768 */
1762int 1769int
@@ -1787,35 +1794,27 @@ ath5k_eeprom_init(struct ath5k_hw *ah)
1787 return 0; 1794 return 0;
1788} 1795}
1789 1796
1790/* 1797void
1791 * Read the MAC address from eeprom 1798ath5k_eeprom_detach(struct ath5k_hw *ah)
1792 */
1793int ath5k_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
1794{ 1799{
1795 u8 mac_d[ETH_ALEN] = {}; 1800 u8 mode;
1796 u32 total, offset;
1797 u16 data;
1798 int octet, ret;
1799
1800 ret = ath5k_hw_eeprom_read(ah, 0x20, &data);
1801 if (ret)
1802 return ret;
1803 1801
1804 for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) { 1802 for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
1805 ret = ath5k_hw_eeprom_read(ah, offset, &data); 1803 ath5k_eeprom_free_pcal_info(ah, mode);
1806 if (ret) 1804}
1807 return ret;
1808 1805
1809 total += data; 1806int
1810 mac_d[octet + 1] = data & 0xff; 1807ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel)
1811 mac_d[octet] = data >> 8; 1808{
1812 octet += 2; 1809 switch (channel->hw_value & CHANNEL_MODES) {
1810 case CHANNEL_A:
1811 case CHANNEL_XR:
1812 return AR5K_EEPROM_MODE_11A;
1813 case CHANNEL_G:
1814 return AR5K_EEPROM_MODE_11G;
1815 case CHANNEL_B:
1816 return AR5K_EEPROM_MODE_11B;
1817 default:
1818 return -1;
1813 } 1819 }
1814
1815 if (!total || total == 3 * 0xffff)
1816 return -EINVAL;
1817
1818 memcpy(mac, mac_d, ETH_ALEN);
1819
1820 return 0;
1821} 1820}
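ath5k_eeprom_read_mac(), now grouped with the other parsers, reads three 16-bit words from EEPROM offsets 0x1f down to 0x1d and unpacks each one high byte first into the six MAC octets, rejecting an all-zero or all-0xffff result as an unprogrammed EEPROM. The byte shuffling on its own looks like this; the sample words in the stand-alone sketch are invented:

/*
 * Stand-alone sketch of the word-to-byte unpacking in
 * ath5k_eeprom_read_mac(): each 16-bit EEPROM word contributes two MAC
 * octets, high byte first; an all-zero or all-0xffff sum means no MAC
 * was programmed.  The sample words below are invented.
 */
#include <stdio.h>

static int unpack_mac(const unsigned short words[3], unsigned char mac[6])
{
	unsigned int total = 0;
	int i;

	for (i = 0; i < 3; i++) {
		total += words[i];
		mac[2 * i]     = words[i] >> 8;		/* high byte first */
		mac[2 * i + 1] = words[i] & 0xff;
	}

	return (!total || total == 3 * 0xffff) ? -1 : 0;
}

int main(void)
{
	/* word read from offset 0x1f first, as in the driver loop */
	unsigned short words[3] = { 0x0013, 0x74af, 0x1234 };
	unsigned char mac[6];

	if (unpack_mac(words, mac) == 0)
		printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
		       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}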
diff --git a/drivers/net/wireless/ath/ath5k/eeprom.h b/drivers/net/wireless/ath/ath5k/eeprom.h
index c4a6d5f26af4..7c09e150dbdc 100644
--- a/drivers/net/wireless/ath/ath5k/eeprom.h
+++ b/drivers/net/wireless/ath/ath5k/eeprom.h
@@ -241,7 +241,7 @@ enum ath5k_eeprom_freq_bands{
241#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250 241#define AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz 6250
242 242
243#define AR5K_EEPROM_READ(_o, _v) do { \ 243#define AR5K_EEPROM_READ(_o, _v) do { \
244 ret = ath5k_hw_eeprom_read(ah, (_o), &(_v)); \ 244 ret = ath5k_hw_nvram_read(ah, (_o), &(_v)); \
245 if (ret) \ 245 if (ret) \
246 return ret; \ 246 return ret; \
247} while (0) 247} while (0)
@@ -517,3 +517,5 @@ struct ath5k_eeprom_info {
517 u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX]; 517 u32 ee_antenna[AR5K_EEPROM_N_MODES][AR5K_ANT_MAX];
518}; 518};
519 519
520int
521ath5k_eeprom_mode_from_channel(struct ieee80211_channel *channel);
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 8fa439308828..e49340d18df4 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -44,7 +44,7 @@ struct ath5k_ini {
44 44
45struct ath5k_ini_mode { 45struct ath5k_ini_mode {
46 u16 mode_register; 46 u16 mode_register;
47 u32 mode_value[5]; 47 u32 mode_value[3];
48}; 48};
49 49
50/* Initial register settings for AR5210 */ 50/* Initial register settings for AR5210 */
@@ -391,76 +391,74 @@ static const struct ath5k_ini ar5211_ini[] = {
391 */ 391 */
392static const struct ath5k_ini_mode ar5211_ini_mode[] = { 392static const struct ath5k_ini_mode ar5211_ini_mode[] = {
393 { AR5K_TXCFG, 393 { AR5K_TXCFG,
394 /* a aTurbo b g (OFDM) */ 394 /* A/XR B G */
395 { 0x00000015, 0x00000015, 0x0000001d, 0x00000015 } }, 395 { 0x00000015, 0x0000001d, 0x00000015 } },
396 { AR5K_QUEUE_DFS_LOCAL_IFS(0), 396 { AR5K_QUEUE_DFS_LOCAL_IFS(0),
397 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 397 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
398 { AR5K_QUEUE_DFS_LOCAL_IFS(1), 398 { AR5K_QUEUE_DFS_LOCAL_IFS(1),
399 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 399 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
400 { AR5K_QUEUE_DFS_LOCAL_IFS(2), 400 { AR5K_QUEUE_DFS_LOCAL_IFS(2),
401 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 401 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
402 { AR5K_QUEUE_DFS_LOCAL_IFS(3), 402 { AR5K_QUEUE_DFS_LOCAL_IFS(3),
403 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 403 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
404 { AR5K_QUEUE_DFS_LOCAL_IFS(4), 404 { AR5K_QUEUE_DFS_LOCAL_IFS(4),
405 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 405 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
406 { AR5K_QUEUE_DFS_LOCAL_IFS(5), 406 { AR5K_QUEUE_DFS_LOCAL_IFS(5),
407 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 407 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
408 { AR5K_QUEUE_DFS_LOCAL_IFS(6), 408 { AR5K_QUEUE_DFS_LOCAL_IFS(6),
409 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 409 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
410 { AR5K_QUEUE_DFS_LOCAL_IFS(7), 410 { AR5K_QUEUE_DFS_LOCAL_IFS(7),
411 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 411 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
412 { AR5K_QUEUE_DFS_LOCAL_IFS(8), 412 { AR5K_QUEUE_DFS_LOCAL_IFS(8),
413 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 413 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
414 { AR5K_QUEUE_DFS_LOCAL_IFS(9), 414 { AR5K_QUEUE_DFS_LOCAL_IFS(9),
415 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } }, 415 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
416 { AR5K_DCU_GBL_IFS_SLOT, 416 { AR5K_DCU_GBL_IFS_SLOT,
417 { 0x00000168, 0x000001e0, 0x000001b8, 0x00000168 } }, 417 { 0x00000168, 0x000001b8, 0x00000168 } },
418 { AR5K_DCU_GBL_IFS_SIFS, 418 { AR5K_DCU_GBL_IFS_SIFS,
419 { 0x00000230, 0x000001e0, 0x000000b0, 0x00000230 } }, 419 { 0x00000230, 0x000000b0, 0x00000230 } },
420 { AR5K_DCU_GBL_IFS_EIFS, 420 { AR5K_DCU_GBL_IFS_EIFS,
421 { 0x00000d98, 0x00001180, 0x00001f48, 0x00000d98 } }, 421 { 0x00000d98, 0x00001f48, 0x00000d98 } },
422 { AR5K_DCU_GBL_IFS_MISC, 422 { AR5K_DCU_GBL_IFS_MISC,
423 { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000a0e0 } }, 423 { 0x0000a0e0, 0x00005880, 0x0000a0e0 } },
424 { AR5K_TIME_OUT, 424 { AR5K_TIME_OUT,
425 { 0x04000400, 0x08000800, 0x20003000, 0x04000400 } }, 425 { 0x04000400, 0x20003000, 0x04000400 } },
426 { AR5K_USEC_5211, 426 { AR5K_USEC_5211,
427 { 0x0e8d8fa7, 0x0e8d8fcf, 0x01608f95, 0x0e8d8fa7 } }, 427 { 0x0e8d8fa7, 0x01608f95, 0x0e8d8fa7 } },
428 { AR5K_PHY_TURBO,
429 { 0x00000000, 0x00000003, 0x00000000, 0x00000000 } },
430 { AR5K_PHY(8), 428 { AR5K_PHY(8),
431 { 0x02020200, 0x02020200, 0x02010200, 0x02020200 } }, 429 { 0x02020200, 0x02010200, 0x02020200 } },
432 { AR5K_PHY(9), 430 { AR5K_PHY_RF_CTL2,
433 { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e } }, 431 { 0x00000e0e, 0x00000707, 0x00000e0e } },
434 { AR5K_PHY(10), 432 { AR5K_PHY_RF_CTL3,
435 { 0x0a020001, 0x0a020001, 0x05010000, 0x0a020001 } }, 433 { 0x0a020001, 0x05010000, 0x0a020001 } },
436 { AR5K_PHY(13), 434 { AR5K_PHY_RF_CTL4,
437 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 435 { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
438 { AR5K_PHY(14), 436 { AR5K_PHY_PA_CTL,
439 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b } }, 437 { 0x00000007, 0x0000000b, 0x0000000b } },
440 { AR5K_PHY(17), 438 { AR5K_PHY_SETTLING,
441 { 0x1372169c, 0x137216a5, 0x137216a8, 0x1372169c } }, 439 { 0x1372169c, 0x137216a8, 0x1372169c } },
442 { AR5K_PHY(18), 440 { AR5K_PHY_GAIN,
443 { 0x0018ba67, 0x0018ba67, 0x0018ba69, 0x0018ba69 } }, 441 { 0x0018ba67, 0x0018ba69, 0x0018ba69 } },
444 { AR5K_PHY(20), 442 { AR5K_PHY_DESIRED_SIZE,
445 { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } }, 443 { 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } },
446 { AR5K_PHY_SIG, 444 { AR5K_PHY_SIG,
447 { 0x7e800d2e, 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } }, 445 { 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } },
448 { AR5K_PHY_AGCCOARSE, 446 { AR5K_PHY_AGCCOARSE,
449 { 0x31375d5e, 0x31375d5e, 0x313a5d5e, 0x31375d5e } }, 447 { 0x31375d5e, 0x313a5d5e, 0x31375d5e } },
450 { AR5K_PHY_AGCCTL, 448 { AR5K_PHY_AGCCTL,
451 { 0x0000bd10, 0x0000bd10, 0x0000bd38, 0x0000bd10 } }, 449 { 0x0000bd10, 0x0000bd38, 0x0000bd10 } },
452 { AR5K_PHY_NF, 450 { AR5K_PHY_NF,
453 { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, 451 { 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
454 { AR5K_PHY_RX_DELAY, 452 { AR5K_PHY_RX_DELAY,
455 { 0x00002710, 0x00002710, 0x0000157c, 0x00002710 } }, 453 { 0x00002710, 0x0000157c, 0x00002710 } },
456 { AR5K_PHY(70), 454 { AR5K_PHY(70),
457 { 0x00000190, 0x00000190, 0x00000084, 0x00000190 } }, 455 { 0x00000190, 0x00000084, 0x00000190 } },
458 { AR5K_PHY_FRAME_CTL_5211, 456 { AR5K_PHY_FRAME_CTL_5211,
459 { 0x6fe01020, 0x6fe01020, 0x6fe00920, 0x6fe01020 } }, 457 { 0x6fe01020, 0x6fe00920, 0x6fe01020 } },
460 { AR5K_PHY_PCDAC_TXPOWER_BASE, 458 { AR5K_PHY_PCDAC_TXPOWER_BASE,
461 { 0x05ff14ff, 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } }, 459 { 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } },
462 { AR5K_RF_BUFFER_CONTROL_4, 460 { AR5K_RF_BUFFER_CONTROL_4,
463 { 0x00000010, 0x00000014, 0x00000010, 0x00000010 } }, 461 { 0x00000010, 0x00000010, 0x00000010 } },
464}; 462};
465 463
466/* Initial register settings for AR5212 */ 464/* Initial register settings for AR5212 */
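With the turbo columns gone, every mode table in this file shrinks from five entries per register to three (A/XR, B, G), matching the mode_value[3] change in struct ath5k_ini_mode earlier in the patch. A minimal sketch of how such a table is applied per mode; the index assignment A/XR = 0, B = 1, G = 2 follows the column comments and is an assumption, the real writer is ath5k_hw_ini_mode_registers():

/*
 * Sketch of applying a mode-specific initval table after the turbo
 * columns were dropped: one value per remaining PHY mode.  The mode
 * indices are assumed from the column comments (A/XR = 0, B = 1,
 * G = 2); the real code lives in ath5k_hw_ini_mode_registers().
 */
struct ini_mode_sketch {
	unsigned short mode_register;	/* register offset */
	unsigned int mode_value[3];	/* A/XR, B, G */
};

static void write_ini_mode(const struct ini_mode_sketch *regs,
			   unsigned int size, unsigned int mode,
			   void (*reg_write)(unsigned int val,
					     unsigned int reg))
{
	unsigned int i;

	if (mode >= 3)		/* only three columns remain */
		return;

	for (i = 0; i < size; i++)
		reg_write(regs[i].mode_value[mode], regs[i].mode_register);
}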
@@ -677,89 +675,87 @@ static const struct ath5k_ini ar5212_ini_common_start[] = {
677/* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */ 675/* Initial mode-specific settings for AR5212 (Written before ar5212_ini) */
678static const struct ath5k_ini_mode ar5212_ini_mode_start[] = { 676static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
679 { AR5K_QUEUE_DFS_LOCAL_IFS(0), 677 { AR5K_QUEUE_DFS_LOCAL_IFS(0),
680 /* a/XR aTurbo b g (DYN) gTurbo */ 678 /* A/XR B G */
681 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 679 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
682 { AR5K_QUEUE_DFS_LOCAL_IFS(1), 680 { AR5K_QUEUE_DFS_LOCAL_IFS(1),
683 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 681 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
684 { AR5K_QUEUE_DFS_LOCAL_IFS(2), 682 { AR5K_QUEUE_DFS_LOCAL_IFS(2),
685 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 683 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
686 { AR5K_QUEUE_DFS_LOCAL_IFS(3), 684 { AR5K_QUEUE_DFS_LOCAL_IFS(3),
687 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 685 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
688 { AR5K_QUEUE_DFS_LOCAL_IFS(4), 686 { AR5K_QUEUE_DFS_LOCAL_IFS(4),
689 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 687 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
690 { AR5K_QUEUE_DFS_LOCAL_IFS(5), 688 { AR5K_QUEUE_DFS_LOCAL_IFS(5),
691 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 689 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
692 { AR5K_QUEUE_DFS_LOCAL_IFS(6), 690 { AR5K_QUEUE_DFS_LOCAL_IFS(6),
693 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 691 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
694 { AR5K_QUEUE_DFS_LOCAL_IFS(7), 692 { AR5K_QUEUE_DFS_LOCAL_IFS(7),
695 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 693 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
696 { AR5K_QUEUE_DFS_LOCAL_IFS(8), 694 { AR5K_QUEUE_DFS_LOCAL_IFS(8),
697 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 695 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
698 { AR5K_QUEUE_DFS_LOCAL_IFS(9), 696 { AR5K_QUEUE_DFS_LOCAL_IFS(9),
699 { 0x002ffc0f, 0x002ffc0f, 0x002ffc1f, 0x002ffc0f, 0x002ffc0f } }, 697 { 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
700 { AR5K_DCU_GBL_IFS_SIFS, 698 { AR5K_DCU_GBL_IFS_SIFS,
701 { 0x00000230, 0x000001e0, 0x000000b0, 0x00000160, 0x000001e0 } }, 699 { 0x00000230, 0x000000b0, 0x00000160 } },
702 { AR5K_DCU_GBL_IFS_SLOT, 700 { AR5K_DCU_GBL_IFS_SLOT,
703 { 0x00000168, 0x000001e0, 0x000001b8, 0x0000018c, 0x000001e0 } }, 701 { 0x00000168, 0x000001b8, 0x0000018c } },
704 { AR5K_DCU_GBL_IFS_EIFS, 702 { AR5K_DCU_GBL_IFS_EIFS,
705 { 0x00000e60, 0x00001180, 0x00001f1c, 0x00003e38, 0x00001180 } }, 703 { 0x00000e60, 0x00001f1c, 0x00003e38 } },
706 { AR5K_DCU_GBL_IFS_MISC, 704 { AR5K_DCU_GBL_IFS_MISC,
707 { 0x0000a0e0, 0x00014068, 0x00005880, 0x0000b0e0, 0x00014068 } }, 705 { 0x0000a0e0, 0x00005880, 0x0000b0e0 } },
708 { AR5K_TIME_OUT, 706 { AR5K_TIME_OUT,
709 { 0x03e803e8, 0x06e006e0, 0x04200420, 0x08400840, 0x06e006e0 } }, 707 { 0x03e803e8, 0x04200420, 0x08400840 } },
710 { AR5K_PHY_TURBO,
711 { 0x00000000, 0x00000003, 0x00000000, 0x00000000, 0x00000003 } },
712 { AR5K_PHY(8), 708 { AR5K_PHY(8),
713 { 0x02020200, 0x02020200, 0x02010200, 0x02020200, 0x02020200 } }, 709 { 0x02020200, 0x02010200, 0x02020200 } },
714 { AR5K_PHY_RF_CTL2, 710 { AR5K_PHY_RF_CTL2,
715 { 0x00000e0e, 0x00000e0e, 0x00000707, 0x00000e0e, 0x00000e0e } }, 711 { 0x00000e0e, 0x00000707, 0x00000e0e } },
716 { AR5K_PHY_SETTLING, 712 { AR5K_PHY_SETTLING,
717 { 0x1372161c, 0x13721c25, 0x13721722, 0x137216a2, 0x13721c25 } }, 713 { 0x1372161c, 0x13721722, 0x137216a2 } },
718 { AR5K_PHY_AGCCTL, 714 { AR5K_PHY_AGCCTL,
719 { 0x00009d10, 0x00009d10, 0x00009d18, 0x00009d18, 0x00009d10 } }, 715 { 0x00009d10, 0x00009d18, 0x00009d18 } },
720 { AR5K_PHY_NF, 716 { AR5K_PHY_NF,
721 { 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 } }, 717 { 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
722 { AR5K_PHY_WEAK_OFDM_HIGH_THR, 718 { AR5K_PHY_WEAK_OFDM_HIGH_THR,
723 { 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 } }, 719 { 0x409a4190, 0x409a4190, 0x409a4190 } },
724 { AR5K_PHY(70), 720 { AR5K_PHY(70),
725 { 0x000001b8, 0x000001b8, 0x00000084, 0x00000108, 0x000001b8 } }, 721 { 0x000001b8, 0x00000084, 0x00000108 } },
726 { AR5K_PHY_OFDM_SELFCORR, 722 { AR5K_PHY_OFDM_SELFCORR,
727 { 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05, 0x10058a05 } }, 723 { 0x10058a05, 0x10058a05, 0x10058a05 } },
728 { 0xa230, 724 { 0xa230,
729 { 0x00000000, 0x00000000, 0x00000000, 0x00000108, 0x00000000 } }, 725 { 0x00000000, 0x00000000, 0x00000108 } },
730}; 726};
731 727
732/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */ 728/* Initial mode-specific settings for AR5212 + RF5111 (Written after ar5212_ini) */
733static const struct ath5k_ini_mode rf5111_ini_mode_end[] = { 729static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
734 { AR5K_TXCFG, 730 { AR5K_TXCFG,
735 /* a/XR aTurbo b g (DYN) gTurbo */ 731 /* A/XR B G */
736 { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } }, 732 { 0x00008015, 0x00008015, 0x00008015 } },
737 { AR5K_USEC_5211, 733 { AR5K_USEC_5211,
738 { 0x128d8fa7, 0x09880fcf, 0x04e00f95, 0x12e00fab, 0x09880fcf } }, 734 { 0x128d8fa7, 0x04e00f95, 0x12e00fab } },
739 { AR5K_PHY_RF_CTL3, 735 { AR5K_PHY_RF_CTL3,
740 { 0x0a020001, 0x0a020001, 0x05010100, 0x0a020001, 0x0a020001 } }, 736 { 0x0a020001, 0x05010100, 0x0a020001 } },
741 { AR5K_PHY_RF_CTL4, 737 { AR5K_PHY_RF_CTL4,
742 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 738 { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
743 { AR5K_PHY_PA_CTL, 739 { AR5K_PHY_PA_CTL,
744 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, 740 { 0x00000007, 0x0000000b, 0x0000000b } },
745 { AR5K_PHY_GAIN, 741 { AR5K_PHY_GAIN,
746 { 0x0018da5a, 0x0018da5a, 0x0018ca69, 0x0018ca69, 0x0018ca69 } }, 742 { 0x0018da5a, 0x0018ca69, 0x0018ca69 } },
747 { AR5K_PHY_DESIRED_SIZE, 743 { AR5K_PHY_DESIRED_SIZE,
748 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, 744 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
749 { AR5K_PHY_SIG, 745 { AR5K_PHY_SIG,
750 { 0x7e800d2e, 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e, 0x7e800d2e } }, 746 { 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e } },
751 { AR5K_PHY_AGCCOARSE, 747 { AR5K_PHY_AGCCOARSE,
752 { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137615e } }, 748 { 0x3137665e, 0x3137665e, 0x3137665e } },
753 { AR5K_PHY_WEAK_OFDM_LOW_THR, 749 { AR5K_PHY_WEAK_OFDM_LOW_THR,
754 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb080, 0x050cb080 } }, 750 { 0x050cb081, 0x050cb081, 0x050cb080 } },
755 { AR5K_PHY_RX_DELAY, 751 { AR5K_PHY_RX_DELAY,
756 { 0x00002710, 0x00002710, 0x0000157c, 0x00002af8, 0x00002710 } }, 752 { 0x00002710, 0x0000157c, 0x00002af8 } },
757 { AR5K_PHY_FRAME_CTL_5211, 753 { AR5K_PHY_FRAME_CTL_5211,
758 { 0xf7b81020, 0xf7b81020, 0xf7b80d20, 0xf7b81020, 0xf7b81020 } }, 754 { 0xf7b81020, 0xf7b80d20, 0xf7b81020 } },
759 { AR5K_PHY_GAIN_2GHZ, 755 { AR5K_PHY_GAIN_2GHZ,
760 { 0x642c416a, 0x642c416a, 0x6440416a, 0x6440416a, 0x6440416a } }, 756 { 0x642c416a, 0x6440416a, 0x6440416a } },
761 { AR5K_PHY_CCK_RX_CTL_4, 757 { AR5K_PHY_CCK_RX_CTL_4,
762 { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } }, 758 { 0x1883800a, 0x1873800a, 0x1883800a } },
763}; 759};
764 760
765static const struct ath5k_ini rf5111_ini_common_end[] = { 761static const struct ath5k_ini rf5111_ini_common_end[] = {
@@ -782,38 +778,38 @@ static const struct ath5k_ini rf5111_ini_common_end[] = {
782/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */ 778/* Initial mode-specific settings for AR5212 + RF5112 (Written after ar5212_ini) */
783static const struct ath5k_ini_mode rf5112_ini_mode_end[] = { 779static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
784 { AR5K_TXCFG, 780 { AR5K_TXCFG,
785 /* a/XR aTurbo b g (DYN) gTurbo */ 781 /* A/XR B G */
786 { 0x00008015, 0x00008015, 0x00008015, 0x00008015, 0x00008015 } }, 782 { 0x00008015, 0x00008015, 0x00008015 } },
787 { AR5K_USEC_5211, 783 { AR5K_USEC_5211,
788 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, 784 { 0x128d93a7, 0x04e01395, 0x12e013ab } },
789 { AR5K_PHY_RF_CTL3, 785 { AR5K_PHY_RF_CTL3,
790 { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, 786 { 0x0a020001, 0x05020100, 0x0a020001 } },
791 { AR5K_PHY_RF_CTL4, 787 { AR5K_PHY_RF_CTL4,
792 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 788 { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
793 { AR5K_PHY_PA_CTL, 789 { AR5K_PHY_PA_CTL,
794 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, 790 { 0x00000007, 0x0000000b, 0x0000000b } },
795 { AR5K_PHY_GAIN, 791 { AR5K_PHY_GAIN,
796 { 0x0018da6d, 0x0018da6d, 0x0018ca75, 0x0018ca75, 0x0018ca75 } }, 792 { 0x0018da6d, 0x0018ca75, 0x0018ca75 } },
797 { AR5K_PHY_DESIRED_SIZE, 793 { AR5K_PHY_DESIRED_SIZE,
798 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } }, 794 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
799 { AR5K_PHY_SIG, 795 { AR5K_PHY_SIG,
800 { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e, 0x7e800d2e } }, 796 { 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e } },
801 { AR5K_PHY_AGCCOARSE, 797 { AR5K_PHY_AGCCOARSE,
802 { 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e, 0x3137665e } }, 798 { 0x3137665e, 0x3137665e, 0x3137665e } },
803 { AR5K_PHY_WEAK_OFDM_LOW_THR, 799 { AR5K_PHY_WEAK_OFDM_LOW_THR,
804 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, 800 { 0x050cb081, 0x050cb081, 0x050cb081 } },
805 { AR5K_PHY_RX_DELAY, 801 { AR5K_PHY_RX_DELAY,
806 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, 802 { 0x000007d0, 0x0000044c, 0x00000898 } },
807 { AR5K_PHY_FRAME_CTL_5211, 803 { AR5K_PHY_FRAME_CTL_5211,
808 { 0xf7b81020, 0xf7b81020, 0xf7b80d10, 0xf7b81010, 0xf7b81010 } }, 804 { 0xf7b81020, 0xf7b80d10, 0xf7b81010 } },
809 { AR5K_PHY_CCKTXCTL, 805 { AR5K_PHY_CCKTXCTL,
810 { 0x00000000, 0x00000000, 0x00000008, 0x00000008, 0x00000008 } }, 806 { 0x00000000, 0x00000008, 0x00000008 } },
811 { AR5K_PHY_CCK_CROSSCORR, 807 { AR5K_PHY_CCK_CROSSCORR,
812 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 808 { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
813 { AR5K_PHY_GAIN_2GHZ, 809 { AR5K_PHY_GAIN_2GHZ,
814 { 0x642c0140, 0x642c0140, 0x6442c160, 0x6442c160, 0x6442c160 } }, 810 { 0x642c0140, 0x6442c160, 0x6442c160 } },
815 { AR5K_PHY_CCK_RX_CTL_4, 811 { AR5K_PHY_CCK_RX_CTL_4,
816 { 0x1883800a, 0x1883800a, 0x1873800a, 0x1883800a, 0x1883800a } }, 812 { 0x1883800a, 0x1873800a, 0x1883800a } },
817}; 813};
818 814
819static const struct ath5k_ini rf5112_ini_common_end[] = { 815static const struct ath5k_ini rf5112_ini_common_end[] = {
@@ -833,66 +829,66 @@ static const struct ath5k_ini rf5112_ini_common_end[] = {
833/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */ 829/* Initial mode-specific settings for RF5413/5414 (Written after ar5212_ini) */
834static const struct ath5k_ini_mode rf5413_ini_mode_end[] = { 830static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
835 { AR5K_TXCFG, 831 { AR5K_TXCFG,
836 /* a/XR aTurbo b g (DYN) gTurbo */ 832 /* A/XR B G */
837 { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, 833 { 0x00000015, 0x00000015, 0x00000015 } },
838 { AR5K_USEC_5211, 834 { AR5K_USEC_5211,
839 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, 835 { 0x128d93a7, 0x04e01395, 0x12e013ab } },
840 { AR5K_PHY_RF_CTL3, 836 { AR5K_PHY_RF_CTL3,
841 { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, 837 { 0x0a020001, 0x05020100, 0x0a020001 } },
842 { AR5K_PHY_RF_CTL4, 838 { AR5K_PHY_RF_CTL4,
843 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 839 { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
844 { AR5K_PHY_PA_CTL, 840 { AR5K_PHY_PA_CTL,
845 { 0x00000007, 0x00000007, 0x0000000b, 0x0000000b, 0x0000000b } }, 841 { 0x00000007, 0x0000000b, 0x0000000b } },
846 { AR5K_PHY_GAIN, 842 { AR5K_PHY_GAIN,
847 { 0x0018fa61, 0x0018fa61, 0x001a1a63, 0x001a1a63, 0x001a1a63 } }, 843 { 0x0018fa61, 0x001a1a63, 0x001a1a63 } },
848 { AR5K_PHY_DESIRED_SIZE, 844 { AR5K_PHY_DESIRED_SIZE,
849 { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } }, 845 { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
850 { AR5K_PHY_SIG, 846 { AR5K_PHY_SIG,
851 { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, 847 { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
852 { AR5K_PHY_AGCCOARSE, 848 { AR5K_PHY_AGCCOARSE,
853 { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } }, 849 { 0x3139605e, 0x3139605e, 0x3139605e } },
854 { AR5K_PHY_WEAK_OFDM_LOW_THR, 850 { AR5K_PHY_WEAK_OFDM_LOW_THR,
855 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, 851 { 0x050cb081, 0x050cb081, 0x050cb081 } },
856 { AR5K_PHY_RX_DELAY, 852 { AR5K_PHY_RX_DELAY,
857 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, 853 { 0x000007d0, 0x0000044c, 0x00000898 } },
858 { AR5K_PHY_FRAME_CTL_5211, 854 { AR5K_PHY_FRAME_CTL_5211,
859 { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, 855 { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
860 { AR5K_PHY_CCKTXCTL, 856 { AR5K_PHY_CCKTXCTL,
861 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 857 { 0x00000000, 0x00000000, 0x00000000 } },
862 { AR5K_PHY_CCK_CROSSCORR, 858 { AR5K_PHY_CCK_CROSSCORR,
863 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 859 { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
864 { AR5K_PHY_GAIN_2GHZ, 860 { AR5K_PHY_GAIN_2GHZ,
865 { 0x002ec1e0, 0x002ec1e0, 0x002ac120, 0x002ac120, 0x002ac120 } }, 861 { 0x002ec1e0, 0x002ac120, 0x002ac120 } },
866 { AR5K_PHY_CCK_RX_CTL_4, 862 { AR5K_PHY_CCK_RX_CTL_4,
867 { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, 863 { 0x1883800a, 0x1863800a, 0x1883800a } },
868 { 0xa300, 864 { 0xa300,
869 { 0x18010000, 0x18010000, 0x18010000, 0x18010000, 0x18010000 } }, 865 { 0x18010000, 0x18010000, 0x18010000 } },
870 { 0xa304, 866 { 0xa304,
871 { 0x30032602, 0x30032602, 0x30032602, 0x30032602, 0x30032602 } }, 867 { 0x30032602, 0x30032602, 0x30032602 } },
872 { 0xa308, 868 { 0xa308,
873 { 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06, 0x48073e06 } }, 869 { 0x48073e06, 0x48073e06, 0x48073e06 } },
874 { 0xa30c, 870 { 0xa30c,
875 { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } }, 871 { 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
876 { 0xa310, 872 { 0xa310,
877 { 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f, 0x641a600f } }, 873 { 0x641a600f, 0x641a600f, 0x641a600f } },
878 { 0xa314, 874 { 0xa314,
879 { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } }, 875 { 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
880 { 0xa318, 876 { 0xa318,
881 { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } }, 877 { 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
882 { 0xa31c, 878 { 0xa31c,
883 { 0x90cf865b, 0x90cf865b, 0x8ecf865b, 0x8ecf865b, 0x8ecf865b } }, 879 { 0x90cf865b, 0x8ecf865b, 0x8ecf865b } },
884 { 0xa320, 880 { 0xa320,
885 { 0x9d4f970f, 0x9d4f970f, 0x9b4f970f, 0x9b4f970f, 0x9b4f970f } }, 881 { 0x9d4f970f, 0x9b4f970f, 0x9b4f970f } },
886 { 0xa324, 882 { 0xa324,
887 { 0xa7cfa38f, 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f, 0xa3cf9f8f } }, 883 { 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f } },
888 { 0xa328, 884 { 0xa328,
889 { 0xb55faf1f, 0xb55faf1f, 0xb35faf1f, 0xb35faf1f, 0xb35faf1f } }, 885 { 0xb55faf1f, 0xb35faf1f, 0xb35faf1f } },
890 { 0xa32c, 886 { 0xa32c,
891 { 0xbddfb99f, 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f, 0xbbdfb99f } }, 887 { 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f } },
892 { 0xa330, 888 { 0xa330,
893 { 0xcb7fc53f, 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f, 0xcb7fc73f } }, 889 { 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f } },
894 { 0xa334, 890 { 0xa334,
895 { 0xd5ffd1bf, 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } }, 891 { 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } },
896}; 892};
897 893
898static const struct ath5k_ini rf5413_ini_common_end[] = { 894static const struct ath5k_ini rf5413_ini_common_end[] = {
@@ -972,38 +968,38 @@ static const struct ath5k_ini rf5413_ini_common_end[] = {
972/* XXX: a mode ? */ 968/* XXX: a mode ? */
973static const struct ath5k_ini_mode rf2413_ini_mode_end[] = { 969static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
974 { AR5K_TXCFG, 970 { AR5K_TXCFG,
975 /* a/XR aTurbo b g (DYN) gTurbo */ 971 /* A/XR B G */
976 { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, 972 { 0x00000015, 0x00000015, 0x00000015 } },
977 { AR5K_USEC_5211, 973 { AR5K_USEC_5211,
978 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, 974 { 0x128d93a7, 0x04e01395, 0x12e013ab } },
979 { AR5K_PHY_RF_CTL3, 975 { AR5K_PHY_RF_CTL3,
980 { 0x0a020001, 0x0a020001, 0x05020000, 0x0a020001, 0x0a020001 } }, 976 { 0x0a020001, 0x05020000, 0x0a020001 } },
981 { AR5K_PHY_RF_CTL4, 977 { AR5K_PHY_RF_CTL4,
982 { 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00, 0x00000e00 } }, 978 { 0x00000e00, 0x00000e00, 0x00000e00 } },
983 { AR5K_PHY_PA_CTL, 979 { AR5K_PHY_PA_CTL,
984 { 0x00000002, 0x00000002, 0x0000000a, 0x0000000a, 0x0000000a } }, 980 { 0x00000002, 0x0000000a, 0x0000000a } },
985 { AR5K_PHY_GAIN, 981 { AR5K_PHY_GAIN,
986 { 0x0018da6d, 0x0018da6d, 0x001a6a64, 0x001a6a64, 0x001a6a64 } }, 982 { 0x0018da6d, 0x001a6a64, 0x001a6a64 } },
987 { AR5K_PHY_DESIRED_SIZE, 983 { AR5K_PHY_DESIRED_SIZE,
988 { 0x0de8b4e0, 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da, 0x0de8b0da } }, 984 { 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da } },
989 { AR5K_PHY_SIG, 985 { AR5K_PHY_SIG,
990 { 0x7e800d2e, 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e, 0x7e800d2e } }, 986 { 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e } },
991 { AR5K_PHY_AGCCOARSE, 987 { AR5K_PHY_AGCCOARSE,
992 { 0x3137665e, 0x3137665e, 0x3137665e, 0x3139605e, 0x3137665e } }, 988 { 0x3137665e, 0x3137665e, 0x3139605e } },
993 { AR5K_PHY_WEAK_OFDM_LOW_THR, 989 { AR5K_PHY_WEAK_OFDM_LOW_THR,
994 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, 990 { 0x050cb081, 0x050cb081, 0x050cb081 } },
995 { AR5K_PHY_RX_DELAY, 991 { AR5K_PHY_RX_DELAY,
996 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, 992 { 0x000007d0, 0x0000044c, 0x00000898 } },
997 { AR5K_PHY_FRAME_CTL_5211, 993 { AR5K_PHY_FRAME_CTL_5211,
998 { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, 994 { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
999 { AR5K_PHY_CCKTXCTL, 995 { AR5K_PHY_CCKTXCTL,
1000 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 996 { 0x00000000, 0x00000000, 0x00000000 } },
1001 { AR5K_PHY_CCK_CROSSCORR, 997 { AR5K_PHY_CCK_CROSSCORR,
1002 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 998 { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
1003 { AR5K_PHY_GAIN_2GHZ, 999 { AR5K_PHY_GAIN_2GHZ,
1004 { 0x002c0140, 0x002c0140, 0x0042c140, 0x0042c140, 0x0042c140 } }, 1000 { 0x002c0140, 0x0042c140, 0x0042c140 } },
1005 { AR5K_PHY_CCK_RX_CTL_4, 1001 { AR5K_PHY_CCK_RX_CTL_4,
1006 { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, 1002 { 0x1883800a, 0x1863800a, 0x1883800a } },
1007}; 1003};
1008 1004
1009static const struct ath5k_ini rf2413_ini_common_end[] = { 1005static const struct ath5k_ini rf2413_ini_common_end[] = {
@@ -1094,52 +1090,50 @@ static const struct ath5k_ini rf2413_ini_common_end[] = {
1094/* XXX: a mode ? */ 1090/* XXX: a mode ? */
1095static const struct ath5k_ini_mode rf2425_ini_mode_end[] = { 1091static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
1096 { AR5K_TXCFG, 1092 { AR5K_TXCFG,
1097 /* a/XR aTurbo b g (DYN) gTurbo */ 1093 /* A/XR B G */
1098 { 0x00000015, 0x00000015, 0x00000015, 0x00000015, 0x00000015 } }, 1094 { 0x00000015, 0x00000015, 0x00000015 } },
1099 { AR5K_USEC_5211, 1095 { AR5K_USEC_5211,
1100 { 0x128d93a7, 0x098813cf, 0x04e01395, 0x12e013ab, 0x098813cf } }, 1096 { 0x128d93a7, 0x04e01395, 0x12e013ab } },
1101 { AR5K_PHY_TURBO,
1102 { 0x00000000, 0x00000001, 0x00000000, 0x00000000, 0x00000001 } },
1103 { AR5K_PHY_RF_CTL3, 1097 { AR5K_PHY_RF_CTL3,
1104 { 0x0a020001, 0x0a020001, 0x05020100, 0x0a020001, 0x0a020001 } }, 1098 { 0x0a020001, 0x05020100, 0x0a020001 } },
1105 { AR5K_PHY_RF_CTL4, 1099 { AR5K_PHY_RF_CTL4,
1106 { 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e } }, 1100 { 0x00000e0e, 0x00000e0e, 0x00000e0e } },
1107 { AR5K_PHY_PA_CTL, 1101 { AR5K_PHY_PA_CTL,
1108 { 0x00000003, 0x00000003, 0x0000000b, 0x0000000b, 0x0000000b } }, 1102 { 0x00000003, 0x0000000b, 0x0000000b } },
1109 { AR5K_PHY_SETTLING, 1103 { AR5K_PHY_SETTLING,
1110 { 0x1372161c, 0x13721c25, 0x13721722, 0x13721422, 0x13721c25 } }, 1104 { 0x1372161c, 0x13721722, 0x13721422 } },
1111 { AR5K_PHY_GAIN, 1105 { AR5K_PHY_GAIN,
1112 { 0x0018fa61, 0x0018fa61, 0x00199a65, 0x00199a65, 0x00199a65 } }, 1106 { 0x0018fa61, 0x00199a65, 0x00199a65 } },
1113 { AR5K_PHY_DESIRED_SIZE, 1107 { AR5K_PHY_DESIRED_SIZE,
1114 { 0x0c98b4e0, 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da, 0x0c98b0da } }, 1108 { 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
1115 { AR5K_PHY_SIG, 1109 { AR5K_PHY_SIG,
1116 { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } }, 1110 { 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
1117 { AR5K_PHY_AGCCOARSE, 1111 { AR5K_PHY_AGCCOARSE,
1118 { 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e, 0x3139605e } }, 1112 { 0x3139605e, 0x3139605e, 0x3139605e } },
1119 { AR5K_PHY_WEAK_OFDM_LOW_THR, 1113 { AR5K_PHY_WEAK_OFDM_LOW_THR,
1120 { 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 } }, 1114 { 0x050cb081, 0x050cb081, 0x050cb081 } },
1121 { AR5K_PHY_RX_DELAY, 1115 { AR5K_PHY_RX_DELAY,
1122 { 0x000007d0, 0x000007d0, 0x0000044c, 0x00000898, 0x000007d0 } }, 1116 { 0x000007d0, 0x0000044c, 0x00000898 } },
1123 { AR5K_PHY_FRAME_CTL_5211, 1117 { AR5K_PHY_FRAME_CTL_5211,
1124 { 0xf7b81000, 0xf7b81000, 0xf7b80d00, 0xf7b81000, 0xf7b81000 } }, 1118 { 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
1125 { AR5K_PHY_CCKTXCTL, 1119 { AR5K_PHY_CCKTXCTL,
1126 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 1120 { 0x00000000, 0x00000000, 0x00000000 } },
1127 { AR5K_PHY_CCK_CROSSCORR, 1121 { AR5K_PHY_CCK_CROSSCORR,
1128 { 0xd6be6788, 0xd6be6788, 0xd03e6788, 0xd03e6788, 0xd03e6788 } }, 1122 { 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
1129 { AR5K_PHY_GAIN_2GHZ, 1123 { AR5K_PHY_GAIN_2GHZ,
1130 { 0x00000140, 0x00000140, 0x0052c140, 0x0052c140, 0x0052c140 } }, 1124 { 0x00000140, 0x0052c140, 0x0052c140 } },
1131 { AR5K_PHY_CCK_RX_CTL_4, 1125 { AR5K_PHY_CCK_RX_CTL_4,
1132 { 0x1883800a, 0x1883800a, 0x1863800a, 0x1883800a, 0x1883800a } }, 1126 { 0x1883800a, 0x1863800a, 0x1883800a } },
1133 { 0xa324, 1127 { 0xa324,
1134 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, 1128 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1135 { 0xa328, 1129 { 0xa328,
1136 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, 1130 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1137 { 0xa32c, 1131 { 0xa32c,
1138 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, 1132 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1139 { 0xa330, 1133 { 0xa330,
1140 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, 1134 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1141 { 0xa334, 1135 { 0xa334,
1142 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } }, 1136 { 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
1143}; 1137};
1144 1138
1145static const struct ath5k_ini rf2425_ini_common_end[] = { 1139static const struct ath5k_ini rf2425_ini_common_end[] = {
@@ -1368,15 +1362,15 @@ static const struct ath5k_ini rf5112_ini_bbgain[] = {
1368 * Write initial register dump 1362 * Write initial register dump
1369 */ 1363 */
1370static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size, 1364static void ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
1371 const struct ath5k_ini *ini_regs, bool change_channel) 1365 const struct ath5k_ini *ini_regs, bool skip_pcu)
1372{ 1366{
1373 unsigned int i; 1367 unsigned int i;
1374 1368
1375 /* Write initial registers */ 1369 /* Write initial registers */
1376 for (i = 0; i < size; i++) { 1370 for (i = 0; i < size; i++) {
1377 /* On channel change there is 1371 /* Skip PCU registers if
1378 * no need to mess with PCU */ 1372 * requested */
1379 if (change_channel && 1373 if (skip_pcu &&
1380 ini_regs[i].ini_register >= AR5K_PCU_MIN && 1374 ini_regs[i].ini_register >= AR5K_PCU_MIN &&
1381 ini_regs[i].ini_register <= AR5K_PCU_MAX) 1375 ini_regs[i].ini_register <= AR5K_PCU_MAX)
1382 continue; 1376 continue;
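The change_channel flag of ath5k_hw_ini_registers() becomes the more descriptive skip_pcu: whenever the caller wants PCU state preserved (not only on a channel change), any initval whose register falls inside the PCU range is simply not written. The filter itself is a one-line range check, sketched here with placeholder bounds instead of the real AR5K_PCU_MIN/AR5K_PCU_MAX:

/*
 * Sketch of the skip_pcu filter: registers inside the PCU range are
 * left untouched when writing the initial register dump.  The bounds
 * below are placeholders, not the real AR5K_PCU_MIN/AR5K_PCU_MAX
 * values.
 */
#define PCU_MIN_SKETCH 0x8000
#define PCU_MAX_SKETCH 0x8fff

struct ini_reg_sketch {
	unsigned int reg;
	unsigned int val;
};

static void write_ini(const struct ini_reg_sketch *regs, unsigned int size,
		      int skip_pcu,
		      void (*reg_write)(unsigned int val, unsigned int reg))
{
	unsigned int i;

	for (i = 0; i < size; i++) {
		if (skip_pcu && regs[i].reg >= PCU_MIN_SKETCH &&
		    regs[i].reg <= PCU_MAX_SKETCH)
			continue;	/* preserve PCU state */
		reg_write(regs[i].val, regs[i].reg);
	}
}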
@@ -1409,7 +1403,7 @@ static void ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
1409 1403
1410} 1404}
1411 1405
1412int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel) 1406int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
1413{ 1407{
1414 /* 1408 /*
1415 * Write initial register settings 1409 * Write initial register settings
@@ -1427,7 +1421,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1427 * Write initial settings common for all modes 1421 * Write initial settings common for all modes
1428 */ 1422 */
1429 ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini_common_start), 1423 ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini_common_start),
1430 ar5212_ini_common_start, change_channel); 1424 ar5212_ini_common_start, skip_pcu);
1431 1425
1432 /* Second set of mode-specific settings */ 1426 /* Second set of mode-specific settings */
1433 switch (ah->ah_radio) { 1427 switch (ah->ah_radio) {
@@ -1439,12 +1433,12 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1439 1433
1440 ath5k_hw_ini_registers(ah, 1434 ath5k_hw_ini_registers(ah,
1441 ARRAY_SIZE(rf5111_ini_common_end), 1435 ARRAY_SIZE(rf5111_ini_common_end),
1442 rf5111_ini_common_end, change_channel); 1436 rf5111_ini_common_end, skip_pcu);
1443 1437
1444 /* Baseband gain table */ 1438 /* Baseband gain table */
1445 ath5k_hw_ini_registers(ah, 1439 ath5k_hw_ini_registers(ah,
1446 ARRAY_SIZE(rf5111_ini_bbgain), 1440 ARRAY_SIZE(rf5111_ini_bbgain),
1447 rf5111_ini_bbgain, change_channel); 1441 rf5111_ini_bbgain, skip_pcu);
1448 1442
1449 break; 1443 break;
1450 case AR5K_RF5112: 1444 case AR5K_RF5112:
@@ -1455,11 +1449,11 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1455 1449
1456 ath5k_hw_ini_registers(ah, 1450 ath5k_hw_ini_registers(ah,
1457 ARRAY_SIZE(rf5112_ini_common_end), 1451 ARRAY_SIZE(rf5112_ini_common_end),
1458 rf5112_ini_common_end, change_channel); 1452 rf5112_ini_common_end, skip_pcu);
1459 1453
1460 ath5k_hw_ini_registers(ah, 1454 ath5k_hw_ini_registers(ah,
1461 ARRAY_SIZE(rf5112_ini_bbgain), 1455 ARRAY_SIZE(rf5112_ini_bbgain),
1462 rf5112_ini_bbgain, change_channel); 1456 rf5112_ini_bbgain, skip_pcu);
1463 1457
1464 break; 1458 break;
1465 case AR5K_RF5413: 1459 case AR5K_RF5413:
@@ -1470,11 +1464,11 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1470 1464
1471 ath5k_hw_ini_registers(ah, 1465 ath5k_hw_ini_registers(ah,
1472 ARRAY_SIZE(rf5413_ini_common_end), 1466 ARRAY_SIZE(rf5413_ini_common_end),
1473 rf5413_ini_common_end, change_channel); 1467 rf5413_ini_common_end, skip_pcu);
1474 1468
1475 ath5k_hw_ini_registers(ah, 1469 ath5k_hw_ini_registers(ah,
1476 ARRAY_SIZE(rf5112_ini_bbgain), 1470 ARRAY_SIZE(rf5112_ini_bbgain),
1477 rf5112_ini_bbgain, change_channel); 1471 rf5112_ini_bbgain, skip_pcu);
1478 1472
1479 break; 1473 break;
1480 case AR5K_RF2316: 1474 case AR5K_RF2316:
@@ -1486,7 +1480,7 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1486 1480
1487 ath5k_hw_ini_registers(ah, 1481 ath5k_hw_ini_registers(ah,
1488 ARRAY_SIZE(rf2413_ini_common_end), 1482 ARRAY_SIZE(rf2413_ini_common_end),
1489 rf2413_ini_common_end, change_channel); 1483 rf2413_ini_common_end, skip_pcu);
1490 1484
1491 /* Override settings from rf2413_ini_common_end */ 1485 /* Override settings from rf2413_ini_common_end */
1492 if (ah->ah_radio == AR5K_RF2316) { 1486 if (ah->ah_radio == AR5K_RF2316) {
@@ -1498,9 +1492,32 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1498 1492
1499 ath5k_hw_ini_registers(ah, 1493 ath5k_hw_ini_registers(ah,
1500 ARRAY_SIZE(rf5112_ini_bbgain), 1494 ARRAY_SIZE(rf5112_ini_bbgain),
1501 rf5112_ini_bbgain, change_channel); 1495 rf5112_ini_bbgain, skip_pcu);
1502 break; 1496 break;
1503 case AR5K_RF2317: 1497 case AR5K_RF2317:
1498
1499 ath5k_hw_ini_mode_registers(ah,
1500 ARRAY_SIZE(rf2413_ini_mode_end),
1501 rf2413_ini_mode_end, mode);
1502
1503 ath5k_hw_ini_registers(ah,
1504 ARRAY_SIZE(rf2425_ini_common_end),
1505 rf2425_ini_common_end, skip_pcu);
1506
1507 /* Override settings from rf2413_ini_mode_end */
1508 ath5k_hw_reg_write(ah, 0x00180a65, AR5K_PHY_GAIN);
1509
1510 /* Override settings from rf2413_ini_common_end */
1511 ath5k_hw_reg_write(ah, 0x00004000, AR5K_PHY_AGC);
1512 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TPC_RG5,
1513 AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP, 0xa);
1514 ath5k_hw_reg_write(ah, 0x800000a8, 0x8140);
1515 ath5k_hw_reg_write(ah, 0x000000ff, 0x9958);
1516
1517 ath5k_hw_ini_registers(ah,
1518 ARRAY_SIZE(rf5112_ini_bbgain),
1519 rf5112_ini_bbgain, skip_pcu);
1520 break;
1504 case AR5K_RF2425: 1521 case AR5K_RF2425:
1505 1522
1506 ath5k_hw_ini_mode_registers(ah, 1523 ath5k_hw_ini_mode_registers(ah,
@@ -1509,11 +1526,11 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1509 1526
1510 ath5k_hw_ini_registers(ah, 1527 ath5k_hw_ini_registers(ah,
1511 ARRAY_SIZE(rf2425_ini_common_end), 1528 ARRAY_SIZE(rf2425_ini_common_end),
1512 rf2425_ini_common_end, change_channel); 1529 rf2425_ini_common_end, skip_pcu);
1513 1530
1514 ath5k_hw_ini_registers(ah, 1531 ath5k_hw_ini_registers(ah,
1515 ARRAY_SIZE(rf5112_ini_bbgain), 1532 ARRAY_SIZE(rf5112_ini_bbgain),
1516 rf5112_ini_bbgain, change_channel); 1533 rf5112_ini_bbgain, skip_pcu);
1517 break; 1534 break;
1518 default: 1535 default:
1519 return -EINVAL; 1536 return -EINVAL;
@@ -1538,17 +1555,17 @@ int ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool change_channel)
1538 * Write initial settings common for all modes 1555 * Write initial settings common for all modes
1539 */ 1556 */
1540 ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini), 1557 ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini),
1541 ar5211_ini, change_channel); 1558 ar5211_ini, skip_pcu);
1542 1559
1543 /* AR5211 only comes with 5111 */ 1560 /* AR5211 only comes with 5111 */
1544 1561
1545 /* Baseband gain table */ 1562 /* Baseband gain table */
1546 ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain), 1563 ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain),
1547 rf5111_ini_bbgain, change_channel); 1564 rf5111_ini_bbgain, skip_pcu);
1548 /* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */ 1565 /* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */
1549 } else if (ah->ah_version == AR5K_AR5210) { 1566 } else if (ah->ah_version == AR5K_AR5210) {
1550 ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini), 1567 ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini),
1551 ar5210_ini, change_channel); 1568 ar5210_ini, skip_pcu);
1552 } 1569 }
1553 1570
1554 return 0; 1571 return 0;
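
The hunks above rename change_channel to skip_pcu in ath5k_hw_ini_registers() and ath5k_hw_write_initvals(): the caller now says directly whether registers in the AR5K_PCU_MIN..AR5K_PCU_MAX range should be left alone, rather than having that implied by a channel change. A minimal caller-side sketch, assuming a reset path that only wants to preserve PCU state on a fast channel switch (the helper name and the policy below are illustrative, not part of this patch):

#include "ath5k.h"	/* ath5k_hw_write_initvals() */

/* Illustrative only: derive skip_pcu from the kind of reset being done. */
static int example_write_initvals(struct ath5k_hw *ah, u8 mode,
				  bool fast_channel_change)
{
	/* PCU registers hold association/beacon state worth keeping
	 * when only the channel changes. */
	bool skip_pcu = fast_channel_change;

	return ath5k_hw_write_initvals(ah, mode, skip_pcu);
}
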
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index 67aa52e9bf94..576edf2965dc 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -133,7 +133,7 @@ ath5k_register_led(struct ath5k_softc *sc, struct ath5k_led *led,
133 led->led_dev.default_trigger = trigger; 133 led->led_dev.default_trigger = trigger;
134 led->led_dev.brightness_set = ath5k_led_brightness_set; 134 led->led_dev.brightness_set = ath5k_led_brightness_set;
135 135
136 err = led_classdev_register(&sc->pdev->dev, &led->led_dev); 136 err = led_classdev_register(sc->dev, &led->led_dev);
137 if (err) { 137 if (err) {
138 ATH5K_WARN(sc, "could not register LED %s\n", name); 138 ATH5K_WARN(sc, "could not register LED %s\n", name);
139 led->sc = NULL; 139 led->sc = NULL;
@@ -161,11 +161,20 @@ int ath5k_init_leds(struct ath5k_softc *sc)
161{ 161{
162 int ret = 0; 162 int ret = 0;
163 struct ieee80211_hw *hw = sc->hw; 163 struct ieee80211_hw *hw = sc->hw;
164#ifndef CONFIG_ATHEROS_AR231X
164 struct pci_dev *pdev = sc->pdev; 165 struct pci_dev *pdev = sc->pdev;
166#endif
165 char name[ATH5K_LED_MAX_NAME_LEN + 1]; 167 char name[ATH5K_LED_MAX_NAME_LEN + 1];
166 const struct pci_device_id *match; 168 const struct pci_device_id *match;
167 169
170 if (!sc->pdev)
171 return 0;
172
173#ifdef CONFIG_ATHEROS_AR231X
174 match = NULL;
175#else
168 match = pci_match_id(&ath5k_led_devices[0], pdev); 176 match = pci_match_id(&ath5k_led_devices[0], pdev);
177#endif
169 if (match) { 178 if (match) {
170 __set_bit(ATH_STAT_LEDSOFT, sc->status); 179 __set_bit(ATH_STAT_LEDSOFT, sc->status);
171 sc->led_pin = ATH_PIN(match->driver_data); 180 sc->led_pin = ATH_PIN(match->driver_data);
diff --git a/drivers/net/wireless/ath/ath5k/mac80211-ops.c b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
new file mode 100644
index 000000000000..d76d68c99f72
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/mac80211-ops.c
@@ -0,0 +1,774 @@
1/*-
2 * Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
3 * Copyright (c) 2004-2005 Atheros Communications, Inc.
4 * Copyright (c) 2006 Devicescape Software, Inc.
5 * Copyright (c) 2007 Jiri Slaby <jirislaby@gmail.com>
6 * Copyright (c) 2007 Luis R. Rodriguez <mcgrof@winlab.rutgers.edu>
7 * Copyright (c) 2010 Bruno Randolf <br1@einfach.org>
8 *
9 * All rights reserved.
10 *
11 * Redistribution and use in source and binary forms, with or without
12 * modification, are permitted provided that the following conditions
13 * are met:
14 * 1. Redistributions of source code must retain the above copyright
15 * notice, this list of conditions and the following disclaimer,
16 * without modification.
17 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
18 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
19 * redistribution must be conditioned upon including a substantially
20 * similar Disclaimer requirement for further binary redistribution.
21 * 3. Neither the names of the above-listed copyright holders nor the names
22 * of any contributors may be used to endorse or promote products derived
23 * from this software without specific prior written permission.
24 *
25 * Alternatively, this software may be distributed under the terms of the
26 * GNU General Public License ("GPL") version 2 as published by the Free
27 * Software Foundation.
28 *
29 * NO WARRANTY
30 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
31 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
32 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
33 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
34 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
35 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
36 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
37 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
38 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
39 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
40 * THE POSSIBILITY OF SUCH DAMAGES.
41 *
42 */
43
44#include <asm/unaligned.h>
45
46#include "base.h"
47#include "reg.h"
48
49extern int ath5k_modparam_nohwcrypt;
50
51/* functions used from base.c */
52void set_beacon_filter(struct ieee80211_hw *hw, bool enable);
53bool ath_any_vif_assoc(struct ath5k_softc *sc);
54int ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
55 struct ath5k_txq *txq);
56int ath5k_init_hw(struct ath5k_softc *sc);
57int ath5k_stop_hw(struct ath5k_softc *sc);
58void ath5k_mode_setup(struct ath5k_softc *sc, struct ieee80211_vif *vif);
59void ath5k_update_bssid_mask_and_opmode(struct ath5k_softc *sc,
60 struct ieee80211_vif *vif);
61int ath5k_chan_set(struct ath5k_softc *sc, struct ieee80211_channel *chan);
62void ath5k_beacon_update_timers(struct ath5k_softc *sc, u64 bc_tsf);
63int ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
64void ath5k_beacon_config(struct ath5k_softc *sc);
65void ath5k_txbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
66void ath5k_rxbuf_free_skb(struct ath5k_softc *sc, struct ath5k_buf *bf);
67
68/********************\
69* Mac80211 functions *
70\********************/
71
72static int
73ath5k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
74{
75 struct ath5k_softc *sc = hw->priv;
76 u16 qnum = skb_get_queue_mapping(skb);
77
78 if (WARN_ON(qnum >= sc->ah->ah_capabilities.cap_queues.q_tx_num)) {
79 dev_kfree_skb_any(skb);
80 return 0;
81 }
82
83 return ath5k_tx_queue(hw, skb, &sc->txqs[qnum]);
84}
85
86
87static int
88ath5k_start(struct ieee80211_hw *hw)
89{
90 return ath5k_init_hw(hw->priv);
91}
92
93
94static void
95ath5k_stop(struct ieee80211_hw *hw)
96{
97 ath5k_stop_hw(hw->priv);
98}
99
100
101static int
102ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
103{
104 struct ath5k_softc *sc = hw->priv;
105 int ret;
106 struct ath5k_vif *avf = (void *)vif->drv_priv;
107
108 mutex_lock(&sc->lock);
109
110 if ((vif->type == NL80211_IFTYPE_AP ||
111 vif->type == NL80211_IFTYPE_ADHOC)
112 && (sc->num_ap_vifs + sc->num_adhoc_vifs) >= ATH_BCBUF) {
113 ret = -ELNRNG;
114 goto end;
115 }
116
117 /* Don't allow other interfaces if one ad-hoc is configured.
118 * TODO: Fix the problems with ad-hoc and multiple other interfaces.
119 * We would need to operate the HW in ad-hoc mode to allow TSF updates
120 * for the IBSS, but this breaks with additional AP or STA interfaces
121 * at the moment. */
122 if (sc->num_adhoc_vifs ||
123 (sc->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
124 ATH5K_ERR(sc, "Only one single ad-hoc interface is allowed.\n");
125 ret = -ELNRNG;
126 goto end;
127 }
128
129 switch (vif->type) {
130 case NL80211_IFTYPE_AP:
131 case NL80211_IFTYPE_STATION:
132 case NL80211_IFTYPE_ADHOC:
133 case NL80211_IFTYPE_MESH_POINT:
134 avf->opmode = vif->type;
135 break;
136 default:
137 ret = -EOPNOTSUPP;
138 goto end;
139 }
140
141 sc->nvifs++;
142 ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
143
144 /* Assign the vap/adhoc to a beacon xmit slot. */
145 if ((avf->opmode == NL80211_IFTYPE_AP) ||
146 (avf->opmode == NL80211_IFTYPE_ADHOC) ||
147 (avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
148 int slot;
149
150 WARN_ON(list_empty(&sc->bcbuf));
151 avf->bbuf = list_first_entry(&sc->bcbuf, struct ath5k_buf,
152 list);
153 list_del(&avf->bbuf->list);
154
155 avf->bslot = 0;
156 for (slot = 0; slot < ATH_BCBUF; slot++) {
157 if (!sc->bslot[slot]) {
158 avf->bslot = slot;
159 break;
160 }
161 }
162 BUG_ON(sc->bslot[avf->bslot] != NULL);
163 sc->bslot[avf->bslot] = vif;
164 if (avf->opmode == NL80211_IFTYPE_AP)
165 sc->num_ap_vifs++;
166 else if (avf->opmode == NL80211_IFTYPE_ADHOC)
167 sc->num_adhoc_vifs++;
168 }
169
170 /* Any MAC address is fine, all others are included through the
171 * filter.
172 */
173 memcpy(&sc->lladdr, vif->addr, ETH_ALEN);
174 ath5k_hw_set_lladdr(sc->ah, vif->addr);
175
176 memcpy(&avf->lladdr, vif->addr, ETH_ALEN);
177
178 ath5k_mode_setup(sc, vif);
179
180 ret = 0;
181end:
182 mutex_unlock(&sc->lock);
183 return ret;
184}
185
186
187static void
188ath5k_remove_interface(struct ieee80211_hw *hw,
189 struct ieee80211_vif *vif)
190{
191 struct ath5k_softc *sc = hw->priv;
192 struct ath5k_vif *avf = (void *)vif->drv_priv;
193 unsigned int i;
194
195 mutex_lock(&sc->lock);
196 sc->nvifs--;
197
198 if (avf->bbuf) {
199 ath5k_txbuf_free_skb(sc, avf->bbuf);
200 list_add_tail(&avf->bbuf->list, &sc->bcbuf);
201 for (i = 0; i < ATH_BCBUF; i++) {
202 if (sc->bslot[i] == vif) {
203 sc->bslot[i] = NULL;
204 break;
205 }
206 }
207 avf->bbuf = NULL;
208 }
209 if (avf->opmode == NL80211_IFTYPE_AP)
210 sc->num_ap_vifs--;
211 else if (avf->opmode == NL80211_IFTYPE_ADHOC)
212 sc->num_adhoc_vifs--;
213
214 ath5k_update_bssid_mask_and_opmode(sc, NULL);
215 mutex_unlock(&sc->lock);
216}
217
218
219/*
220 * TODO: Phy disable/diversity etc
221 */
222static int
223ath5k_config(struct ieee80211_hw *hw, u32 changed)
224{
225 struct ath5k_softc *sc = hw->priv;
226 struct ath5k_hw *ah = sc->ah;
227 struct ieee80211_conf *conf = &hw->conf;
228 int ret = 0;
229
230 mutex_lock(&sc->lock);
231
232 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
233 ret = ath5k_chan_set(sc, conf->channel);
234 if (ret < 0)
235 goto unlock;
236 }
237
238 if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
239 (sc->power_level != conf->power_level)) {
240 sc->power_level = conf->power_level;
241
242 /* Half dB steps */
243 ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
244 }
245
246 /* TODO:
247 * 1) Move this on config_interface and handle each case
248 * separately eg. when we have only one STA vif, use
249 * AR5K_ANTMODE_SINGLE_AP
250 *
251 * 2) Allow the user to change antenna mode eg. when only
252 * one antenna is present
253 *
254 * 3) Allow the user to set default/tx antenna when possible
255 *
256 * 4) Default mode should handle 90% of the cases, together
257 * with fixed a/b and single AP modes we should be able to
258 * handle 99%. Sectored modes are extreme cases and I still
259 * haven't found a usage for them. If we decide to support them,
260 * then we must allow the user to set how many tx antennas we
261 * have available
262 */
263 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
264
265unlock:
266 mutex_unlock(&sc->lock);
267 return ret;
268}
269
270
271static void
272ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
273 struct ieee80211_bss_conf *bss_conf, u32 changes)
274{
275 struct ath5k_vif *avf = (void *)vif->drv_priv;
276 struct ath5k_softc *sc = hw->priv;
277 struct ath5k_hw *ah = sc->ah;
278 struct ath_common *common = ath5k_hw_common(ah);
279 unsigned long flags;
280
281 mutex_lock(&sc->lock);
282
283 if (changes & BSS_CHANGED_BSSID) {
284 /* Cache for later use during resets */
285 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
286 common->curaid = 0;
287 ath5k_hw_set_bssid(ah);
288 mmiowb();
289 }
290
291 if (changes & BSS_CHANGED_BEACON_INT)
292 sc->bintval = bss_conf->beacon_int;
293
294 if (changes & BSS_CHANGED_ASSOC) {
295 avf->assoc = bss_conf->assoc;
296 if (bss_conf->assoc)
297 sc->assoc = bss_conf->assoc;
298 else
299 sc->assoc = ath_any_vif_assoc(sc);
300
301 if (sc->opmode == NL80211_IFTYPE_STATION)
302 set_beacon_filter(hw, sc->assoc);
303 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
304 AR5K_LED_ASSOC : AR5K_LED_INIT);
305 if (bss_conf->assoc) {
306 ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
307 "Bss Info ASSOC %d, bssid: %pM\n",
308 bss_conf->aid, common->curbssid);
309 common->curaid = bss_conf->aid;
310 ath5k_hw_set_bssid(ah);
311 /* Once ANI is available you would start it here */
312 }
313 }
314
315 if (changes & BSS_CHANGED_BEACON) {
316 spin_lock_irqsave(&sc->block, flags);
317 ath5k_beacon_update(hw, vif);
318 spin_unlock_irqrestore(&sc->block, flags);
319 }
320
321 if (changes & BSS_CHANGED_BEACON_ENABLED)
322 sc->enable_beacon = bss_conf->enable_beacon;
323
324 if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
325 BSS_CHANGED_BEACON_INT))
326 ath5k_beacon_config(sc);
327
328 mutex_unlock(&sc->lock);
329}
330
331
332static u64
333ath5k_prepare_multicast(struct ieee80211_hw *hw,
334 struct netdev_hw_addr_list *mc_list)
335{
336 u32 mfilt[2], val;
337 u8 pos;
338 struct netdev_hw_addr *ha;
339
340 mfilt[0] = 0;
341 mfilt[1] = 1;
342
343 netdev_hw_addr_list_for_each(ha, mc_list) {
344 /* calculate XOR of eight 6-bit values */
345 val = get_unaligned_le32(ha->addr + 0);
346 pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
347 val = get_unaligned_le32(ha->addr + 3);
348 pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
349 pos &= 0x3f;
350 mfilt[pos / 32] |= (1 << (pos % 32));
351 /* XXX: we might be able to just do this instead,
352 * but not sure, needs testing; if we do use this we'd
353 * need to tell the code below not to reset the mcast filter */
354 /* ath5k_hw_set_mcast_filterindex(ah,
355 * ha->addr[5]); */
356 }
357
358 return ((u64)(mfilt[1]) << 32) | mfilt[0];
359}
360
361
362/*
363 * o always accept unicast, broadcast, and multicast traffic
364 * o multicast traffic for all BSSIDs will be enabled if mac80211
365 * says it should be
366 * o maintain current state of phy ofdm or phy cck error reception.
367 * If the hardware detects any of these type of errors then
368 * ath5k_hw_get_rx_filter() will pass to us the respective
369 * hardware filters to be able to receive these type of frames.
370 * o probe request frames are accepted only when operating in
371 * hostap, adhoc, or monitor modes
372 * o enable promiscuous mode according to the interface state
373 * o accept beacons:
374 * - when operating in adhoc mode so the 802.11 layer creates
375 * node table entries for peers,
376 * - when operating in station mode for collecting rssi data when
377 * the station is otherwise quiet, or
378 * - when scanning
379 */
380static void
381ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
382 unsigned int *new_flags, u64 multicast)
383{
384#define SUPPORTED_FIF_FLAGS \
385 (FIF_PROMISC_IN_BSS | FIF_ALLMULTI | FIF_FCSFAIL | \
386 FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
387 FIF_BCN_PRBRESP_PROMISC)
388
389 struct ath5k_softc *sc = hw->priv;
390 struct ath5k_hw *ah = sc->ah;
391 u32 mfilt[2], rfilt;
392
393 mutex_lock(&sc->lock);
394
395 mfilt[0] = multicast;
396 mfilt[1] = multicast >> 32;
397
398 /* Only deal with supported flags */
399 changed_flags &= SUPPORTED_FIF_FLAGS;
400 *new_flags &= SUPPORTED_FIF_FLAGS;
401
402 /* If HW detects any phy or radar errors, leave those filters on.
403 * Also, always enable Unicast, Broadcasts and Multicast
404 * XXX: move unicast, bssid broadcasts and multicast to mac80211 */
405 rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
406 (AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
407 AR5K_RX_FILTER_MCAST);
408
409 if (changed_flags & (FIF_PROMISC_IN_BSS | FIF_OTHER_BSS)) {
410 if (*new_flags & FIF_PROMISC_IN_BSS)
411 __set_bit(ATH_STAT_PROMISC, sc->status);
412 else
413 __clear_bit(ATH_STAT_PROMISC, sc->status);
414 }
415
416 if (test_bit(ATH_STAT_PROMISC, sc->status))
417 rfilt |= AR5K_RX_FILTER_PROM;
418
419 /* Note, AR5K_RX_FILTER_MCAST is already enabled */
420 if (*new_flags & FIF_ALLMULTI) {
421 mfilt[0] = ~0;
422 mfilt[1] = ~0;
423 }
424
425 /* This is the best we can do */
426 if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
427 rfilt |= AR5K_RX_FILTER_PHYERR;
428
429 /* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
430 * and probes for any BSSID */
431 if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (sc->nvifs > 1))
432 rfilt |= AR5K_RX_FILTER_BEACON;
433
434 /* FIF_CONTROL doc says that if FIF_PROMISC_IN_BSS is not
435 * set we should only pass on control frames for this
436 * station. This needs testing. I believe right now this
437 * enables *all* control frames, which is OK, but
438 * we should see if we can improve on granularity */
439 if (*new_flags & FIF_CONTROL)
440 rfilt |= AR5K_RX_FILTER_CONTROL;
441
442 /* Additional settings per mode -- this is per ath5k */
443
444 /* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
445
446 switch (sc->opmode) {
447 case NL80211_IFTYPE_MESH_POINT:
448 rfilt |= AR5K_RX_FILTER_CONTROL |
449 AR5K_RX_FILTER_BEACON |
450 AR5K_RX_FILTER_PROBEREQ |
451 AR5K_RX_FILTER_PROM;
452 break;
453 case NL80211_IFTYPE_AP:
454 case NL80211_IFTYPE_ADHOC:
455 rfilt |= AR5K_RX_FILTER_PROBEREQ |
456 AR5K_RX_FILTER_BEACON;
457 break;
458 case NL80211_IFTYPE_STATION:
459 if (sc->assoc)
460 rfilt |= AR5K_RX_FILTER_BEACON;
461 default:
462 break;
463 }
464
465 /* Set filters */
466 ath5k_hw_set_rx_filter(ah, rfilt);
467
468 /* Set multicast bits */
469 ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
470 /* Set the cached hw filter flags, this will later actually
471 * be set in HW */
472 sc->filter_flags = rfilt;
473
474 mutex_unlock(&sc->lock);
475}
476
477
478static int
479ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
480 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
481 struct ieee80211_key_conf *key)
482{
483 struct ath5k_softc *sc = hw->priv;
484 struct ath5k_hw *ah = sc->ah;
485 struct ath_common *common = ath5k_hw_common(ah);
486 int ret = 0;
487
488 if (ath5k_modparam_nohwcrypt)
489 return -EOPNOTSUPP;
490
491 switch (key->cipher) {
492 case WLAN_CIPHER_SUITE_WEP40:
493 case WLAN_CIPHER_SUITE_WEP104:
494 case WLAN_CIPHER_SUITE_TKIP:
495 break;
496 case WLAN_CIPHER_SUITE_CCMP:
497 if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
498 break;
499 return -EOPNOTSUPP;
500 default:
501 WARN_ON(1);
502 return -EINVAL;
503 }
504
505 mutex_lock(&sc->lock);
506
507 switch (cmd) {
508 case SET_KEY:
509 ret = ath_key_config(common, vif, sta, key);
510 if (ret >= 0) {
511 key->hw_key_idx = ret;
512 /* push IV and Michael MIC generation to stack */
513 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
514 if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
515 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
516 if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
517 key->flags |= IEEE80211_KEY_FLAG_SW_MGMT;
518 ret = 0;
519 }
520 break;
521 case DISABLE_KEY:
522 ath_key_delete(common, key);
523 break;
524 default:
525 ret = -EINVAL;
526 }
527
528 mmiowb();
529 mutex_unlock(&sc->lock);
530 return ret;
531}
532
533
534static void
535ath5k_sw_scan_start(struct ieee80211_hw *hw)
536{
537 struct ath5k_softc *sc = hw->priv;
538 if (!sc->assoc)
539 ath5k_hw_set_ledstate(sc->ah, AR5K_LED_SCAN);
540}
541
542
543static void
544ath5k_sw_scan_complete(struct ieee80211_hw *hw)
545{
546 struct ath5k_softc *sc = hw->priv;
547 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
548 AR5K_LED_ASSOC : AR5K_LED_INIT);
549}
550
551
552static int
553ath5k_get_stats(struct ieee80211_hw *hw,
554 struct ieee80211_low_level_stats *stats)
555{
556 struct ath5k_softc *sc = hw->priv;
557
558 /* Force update */
559 ath5k_hw_update_mib_counters(sc->ah);
560
561 stats->dot11ACKFailureCount = sc->stats.ack_fail;
562 stats->dot11RTSFailureCount = sc->stats.rts_fail;
563 stats->dot11RTSSuccessCount = sc->stats.rts_ok;
564 stats->dot11FCSErrorCount = sc->stats.fcs_error;
565
566 return 0;
567}
568
569
570static int
571ath5k_conf_tx(struct ieee80211_hw *hw, u16 queue,
572 const struct ieee80211_tx_queue_params *params)
573{
574 struct ath5k_softc *sc = hw->priv;
575 struct ath5k_hw *ah = sc->ah;
576 struct ath5k_txq_info qi;
577 int ret = 0;
578
579 if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
580 return 0;
581
582 mutex_lock(&sc->lock);
583
584 ath5k_hw_get_tx_queueprops(ah, queue, &qi);
585
586 qi.tqi_aifs = params->aifs;
587 qi.tqi_cw_min = params->cw_min;
588 qi.tqi_cw_max = params->cw_max;
589 qi.tqi_burst_time = params->txop;
590
591 ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
592 "Configure tx [queue %d], "
593 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
594 queue, params->aifs, params->cw_min,
595 params->cw_max, params->txop);
596
597 if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
598 ATH5K_ERR(sc,
599 "Unable to update hardware queue %u!\n", queue);
600 ret = -EIO;
601 } else
602 ath5k_hw_reset_tx_queue(ah, queue);
603
604 mutex_unlock(&sc->lock);
605
606 return ret;
607}
608
609
610static u64
611ath5k_get_tsf(struct ieee80211_hw *hw)
612{
613 struct ath5k_softc *sc = hw->priv;
614
615 return ath5k_hw_get_tsf64(sc->ah);
616}
617
618
619static void
620ath5k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
621{
622 struct ath5k_softc *sc = hw->priv;
623
624 ath5k_hw_set_tsf64(sc->ah, tsf);
625}
626
627
628static void
629ath5k_reset_tsf(struct ieee80211_hw *hw)
630{
631 struct ath5k_softc *sc = hw->priv;
632
633 /*
634 * in IBSS mode we need to update the beacon timers too.
635 * this will also reset the TSF if we call it with 0
636 */
637 if (sc->opmode == NL80211_IFTYPE_ADHOC)
638 ath5k_beacon_update_timers(sc, 0);
639 else
640 ath5k_hw_reset_tsf(sc->ah);
641}
642
643
644static int
645ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
646{
647 struct ath5k_softc *sc = hw->priv;
648 struct ieee80211_conf *conf = &hw->conf;
649 struct ath_common *common = ath5k_hw_common(sc->ah);
650 struct ath_cycle_counters *cc = &common->cc_survey;
651 unsigned int div = common->clockrate * 1000;
652
653 if (idx != 0)
654 return -ENOENT;
655
656 spin_lock_bh(&common->cc_lock);
657 ath_hw_cycle_counters_update(common);
658 if (cc->cycles > 0) {
659 sc->survey.channel_time += cc->cycles / div;
660 sc->survey.channel_time_busy += cc->rx_busy / div;
661 sc->survey.channel_time_rx += cc->rx_frame / div;
662 sc->survey.channel_time_tx += cc->tx_frame / div;
663 }
664 memset(cc, 0, sizeof(*cc));
665 spin_unlock_bh(&common->cc_lock);
666
667 memcpy(survey, &sc->survey, sizeof(*survey));
668
669 survey->channel = conf->channel;
670 survey->noise = sc->ah->ah_noise_floor;
671 survey->filled = SURVEY_INFO_NOISE_DBM |
672 SURVEY_INFO_CHANNEL_TIME |
673 SURVEY_INFO_CHANNEL_TIME_BUSY |
674 SURVEY_INFO_CHANNEL_TIME_RX |
675 SURVEY_INFO_CHANNEL_TIME_TX;
676
677 return 0;
678}
679
680
681/**
682 * ath5k_set_coverage_class - Set IEEE 802.11 coverage class
683 *
684 * @hw: struct ieee80211_hw pointer
685 * @coverage_class: IEEE 802.11 coverage class number
686 *
687 * Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
688 * coverage class. The values are persistent, they are restored after device
689 * reset.
690 */
691static void
692ath5k_set_coverage_class(struct ieee80211_hw *hw, u8 coverage_class)
693{
694 struct ath5k_softc *sc = hw->priv;
695
696 mutex_lock(&sc->lock);
697 ath5k_hw_set_coverage_class(sc->ah, coverage_class);
698 mutex_unlock(&sc->lock);
699}
700
701
702static int
703ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
704{
705 struct ath5k_softc *sc = hw->priv;
706
707 if (tx_ant == 1 && rx_ant == 1)
708 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_A);
709 else if (tx_ant == 2 && rx_ant == 2)
710 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_FIXED_B);
711 else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
712 ath5k_hw_set_antenna_mode(sc->ah, AR5K_ANTMODE_DEFAULT);
713 else
714 return -EINVAL;
715 return 0;
716}
717
718
719static int
720ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
721{
722 struct ath5k_softc *sc = hw->priv;
723
724 switch (sc->ah->ah_ant_mode) {
725 case AR5K_ANTMODE_FIXED_A:
726 *tx_ant = 1; *rx_ant = 1; break;
727 case AR5K_ANTMODE_FIXED_B:
728 *tx_ant = 2; *rx_ant = 2; break;
729 case AR5K_ANTMODE_DEFAULT:
730 *tx_ant = 3; *rx_ant = 3; break;
731 }
732 return 0;
733}
734
735
736const struct ieee80211_ops ath5k_hw_ops = {
737 .tx = ath5k_tx,
738 .start = ath5k_start,
739 .stop = ath5k_stop,
740 .add_interface = ath5k_add_interface,
741 /* .change_interface = not implemented */
742 .remove_interface = ath5k_remove_interface,
743 .config = ath5k_config,
744 .bss_info_changed = ath5k_bss_info_changed,
745 .prepare_multicast = ath5k_prepare_multicast,
746 .configure_filter = ath5k_configure_filter,
747 /* .set_tim = not implemented */
748 .set_key = ath5k_set_key,
749 /* .update_tkip_key = not implemented */
750 /* .hw_scan = not implemented */
751 .sw_scan_start = ath5k_sw_scan_start,
752 .sw_scan_complete = ath5k_sw_scan_complete,
753 .get_stats = ath5k_get_stats,
754 /* .get_tkip_seq = not implemented */
755 /* .set_frag_threshold = not implemented */
756 /* .set_rts_threshold = not implemented */
757 /* .sta_add = not implemented */
758 /* .sta_remove = not implemented */
759 /* .sta_notify = not implemented */
760 .conf_tx = ath5k_conf_tx,
761 .get_tsf = ath5k_get_tsf,
762 .set_tsf = ath5k_set_tsf,
763 .reset_tsf = ath5k_reset_tsf,
764 /* .tx_last_beacon = not implemented */
765 /* .ampdu_action = not needed */
766 .get_survey = ath5k_get_survey,
767 .set_coverage_class = ath5k_set_coverage_class,
768 /* .rfkill_poll = not implemented */
769 /* .flush = not implemented */
770 /* .channel_switch = not implemented */
771 /* .napi_poll = not implemented */
772 .set_antenna = ath5k_set_antenna,
773 .get_antenna = ath5k_get_antenna,
774};
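
The hash in ath5k_prepare_multicast() above folds each multicast address into a 6-bit position of a 64-bit hardware filter. A standalone sketch of the same arithmetic (the sample address is arbitrary and get_unaligned_le32() is replaced by an explicit little-endian load, so this builds outside the kernel):

#include <stdint.h>
#include <stdio.h>

static uint32_t le32_at(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* arbitrary example multicast MAC address */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x01, 0x02, 0x03 };
	uint32_t mfilt[2] = { 0, 0 };
	uint32_t val;
	uint8_t pos;

	/* XOR of eight 6-bit values, as in the driver */
	val = le32_at(addr + 0);
	pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	val = le32_at(addr + 3);
	pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
	pos &= 0x3f;
	mfilt[pos / 32] |= 1u << (pos % 32);

	printf("bit %u -> mfilt = %08x:%08x\n",
	       (unsigned int)pos, mfilt[1], mfilt[0]);
	return 0;
}
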
diff --git a/drivers/net/wireless/ath/ath5k/pci.c b/drivers/net/wireless/ath/ath5k/pci.c
new file mode 100644
index 000000000000..7f8c5b0e9d2a
--- /dev/null
+++ b/drivers/net/wireless/ath/ath5k/pci.c
@@ -0,0 +1,327 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <linux/nl80211.h>
18#include <linux/pci.h>
19#include <linux/pci-aspm.h>
20#include "../ath.h"
21#include "ath5k.h"
22#include "debug.h"
23#include "base.h"
24#include "reg.h"
25
26/* Known PCI ids */
27static DEFINE_PCI_DEVICE_TABLE(ath5k_pci_id_table) = {
28 { PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
29 { PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
30 { PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
31 { PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
32 { PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
33 { PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
34 { PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
35 { PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
36 { PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
37 { PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
38 { PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
39 { PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
40 { PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
41 { PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
42 { PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
43 { PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
44 { PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
45 { PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
46 { 0 }
47};
48MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
49
50/* return bus cachesize in 4B word units */
51static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
52{
53 struct ath5k_softc *sc = (struct ath5k_softc *) common->priv;
54 u8 u8tmp;
55
56 pci_read_config_byte(sc->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
57 *csz = (int)u8tmp;
58
59 /*
60 * This check was put in to avoid "unpleasant" consequences if
61 * the bootrom has not fully initialized all PCI devices.
62 * Sometimes the cache line size register is not set
63 */
64
65 if (*csz == 0)
66 *csz = L1_CACHE_BYTES >> 2; /* Use the default size */
67}
68
69/*
70 * Read from eeprom
71 */
72bool ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
73{
74 struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
75 u32 status, timeout;
76
77 /*
78 * Initialize EEPROM access
79 */
80 if (ah->ah_version == AR5K_AR5210) {
81 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
82 (void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
83 } else {
84 ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
85 AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
86 AR5K_EEPROM_CMD_READ);
87 }
88
89 for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
90 status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
91 if (status & AR5K_EEPROM_STAT_RDDONE) {
92 if (status & AR5K_EEPROM_STAT_RDERR)
93 return -EIO;
94 *data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
95 0xffff);
96 return 0;
97 }
98 udelay(15);
99 }
100
101 return -ETIMEDOUT;
102}
103
104int ath5k_hw_read_srev(struct ath5k_hw *ah)
105{
106 ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV);
107 return 0;
108}
109
110/* Common ath_bus_opts structure */
111static const struct ath_bus_ops ath_pci_bus_ops = {
112 .ath_bus_type = ATH_PCI,
113 .read_cachesize = ath5k_pci_read_cachesize,
114 .eeprom_read = ath5k_pci_eeprom_read,
115};
116
117/********************\
118* PCI Initialization *
119\********************/
120
121static int __devinit
122ath5k_pci_probe(struct pci_dev *pdev,
123 const struct pci_device_id *id)
124{
125 void __iomem *mem;
126 struct ath5k_softc *sc;
127 struct ieee80211_hw *hw;
128 int ret;
129 u8 csz;
130
131 /*
132 * L0s needs to be disabled on all ath5k cards.
133 *
134 * For distributions shipping with CONFIG_PCIEASPM (this will be enabled
135 * by default in the future in 2.6.36) this will also mean both L1 and
136 * L0s will be disabled when a pre 1.1 PCIe device is detected. We do
137 * know L1 works correctly even for all ath5k pre 1.1 PCIe devices
138 * though, but cannot currently undo the effect of a blacklist; for
139 * details you can read pcie_aspm_sanity_check() and see how it adjusts
140 * the device link capability.
141 *
142 * It may be possible in the future to implement some PCI API to allow
143 * drivers to override blacklists for pre 1.1 PCIe but for now it is
144 * best to accept that both L0s and L1 will be disabled completely for
145 * distributions shipping with CONFIG_PCIEASPM rather than having this
146 * issue present. Motivation for adding this new API will be to help
147 * with power consumption for some of these devices.
148 */
149 pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
150
151 ret = pci_enable_device(pdev);
152 if (ret) {
153 dev_err(&pdev->dev, "can't enable device\n");
154 goto err;
155 }
156
157 /* XXX 32-bit addressing only */
158 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
159 if (ret) {
160 dev_err(&pdev->dev, "32-bit DMA not available\n");
161 goto err_dis;
162 }
163
164 /*
165 * Cache line size is used to size and align various
166 * structures used to communicate with the hardware.
167 */
168 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
169 if (csz == 0) {
170 /*
171 * Linux 2.4.18 (at least) writes the cache line size
172 * register as a 16-bit wide register which is wrong.
173 * We must have this setup properly for rx buffer
174 * DMA to work so force a reasonable value here if it
175 * comes up zero.
176 */
177 csz = L1_CACHE_BYTES >> 2;
178 pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
179 }
180 /*
181 * The default setting of latency timer yields poor results,
182 * set it to the value used by other systems. It may be worth
183 * tweaking this setting more.
184 */
185 pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
186
187 /* Enable bus mastering */
188 pci_set_master(pdev);
189
190 /*
191 * Disable the RETRY_TIMEOUT register (0x41) to keep
192 * PCI Tx retries from interfering with C3 CPU state.
193 */
194 pci_write_config_byte(pdev, 0x41, 0);
195
196 ret = pci_request_region(pdev, 0, "ath5k");
197 if (ret) {
198 dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
199 goto err_dis;
200 }
201
202 mem = pci_iomap(pdev, 0, 0);
203 if (!mem) {
204 dev_err(&pdev->dev, "cannot remap PCI memory region\n");
205 ret = -EIO;
206 goto err_reg;
207 }
208
209 /*
210 * Allocate hw (mac80211 main struct)
211 * and hw->priv (driver private data)
212 */
213 hw = ieee80211_alloc_hw(sizeof(*sc), &ath5k_hw_ops);
214 if (hw == NULL) {
215 dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
216 ret = -ENOMEM;
217 goto err_map;
218 }
219
220 dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
221
222 sc = hw->priv;
223 sc->hw = hw;
224 sc->pdev = pdev;
225 sc->dev = &pdev->dev;
226 sc->irq = pdev->irq;
227 sc->devid = id->device;
228 sc->iobase = mem; /* So we can unmap it on detach */
229
230 /* Initialize */
231 ret = ath5k_init_softc(sc, &ath_pci_bus_ops);
232 if (ret)
233 goto err_free;
234
235 /* Set private data */
236 pci_set_drvdata(pdev, hw);
237
238 return 0;
239err_free:
240 ieee80211_free_hw(hw);
241err_map:
242 pci_iounmap(pdev, mem);
243err_reg:
244 pci_release_region(pdev, 0);
245err_dis:
246 pci_disable_device(pdev);
247err:
248 return ret;
249}
250
251static void __devexit
252ath5k_pci_remove(struct pci_dev *pdev)
253{
254 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
255 struct ath5k_softc *sc = hw->priv;
256
257 ath5k_deinit_softc(sc);
258 pci_iounmap(pdev, sc->iobase);
259 pci_release_region(pdev, 0);
260 pci_disable_device(pdev);
261 ieee80211_free_hw(hw);
262}
263
264#ifdef CONFIG_PM_SLEEP
265static int ath5k_pci_suspend(struct device *dev)
266{
267 struct ath5k_softc *sc = pci_get_drvdata(to_pci_dev(dev));
268
269 ath5k_led_off(sc);
270 return 0;
271}
272
273static int ath5k_pci_resume(struct device *dev)
274{
275 struct pci_dev *pdev = to_pci_dev(dev);
276 struct ath5k_softc *sc = pci_get_drvdata(pdev);
277
278 /*
279 * Suspend/Resume resets the PCI configuration space, so we have to
280 * re-disable the RETRY_TIMEOUT register (0x41) to keep
281 * PCI Tx retries from interfering with C3 CPU state
282 */
283 pci_write_config_byte(pdev, 0x41, 0);
284
285 ath5k_led_enable(sc);
286 return 0;
287}
288
289static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
290#define ATH5K_PM_OPS (&ath5k_pm_ops)
291#else
292#define ATH5K_PM_OPS NULL
293#endif /* CONFIG_PM_SLEEP */
294
295static struct pci_driver ath5k_pci_driver = {
296 .name = KBUILD_MODNAME,
297 .id_table = ath5k_pci_id_table,
298 .probe = ath5k_pci_probe,
299 .remove = __devexit_p(ath5k_pci_remove),
300 .driver.pm = ATH5K_PM_OPS,
301};
302
303/*
304 * Module init/exit functions
305 */
306static int __init
307init_ath5k_pci(void)
308{
309 int ret;
310
311 ret = pci_register_driver(&ath5k_pci_driver);
312 if (ret) {
313 printk(KERN_ERR "ath5k_pci: can't register pci driver\n");
314 return ret;
315 }
316
317 return 0;
318}
319
320static void __exit
321exit_ath5k_pci(void)
322{
323 pci_unregister_driver(&ath5k_pci_driver);
324}
325
326module_init(init_ath5k_pci);
327module_exit(exit_ath5k_pci);
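
Both ath5k_pci_read_cachesize() and the probe fallback above follow the PCI convention that PCI_CACHE_LINE_SIZE counts 32-bit words, not bytes, which is why the default is L1_CACHE_BYTES >> 2. A tiny sketch of the unit conversion (the 64-byte cache line is only an example value):

#include <stdio.h>

int main(void)
{
	unsigned int l1_cache_bytes = 64;		/* example CPU cache line */
	unsigned int csz_words = l1_cache_bytes >> 2;	/* value for PCI_CACHE_LINE_SIZE */

	printf("%u bytes -> %u dwords\n", l1_cache_bytes, csz_words);
	return 0;
}
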
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 074b4c644399..e5f2b96a4c63 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -31,87 +31,163 @@
31#include "debug.h" 31#include "debug.h"
32#include "base.h" 32#include "base.h"
33 33
34/*
35 * AR5212+ can use higher rates for ACK transmission
36 * based on the current tx rate instead of the base rate.
37 * It does this to make better use of the channel.
38 * This is a mapping between G rates (that cover both
39 * CCK and OFDM) and ack rates that we use when setting
40 * rate -> duration table. This mapping is hw-based so
41 * don't change anything.
42 *
43 * To enable this functionality we must set
44 * ah->ah_ack_bitrate_high to true, otherwise the base rate is
45 * used (1Mb for CCK, 6Mb for OFDM).
46 */
47static const unsigned int ack_rates_high[] =
48/* Tx -> ACK */
49/* 1Mb -> 1Mb */ { 0,
50/* 2MB -> 2Mb */ 1,
51/* 5.5Mb -> 2Mb */ 1,
52/* 11Mb -> 2Mb */ 1,
53/* 6Mb -> 6Mb */ 4,
54/* 9Mb -> 6Mb */ 4,
55/* 12Mb -> 12Mb */ 6,
56/* 18Mb -> 12Mb */ 6,
57/* 24Mb -> 24Mb */ 8,
58/* 36Mb -> 24Mb */ 8,
59/* 48Mb -> 24Mb */ 8,
60/* 54Mb -> 24Mb */ 8 };
61
34/*******************\ 62/*******************\
35* Generic functions * 63* Helper functions *
36\*******************/ 64\*******************/
37 65
38/** 66/**
39 * ath5k_hw_set_opmode - Set PCU operating mode 67 * ath5k_hw_get_frame_duration - Get tx time of a frame
40 * 68 *
41 * @ah: The &struct ath5k_hw 69 * @ah: The &struct ath5k_hw
42 * @op_mode: &enum nl80211_iftype operating mode 70 * @len: Frame's length in bytes
71 * @rate: The @struct ieee80211_rate
43 * 72 *
44 * Initialize PCU for the various operating modes (AP/STA etc) 73 * Calculate tx duration of a frame given its rate and length
74 * It extends ieee80211_generic_frame_duration for non-standard
75 * bwmodes.
45 */ 76 */
46int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode) 77int ath5k_hw_get_frame_duration(struct ath5k_hw *ah,
78 int len, struct ieee80211_rate *rate)
47{ 79{
48 struct ath_common *common = ath5k_hw_common(ah); 80 struct ath5k_softc *sc = ah->ah_sc;
49 u32 pcu_reg, beacon_reg, low_id, high_id; 81 int sifs, preamble, plcp_bits, sym_time;
82 int bitrate, bits, symbols, symbol_bits;
83 int dur;
84
85 /* Fallback */
86 if (!ah->ah_bwmode) {
87 dur = ieee80211_generic_frame_duration(sc->hw,
88 NULL, len, rate);
89 return dur;
90 }
50 91
51 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode); 92 bitrate = rate->bitrate;
93 preamble = AR5K_INIT_OFDM_PREAMPLE_TIME;
94 plcp_bits = AR5K_INIT_OFDM_PLCP_BITS;
95 sym_time = AR5K_INIT_OFDM_SYMBOL_TIME;
52 96
53 /* Preserve rest settings */ 97 switch (ah->ah_bwmode) {
54 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; 98 case AR5K_BWMODE_40MHZ:
55 pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP 99 sifs = AR5K_INIT_SIFS_TURBO;
56 | AR5K_STA_ID1_KEYSRCH_MODE 100 preamble = AR5K_INIT_OFDM_PREAMBLE_TIME_MIN;
57 | (ah->ah_version == AR5K_AR5210 ? 101 break;
58 (AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0)); 102 case AR5K_BWMODE_10MHZ:
103 sifs = AR5K_INIT_SIFS_HALF_RATE;
104 preamble *= 2;
105 sym_time *= 2;
106 break;
107 case AR5K_BWMODE_5MHZ:
108 sifs = AR5K_INIT_SIFS_QUARTER_RATE;
109 preamble *= 4;
110 sym_time *= 4;
111 break;
112 default:
113 sifs = AR5K_INIT_SIFS_DEFAULT_BG;
114 break;
115 }
59 116
60 beacon_reg = 0; 117 bits = plcp_bits + (len << 3);
118 /* Bit rate is in 100Kbits */
119 symbol_bits = bitrate * sym_time;
120 symbols = DIV_ROUND_UP(bits * 10, symbol_bits);
61 121
62 switch (op_mode) { 122 dur = sifs + preamble + (sym_time * symbols);
63 case NL80211_IFTYPE_ADHOC:
64 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
65 beacon_reg |= AR5K_BCR_ADHOC;
66 if (ah->ah_version == AR5K_AR5210)
67 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
68 else
69 AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
70 break;
71 123
72 case NL80211_IFTYPE_AP: 124 return dur;
73 case NL80211_IFTYPE_MESH_POINT: 125}
74 pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
75 beacon_reg |= AR5K_BCR_AP;
76 if (ah->ah_version == AR5K_AR5210)
77 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
78 else
79 AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
80 break;
81 126
82 case NL80211_IFTYPE_STATION: 127/**
83 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE 128 * ath5k_hw_get_default_slottime - Get the default slot time for current mode
84 | (ah->ah_version == AR5K_AR5210 ? 129 *
85 AR5K_STA_ID1_PWR_SV : 0); 130 * @ah: The &struct ath5k_hw
86 case NL80211_IFTYPE_MONITOR: 131 */
87 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE 132unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
88 | (ah->ah_version == AR5K_AR5210 ? 133{
89 AR5K_STA_ID1_NO_PSPOLL : 0); 134 struct ieee80211_channel *channel = ah->ah_current_channel;
90 break; 135 unsigned int slot_time;
91 136
137 switch (ah->ah_bwmode) {
138 case AR5K_BWMODE_40MHZ:
139 slot_time = AR5K_INIT_SLOT_TIME_TURBO;
140 break;
141 case AR5K_BWMODE_10MHZ:
142 slot_time = AR5K_INIT_SLOT_TIME_HALF_RATE;
143 break;
144 case AR5K_BWMODE_5MHZ:
145 slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE;
146 break;
147 case AR5K_BWMODE_DEFAULT:
148 slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
92 default: 149 default:
93 return -EINVAL; 150 if (channel->hw_value & CHANNEL_CCK)
151 slot_time = AR5K_INIT_SLOT_TIME_B;
152 break;
94 } 153 }
95 154
96 /* 155 return slot_time;
97 * Set PCU registers 156}
98 */
99 low_id = get_unaligned_le32(common->macaddr);
100 high_id = get_unaligned_le16(common->macaddr + 4);
101 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
102 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
103 157
104 /* 158/**
105 * Set Beacon Control Register on 5210 159 * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
106 */ 160 *
107 if (ah->ah_version == AR5K_AR5210) 161 * @ah: The &struct ath5k_hw
108 ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR); 162 */
163unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
164{
165 struct ieee80211_channel *channel = ah->ah_current_channel;
166 unsigned int sifs;
109 167
110 return 0; 168 switch (ah->ah_bwmode) {
169 case AR5K_BWMODE_40MHZ:
170 sifs = AR5K_INIT_SIFS_TURBO;
171 break;
172 case AR5K_BWMODE_10MHZ:
173 sifs = AR5K_INIT_SIFS_HALF_RATE;
174 break;
175 case AR5K_BWMODE_5MHZ:
176 sifs = AR5K_INIT_SIFS_QUARTER_RATE;
177 break;
178 case AR5K_BWMODE_DEFAULT:
179 sifs = AR5K_INIT_SIFS_DEFAULT_BG;
180 default:
181 if (channel->hw_value & CHANNEL_5GHZ)
182 sifs = AR5K_INIT_SIFS_DEFAULT_A;
183 break;
184 }
185
186 return sifs;
111} 187}
112 188
113/** 189/**
114 * ath5k_hw_update - Update MIB counters (mac layer statistics) 190 * ath5k_hw_update_mib_counters - Update MIB counters (mac layer statistics)
115 * 191 *
116 * @ah: The &struct ath5k_hw 192 * @ah: The &struct ath5k_hw
117 * 193 *
@@ -133,36 +209,88 @@ void ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
133 stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT); 209 stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
134} 210}
135 211
212
213/******************\
214* ACK/CTS Timeouts *
215\******************/
216
136/** 217/**
137 * ath5k_hw_set_ack_bitrate - set bitrate for ACKs 218 * ath5k_hw_write_rate_duration - fill rate code to duration table
138 * 219 *
139 * @ah: The &struct ath5k_hw 220 * @ah: the &struct ath5k_hw
140 * @high: Flag to determine if we want to use high transmission rate 221 * @mode: one of enum ath5k_driver_mode
141 * for ACKs or not 222 *
223 * Write the rate code to duration table upon hw reset. This is a helper for
224 * ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
225 * the hardware, based on current mode, for each rate. The rates which are
226 * capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have
227 * a different rate code, so we write their value twice (one for long preamble
228 * and one for short).
229 *
230 * Note: Band doesn't matter here, if we set the values for OFDM it works
231 * on both a and g modes. So all we have to do is set values for all g rates
232 * that include all OFDM and CCK rates.
142 * 233 *
143 * If high flag is set, we tell hw to use a set of control rates based on
144 * the current transmission rate (check out control_rates array inside reset.c).
145 * If not hw just uses the lowest rate available for the current modulation
146 * scheme being used (1Mbit for CCK and 6Mbits for OFDM).
147 */ 234 */
148void ath5k_hw_set_ack_bitrate_high(struct ath5k_hw *ah, bool high) 235static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
149{ 236{
150 if (ah->ah_version != AR5K_AR5212) 237 struct ath5k_softc *sc = ah->ah_sc;
151 return; 238 struct ieee80211_rate *rate;
152 else { 239 unsigned int i;
153 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB; 240 /* 802.11g covers both OFDM and CCK */
154 if (high) 241 u8 band = IEEE80211_BAND_2GHZ;
155 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val); 242
243 /* Write rate duration table */
244 for (i = 0; i < sc->sbands[band].n_bitrates; i++) {
245 u32 reg;
246 u16 tx_time;
247
248 if (ah->ah_ack_bitrate_high)
249 rate = &sc->sbands[band].bitrates[ack_rates_high[i]];
250 /* CCK -> 1Mb */
251 else if (i < 4)
252 rate = &sc->sbands[band].bitrates[0];
253 /* OFDM -> 6Mb */
156 else 254 else
157 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val); 255 rate = &sc->sbands[band].bitrates[4];
256
257 /* Set ACK timeout */
258 reg = AR5K_RATE_DUR(rate->hw_value);
259
260 /* An ACK frame consists of 10 bytes. If you add the FCS,
261 * which ieee80211_generic_frame_duration() adds,
262 * it's 14 bytes. Note we use the control rate and not the
263 * actual rate for this frame. See mac80211 tx.c
264 * ieee80211_duration() for a brief description of
265 * what rate we should choose to TX ACKs. */
266 tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
267
268 tx_time = le16_to_cpu(tx_time);
269
270 ath5k_hw_reg_write(ah, tx_time, reg);
271
272 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
273 continue;
274
275 /*
276 * We're not distinguishing short preamble here;
277 * all we'll get is a longer value, which is not
278 * necessarily bad. We could export and use
279 * ieee80211_frame_duration() but that needs to be
280 * fixed first to be properly usable by mac80211 drivers:
281 *
282 * - remove erp stuff and let the routine figure ofdm
283 * erp rates
284 * - remove passing argument ieee80211_local as
285 * drivers don't have access to it
286 * - move drivers using ieee80211_generic_frame_duration()
287 * to this
288 */
289 ath5k_hw_reg_write(ah, tx_time,
290 reg + (AR5K_SET_SHORT_PREAMBLE << 2));
158 } 291 }
159} 292}
160 293
161
162/******************\
163* ACK/CTS Timeouts *
164\******************/
165
166/** 294/**
167 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU 295 * ath5k_hw_set_ack_timeout - Set ACK timeout on PCU
168 * 296 *
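
With ah_ack_bitrate_high set, ath5k_hw_write_rate_duration() above uses ack_rates_high[] as an index back into the G-band bitrate table, so for example a frame sent at 36 Mbit/s gets its ACK duration computed at 24 Mbit/s, while the low setting always falls back to 1 Mbit/s (CCK) or 6 Mbit/s (OFDM). A small sketch of that lookup; the 12-entry rate ordering is the usual mac80211 G-band table and is assumed here rather than taken from the patch:

#include <stdio.h>

static const int g_rates[12] =			/* in 100 kbit/s */
	{ 10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540 };
static const unsigned int ack_rates_high[12] =
	{ 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 };

int main(void)
{
	int i;

	for (i = 0; i < 12; i++)
		printf("tx %4d -> ack %4d (x100 kbit/s)\n",
		       g_rates[i], g_rates[ack_rates_high[i]]);
	return 0;
}
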
@@ -199,88 +327,10 @@ static int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
199 return 0; 327 return 0;
200} 328}
201 329
202/**
203 * ath5k_hw_htoclock - Translate usec to hw clock units
204 *
205 * @ah: The &struct ath5k_hw
206 * @usec: value in microseconds
207 */
208unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
209{
210 struct ath_common *common = ath5k_hw_common(ah);
211 return usec * common->clockrate;
212}
213
214/**
215 * ath5k_hw_clocktoh - Translate hw clock units to usec
216 * @clock: value in hw clock units
217 */
218unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
219{
220 struct ath_common *common = ath5k_hw_common(ah);
221 return clock / common->clockrate;
222}
223
224/**
225 * ath5k_hw_set_clockrate - Set common->clockrate for the current channel
226 *
227 * @ah: The &struct ath5k_hw
228 */
229void ath5k_hw_set_clockrate(struct ath5k_hw *ah)
230{
231 struct ieee80211_channel *channel = ah->ah_current_channel;
232 struct ath_common *common = ath5k_hw_common(ah);
233 int clock;
234
235 if (channel->hw_value & CHANNEL_5GHZ)
236 clock = 40; /* 802.11a */
237 else if (channel->hw_value & CHANNEL_CCK)
238 clock = 22; /* 802.11b */
239 else
240 clock = 44; /* 802.11g */
241
242 /* Clock rate in turbo modes is twice the normal rate */
243 if (channel->hw_value & CHANNEL_TURBO)
244 clock *= 2;
245
246 common->clockrate = clock;
247}
248
249/**
250 * ath5k_hw_get_default_slottime - Get the default slot time for current mode
251 *
252 * @ah: The &struct ath5k_hw
253 */
254static unsigned int ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
255{
256 struct ieee80211_channel *channel = ah->ah_current_channel;
257
258 if (channel->hw_value & CHANNEL_TURBO)
259 return 6; /* both turbo modes */
260
261 if (channel->hw_value & CHANNEL_CCK)
262 return 20; /* 802.11b */
263
264 return 9; /* 802.11 a/g */
265}
266
267/**
268 * ath5k_hw_get_default_sifs - Get the default SIFS for current mode
269 *
270 * @ah: The &struct ath5k_hw
271 */
272static unsigned int ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
273{
274 struct ieee80211_channel *channel = ah->ah_current_channel;
275
276 if (channel->hw_value & CHANNEL_TURBO)
277 return 8; /* both turbo modes */
278 330
279 if (channel->hw_value & CHANNEL_5GHZ) 331/*******************\
280 return 16; /* 802.11a */ 332* RX filter Control *
281 333\*******************/
282 return 10; /* 802.11 b/g */
283}
284 334
285/** 335/**
286 * ath5k_hw_set_lladdr - Set station id 336 * ath5k_hw_set_lladdr - Set station id
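
The helpers removed from pcu.c above (ath5k_hw_htoclock(), ath5k_hw_clocktoh(), ath5k_hw_set_clockrate()) convert between microseconds and MAC clock units using the per-band clock rate shown in that code: 40 MHz for 802.11a, 22 for 802.11b, 44 for 802.11g, doubled in turbo modes. A quick worked sketch of the conversion, using the 802.11g value as the example:

#include <stdio.h>

int main(void)
{
	unsigned int clockrate = 44;	/* 802.11g clock, MHz */
	unsigned int slot_usec = 9;	/* short slot time */

	printf("%u us = %u clock units\n", slot_usec, slot_usec * clockrate);
	printf("880 clock units = %u us\n", 880 / clockrate);
	return 0;
}
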
@@ -362,39 +412,6 @@ void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
362 ath_hw_setbssidmask(common); 412 ath_hw_setbssidmask(common);
363} 413}
364 414
365/************\
366* RX Control *
367\************/
368
369/**
370 * ath5k_hw_start_rx_pcu - Start RX engine
371 *
372 * @ah: The &struct ath5k_hw
373 *
374 * Starts RX engine on PCU so that hw can process RXed frames
375 * (ACK etc).
376 *
377 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
378 */
379void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
380{
381 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
382}
383
384/**
385 * at5k_hw_stop_rx_pcu - Stop RX engine
386 *
387 * @ah: The &struct ath5k_hw
388 *
389 * Stops RX engine on PCU
390 *
391 * TODO: Detach ANI here
392 */
393void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
394{
395 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
396}
397
398/* 415/*
399 * Set multicast filter 416 * Set multicast filter
400 */ 417 */
@@ -746,7 +763,7 @@ ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
746 * @ah: The &struct ath5k_hw 763 * @ah: The &struct ath5k_hw
747 * @coverage_class: IEEE 802.11 coverage class number 764 * @coverage_class: IEEE 802.11 coverage class number
748 * 765 *
749 * Sets slot time, ACK timeout and CTS timeout for given coverage class. 766 * Sets IFS intervals and ACK/CTS timeouts for given coverage class.
750 */ 767 */
751void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class) 768void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
752{ 769{
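
Per the updated comment above, a coverage class now scales the IFS intervals as well as the ACK/CTS timeouts; the hunk that follows shows the timeouts being derived as default SIFS + slot time. The slot-time assignment itself is outside the visible hunk, so the conventional 3 us of extra air-propagation time per class used below is an assumption of this sketch, not something the patch shows:

#include <stdio.h>

int main(void)
{
	unsigned int default_slot = 9;		/* example 802.11a slot, us */
	unsigned int default_sifs = 16;		/* example 802.11a SIFS, us */
	unsigned int coverage_class = 2;
	unsigned int slot_time = default_slot + 3 * coverage_class;
	unsigned int ack_timeout = default_sifs + slot_time; /* cts_timeout is the same */

	printf("slot %u us, ack/cts timeout %u us\n", slot_time, ack_timeout);
	return 0;
}
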
@@ -755,9 +772,175 @@ void ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
755 int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time; 772 int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
756 int cts_timeout = ack_timeout; 773 int cts_timeout = ack_timeout;
757 774
758 ath5k_hw_set_slot_time(ah, slot_time); 775 ath5k_hw_set_ifs_intervals(ah, slot_time);
759 ath5k_hw_set_ack_timeout(ah, ack_timeout); 776 ath5k_hw_set_ack_timeout(ah, ack_timeout);
760 ath5k_hw_set_cts_timeout(ah, cts_timeout); 777 ath5k_hw_set_cts_timeout(ah, cts_timeout);
761 778
762 ah->ah_coverage_class = coverage_class; 779 ah->ah_coverage_class = coverage_class;
763} 780}
781
782/***************************\
783* Init/Start/Stop functions *
784\***************************/
785
786/**
787 * ath5k_hw_start_rx_pcu - Start RX engine
788 *
789 * @ah: The &struct ath5k_hw
790 *
791 * Starts RX engine on PCU so that hw can process RXed frames
792 * (ACK etc).
793 *
794 * NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
795 */
796void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
797{
798 AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
799}
800
801/**
802 * at5k_hw_stop_rx_pcu - Stop RX engine
803 *
804 * @ah: The &struct ath5k_hw
805 *
806 * Stops RX engine on PCU
807 */
808void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
809{
810 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
811}
812
813/**
814 * ath5k_hw_set_opmode - Set PCU operating mode
815 *
816 * @ah: The &struct ath5k_hw
817 * @op_mode: &enum nl80211_iftype operating mode
818 *
819 * Configure PCU for the various operating modes (AP/STA etc)
820 */
821int ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
822{
823 struct ath_common *common = ath5k_hw_common(ah);
824 u32 pcu_reg, beacon_reg, low_id, high_id;
825
826 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
827
828 /* Preserve rest settings */
829 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
830 pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
831 | AR5K_STA_ID1_KEYSRCH_MODE
832 | (ah->ah_version == AR5K_AR5210 ?
833 (AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
834
835 beacon_reg = 0;
836
837 switch (op_mode) {
838 case NL80211_IFTYPE_ADHOC:
839 pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
840 beacon_reg |= AR5K_BCR_ADHOC;
841 if (ah->ah_version == AR5K_AR5210)
842 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
843 else
844 AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
845 break;
846
847 case NL80211_IFTYPE_AP:
848 case NL80211_IFTYPE_MESH_POINT:
849 pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
850 beacon_reg |= AR5K_BCR_AP;
851 if (ah->ah_version == AR5K_AR5210)
852 pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
853 else
854 AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
855 break;
856
857 case NL80211_IFTYPE_STATION:
858 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
859 | (ah->ah_version == AR5K_AR5210 ?
860 AR5K_STA_ID1_PWR_SV : 0);
861 case NL80211_IFTYPE_MONITOR:
862 pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
863 | (ah->ah_version == AR5K_AR5210 ?
864 AR5K_STA_ID1_NO_PSPOLL : 0);
865 break;
866
867 default:
868 return -EINVAL;
869 }
870
871 /*
872 * Set PCU registers
873 */
874 low_id = get_unaligned_le32(common->macaddr);
875 high_id = get_unaligned_le16(common->macaddr + 4);
876 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
877 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
878
879 /*
880 * Set Beacon Control Register on 5210
881 */
882 if (ah->ah_version == AR5K_AR5210)
883 ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
884
885 return 0;
886}
887
888void ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
889 u8 mode)
890{
891 /* Set bssid and bssid mask */
892 ath5k_hw_set_bssid(ah);
893
894 /* Set PCU config */
895 ath5k_hw_set_opmode(ah, op_mode);
896
897 /* Write rate duration table only on AR5212 and if
898 * virtual interface has already been brought up
899 * XXX: rethink this after new mode changes to
900 * mac80211 are integrated */
901 if (ah->ah_version == AR5K_AR5212 &&
902 ah->ah_sc->nvifs)
903 ath5k_hw_write_rate_duration(ah);
904
905 /* Set RSSI/BRSSI thresholds
906 *
907 * Note: If we decide to set this value
908 * dynamically, keep in mind that when the AR5K_RSSI_THR
909 * register is read it might return 0x40 if we haven't
910 * written anything to it, plus the BMISS RSSI threshold is zeroed.
911 * So doing a save/restore procedure here isn't the right
912 * choice. Instead store it on ath5k_hw */
913 ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
914 AR5K_TUNE_BMISS_THRES <<
915 AR5K_RSSI_THR_BMISS_S),
916 AR5K_RSSI_THR);
917
918 /* MIC QoS support */
919 if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
920 ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
921 ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
922 }
923
924 /* QoS NOACK Policy */
925 if (ah->ah_version == AR5K_AR5212) {
926 ath5k_hw_reg_write(ah,
927 AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
928 AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
929 AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
930 AR5K_QOS_NOACK);
931 }
932
933 /* Restore slot time and ACK timeouts */
934 if (ah->ah_coverage_class > 0)
935 ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
936
937 /* Set ACK bitrate mode (see ack_rates_high) */
938 if (ah->ah_version == AR5K_AR5212) {
939 u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
940 if (ah->ah_ack_bitrate_high)
941 AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
942 else
943 AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
944 }
945 return;
946}
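For reference, a minimal sketch (not part of the patch) of the timing arithmetic behind the coverage-class hunks above: ack_timeout = SIFS + slot_time and cts_timeout = ack_timeout, as in the diff. The 9 us slot, 16 us SIFS and 3 us-per-class slot extension are illustrative assumptions; the driver takes SIFS from ath5k_hw_get_default_sifs() (visible above) and the slot time from a corresponding helper that also accounts for the bandwidth mode.

/* Illustrative only: timing math for ath5k_hw_set_coverage_class() above,
 * assuming 802.11a base values (9 us slot, 16 us SIFS) and the standard
 * 3 us-per-class slot extension. */
#include <stdio.h>

int main(void)
{
	int coverage_class = 2;			/* example value */
	int slot_time = 9 + 3 * coverage_class;	/* 15 us */
	int sifs = 16;
	int ack_timeout = sifs + slot_time;	/* 31 us, as in the hunk */
	int cts_timeout = ack_timeout;

	printf("slot %d us, ack %d us, cts %d us\n",
	       slot_time, ack_timeout, cts_timeout);
	return 0;
}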
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 219367884e64..78c26fdccad1 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -29,6 +29,95 @@
29#include "rfbuffer.h" 29#include "rfbuffer.h"
30#include "rfgain.h" 30#include "rfgain.h"
31 31
32
33/******************\
34* Helper functions *
35\******************/
36
37/*
38 * Get the PHY Chip revision
39 */
40u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
41{
42 unsigned int i;
43 u32 srev;
44 u16 ret;
45
46 /*
47 * Set the radio chip access register
48 */
49 switch (chan) {
50 case CHANNEL_2GHZ:
51 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
52 break;
53 case CHANNEL_5GHZ:
54 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
55 break;
56 default:
57 return 0;
58 }
59
60 mdelay(2);
61
62 /* ...wait until PHY is ready and read the selected radio revision */
63 ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
64
65 for (i = 0; i < 8; i++)
66 ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
67
68 if (ah->ah_version == AR5K_AR5210) {
69 srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
70 ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
71 } else {
72 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
73 ret = (u16)ath5k_hw_bitswap(((srev & 0xf0) >> 4) |
74 ((srev & 0x0f) << 4), 8);
75 }
76
77 /* Reset to the 5GHz mode */
78 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
79
80 return ret;
81}
82
83/*
84 * Check if a channel is supported
85 */
86bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags)
87{
88 /* Check if the channel is in our supported range */
89 if (flags & CHANNEL_2GHZ) {
90 if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
91 (freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
92 return true;
93 } else if (flags & CHANNEL_5GHZ)
94 if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
95 (freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
96 return true;
97
98 return false;
99}
100
101bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
102 struct ieee80211_channel *channel)
103{
104 u8 refclk_freq;
105
106 if ((ah->ah_radio == AR5K_RF5112) ||
107 (ah->ah_radio == AR5K_RF5413) ||
108 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
109 refclk_freq = 40;
110 else
111 refclk_freq = 32;
112
113 if ((channel->center_freq % refclk_freq != 0) &&
114 ((channel->center_freq % refclk_freq < 10) ||
115 (channel->center_freq % refclk_freq > 22)))
116 return true;
117 else
118 return false;
119}
120
32/* 121/*
33 * Used to modify RF Banks before writing them to AR5K_RF_BUFFER 122 * Used to modify RF Banks before writing them to AR5K_RF_BUFFER
34 */ 123 */
@@ -110,6 +199,90 @@ static unsigned int ath5k_hw_rfb_op(struct ath5k_hw *ah,
110 return data; 199 return data;
111} 200}
112 201
202/**
203 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212
204 *
205 * @ah: the &struct ath5k_hw
206 * @channel: the currently set channel upon reset
207 *
208 * Write the delta slope coefficient (used on pilot tracking ?) for OFDM
209 * operation on the AR5212 upon reset. This is a helper for ath5k_hw_phy_init.
210 *
211 * Since delta slope is floating point we split it on its exponent and
212 * mantissa and provide these values on hw.
213 *
214 * For more info, this patent appears to be related:
215 * http://www.freepatentsonline.com/7184495.html
216 */
217static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
218 struct ieee80211_channel *channel)
219{
220 /* Get exponent and mantissa and set it */
221 u32 coef_scaled, coef_exp, coef_man,
222 ds_coef_exp, ds_coef_man, clock;
223
224 BUG_ON(!(ah->ah_version == AR5K_AR5212) ||
225 !(channel->hw_value & CHANNEL_OFDM));
226
227 /* Get coefficient
228 * ALGO: coef = (5 * clock / carrier_freq) / 2
229 * we scale coef by shifting clock value by 24 for
230 * better precision since we use integers */
231 switch (ah->ah_bwmode) {
232 case AR5K_BWMODE_40MHZ:
233 clock = 40 * 2;
234 break;
235 case AR5K_BWMODE_10MHZ:
236 clock = 40 / 2;
237 break;
238 case AR5K_BWMODE_5MHZ:
239 clock = 40 / 4;
240 break;
241 default:
242 clock = 40;
243 break;
244 }
245 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
246
247 /* Get exponent
248 * ALGO: coef_exp = 14 - highest set bit position */
249 coef_exp = ilog2(coef_scaled);
250
251 /* Doesn't make sense if it's zero*/
252 if (!coef_scaled || !coef_exp)
253 return -EINVAL;
254
255 /* Note: we've shifted coef_scaled by 24 */
256 coef_exp = 14 - (coef_exp - 24);
257
258
259 /* Get mantissa (significant digits)
260 * ALGO: coef_mant = floor(coef_scaled* 2^coef_exp+0.5) */
261 coef_man = coef_scaled +
262 (1 << (24 - coef_exp - 1));
263
264 /* Calculate delta slope coefficient exponent
265 * and mantissa (remove scaling) and set them on hw */
266 ds_coef_man = coef_man >> (24 - coef_exp);
267 ds_coef_exp = coef_exp - 16;
268
269 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
270 AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
271 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
272 AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
273
274 return 0;
275}
276
277int ath5k_hw_phy_disable(struct ath5k_hw *ah)
278{
279 /*Just a try M.F.*/
280 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
281
282 return 0;
283}
284
285
113/**********************\ 286/**********************\
114* RF Gain optimization * 287* RF Gain optimization *
115\**********************/ 288\**********************/
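A worked, user-space sketch of the delta-slope arithmetic in the ath5k_hw_write_ofdm_timings() hunk above. The 2412 MHz channel and the default bandwidth (clock = 40) are example inputs only; the computation mirrors the hunk's integer math step by step.

/* Illustration of the delta slope exponent/mantissa split above
 * (example inputs: 2412 MHz channel, default bandwidth, clock = 40). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t clock = 40, freq = 2412;
	uint32_t coef_scaled = ((5 * (clock << 24)) / 2) / freq;
	uint32_t coef_exp = 0, tmp = coef_scaled;
	uint32_t coef_man, ds_coef_man, ds_coef_exp;

	while (tmp >>= 1)			/* ilog2(coef_scaled) */
		coef_exp++;

	coef_exp = 14 - (coef_exp - 24);	/* undo the <<24 scaling */
	coef_man = coef_scaled + (1 << (24 - coef_exp - 1));
	ds_coef_man = coef_man >> (24 - coef_exp);
	ds_coef_exp = coef_exp - 16;

	/* prints "man 21737 exp 3" for these inputs */
	printf("man %u exp %u\n", (unsigned)ds_coef_man, (unsigned)ds_coef_exp);
	return 0;
}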
@@ -436,10 +609,10 @@ done:
436/* Write initial RF gain table to set the RF sensitivity 609/* Write initial RF gain table to set the RF sensitivity
437 * this one works on all RF chips and has nothing to do 610 * this one works on all RF chips and has nothing to do
438 * with gain_F calibration */ 611 * with gain_F calibration */
439int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq) 612static int ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum ieee80211_band band)
440{ 613{
441 const struct ath5k_ini_rfgain *ath5k_rfg; 614 const struct ath5k_ini_rfgain *ath5k_rfg;
442 unsigned int i, size; 615 unsigned int i, size, index;
443 616
444 switch (ah->ah_radio) { 617 switch (ah->ah_radio) {
445 case AR5K_RF5111: 618 case AR5K_RF5111:
@@ -471,17 +644,11 @@ int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
471 return -EINVAL; 644 return -EINVAL;
472 } 645 }
473 646
474 switch (freq) { 647 index = (band == IEEE80211_BAND_2GHZ) ? 1 : 0;
475 case AR5K_INI_RFGAIN_2GHZ:
476 case AR5K_INI_RFGAIN_5GHZ:
477 break;
478 default:
479 return -EINVAL;
480 }
481 648
482 for (i = 0; i < size; i++) { 649 for (i = 0; i < size; i++) {
483 AR5K_REG_WAIT(i); 650 AR5K_REG_WAIT(i);
484 ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[freq], 651 ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[index],
485 (u32)ath5k_rfg[i].rfg_register); 652 (u32)ath5k_rfg[i].rfg_register);
486 } 653 }
487 654
@@ -494,12 +661,11 @@ int ath5k_hw_rfgain_init(struct ath5k_hw *ah, unsigned int freq)
494* RF Registers setup * 661* RF Registers setup *
495\********************/ 662\********************/
496 663
497
498/* 664/*
499 * Setup RF registers by writing RF buffer on hw 665 * Setup RF registers by writing RF buffer on hw
500 */ 666 */
501int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel, 667static int ath5k_hw_rfregs_init(struct ath5k_hw *ah,
502 unsigned int mode) 668 struct ieee80211_channel *channel, unsigned int mode)
503{ 669{
504 const struct ath5k_rf_reg *rf_regs; 670 const struct ath5k_rf_reg *rf_regs;
505 const struct ath5k_ini_rfbuffer *ini_rfb; 671 const struct ath5k_ini_rfbuffer *ini_rfb;
@@ -652,6 +818,11 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
652 818
653 g_step = &go->go_step[ah->ah_gain.g_step_idx]; 819 g_step = &go->go_step[ah->ah_gain.g_step_idx];
654 820
821 /* Set turbo mode (N/A on RF5413) */
822 if ((ah->ah_bwmode == AR5K_BWMODE_40MHZ) &&
823 (ah->ah_radio != AR5K_RF5413))
824 ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_TURBO, false);
825
655 /* Bank Modifications (chip-specific) */ 826 /* Bank Modifications (chip-specific) */
656 if (ah->ah_radio == AR5K_RF5111) { 827 if (ah->ah_radio == AR5K_RF5111) {
657 828
@@ -691,7 +862,23 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
691 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode], 862 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode],
692 AR5K_RF_PLO_SEL, true); 863 AR5K_RF_PLO_SEL, true);
693 864
694 /* TODO: Half/quarter channel support */ 865 /* Tweak power detectors for half/quarter rate support */
866 if (ah->ah_bwmode == AR5K_BWMODE_5MHZ ||
867 ah->ah_bwmode == AR5K_BWMODE_10MHZ) {
868 u8 wait_i;
869
870 ath5k_hw_rfb_op(ah, rf_regs, 0x1f,
871 AR5K_RF_WAIT_S, true);
872
873 wait_i = (ah->ah_bwmode == AR5K_BWMODE_5MHZ) ?
874 0x1f : 0x10;
875
876 ath5k_hw_rfb_op(ah, rf_regs, wait_i,
877 AR5K_RF_WAIT_I, true);
878 ath5k_hw_rfb_op(ah, rf_regs, 3,
879 AR5K_RF_MAX_TIME, true);
880
881 }
695 } 882 }
696 883
697 if (ah->ah_radio == AR5K_RF5112) { 884 if (ah->ah_radio == AR5K_RF5112) {
@@ -789,8 +976,20 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
789 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode], 976 ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode],
790 AR5K_RF_GAIN_I, true); 977 AR5K_RF_GAIN_I, true);
791 978
792 /* TODO: Half/quarter channel support */ 979 /* Tweak power detector for half/quarter rates */
980 if (ah->ah_bwmode == AR5K_BWMODE_5MHZ ||
981 ah->ah_bwmode == AR5K_BWMODE_10MHZ) {
982 u8 pd_delay;
793 983
984 pd_delay = (ah->ah_bwmode == AR5K_BWMODE_5MHZ) ?
985 0xf : 0x8;
986
987 ath5k_hw_rfb_op(ah, rf_regs, pd_delay,
988 AR5K_RF_PD_PERIOD_A, true);
989 ath5k_hw_rfb_op(ah, rf_regs, 0xf,
990 AR5K_RF_PD_DELAY_A, true);
991
992 }
794 } 993 }
795 994
796 if (ah->ah_radio == AR5K_RF5413 && 995 if (ah->ah_radio == AR5K_RF5413 &&
@@ -822,24 +1021,6 @@ int ath5k_hw_rfregs_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
822\**************************/ 1021\**************************/
823 1022
824/* 1023/*
825 * Check if a channel is supported
826 */
827bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags)
828{
829 /* Check if the channel is in our supported range */
830 if (flags & CHANNEL_2GHZ) {
831 if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
832 (freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
833 return true;
834 } else if (flags & CHANNEL_5GHZ)
835 if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
836 (freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
837 return true;
838
839 return false;
840}
841
842/*
843 * Conversion needed for RF5110 1024
844 */ 1025 */
845static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel) 1026static u32 ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
@@ -1045,7 +1226,8 @@ static int ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
1045/* 1226/*
1046 * Set a channel on the radio chip 1227 * Set a channel on the radio chip
1047 */ 1228 */
1048int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel) 1229static int ath5k_hw_channel(struct ath5k_hw *ah,
1230 struct ieee80211_channel *channel)
1049{ 1231{
1050 int ret; 1232 int ret;
1051 /* 1233 /*
@@ -1092,8 +1274,6 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1092 } 1274 }
1093 1275
1094 ah->ah_current_channel = channel; 1276 ah->ah_current_channel = channel;
1095 ah->ah_turbo = channel->hw_value == CHANNEL_T ? true : false;
1096 ath5k_hw_set_clockrate(ah);
1097 1277
1098 return 0; 1278 return 0;
1099} 1279}
@@ -1102,18 +1282,12 @@ int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel)
1102 PHY calibration 1282 PHY calibration
1103\*****************/ 1283\*****************/
1104 1284
1105static int sign_extend(int val, const int nbits)
1106{
1107 int order = BIT(nbits-1);
1108 return (val ^ order) - order;
1109}
1110
1111static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah) 1285static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
1112{ 1286{
1113 s32 val; 1287 s32 val;
1114 1288
1115 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF); 1289 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
1116 return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9); 1290 return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
1117} 1291}
1118 1292
1119void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah) 1293void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
@@ -1181,22 +1355,7 @@ void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1181 return; 1355 return;
1182 } 1356 }
1183 1357
1184 switch (ah->ah_current_channel->hw_value & CHANNEL_MODES) { 1358 ee_mode = ath5k_eeprom_mode_from_channel(ah->ah_current_channel);
1185 case CHANNEL_A:
1186 case CHANNEL_T:
1187 case CHANNEL_XR:
1188 ee_mode = AR5K_EEPROM_MODE_11A;
1189 break;
1190 case CHANNEL_G:
1191 case CHANNEL_TG:
1192 ee_mode = AR5K_EEPROM_MODE_11G;
1193 break;
1194 default:
1195 case CHANNEL_B:
1196 ee_mode = AR5K_EEPROM_MODE_11B;
1197 break;
1198 }
1199
1200 1359
1201 /* completed NF calibration, test threshold */ 1360 /* completed NF calibration, test threshold */
1202 nf = ath5k_hw_read_measured_noise_floor(ah); 1361 nf = ath5k_hw_read_measured_noise_floor(ah);
@@ -1425,31 +1584,12 @@ int ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
1425 return ret; 1584 return ret;
1426} 1585}
1427 1586
1587
1428/***************************\ 1588/***************************\
1429* Spur mitigation functions * 1589* Spur mitigation functions *
1430\***************************/ 1590\***************************/
1431 1591
1432bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 1592static void
1433 struct ieee80211_channel *channel)
1434{
1435 u8 refclk_freq;
1436
1437 if ((ah->ah_radio == AR5K_RF5112) ||
1438 (ah->ah_radio == AR5K_RF5413) ||
1439 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
1440 refclk_freq = 40;
1441 else
1442 refclk_freq = 32;
1443
1444 if ((channel->center_freq % refclk_freq != 0) &&
1445 ((channel->center_freq % refclk_freq < 10) ||
1446 (channel->center_freq % refclk_freq > 22)))
1447 return true;
1448 else
1449 return false;
1450}
1451
1452void
1453ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah, 1593ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1454 struct ieee80211_channel *channel) 1594 struct ieee80211_channel *channel)
1455{ 1595{
@@ -1478,7 +1618,7 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1478 spur_chan_fbin = AR5K_EEPROM_NO_SPUR; 1618 spur_chan_fbin = AR5K_EEPROM_NO_SPUR;
1479 spur_detection_window = AR5K_SPUR_CHAN_WIDTH; 1619 spur_detection_window = AR5K_SPUR_CHAN_WIDTH;
1480 /* XXX: Half/Quarter channels ?*/ 1620 /* XXX: Half/Quarter channels ?*/
1481 if (channel->hw_value & CHANNEL_TURBO) 1621 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
1482 spur_detection_window *= 2; 1622 spur_detection_window *= 2;
1483 1623
1484 for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) { 1624 for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) {
@@ -1507,32 +1647,43 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1507 * Calculate deltas: 1647 * Calculate deltas:
1508 * spur_freq_sigma_delta -> spur_offset / sample_freq << 21 1648 * spur_freq_sigma_delta -> spur_offset / sample_freq << 21
1509 * spur_delta_phase -> spur_offset / chip_freq << 11 1649 * spur_delta_phase -> spur_offset / chip_freq << 11
1510 * Note: Both values have 100KHz resolution 1650 * Note: Both values have 100Hz resolution
1511 */ 1651 */
1512 /* XXX: Half/Quarter rate channels ? */ 1652 switch (ah->ah_bwmode) {
1513 switch (channel->hw_value) { 1653 case AR5K_BWMODE_40MHZ:
1514 case CHANNEL_A:
1515 /* Both sample_freq and chip_freq are 40MHz */
1516 spur_delta_phase = (spur_offset << 17) / 25;
1517 spur_freq_sigma_delta = (spur_delta_phase >> 10);
1518 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
1519 break;
1520 case CHANNEL_G:
1521 /* sample_freq -> 40MHz chip_freq -> 44MHz
1522 * (for b compatibility) */
1523 spur_freq_sigma_delta = (spur_offset << 8) / 55;
1524 spur_delta_phase = (spur_offset << 17) / 25;
1525 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
1526 break;
1527 case CHANNEL_T:
1528 case CHANNEL_TG:
1529 /* Both sample_freq and chip_freq are 80MHz */ 1654 /* Both sample_freq and chip_freq are 80MHz */
1530 spur_delta_phase = (spur_offset << 16) / 25; 1655 spur_delta_phase = (spur_offset << 16) / 25;
1531 spur_freq_sigma_delta = (spur_delta_phase >> 10); 1656 spur_freq_sigma_delta = (spur_delta_phase >> 10);
1532 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_TURBO_100Hz; 1657 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz * 2;
1533 break; 1658 break;
1659 case AR5K_BWMODE_10MHZ:
1660 /* Both sample_freq and chip_freq are 20MHz (?) */
1661 spur_delta_phase = (spur_offset << 18) / 25;
1662 spur_freq_sigma_delta = (spur_delta_phase >> 10);
1663 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
1664 case AR5K_BWMODE_5MHZ:
1665 /* Both sample_freq and chip_freq are 10MHz (?) */
1666 spur_delta_phase = (spur_offset << 19) / 25;
1667 spur_freq_sigma_delta = (spur_delta_phase >> 10);
1668 symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
1534 default: 1669 default:
1535 return; 1670 if (channel->hw_value == CHANNEL_A) {
1671 /* Both sample_freq and chip_freq are 40MHz */
1672 spur_delta_phase = (spur_offset << 17) / 25;
1673 spur_freq_sigma_delta =
1674 (spur_delta_phase >> 10);
1675 symbol_width =
1676 AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
1677 } else {
1678 /* sample_freq -> 40MHz chip_freq -> 44MHz
1679 * (for b compatibility) */
1680 spur_delta_phase = (spur_offset << 17) / 25;
1681 spur_freq_sigma_delta =
1682 (spur_offset << 8) / 55;
1683 symbol_width =
1684 AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
1685 }
1686 break;
1536 } 1687 }
1537 1688
1538 /* Calculate pilot and magnitude masks */ 1689 /* Calculate pilot and magnitude masks */
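A minimal, self-contained sketch of the per-bandwidth spur delta computation introduced above, using an explicit break in every case (as written, the 10 MHz and 5 MHz cases in the hunk appear to fall through into the next branch). The spur_offset value is made up, only the A-style clock cases are shown, and the 2 GHz path (44 MHz chip clock) derives its sigma differently in the real code.

/* Per-bandwidth spur deltas as in the hunk above, with explicit breaks;
 * spur_offset is an example value for illustration only. */
#include <stdio.h>

enum bwmode { BW_DEFAULT, BW_40MHZ, BW_10MHZ, BW_5MHZ };

int main(void)
{
	unsigned int spur_offset = 330;		/* example only */
	unsigned int delta_phase, sigma_delta;
	enum bwmode bw = BW_10MHZ;

	switch (bw) {
	case BW_40MHZ:		/* 80 MHz sample/chip clocks */
		delta_phase = (spur_offset << 16) / 25;
		break;
	case BW_10MHZ:		/* 20 MHz sample/chip clocks */
		delta_phase = (spur_offset << 18) / 25;
		break;
	case BW_5MHZ:		/* 10 MHz sample/chip clocks */
		delta_phase = (spur_offset << 19) / 25;
		break;
	default:		/* base 20 MHz, 40 MHz clocks (11a case) */
		delta_phase = (spur_offset << 17) / 25;
		break;
	}
	sigma_delta = delta_phase >> 10;

	printf("delta_phase %u sigma_delta %u\n", delta_phase, sigma_delta);
	return 0;
}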
@@ -1672,63 +1823,6 @@ ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
1672 } 1823 }
1673} 1824}
1674 1825
1675/********************\
1676 Misc PHY functions
1677\********************/
1678
1679int ath5k_hw_phy_disable(struct ath5k_hw *ah)
1680{
1681 /*Just a try M.F.*/
1682 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
1683
1684 return 0;
1685}
1686
1687/*
1688 * Get the PHY Chip revision
1689 */
1690u16 ath5k_hw_radio_revision(struct ath5k_hw *ah, unsigned int chan)
1691{
1692 unsigned int i;
1693 u32 srev;
1694 u16 ret;
1695
1696 /*
1697 * Set the radio chip access register
1698 */
1699 switch (chan) {
1700 case CHANNEL_2GHZ:
1701 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
1702 break;
1703 case CHANNEL_5GHZ:
1704 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
1705 break;
1706 default:
1707 return 0;
1708 }
1709
1710 mdelay(2);
1711
1712 /* ...wait until PHY is ready and read the selected radio revision */
1713 ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
1714
1715 for (i = 0; i < 8; i++)
1716 ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
1717
1718 if (ah->ah_version == AR5K_AR5210) {
1719 srev = ath5k_hw_reg_read(ah, AR5K_PHY(256) >> 28) & 0xf;
1720 ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
1721 } else {
1722 srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
1723 ret = (u16)ath5k_hw_bitswap(((srev & 0xf0) >> 4) |
1724 ((srev & 0x0f) << 4), 8);
1725 }
1726
1727 /* Reset to the 5GHz mode */
1728 ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
1729
1730 return ret;
1731}
1732 1826
1733/*****************\ 1827/*****************\
1734* Antenna control * 1828* Antenna control *
@@ -1822,7 +1916,8 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
1822 struct ieee80211_channel *channel = ah->ah_current_channel; 1916 struct ieee80211_channel *channel = ah->ah_current_channel;
1823 bool use_def_for_tx, update_def_on_tx, use_def_for_rts, fast_div; 1917 bool use_def_for_tx, update_def_on_tx, use_def_for_rts, fast_div;
1824 bool use_def_for_sg; 1918 bool use_def_for_sg;
1825 u8 def_ant, tx_ant, ee_mode; 1919 int ee_mode;
1920 u8 def_ant, tx_ant;
1826 u32 sta_id1 = 0; 1921 u32 sta_id1 = 0;
1827 1922
1828 /* if channel is not initialized yet we can't set the antennas 1923 /* if channel is not initialized yet we can't set the antennas
@@ -1834,20 +1929,8 @@ ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
1834 1929
1835 def_ant = ah->ah_def_ant; 1930 def_ant = ah->ah_def_ant;
1836 1931
1837 switch (channel->hw_value & CHANNEL_MODES) { 1932 ee_mode = ath5k_eeprom_mode_from_channel(channel);
1838 case CHANNEL_A: 1933 if (ee_mode < 0) {
1839 case CHANNEL_T:
1840 case CHANNEL_XR:
1841 ee_mode = AR5K_EEPROM_MODE_11A;
1842 break;
1843 case CHANNEL_G:
1844 case CHANNEL_TG:
1845 ee_mode = AR5K_EEPROM_MODE_11G;
1846 break;
1847 case CHANNEL_B:
1848 ee_mode = AR5K_EEPROM_MODE_11B;
1849 break;
1850 default:
1851 ATH5K_ERR(ah->ah_sc, 1934 ATH5K_ERR(ah->ah_sc,
1852 "invalid channel: %d\n", channel->center_freq); 1935 "invalid channel: %d\n", channel->center_freq);
1853 return; 1936 return;
@@ -2275,20 +2358,20 @@ ath5k_get_max_ctl_power(struct ath5k_hw *ah,
2275 2358
2276 switch (channel->hw_value & CHANNEL_MODES) { 2359 switch (channel->hw_value & CHANNEL_MODES) {
2277 case CHANNEL_A: 2360 case CHANNEL_A:
2278 ctl_mode |= AR5K_CTL_11A; 2361 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
2362 ctl_mode |= AR5K_CTL_TURBO;
2363 else
2364 ctl_mode |= AR5K_CTL_11A;
2279 break; 2365 break;
2280 case CHANNEL_G: 2366 case CHANNEL_G:
2281 ctl_mode |= AR5K_CTL_11G; 2367 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
2368 ctl_mode |= AR5K_CTL_TURBOG;
2369 else
2370 ctl_mode |= AR5K_CTL_11G;
2282 break; 2371 break;
2283 case CHANNEL_B: 2372 case CHANNEL_B:
2284 ctl_mode |= AR5K_CTL_11B; 2373 ctl_mode |= AR5K_CTL_11B;
2285 break; 2374 break;
2286 case CHANNEL_T:
2287 ctl_mode |= AR5K_CTL_TURBO;
2288 break;
2289 case CHANNEL_TG:
2290 ctl_mode |= AR5K_CTL_TURBOG;
2291 break;
2292 case CHANNEL_XR: 2375 case CHANNEL_XR:
2293 /* Fall through */ 2376 /* Fall through */
2294 default: 2377 default:
@@ -2482,7 +2565,7 @@ ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
2482 2565
2483/* Write PCDAC values on hw */ 2566/* Write PCDAC values on hw */
2484static void 2567static void
2485ath5k_setup_pcdac_table(struct ath5k_hw *ah) 2568ath5k_write_pcdac_table(struct ath5k_hw *ah)
2486{ 2569{
2487 u8 *pcdac_out = ah->ah_txpower.txp_pd_table; 2570 u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
2488 int i; 2571 int i;
@@ -2631,10 +2714,12 @@ ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
2631 2714
2632/* Write PDADC values on hw */ 2715/* Write PDADC values on hw */
2633static void 2716static void
2634ath5k_setup_pwr_to_pdadc_table(struct ath5k_hw *ah, 2717ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
2635 u8 pdcurves, u8 *pdg_to_idx)
2636{ 2718{
2719 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
2637 u8 *pdadc_out = ah->ah_txpower.txp_pd_table; 2720 u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
2721 u8 *pdg_to_idx = ee->ee_pdc_to_idx[ee_mode];
2722 u8 pdcurves = ee->ee_pd_gains[ee_mode];
2638 u32 reg; 2723 u32 reg;
2639 u8 i; 2724 u8 i;
2640 2725
@@ -2844,8 +2929,7 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
2844 (s16) pcinfo_R->freq, 2929 (s16) pcinfo_R->freq,
2845 pcinfo_L->max_pwr, pcinfo_R->max_pwr); 2930 pcinfo_L->max_pwr, pcinfo_R->max_pwr);
2846 2931
2847 /* We are ready to go, fill PCDAC/PDADC 2932 /* Fill PCDAC/PDADC table */
2848 * table and write settings on hardware */
2849 switch (type) { 2933 switch (type) {
2850 case AR5K_PWRTABLE_LINEAR_PCDAC: 2934 case AR5K_PWRTABLE_LINEAR_PCDAC:
2851 /* For RF5112 we can have one or two curves 2935 /* For RF5112 we can have one or two curves
@@ -2858,9 +2942,6 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
2858 * match max power value with max 2942 * match max power value with max
2859 * table index */ 2943 * table index */
2860 ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2); 2944 ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2);
2861
2862 /* Write settings on hw */
2863 ath5k_setup_pcdac_table(ah);
2864 break; 2945 break;
2865 case AR5K_PWRTABLE_PWR_TO_PCDAC: 2946 case AR5K_PWRTABLE_PWR_TO_PCDAC:
2866 /* We are done for RF5111 since it has only 2947 /* We are done for RF5111 since it has only
@@ -2870,9 +2951,6 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
2870 /* No rate powertable adjustment for RF5111 */ 2951 /* No rate powertable adjustment for RF5111 */
2871 ah->ah_txpower.txp_min_idx = 0; 2952 ah->ah_txpower.txp_min_idx = 0;
2872 ah->ah_txpower.txp_offset = 0; 2953 ah->ah_txpower.txp_offset = 0;
2873
2874 /* Write settings on hw */
2875 ath5k_setup_pcdac_table(ah);
2876 break; 2954 break;
2877 case AR5K_PWRTABLE_PWR_TO_PDADC: 2955 case AR5K_PWRTABLE_PWR_TO_PDADC:
2878 /* Set PDADC boundaries and fill 2956 /* Set PDADC boundaries and fill
@@ -2880,9 +2958,6 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
2880 ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max, 2958 ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max,
2881 ee->ee_pd_gains[ee_mode]); 2959 ee->ee_pd_gains[ee_mode]);
2882 2960
2883 /* Write settings on hw */
2884 ath5k_setup_pwr_to_pdadc_table(ah, pdg, pdg_curve_to_idx);
2885
2886 /* Set txp.offset, note that table_min 2961 /* Set txp.offset, note that table_min
2887 * can be negative */ 2962 * can be negative */
2888 ah->ah_txpower.txp_offset = table_min[0]; 2963 ah->ah_txpower.txp_offset = table_min[0];
@@ -2891,9 +2966,20 @@ ath5k_setup_channel_powertable(struct ath5k_hw *ah,
2891 return -EINVAL; 2966 return -EINVAL;
2892 } 2967 }
2893 2968
2969 ah->ah_txpower.txp_setup = true;
2970
2894 return 0; 2971 return 0;
2895} 2972}
2896 2973
2974/* Write power table for current channel to hw */
2975static void
2976ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
2977{
2978 if (type == AR5K_PWRTABLE_PWR_TO_PDADC)
2979 ath5k_write_pwr_to_pdadc_table(ah, ee_mode);
2980 else
2981 ath5k_write_pcdac_table(ah);
2982}
2897 2983
2898/* 2984/*
2899 * Per-rate tx power setting 2985 * Per-rate tx power setting
@@ -2982,7 +3068,7 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
2982 3068
2983 /* Min/max in 0.25dB units */ 3069 /* Min/max in 0.25dB units */
2984 ah->ah_txpower.txp_min_pwr = 2 * rates[7]; 3070 ah->ah_txpower.txp_min_pwr = 2 * rates[7];
2985 ah->ah_txpower.txp_max_pwr = 2 * rates[0]; 3071 ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
2986 ah->ah_txpower.txp_ofdm = rates[7]; 3072 ah->ah_txpower.txp_ofdm = rates[7];
2987} 3073}
2988 3074
@@ -2990,11 +3076,13 @@ ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
2990/* 3076/*
2991 * Set transmission power 3077 * Set transmission power
2992 */ 3078 */
2993int 3079static int
2994ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel, 3080ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
2995 u8 ee_mode, u8 txpower) 3081 u8 txpower)
2996{ 3082{
2997 struct ath5k_rate_pcal_info rate_info; 3083 struct ath5k_rate_pcal_info rate_info;
3084 struct ieee80211_channel *curr_channel = ah->ah_current_channel;
3085 int ee_mode;
2998 u8 type; 3086 u8 type;
2999 int ret; 3087 int ret;
3000 3088
@@ -3003,14 +3091,18 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3003 return -EINVAL; 3091 return -EINVAL;
3004 } 3092 }
3005 3093
3006 /* Reset TX power values */ 3094 ee_mode = ath5k_eeprom_mode_from_channel(channel);
3007 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower)); 3095 if (ee_mode < 0) {
3008 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER; 3096 ATH5K_ERR(ah->ah_sc,
3009 ah->ah_txpower.txp_min_pwr = 0; 3097 "invalid channel: %d\n", channel->center_freq);
3010 ah->ah_txpower.txp_max_pwr = AR5K_TUNE_MAX_TXPOWER; 3098 return -EINVAL;
3099 }
3011 3100
3012 /* Initialize TX power table */ 3101 /* Initialize TX power table */
3013 switch (ah->ah_radio) { 3102 switch (ah->ah_radio) {
3103 case AR5K_RF5110:
3104 /* TODO */
3105 return 0;
3014 case AR5K_RF5111: 3106 case AR5K_RF5111:
3015 type = AR5K_PWRTABLE_PWR_TO_PCDAC; 3107 type = AR5K_PWRTABLE_PWR_TO_PCDAC;
3016 break; 3108 break;
@@ -3028,10 +3120,26 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3028 return -EINVAL; 3120 return -EINVAL;
3029 } 3121 }
3030 3122
3031 /* FIXME: Only on channel/mode change */ 3123 /*
3032 ret = ath5k_setup_channel_powertable(ah, channel, ee_mode, type); 3124 * If we don't change channel/mode skip tx powertable calculation
3033 if (ret) 3125 * and use the cached one.
3034 return ret; 3126 */
3127 if (!ah->ah_txpower.txp_setup ||
3128 (channel->hw_value != curr_channel->hw_value) ||
3129 (channel->center_freq != curr_channel->center_freq)) {
3130 /* Reset TX power values */
3131 memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
3132 ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
3133
3134 /* Calculate the powertable */
3135 ret = ath5k_setup_channel_powertable(ah, channel,
3136 ee_mode, type);
3137 if (ret)
3138 return ret;
3139 }
3140
3141 /* Write table on hw */
3142 ath5k_write_channel_powertable(ah, ee_mode, type);
3035 3143
3036 /* Limit max power if we have a CTL available */ 3144 /* Limit max power if we have a CTL available */
3037 ath5k_get_max_ctl_power(ah, channel); 3145 ath5k_get_max_ctl_power(ah, channel);
@@ -3086,31 +3194,219 @@ ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3086 3194
3087int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower) 3195int ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
3088{ 3196{
3089 /*Just a try M.F.*/ 3197 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER,
3090 struct ieee80211_channel *channel = ah->ah_current_channel; 3198 "changing txpower to %d\n", txpower);
3091 u8 ee_mode;
3092 3199
3093 switch (channel->hw_value & CHANNEL_MODES) { 3200 return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
3094 case CHANNEL_A: 3201}
3095 case CHANNEL_T: 3202
3096 case CHANNEL_XR: 3203/*************\
3097 ee_mode = AR5K_EEPROM_MODE_11A; 3204 Init function
3098 break; 3205\*************/
3099 case CHANNEL_G: 3206
3100 case CHANNEL_TG: 3207int ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
3101 ee_mode = AR5K_EEPROM_MODE_11G; 3208 u8 mode, bool fast)
3102 break; 3209{
3103 case CHANNEL_B: 3210 struct ieee80211_channel *curr_channel;
3104 ee_mode = AR5K_EEPROM_MODE_11B; 3211 int ret, i;
3105 break; 3212 u32 phy_tst1;
3106 default: 3213 ret = 0;
3107 ATH5K_ERR(ah->ah_sc, 3214
3108 "invalid channel: %d\n", channel->center_freq); 3215 /*
3216 * Sanity check for fast flag
3217 * Don't try fast channel change when changing modulation
3218 * mode/band. We check for chip compatibility on
3219 * ath5k_hw_reset.
3220 */
3221 curr_channel = ah->ah_current_channel;
3222 if (fast && (channel->hw_value != curr_channel->hw_value))
3109 return -EINVAL; 3223 return -EINVAL;
3224
3225 /*
3226 * On fast channel change we only set the synth parameters
3227 * while PHY is running, enable calibration and skip the rest.
3228 */
3229 if (fast) {
3230 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
3231 AR5K_PHY_RFBUS_REQ_REQUEST);
3232 for (i = 0; i < 100; i++) {
3233 if (ath5k_hw_reg_read(ah, AR5K_PHY_RFBUS_GRANT))
3234 break;
3235 udelay(5);
3236 }
3237 /* Failed */
3238 if (i >= 100)
3239 return -EIO;
3110 } 3240 }
3111 3241
3112 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_TXPOWER, 3242 /*
3113 "changing txpower to %d\n", txpower); 3243 * Set TX power
3244 *
3245 * Note: We need to do that before we set
3246 * RF buffer settings on 5211/5212+ so that we
3247 * properly set curve indices.
3248 */
3249 ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_cur_pwr ?
3250 ah->ah_txpower.txp_cur_pwr / 2 : AR5K_TUNE_MAX_TXPOWER);
3251 if (ret)
3252 return ret;
3253
3254 /*
3255 * For 5210 we do all initialization using
3256 * initvals, so we don't have to modify
3257 * any settings (5210 also only supports
3258 * a/aturbo modes)
3259 */
3260 if ((ah->ah_version != AR5K_AR5210) && !fast) {
3261
3262 /*
3263 * Write initial RF gain settings
3264 * This should work for both 5111/5112
3265 */
3266 ret = ath5k_hw_rfgain_init(ah, channel->band);
3267 if (ret)
3268 return ret;
3269
3270 mdelay(1);
3271
3272 /*
3273 * Write RF buffer
3274 */
3275 ret = ath5k_hw_rfregs_init(ah, channel, mode);
3276 if (ret)
3277 return ret;
3278
3279 /* Write OFDM timings on 5212*/
3280 if (ah->ah_version == AR5K_AR5212 &&
3281 channel->hw_value & CHANNEL_OFDM) {
3282
3283 ret = ath5k_hw_write_ofdm_timings(ah, channel);
3284 if (ret)
3285 return ret;
3286
3287 /* Spur info is available only from EEPROM versions
3288 * greater than 5.3, but the EEPROM routines will use
3289 * static values for older versions */
3290 if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
3291 ath5k_hw_set_spur_mitigation_filter(ah,
3292 channel);
3293 }
3294
3295 /*Enable/disable 802.11b mode on 5111
3296 (enable 2111 frequency converter + CCK)*/
3297 if (ah->ah_radio == AR5K_RF5111) {
3298 if (mode == AR5K_MODE_11B)
3299 AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
3300 AR5K_TXCFG_B_MODE);
3301 else
3302 AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
3303 AR5K_TXCFG_B_MODE);
3304 }
3305
3306 } else if (ah->ah_version == AR5K_AR5210) {
3307 mdelay(1);
3308 /* Disable phy and wait */
3309 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
3310 mdelay(1);
3311 }
3312
3313 /* Set channel on PHY */
3314 ret = ath5k_hw_channel(ah, channel);
3315 if (ret)
3316 return ret;
3317
3318 /*
3319 * Enable the PHY and wait until completion
3320 * This includes BaseBand and Synthesizer
3321 * activation.
3322 */
3323 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
3324
3325 /*
3326 * On 5211+ read activation -> rx delay
3327 * and use it.
3328 */
3329 if (ah->ah_version != AR5K_AR5210) {
3330 u32 delay;
3331 delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
3332 AR5K_PHY_RX_DELAY_M;
3333 delay = (channel->hw_value & CHANNEL_CCK) ?
3334 ((delay << 2) / 22) : (delay / 10);
3335 if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
3336 delay = delay << 1;
3337 if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
3338 delay = delay << 2;
3339 /* XXX: /2 on turbo ? Let's be safe
3340 * for now */
3341 udelay(100 + delay);
3342 } else {
3343 mdelay(1);
3344 }
3345
3346 if (fast)
3347 /*
3348 * Release RF Bus grant
3349 */
3350 AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
3351 AR5K_PHY_RFBUS_REQ_REQUEST);
3352 else {
3353 /*
3354 * Perform ADC test to see if baseband is ready
3355 * Set tx hold and check adc test register
3356 */
3357 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
3358 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
3359 for (i = 0; i <= 20; i++) {
3360 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
3361 break;
3362 udelay(200);
3363 }
3364 ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
3365 }
3366
3367 /*
3368 * Start automatic gain control calibration
3369 *
3370 * During AGC calibration RX path is re-routed to
3371 * a power detector so we don't receive anything.
3372 *
3373 * This method is used to calibrate some static offsets
3374 * used together with on-the-fly I/Q calibration (the
3375 * one performed via ath5k_hw_phy_calibrate), which doesn't
3376 * interrupt rx path.
3377 *
3378 * While rx path is re-routed to the power detector we also
3379 * start a noise floor calibration to measure the
3380 * card's noise floor (the noise we measure when we are not
3381 * transmitting or receiving anything).
3382 *
3383 * If we are in a noisy environment, AGC calibration may time
3384 * out and/or noise floor calibration might time out.
3385 */
3386 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
3387 AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
3388
3389 /* At the same time start I/Q calibration for QAM constellation
3390 * -no need for CCK- */
3391 ah->ah_calibration = false;
3392 if (!(mode == AR5K_MODE_11B)) {
3393 ah->ah_calibration = true;
3394 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
3395 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
3396 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
3397 AR5K_PHY_IQ_RUN);
3398 }
3399
3400 /* Wait for gain calibration to finish (we check for I/Q calibration
3401 * during ath5k_phy_calibrate) */
3402 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
3403 AR5K_PHY_AGCCTL_CAL, 0, false)) {
3404 ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
3405 channel->center_freq);
3406 }
3407
3408 /* Restore antenna mode */
3409 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
3114 3410
3115 return ath5k_hw_txpower(ah, channel, ee_mode, txpower); 3411 return ret;
3116} 3412}
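The fast-channel-change path in ath5k_hw_phy_init() above requests the RF bus and polls AR5K_PHY_RFBUS_GRANT up to 100 times with a 5 us delay before giving up with -EIO. Below is a self-contained sketch of that bounded-poll pattern; read_grant() is a stand-in that fakes the register read so the example runs on its own.

/* Sketch of the bounded register poll used for the RF bus grant above.
 * read_grant() fakes the hardware read; the driver reads
 * AR5K_PHY_RFBUS_GRANT and does udelay(5) between tries. */
#include <stdbool.h>
#include <stdio.h>

static int polls;

static unsigned int read_grant(void)
{
	/* pretend the grant bit shows up on the third read */
	return ++polls >= 3 ? 0x1 : 0x0;
}

static bool wait_for_grant(int tries)
{
	int i;

	for (i = 0; i < tries; i++) {
		if (read_grant() & 0x1)
			return true;	/* bus granted */
		/* udelay(5) would go here */
	}
	return false;			/* timed out -> caller returns -EIO */
}

int main(void)
{
	bool ok = wait_for_grant(100);

	printf("granted: %d after %d polls\n", ok, polls);
	return 0;
}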
diff --git a/drivers/net/wireless/ath/ath5k/qcu.c b/drivers/net/wireless/ath/ath5k/qcu.c
index 84c717ded1c5..2c9c9e793d4e 100644
--- a/drivers/net/wireless/ath/ath5k/qcu.c
+++ b/drivers/net/wireless/ath/ath5k/qcu.c
@@ -25,14 +25,52 @@ Queue Control Unit, DFS Control Unit Functions
25#include "debug.h" 25#include "debug.h"
26#include "base.h" 26#include "base.h"
27 27
28
29/******************\
30* Helper functions *
31\******************/
32
28/* 33/*
29 * Get properties for a transmit queue 34 * Get number of pending frames
35 * for a specific queue [5211+]
30 */ 36 */
31int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue, 37u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
32 struct ath5k_txq_info *queue_info)
33{ 38{
34 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info)); 39 u32 pending;
35 return 0; 40 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
41
42 /* Return if queue is declared inactive */
43 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
44 return false;
45
46 /* XXX: How about AR5K_CFG_TXCNT ? */
47 if (ah->ah_version == AR5K_AR5210)
48 return false;
49
50 pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
51 pending &= AR5K_QCU_STS_FRMPENDCNT;
52
53 /* It's possible to have no frames pending even if TXE
54 * is set. To indicate that q has not stopped return
55 * true */
56 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
57 return true;
58
59 return pending;
60}
61
62/*
63 * Set a transmit queue inactive
64 */
65void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
66{
67 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
68 return;
69
70 /* This queue will be skipped in further operations */
71 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
72 /*For SIMR setup*/
73 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
36} 74}
37 75
38/* 76/*
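A self-contained model of the return convention of the relocated ath5k_hw_num_tx_pending() above: 0 for an inactive queue (or on AR5210), the FRMPENDCNT value otherwise, and 1 when the count reads zero but TXE still shows the queue enabled. The helper name and inputs here are hypothetical stand-ins for the register reads the driver performs.

/* Model of the decision tree in ath5k_hw_num_tx_pending() above. */
#include <stdbool.h>
#include <stdio.h>

static unsigned int num_tx_pending_model(bool inactive,
					  unsigned int frmpendcnt, bool txe)
{
	if (inactive)
		return 0;
	if (!frmpendcnt && txe)
		return 1;	/* queue has not stopped yet */
	return frmpendcnt;
}

int main(void)
{
	printf("%u %u %u\n",
	       num_tx_pending_model(true, 5, true),	/* 0 */
	       num_tx_pending_model(false, 0, true),	/* 1 */
	       num_tx_pending_model(false, 3, true));	/* 3 */
	return 0;
}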
@@ -50,6 +88,16 @@ static u16 ath5k_cw_validate(u16 cw_req)
50} 88}
51 89
52/* 90/*
91 * Get properties for a transmit queue
92 */
93int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
94 struct ath5k_txq_info *queue_info)
95{
96 memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
97 return 0;
98}
99
100/*
53 * Set properties for a transmit queue 101 * Set properties for a transmit queue
54 */ 102 */
55int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue, 103int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
@@ -104,8 +152,8 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
104 /* 152 /*
105 * Get queue by type 153 * Get queue by type
106 */ 154 */
107 /*5210 only has 2 queues*/ 155 /* 5210 only has 2 queues */
108 if (ah->ah_version == AR5K_AR5210) { 156 if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
109 switch (queue_type) { 157 switch (queue_type) {
110 case AR5K_TX_QUEUE_DATA: 158 case AR5K_TX_QUEUE_DATA:
111 queue = AR5K_TX_QUEUE_ID_NOQCU_DATA; 159 queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
@@ -172,113 +220,18 @@ int ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
172 return queue; 220 return queue;
173} 221}
174 222
175/*
176 * Get number of pending frames
177 * for a specific queue [5211+]
178 */
179u32 ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
180{
181 u32 pending;
182 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
183
184 /* Return if queue is declared inactive */
185 if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
186 return false;
187
188 /* XXX: How about AR5K_CFG_TXCNT ? */
189 if (ah->ah_version == AR5K_AR5210)
190 return false;
191
192 pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
193 pending &= AR5K_QCU_STS_FRMPENDCNT;
194
195 /* It's possible to have no frames pending even if TXE
196 * is set. To indicate that q has not stopped return
197 * true */
198 if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
199 return true;
200
201 return pending;
202}
203
204/*
205 * Set a transmit queue inactive
206 */
207void ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
208{
209 if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
210 return;
211 223
212 /* This queue will be skipped in further operations */ 224/*******************************\
213 ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE; 225* Single QCU/DCU initialization *
214 /*For SIMR setup*/ 226\*******************************/
215 AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
216}
217 227
218/* 228/*
219 * Set DFS properties for a transmit queue on DCU 229 * Set tx retry limits on DCU
220 */ 230 */
221int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue) 231static void ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
232 unsigned int queue)
222{ 233{
223 u32 retry_lg, retry_sh; 234 u32 retry_lg, retry_sh;
224 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
225
226 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
227
228 tq = &ah->ah_txq[queue];
229
230 if (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)
231 return 0;
232
233 if (ah->ah_version == AR5K_AR5210) {
234 /* Only handle data queues, others will be ignored */
235 if (tq->tqi_type != AR5K_TX_QUEUE_DATA)
236 return 0;
237
238 /* Set Slot time */
239 ath5k_hw_reg_write(ah, ah->ah_turbo ?
240 AR5K_INIT_SLOT_TIME_TURBO : AR5K_INIT_SLOT_TIME,
241 AR5K_SLOT_TIME);
242 /* Set ACK_CTS timeout */
243 ath5k_hw_reg_write(ah, ah->ah_turbo ?
244 AR5K_INIT_ACK_CTS_TIMEOUT_TURBO :
245 AR5K_INIT_ACK_CTS_TIMEOUT, AR5K_SLOT_TIME);
246 /* Set Transmit Latency */
247 ath5k_hw_reg_write(ah, ah->ah_turbo ?
248 AR5K_INIT_TRANSMIT_LATENCY_TURBO :
249 AR5K_INIT_TRANSMIT_LATENCY, AR5K_USEC_5210);
250
251 /* Set IFS0 */
252 if (ah->ah_turbo) {
253 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS_TURBO +
254 tq->tqi_aifs * AR5K_INIT_SLOT_TIME_TURBO) <<
255 AR5K_IFS0_DIFS_S) | AR5K_INIT_SIFS_TURBO,
256 AR5K_IFS0);
257 } else {
258 ath5k_hw_reg_write(ah, ((AR5K_INIT_SIFS +
259 tq->tqi_aifs * AR5K_INIT_SLOT_TIME) <<
260 AR5K_IFS0_DIFS_S) |
261 AR5K_INIT_SIFS, AR5K_IFS0);
262 }
263
264 /* Set IFS1 */
265 ath5k_hw_reg_write(ah, ah->ah_turbo ?
266 AR5K_INIT_PROTO_TIME_CNTRL_TURBO :
267 AR5K_INIT_PROTO_TIME_CNTRL, AR5K_IFS1);
268 /* Set AR5K_PHY_SETTLING */
269 ath5k_hw_reg_write(ah, ah->ah_turbo ?
270 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
271 | 0x38 :
272 (ath5k_hw_reg_read(ah, AR5K_PHY_SETTLING) & ~0x7F)
273 | 0x1C,
274 AR5K_PHY_SETTLING);
275 /* Set Frame Control Register */
276 ath5k_hw_reg_write(ah, ah->ah_turbo ?
277 (AR5K_PHY_FRAME_CTL_INI | AR5K_PHY_TURBO_MODE |
278 AR5K_PHY_TURBO_SHORT | 0x2020) :
279 (AR5K_PHY_FRAME_CTL_INI | 0x1020),
280 AR5K_PHY_FRAME_CTL_5210);
281 }
282 235
283 /* 236 /*
284 * Calculate and set retry limits 237 * Calculate and set retry limits
@@ -293,8 +246,13 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
293 retry_sh = AR5K_INIT_SH_RETRY; 246 retry_sh = AR5K_INIT_SH_RETRY;
294 } 247 }
295 248
296 /*No QCU/DCU [5210]*/ 249 /* Single data queue on AR5210 */
297 if (ah->ah_version == AR5K_AR5210) { 250 if (ah->ah_version == AR5K_AR5210) {
251 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
252
253 if (queue > 0)
254 return;
255
298 ath5k_hw_reg_write(ah, 256 ath5k_hw_reg_write(ah,
299 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S) 257 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
300 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY, 258 | AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
@@ -304,8 +262,8 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
304 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY) 262 | AR5K_REG_SM(retry_lg, AR5K_NODCU_RETRY_LMT_LG_RETRY)
305 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY), 263 | AR5K_REG_SM(retry_sh, AR5K_NODCU_RETRY_LMT_SH_RETRY),
306 AR5K_NODCU_RETRY_LMT); 264 AR5K_NODCU_RETRY_LMT);
265 /* DCU on AR5211+ */
307 } else { 266 } else {
308 /*QCU/DCU [5211+]*/
309 ath5k_hw_reg_write(ah, 267 ath5k_hw_reg_write(ah,
310 AR5K_REG_SM(AR5K_INIT_SLG_RETRY, 268 AR5K_REG_SM(AR5K_INIT_SLG_RETRY,
311 AR5K_DCU_RETRY_LMT_SLG_RETRY) | 269 AR5K_DCU_RETRY_LMT_SLG_RETRY) |
@@ -314,219 +272,393 @@ int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
314 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) | 272 AR5K_REG_SM(retry_lg, AR5K_DCU_RETRY_LMT_LG_RETRY) |
315 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY), 273 AR5K_REG_SM(retry_sh, AR5K_DCU_RETRY_LMT_SH_RETRY),
316 AR5K_QUEUE_DFS_RETRY_LIMIT(queue)); 274 AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
275 }
276 return;
277}
317 278
318 /*===Rest is also for QCU/DCU only [5211+]===*/ 279/**
280 * ath5k_hw_reset_tx_queue - Initialize a single hw queue
281 *
282 * @ah The &struct ath5k_hw
283 * @queue The hw queue number
284 *
285 * Set DFS properties for the given transmit queue on DCU
286 * and configures all queue-specific parameters.
287 */
288int ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
289{
290 struct ath5k_txq_info *tq = &ah->ah_txq[queue];
319 291
320 /* 292 AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
321 * Set contention window (cw_min/cw_max) 293
322 * and arbitrated interframe space (aifs)... 294 tq = &ah->ah_txq[queue];
323 */ 295
324 ath5k_hw_reg_write(ah, 296 /* Skip if queue inactive or if we are on AR5210
325 AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) | 297 * that doesn't have QCU/DCU */
326 AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) | 298 if ((ah->ah_version == AR5K_AR5210) ||
327 AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS), 299 (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
328 AR5K_QUEUE_DFS_LOCAL_IFS(queue)); 300 return 0;
329 301
330 /* 302 /*
331 * Set misc registers 303 * Set contention window (cw_min/cw_max)
332 */ 304 * and arbitrated interframe space (aifs)...
333 /* Enable DCU early termination for this queue */ 305 */
334 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 306 ath5k_hw_reg_write(ah,
335 AR5K_QCU_MISC_DCU_EARLY); 307 AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
308 AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
309 AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
310 AR5K_QUEUE_DFS_LOCAL_IFS(queue));
311
312 /*
313 * Set tx retry limits for this queue
314 */
315 ath5k_hw_set_tx_retry_limits(ah, queue);
316
317
318 /*
319 * Set misc registers
320 */
321
322 /* Enable DCU to wait for next fragment from QCU */
323 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
324 AR5K_DCU_MISC_FRAG_WAIT);
336 325
337 /* Enable DCU to wait for next fragment from QCU */ 326 /* On Maui and Spirit use the global seqnum on DCU */
327 if (ah->ah_mac_version < AR5K_SREV_AR5211)
338 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), 328 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
339 AR5K_DCU_MISC_FRAG_WAIT); 329 AR5K_DCU_MISC_SEQNUM_CTL);
340 330
341 /* On Maui and Spirit use the global seqnum on DCU */ 331 /* Constant bit rate period */
342 if (ah->ah_mac_version < AR5K_SREV_AR5211) 332 if (tq->tqi_cbr_period) {
343 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), 333 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
344 AR5K_DCU_MISC_SEQNUM_CTL); 334 AR5K_QCU_CBRCFG_INTVAL) |
345 335 AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
346 if (tq->tqi_cbr_period) { 336 AR5K_QCU_CBRCFG_ORN_THRES),
347 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period, 337 AR5K_QUEUE_CBRCFG(queue));
348 AR5K_QCU_CBRCFG_INTVAL) | 338
349 AR5K_REG_SM(tq->tqi_cbr_overflow_limit, 339 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
350 AR5K_QCU_CBRCFG_ORN_THRES), 340 AR5K_QCU_MISC_FRSHED_CBR);
351 AR5K_QUEUE_CBRCFG(queue)); 341
342 if (tq->tqi_cbr_overflow_limit)
352 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 343 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
353 AR5K_QCU_MISC_FRSHED_CBR);
354 if (tq->tqi_cbr_overflow_limit)
355 AR5K_REG_ENABLE_BITS(ah,
356 AR5K_QUEUE_MISC(queue),
357 AR5K_QCU_MISC_CBR_THRES_ENABLE); 344 AR5K_QCU_MISC_CBR_THRES_ENABLE);
358 } 345 }
359 346
360 if (tq->tqi_ready_time && 347 /* Ready time interval */
361 (tq->tqi_type != AR5K_TX_QUEUE_CAB)) 348 if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
362 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time, 349 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
363 AR5K_QCU_RDYTIMECFG_INTVAL) | 350 AR5K_QCU_RDYTIMECFG_INTVAL) |
364 AR5K_QCU_RDYTIMECFG_ENABLE, 351 AR5K_QCU_RDYTIMECFG_ENABLE,
365 AR5K_QUEUE_RDYTIMECFG(queue)); 352 AR5K_QUEUE_RDYTIMECFG(queue));
366 353
367 if (tq->tqi_burst_time) { 354 if (tq->tqi_burst_time) {
368 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time, 355 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
369 AR5K_DCU_CHAN_TIME_DUR) | 356 AR5K_DCU_CHAN_TIME_DUR) |
370 AR5K_DCU_CHAN_TIME_ENABLE, 357 AR5K_DCU_CHAN_TIME_ENABLE,
371 AR5K_QUEUE_DFS_CHANNEL_TIME(queue)); 358 AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
372 359
373 if (tq->tqi_flags 360 if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
374 & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE) 361 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
375 AR5K_REG_ENABLE_BITS(ah,
376 AR5K_QUEUE_MISC(queue),
377 AR5K_QCU_MISC_RDY_VEOL_POLICY); 362 AR5K_QCU_MISC_RDY_VEOL_POLICY);
378 } 363 }
379 364
380 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE) 365 /* Enable/disable Post frame backoff */
381 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS, 366 if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
382 AR5K_QUEUE_DFS_MISC(queue)); 367 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
368 AR5K_QUEUE_DFS_MISC(queue));
383 369
384 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE) 370 /* Enable/disable fragmentation burst backoff */
385 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG, 371 if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
386 AR5K_QUEUE_DFS_MISC(queue)); 372 ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
373 AR5K_QUEUE_DFS_MISC(queue));
387 374
388 /* 375 /*
389 * Set registers by queue type 376 * Set registers by queue type
390 */ 377 */
391 switch (tq->tqi_type) { 378 switch (tq->tqi_type) {
392 case AR5K_TX_QUEUE_BEACON: 379 case AR5K_TX_QUEUE_BEACON:
393 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 380 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
394 AR5K_QCU_MISC_FRSHED_DBA_GT | 381 AR5K_QCU_MISC_FRSHED_DBA_GT |
395 AR5K_QCU_MISC_CBREXP_BCN_DIS | 382 AR5K_QCU_MISC_CBREXP_BCN_DIS |
396 AR5K_QCU_MISC_BCN_ENABLE); 383 AR5K_QCU_MISC_BCN_ENABLE);
397 384
398 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), 385 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
399 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << 386 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
400 AR5K_DCU_MISC_ARBLOCK_CTL_S) | 387 AR5K_DCU_MISC_ARBLOCK_CTL_S) |
401 AR5K_DCU_MISC_ARBLOCK_IGNORE | 388 AR5K_DCU_MISC_ARBLOCK_IGNORE |
402 AR5K_DCU_MISC_POST_FR_BKOFF_DIS | 389 AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
403 AR5K_DCU_MISC_BCN_ENABLE); 390 AR5K_DCU_MISC_BCN_ENABLE);
404 break; 391 break;
405 392
406 case AR5K_TX_QUEUE_CAB: 393 case AR5K_TX_QUEUE_CAB:
407 /* XXX: use BCN_SENT_GT, if we can figure out how */ 394 /* XXX: use BCN_SENT_GT, if we can figure out how */
408 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 395 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
409 AR5K_QCU_MISC_FRSHED_DBA_GT | 396 AR5K_QCU_MISC_FRSHED_DBA_GT |
410 AR5K_QCU_MISC_CBREXP_DIS | 397 AR5K_QCU_MISC_CBREXP_DIS |
411 AR5K_QCU_MISC_CBREXP_BCN_DIS); 398 AR5K_QCU_MISC_CBREXP_BCN_DIS);
412 399
413 ath5k_hw_reg_write(ah, ((tq->tqi_ready_time - 400 ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
414 (AR5K_TUNE_SW_BEACON_RESP - 401 (AR5K_TUNE_SW_BEACON_RESP -
415 AR5K_TUNE_DMA_BEACON_RESP) - 402 AR5K_TUNE_DMA_BEACON_RESP) -
416 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) | 403 AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
417 AR5K_QCU_RDYTIMECFG_ENABLE, 404 AR5K_QCU_RDYTIMECFG_ENABLE,
418 AR5K_QUEUE_RDYTIMECFG(queue)); 405 AR5K_QUEUE_RDYTIMECFG(queue));
419 406
420 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue), 407 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
421 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL << 408 (AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
422 AR5K_DCU_MISC_ARBLOCK_CTL_S)); 409 AR5K_DCU_MISC_ARBLOCK_CTL_S));
423 break; 410 break;
424 411
425 case AR5K_TX_QUEUE_UAPSD: 412 case AR5K_TX_QUEUE_UAPSD:
426 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue), 413 AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
427 AR5K_QCU_MISC_CBREXP_DIS); 414 AR5K_QCU_MISC_CBREXP_DIS);
428 break; 415 break;
429 416
430 case AR5K_TX_QUEUE_DATA: 417 case AR5K_TX_QUEUE_DATA:
431 default: 418 default:
432 break; 419 break;
433 }
434
435 /* TODO: Handle frame compression */
436
437 /*
438 * Enable interrupts for this tx queue
439 * in the secondary interrupt mask registers
440 */
441 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
442 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
443
444 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
445 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
446
447 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
448 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
449
450 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
451 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
452
453 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
454 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
455
456 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
457 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
458
459 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
460 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
461
462 if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
463 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
464
465 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
466 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
467
468 /* Update secondary interrupt mask registers */
469
470 /* Filter out inactive queues */
471 ah->ah_txq_imr_txok &= ah->ah_txq_status;
472 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
473 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
474 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
475 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
476 ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
477 ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
478 ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
479 ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
480
481 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
482 AR5K_SIMR0_QCU_TXOK) |
483 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
484 AR5K_SIMR0_QCU_TXDESC), AR5K_SIMR0);
485 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
486 AR5K_SIMR1_QCU_TXERR) |
487 AR5K_REG_SM(ah->ah_txq_imr_txeol,
488 AR5K_SIMR1_QCU_TXEOL), AR5K_SIMR1);
 489 	/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
490 AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
491 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
492 AR5K_REG_SM(ah->ah_txq_imr_txurn,
493 AR5K_SIMR2_QCU_TXURN));
494 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
495 AR5K_SIMR3_QCBRORN) |
496 AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
497 AR5K_SIMR3_QCBRURN), AR5K_SIMR3);
498 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
499 AR5K_SIMR4_QTRIG), AR5K_SIMR4);
500 /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
501 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
502 AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
 503 	/* If no queue has TXNOFRM enabled, disable the interrupt
 504 	 * by writing zero to AR5K_TXNOFRM */
505 if (ah->ah_txq_imr_nofrm == 0)
506 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
507
508 /* Set QCU mask for this DCU to save power */
509 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
510 } 420 }
511 421
422 /* TODO: Handle frame compression */
423
424 /*
425 * Enable interrupts for this tx queue
426 * in the secondary interrupt mask registers
427 */
428 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
429 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
430
431 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
432 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
433
434 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
435 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
436
437 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
438 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
439
440 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
441 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
442
443 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
444 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
445
446 if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
447 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
448
449 if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
450 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
451
452 if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
453 AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
454
455 /* Update secondary interrupt mask registers */
456
457 /* Filter out inactive queues */
458 ah->ah_txq_imr_txok &= ah->ah_txq_status;
459 ah->ah_txq_imr_txerr &= ah->ah_txq_status;
460 ah->ah_txq_imr_txurn &= ah->ah_txq_status;
461 ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
462 ah->ah_txq_imr_txeol &= ah->ah_txq_status;
463 ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
464 ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
465 ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
466 ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
467
468 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
469 AR5K_SIMR0_QCU_TXOK) |
470 AR5K_REG_SM(ah->ah_txq_imr_txdesc,
471 AR5K_SIMR0_QCU_TXDESC),
472 AR5K_SIMR0);
473
474 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
475 AR5K_SIMR1_QCU_TXERR) |
476 AR5K_REG_SM(ah->ah_txq_imr_txeol,
477 AR5K_SIMR1_QCU_TXEOL),
478 AR5K_SIMR1);
479
 480 	/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
481 AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
482 AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
483 AR5K_REG_SM(ah->ah_txq_imr_txurn,
484 AR5K_SIMR2_QCU_TXURN));
485
486 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
487 AR5K_SIMR3_QCBRORN) |
488 AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
489 AR5K_SIMR3_QCBRURN),
490 AR5K_SIMR3);
491
492 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
493 AR5K_SIMR4_QTRIG), AR5K_SIMR4);
494
495 /* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
496 ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
497 AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
498
 499 	/* If no queue has TXNOFRM enabled, disable the interrupt
 500 	 * by writing zero to AR5K_TXNOFRM */
501 if (ah->ah_txq_imr_nofrm == 0)
502 ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
503
504 /* Set QCU mask for this DCU to save power */
505 AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
506
512 return 0; 507 return 0;
513} 508}
514 509
515/* 510
516 * Set slot time on DCU 511/**************************\
512* Global QCU/DCU functions *
513\**************************/
514
515/**
516 * ath5k_hw_set_ifs_intervals - Set global inter-frame spaces on DCU
517 *
 518 * @ah: The &struct ath5k_hw
 519 * @slot_time: Slot time in us
520 *
521 * Sets the global IFS intervals on DCU (also works on AR5210) for
522 * the given slot time and the current bwmode.
517 */ 523 */
518int ath5k_hw_set_slot_time(struct ath5k_hw *ah, unsigned int slot_time) 524int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
519{ 525{
526 struct ieee80211_channel *channel = ah->ah_current_channel;
527 struct ath5k_softc *sc = ah->ah_sc;
528 struct ieee80211_rate *rate;
529 u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
520 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time); 530 u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
521 531
522 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX) 532 if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
523 return -EINVAL; 533 return -EINVAL;
524 534
525 if (ah->ah_version == AR5K_AR5210) 535 sifs = ath5k_hw_get_default_sifs(ah);
526 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME); 536 sifs_clock = ath5k_hw_htoclock(ah, sifs);
537
538 /* EIFS
539 * Txtime of ack at lowest rate + SIFS + DIFS
540 * (DIFS = SIFS + 2 * Slot time)
541 *
542 * Note: HAL has some predefined values for EIFS
543 * Turbo: (37 + 2 * 6)
544 * Default: (74 + 2 * 9)
545 * Half: (149 + 2 * 13)
546 * Quarter: (298 + 2 * 21)
547 *
548 * (74 + 2 * 6) for AR5210 default and turbo !
549 *
550 * According to the formula we have
551 * ack_tx_time = 25 for turbo and
552 * ack_tx_time = 42.5 * clock multiplier
553 * for default/half/quarter.
554 *
555 * This can't be right, 42 is what we would get
556 * from ath5k_hw_get_frame_dur_for_bwmode or
557 * ieee80211_generic_frame_duration for zero frame
558 * length and without SIFS !
559 *
560 * Also we have different lowest rate for 802.11a
561 */
562 if (channel->hw_value & CHANNEL_5GHZ)
563 rate = &sc->sbands[IEEE80211_BAND_5GHZ].bitrates[0];
527 else 564 else
528 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT); 565 rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[0];
566
567 ack_tx_time = ath5k_hw_get_frame_duration(ah, 10, rate);
568
569 /* ack_tx_time includes an SIFS already */
570 eifs = ack_tx_time + sifs + 2 * slot_time;
571 eifs_clock = ath5k_hw_htoclock(ah, eifs);
572
573 /* Set IFS settings on AR5210 */
574 if (ah->ah_version == AR5K_AR5210) {
575 u32 pifs, pifs_clock, difs, difs_clock;
576
577 /* Set slot time */
578 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
579
580 /* Set EIFS */
581 eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
582
583 /* PIFS = Slot time + SIFS */
584 pifs = slot_time + sifs;
585 pifs_clock = ath5k_hw_htoclock(ah, pifs);
586 pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
587
588 /* DIFS = SIFS + 2 * Slot time */
589 difs = sifs + 2 * slot_time;
590 difs_clock = ath5k_hw_htoclock(ah, difs);
591
592 /* Set SIFS/DIFS */
593 ath5k_hw_reg_write(ah, (difs_clock <<
594 AR5K_IFS0_DIFS_S) | sifs_clock,
595 AR5K_IFS0);
596
597 /* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
598 ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
599 (AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
600 AR5K_IFS1);
601
602 return 0;
603 }
604
605 /* Set IFS slot time */
606 ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
607
608 /* Set EIFS interval */
609 ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
610
611 /* Set SIFS interval in usecs */
612 AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
613 AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
614 sifs);
615
616 /* Set SIFS interval in clock cycles */
617 ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
529 618
530 return 0; 619 return 0;
531} 620}
532 621
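/*
 * A minimal sketch of the EIFS arithmetic performed above, using the
 * default-mode numbers quoted in the comment (slot time 9 usec,
 * ack_tx_time + SIFS = 74 usec) and a 40MHz core clock; the values are
 * illustrative only, the driver derives them at run time.
 */
static void example_eifs_default_mode(void)
{
	unsigned int slot_time = 9;		/* usec, default slot time */
	unsigned int ack_plus_sifs = 74;	/* usec, per the HAL note above */
	unsigned int clockrate = 40;		/* clocks per usec, 802.11a */

	/* EIFS = ack_tx_time + SIFS + DIFS, with DIFS = SIFS + 2 * slot time */
	unsigned int eifs = ack_plus_sifs + 2 * slot_time;	/* 92 usec */
	unsigned int eifs_clock = eifs * clockrate;		/* 3680 clock units */

	(void)eifs_clock;
}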
622
623int ath5k_hw_init_queues(struct ath5k_hw *ah)
624{
625 int i, ret;
626
627 /* TODO: HW Compression support for data queues */
628 /* TODO: Burst prefetch for data queues */
629
630 /*
631 * Reset queues and start beacon timers at the end of the reset routine
632 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
 633 	 * Note: If we want, we can assign multiple QCUs to one DCU.
634 */
635 if (ah->ah_version != AR5K_AR5210)
636 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
637 ret = ath5k_hw_reset_tx_queue(ah, i);
638 if (ret) {
639 ATH5K_ERR(ah->ah_sc,
640 "failed to reset TX queue #%d\n", i);
641 return ret;
642 }
643 }
644 else
645 /* No QCU/DCU on AR5210, just set tx
646 * retry limits. We set IFS parameters
 647 		 * in ath5k_hw_set_ifs_intervals */
648 ath5k_hw_set_tx_retry_limits(ah, 0);
649
650 /* Set the turbo flag when operating on 40MHz */
651 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
652 AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
653 AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
654
655 /* If we didn't set IFS timings through
656 * ath5k_hw_set_coverage_class make sure
657 * we set them here */
658 if (!ah->ah_coverage_class) {
659 unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
660 ath5k_hw_set_ifs_intervals(ah, slot_time);
661 }
662
663 return 0;
664}
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index 2a246d13b520..fd14b9103951 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -787,6 +787,7 @@
787#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 /* LFSR Slice Select */ 787#define AR5K_DCU_GBL_IFS_MISC_LFSR_SLICE 0x00000007 /* LFSR Slice Select */
788#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */ 788#define AR5K_DCU_GBL_IFS_MISC_TURBO_MODE 0x00000008 /* Turbo mode */
789#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */ 789#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC 0x000003f0 /* SIFS Duration mask */
790#define AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC_S 4
790#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */ 791#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR 0x000ffc00 /* USEC Duration mask */
791#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR_S 10 792#define AR5K_DCU_GBL_IFS_MISC_USEC_DUR_S 10
792#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */ 793#define AR5K_DCU_GBL_IFS_MISC_DCU_ARB_DELAY 0x00300000 /* DCU Arbiter delay mask */
@@ -1311,7 +1312,7 @@
1311#define AR5K_IFS1_EIFS 0x03fff000 1312#define AR5K_IFS1_EIFS 0x03fff000
1312#define AR5K_IFS1_EIFS_S 12 1313#define AR5K_IFS1_EIFS_S 12
1313#define AR5K_IFS1_CS_EN 0x04000000 1314#define AR5K_IFS1_CS_EN 0x04000000
1314 1315#define AR5K_IFS1_CS_EN_S 26
1315 1316
1316/* 1317/*
1317 * CFP duration register 1318 * CFP duration register
@@ -2058,6 +2059,7 @@
2058 2059
2059#define AR5K_PHY_SCAL 0x9878 2060#define AR5K_PHY_SCAL 0x9878
2060#define AR5K_PHY_SCAL_32MHZ 0x0000000e 2061#define AR5K_PHY_SCAL_32MHZ 0x0000000e
2062#define AR5K_PHY_SCAL_32MHZ_5311 0x00000008
2061#define AR5K_PHY_SCAL_32MHZ_2417 0x0000000a 2063#define AR5K_PHY_SCAL_32MHZ_2417 0x0000000a
2062#define AR5K_PHY_SCAL_32MHZ_HB63 0x00000032 2064#define AR5K_PHY_SCAL_32MHZ_HB63 0x00000032
2063 2065
@@ -2244,6 +2246,8 @@
2244#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \ 2246#define AR5K_PHY_FRAME_CTL (ah->ah_version == AR5K_AR5210 ? \
2245 AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211) 2247 AR5K_PHY_FRAME_CTL_5210 : AR5K_PHY_FRAME_CTL_5211)
2246/*---[5111+]---*/ 2248/*---[5111+]---*/
2249#define AR5K_PHY_FRAME_CTL_WIN_LEN 0x00000003 /* Force window length (?) */
2250#define AR5K_PHY_FRAME_CTL_WIN_LEN_S 0
2247#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */ 2251#define AR5K_PHY_FRAME_CTL_TX_CLIP 0x00000038 /* Mask for tx clip (?) */
2248#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3 2252#define AR5K_PHY_FRAME_CTL_TX_CLIP_S 3
2249#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */ 2253#define AR5K_PHY_FRAME_CTL_PREP_CHINFO 0x00010000 /* Prepend chan info */
@@ -2558,3 +2562,28 @@
2558 */ 2562 */
2559#define AR5K_PHY_PDADC_TXPOWER_BASE 0xa280 2563#define AR5K_PHY_PDADC_TXPOWER_BASE 0xa280
2560#define AR5K_PHY_PDADC_TXPOWER(_n) (AR5K_PHY_PDADC_TXPOWER_BASE + ((_n) << 2)) 2564#define AR5K_PHY_PDADC_TXPOWER(_n) (AR5K_PHY_PDADC_TXPOWER_BASE + ((_n) << 2))
2565
2566/*
2567 * Platform registers for WiSoC
2568 */
2569#define AR5K_AR5312_RESET 0xbc003020
2570#define AR5K_AR5312_RESET_BB0_COLD 0x00000004
2571#define AR5K_AR5312_RESET_BB1_COLD 0x00000200
2572#define AR5K_AR5312_RESET_WMAC0 0x00002000
2573#define AR5K_AR5312_RESET_BB0_WARM 0x00004000
2574#define AR5K_AR5312_RESET_WMAC1 0x00020000
2575#define AR5K_AR5312_RESET_BB1_WARM 0x00040000
2576
2577#define AR5K_AR5312_ENABLE 0xbc003080
2578#define AR5K_AR5312_ENABLE_WLAN0 0x00000001
2579#define AR5K_AR5312_ENABLE_WLAN1 0x00000008
2580
2581#define AR5K_AR2315_RESET 0xb1000004
2582#define AR5K_AR2315_RESET_WMAC 0x00000001
2583#define AR5K_AR2315_RESET_BB_WARM 0x00000002
2584
2585#define AR5K_AR2315_AHB_ARB_CTL 0xb1000008
2586#define AR5K_AR2315_AHB_ARB_CTL_WLAN 0x00000002
2587
2588#define AR5K_AR2315_BYTESWAP 0xb100000c
2589#define AR5K_AR2315_BYTESWAP_WMAC 0x00000002
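/*
 * Sketch of how the newly added _S shift defines pair with their masks
 * when packing a register field; this assumes AR5K_REG_SM() is the
 * usual "(val << _flags##_S) & _flags" helper used throughout ath5k.
 * The mask/shift below mirror AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC.
 */
#define EXAMPLE_SIFS_DUR_USEC	0x000003f0
#define EXAMPLE_SIFS_DUR_USEC_S	4

static unsigned int example_pack_sifs(unsigned int sifs_usec)
{
	/* e.g. sifs_usec = 16 -> (16 << 4) & 0x3f0 = 0x100 */
	return (sifs_usec << EXAMPLE_SIFS_DUR_USEC_S) & EXAMPLE_SIFS_DUR_USEC;
}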
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 5b179d01f97d..84206898f77d 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -27,11 +27,17 @@
27 27
28#include <linux/pci.h> /* To determine if a card is pci-e */ 28#include <linux/pci.h> /* To determine if a card is pci-e */
29#include <linux/log2.h> 29#include <linux/log2.h>
30#include <linux/platform_device.h>
30#include "ath5k.h" 31#include "ath5k.h"
31#include "reg.h" 32#include "reg.h"
32#include "base.h" 33#include "base.h"
33#include "debug.h" 34#include "debug.h"
34 35
36
37/******************\
38* Helper functions *
39\******************/
40
35/* 41/*
36 * Check if a register write has been completed 42 * Check if a register write has been completed
37 */ 43 */
@@ -53,146 +59,267 @@ int ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
53 return (i <= 0) ? -EAGAIN : 0; 59 return (i <= 0) ? -EAGAIN : 0;
54} 60}
55 61
62
63/*************************\
64* Clock related functions *
65\*************************/
66
56/** 67/**
57 * ath5k_hw_write_ofdm_timings - set OFDM timings on AR5212 68 * ath5k_hw_htoclock - Translate usec to hw clock units
58 * 69 *
59 * @ah: the &struct ath5k_hw 70 * @ah: The &struct ath5k_hw
60 * @channel: the currently set channel upon reset 71 * @usec: value in microseconds
61 * 72 */
62 * Write the delta slope coefficient (used on pilot tracking ?) for OFDM 73unsigned int ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
63 * operation on the AR5212 upon reset. This is a helper for ath5k_hw_reset(). 74{
75 struct ath_common *common = ath5k_hw_common(ah);
76 return usec * common->clockrate;
77}
78
79/**
80 * ath5k_hw_clocktoh - Translate hw clock units to usec
81 * @clock: value in hw clock units
82 */
83unsigned int ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
84{
85 struct ath_common *common = ath5k_hw_common(ah);
86 return clock / common->clockrate;
87}
88
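/*
 * Usage sketch for the two helpers above: with the core clock set up by
 * ath5k_hw_init_core_clock() below (e.g. 44 clocks per usec on an
 * 802.11g channel at default bwmode), a 10 usec interval becomes 440
 * hw clock units and converts back to 10 usec.
 */
static void example_clock_conversion(struct ath5k_hw *ah)
{
	unsigned int in_clocks = ath5k_hw_htoclock(ah, 10);	/* 10 usec -> 440 */
	unsigned int back_usec = ath5k_hw_clocktoh(ah, in_clocks); /* -> 10 usec */

	(void)back_usec;
}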
89/**
90 * ath5k_hw_init_core_clock - Initialize core clock
64 * 91 *
 65 * Since delta slope is floating point we split it into its exponent and 92 * @ah: The &struct ath5k_hw
66 * mantissa and provide these values on hw.
67 * 93 *
 68 * For more info, I think this patent is related 94 * Initialize core clock parameters (usec, usec32, latencies etc).
69 * http://www.freepatentsonline.com/7184495.html
70 */ 95 */
71static inline int ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah, 96static void ath5k_hw_init_core_clock(struct ath5k_hw *ah)
72 struct ieee80211_channel *channel)
73{ 97{
74 /* Get exponent and mantissa and set it */ 98 struct ieee80211_channel *channel = ah->ah_current_channel;
75 u32 coef_scaled, coef_exp, coef_man, 99 struct ath_common *common = ath5k_hw_common(ah);
76 ds_coef_exp, ds_coef_man, clock; 100 u32 usec_reg, txlat, rxlat, usec, clock, sclock, txf2txs;
77 101
78 BUG_ON(!(ah->ah_version == AR5K_AR5212) || 102 /*
79 !(channel->hw_value & CHANNEL_OFDM)); 103 * Set core clock frequency
80 104 */
81 /* Get coefficient 105 if (channel->hw_value & CHANNEL_5GHZ)
82 * ALGO: coef = (5 * clock / carrier_freq) / 2 106 clock = 40; /* 802.11a */
83 * we scale coef by shifting clock value by 24 for 107 else if (channel->hw_value & CHANNEL_CCK)
84 * better precision since we use integers */ 108 clock = 22; /* 802.11b */
85 /* TODO: Half/quarter rate */ 109 else
86 clock = (channel->hw_value & CHANNEL_TURBO) ? 80 : 40; 110 clock = 44; /* 802.11g */
87 coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq; 111
88 112 /* Use clock multiplier for non-default
89 /* Get exponent 113 * bwmode */
90 * ALGO: coef_exp = 14 - highest set bit position */ 114 switch (ah->ah_bwmode) {
91 coef_exp = ilog2(coef_scaled); 115 case AR5K_BWMODE_40MHZ:
92 116 clock *= 2;
93 /* Doesn't make sense if it's zero*/ 117 break;
94 if (!coef_scaled || !coef_exp) 118 case AR5K_BWMODE_10MHZ:
95 return -EINVAL; 119 clock /= 2;
120 break;
121 case AR5K_BWMODE_5MHZ:
122 clock /= 4;
123 break;
124 default:
125 break;
126 }
96 127
97 /* Note: we've shifted coef_scaled by 24 */ 128 common->clockrate = clock;
98 coef_exp = 14 - (coef_exp - 24);
99 129
130 /*
131 * Set USEC parameters
132 */
133 /* Set USEC counter on PCU*/
134 usec = clock - 1;
135 usec = AR5K_REG_SM(usec, AR5K_USEC_1);
100 136
101 /* Get mantissa (significant digits) 137 /* Set usec duration on DCU */
102 * ALGO: coef_mant = floor(coef_scaled* 2^coef_exp+0.5) */ 138 if (ah->ah_version != AR5K_AR5210)
103 coef_man = coef_scaled + 139 AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
104 (1 << (24 - coef_exp - 1)); 140 AR5K_DCU_GBL_IFS_MISC_USEC_DUR,
141 clock);
105 142
106 /* Calculate delta slope coefficient exponent 143 /* Set 32MHz USEC counter */
107 * and mantissa (remove scaling) and set them on hw */ 144 if ((ah->ah_radio == AR5K_RF5112) ||
108 ds_coef_man = coef_man >> (24 - coef_exp); 145 (ah->ah_radio == AR5K_RF5413) ||
109 ds_coef_exp = coef_exp - 16; 146 (ah->ah_radio == AR5K_RF2316) ||
147 (ah->ah_radio == AR5K_RF2317))
148 /* Remain on 40MHz clock ? */
149 sclock = 40 - 1;
150 else
151 sclock = 32 - 1;
152 sclock = AR5K_REG_SM(sclock, AR5K_USEC_32);
110 153
111 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3, 154 /*
112 AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man); 155 * Set tx/rx latencies
113 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3, 156 */
114 AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp); 157 usec_reg = ath5k_hw_reg_read(ah, AR5K_USEC_5211);
158 txlat = AR5K_REG_MS(usec_reg, AR5K_USEC_TX_LATENCY_5211);
159 rxlat = AR5K_REG_MS(usec_reg, AR5K_USEC_RX_LATENCY_5211);
115 160
116 return 0; 161 /*
117} 162 * 5210 initvals don't include usec settings
163 * so we need to use magic values here for
164 * tx/rx latencies
165 */
166 if (ah->ah_version == AR5K_AR5210) {
167 /* same for turbo */
168 txlat = AR5K_INIT_TX_LATENCY_5210;
169 rxlat = AR5K_INIT_RX_LATENCY_5210;
170 }
118 171
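/*
 * Worked example of the delta slope computation in the removed
 * ath5k_hw_write_ofdm_timings() helper above, for a 5180MHz channel on
 * a 40MHz clock; the numbers in the comments are what the integer math
 * yields.
 */
static void example_delta_slope_5180mhz(void)
{
	unsigned int clock = 40, center_freq = 5180;
	unsigned int coef_scaled = ((5 * (clock << 24)) / 2) / center_freq;	/* 323884 */
	unsigned int coef_exp = 14 - (18 - 24);			/* ilog2(323884) = 18 -> 20 */
	unsigned int coef_man = coef_scaled + (1 << (24 - coef_exp - 1));	/* 323892 */
	unsigned int ds_coef_man = coef_man >> (24 - coef_exp);	/* 20243 */
	unsigned int ds_coef_exp = coef_exp - 16;			/* 4 */

	(void)ds_coef_man;
	(void)ds_coef_exp;
}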
172 if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
173 /* 5311 has different tx/rx latency masks
 174 	 * from 5211, since we treat 5311 the same
175 * as 5211 when setting initvals, shift
176 * values here to their proper locations
177 *
 178 	 * Note: Initvals indicate tx/rx latencies
179 * are the same for turbo mode */
180 txlat = AR5K_REG_SM(txlat, AR5K_USEC_TX_LATENCY_5210);
181 rxlat = AR5K_REG_SM(rxlat, AR5K_USEC_RX_LATENCY_5210);
182 } else
183 switch (ah->ah_bwmode) {
184 case AR5K_BWMODE_10MHZ:
185 txlat = AR5K_REG_SM(txlat * 2,
186 AR5K_USEC_TX_LATENCY_5211);
187 rxlat = AR5K_REG_SM(AR5K_INIT_RX_LAT_MAX,
188 AR5K_USEC_RX_LATENCY_5211);
189 txf2txs = AR5K_INIT_TXF2TXD_START_DELAY_10MHZ;
190 break;
191 case AR5K_BWMODE_5MHZ:
192 txlat = AR5K_REG_SM(txlat * 4,
193 AR5K_USEC_TX_LATENCY_5211);
194 rxlat = AR5K_REG_SM(AR5K_INIT_RX_LAT_MAX,
195 AR5K_USEC_RX_LATENCY_5211);
196 txf2txs = AR5K_INIT_TXF2TXD_START_DELAY_5MHZ;
197 break;
198 case AR5K_BWMODE_40MHZ:
199 txlat = AR5K_INIT_TX_LAT_MIN;
200 rxlat = AR5K_REG_SM(rxlat / 2,
201 AR5K_USEC_RX_LATENCY_5211);
202 txf2txs = AR5K_INIT_TXF2TXD_START_DEFAULT;
203 break;
204 default:
205 break;
206 }
119 207
120/* 208 usec_reg = (usec | sclock | txlat | rxlat);
121 * index into rates for control rates, we can set it up like this because 209 ath5k_hw_reg_write(ah, usec_reg, AR5K_USEC);
122 * this is only used for AR5212 and we know it supports G mode
123 */
124static const unsigned int control_rates[] =
125 { 0, 1, 1, 1, 4, 4, 6, 6, 8, 8, 8, 8 };
126 210
 127/** 211 	/* On 5112 set tx frame to tx data start delay */
128 * ath5k_hw_write_rate_duration - fill rate code to duration table 212 if (ah->ah_radio == AR5K_RF5112) {
129 * 213 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL2,
130 * @ah: the &struct ath5k_hw 214 AR5K_PHY_RF_CTL2_TXF2TXD_START,
131 * @mode: one of enum ath5k_driver_mode 215 txf2txs);
132 * 216 }
133 * Write the rate code to duration table upon hw reset. This is a helper for 217}
134 * ath5k_hw_reset(). It seems all this is doing is setting an ACK timeout on 218
135 * the hardware, based on current mode, for each rate. The rates which are 219/*
136 * capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have 220 * If there is an external 32KHz crystal available, use it
 137 * different rate code so we write their value twice (one for long preamble 221 * as ref. clock instead of 32/40MHz clock and baseband clocks
138 * and one for short). 222 * to save power during sleep or restore normal 32/40MHz
223 * operation.
139 * 224 *
140 * Note: Band doesn't matter here, if we set the values for OFDM it works 225 * XXX: When operating on 32KHz certain PHY registers (27 - 31,
141 * on both a and g modes. So all we have to do is set values for all g rates 226 * 123 - 127) require delay on access.
142 * that include all OFDM and CCK rates. If we operate in turbo or xr/half/
143 * quarter rate mode, we need to use another set of bitrates (that's why we
144 * need the mode parameter) but we don't handle these proprietary modes yet.
145 */ 227 */
146static inline void ath5k_hw_write_rate_duration(struct ath5k_hw *ah, 228static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
147 unsigned int mode)
148{ 229{
149 struct ath5k_softc *sc = ah->ah_sc; 230 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
150 struct ieee80211_rate *rate; 231 u32 scal, spending;
151 unsigned int i; 232
233 /* Only set 32KHz settings if we have an external
234 * 32KHz crystal present */
235 if ((AR5K_EEPROM_HAS32KHZCRYSTAL(ee->ee_misc1) ||
236 AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(ee->ee_misc1)) &&
237 enable) {
152 238
153 /* Write rate duration table */ 239 /* 1 usec/cycle */
154 for (i = 0; i < sc->sbands[IEEE80211_BAND_2GHZ].n_bitrates; i++) { 240 AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, 1);
155 u32 reg; 241 /* Set up tsf increment on each cycle */
156 u16 tx_time; 242 AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 61);
157 243
158 rate = &sc->sbands[IEEE80211_BAND_2GHZ].bitrates[control_rates[i]]; 244 /* Set baseband sleep control registers
245 * and sleep control rate */
246 ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
247
248 if ((ah->ah_radio == AR5K_RF5112) ||
249 (ah->ah_radio == AR5K_RF5413) ||
250 (ah->ah_radio == AR5K_RF2316) ||
251 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
252 spending = 0x14;
253 else
254 spending = 0x18;
255 ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
159 256
160 /* Set ACK timeout */ 257 if ((ah->ah_radio == AR5K_RF5112) ||
161 reg = AR5K_RATE_DUR(rate->hw_value); 258 (ah->ah_radio == AR5K_RF5413) ||
259 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
260 ath5k_hw_reg_write(ah, 0x26, AR5K_PHY_SLMT);
261 ath5k_hw_reg_write(ah, 0x0d, AR5K_PHY_SCAL);
262 ath5k_hw_reg_write(ah, 0x07, AR5K_PHY_SCLOCK);
263 ath5k_hw_reg_write(ah, 0x3f, AR5K_PHY_SDELAY);
264 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
265 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x02);
266 } else {
267 ath5k_hw_reg_write(ah, 0x0a, AR5K_PHY_SLMT);
268 ath5k_hw_reg_write(ah, 0x0c, AR5K_PHY_SCAL);
269 ath5k_hw_reg_write(ah, 0x03, AR5K_PHY_SCLOCK);
270 ath5k_hw_reg_write(ah, 0x20, AR5K_PHY_SDELAY);
271 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
272 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x03);
273 }
162 274
163 /* An ACK frame consists of 10 bytes. If you add the FCS, 275 /* Enable sleep clock operation */
164 * which ieee80211_generic_frame_duration() adds, 276 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG,
165 * its 14 bytes. Note we use the control rate and not the 277 AR5K_PCICFG_SLEEP_CLOCK_EN);
166 * actual rate for this rate. See mac80211 tx.c
167 * ieee80211_duration() for a brief description of
168 * what rate we should choose to TX ACKs. */
169 tx_time = le16_to_cpu(ieee80211_generic_frame_duration(sc->hw,
170 NULL, 10, rate));
171 278
172 ath5k_hw_reg_write(ah, tx_time, reg); 279 } else {
173 280
174 if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE)) 281 /* Disable sleep clock operation and
175 continue; 282 * restore default parameters */
283 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
284 AR5K_PCICFG_SLEEP_CLOCK_EN);
176 285
177 /* 286 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
178 * We're not distinguishing short preamble here, 287 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0);
179 * This is true, all we'll get is a longer value here 288
180 * which is not necessarilly bad. We could use 289 /* Set DAC/ADC delays */
181 * export ieee80211_frame_duration() but that needs to be 290 ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
182 * fixed first to be properly used by mac802111 drivers: 291 ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
183 * 292
184 * - remove erp stuff and let the routine figure ofdm 293 if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
185 * erp rates 294 scal = AR5K_PHY_SCAL_32MHZ_2417;
186 * - remove passing argument ieee80211_local as 295 else if (ee->ee_is_hb63)
187 * drivers don't have access to it 296 scal = AR5K_PHY_SCAL_32MHZ_HB63;
188 * - move drivers using ieee80211_generic_frame_duration() 297 else
189 * to this 298 scal = AR5K_PHY_SCAL_32MHZ;
190 */ 299 ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
191 ath5k_hw_reg_write(ah, tx_time, 300
192 reg + (AR5K_SET_SHORT_PREAMBLE << 2)); 301 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
302 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
303
304 if ((ah->ah_radio == AR5K_RF5112) ||
305 (ah->ah_radio == AR5K_RF5413) ||
306 (ah->ah_radio == AR5K_RF2316) ||
307 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
308 spending = 0x14;
309 else
310 spending = 0x18;
311 ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
312
313 /* Set up tsf increment on each cycle */
314 AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
193 } 315 }
194} 316}
195 317
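/*
 * Rough sketch of the ACK duration the rate-duration loop above writes,
 * assuming standard 802.11b long-preamble timing (192 usec of PLCP
 * preamble + header) and the 14 byte ACK (10 bytes + FCS) mentioned in
 * the comment; the driver itself takes the value from
 * ieee80211_generic_frame_duration().
 */
static unsigned int example_ack_duration_1mbps(void)
{
	unsigned int plcp_usec = 192;		/* long preamble + PLCP header */
	unsigned int frame_bits = 14 * 8;	/* ACK + FCS */
	unsigned int rate_kbps = 1000;		/* 1 Mb/s, lowest CCK rate */

	return plcp_usec + (frame_bits * 1000) / rate_kbps;	/* ~304 usec */
}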
318
319/*********************\
320* Reset/Sleep control *
321\*********************/
322
196/* 323/*
197 * Reset chipset 324 * Reset chipset
198 */ 325 */
@@ -236,6 +363,64 @@ static int ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
236} 363}
237 364
238/* 365/*
366 * Reset AHB chipset
367 * AR5K_RESET_CTL_PCU flag resets WMAC
368 * AR5K_RESET_CTL_BASEBAND flag resets WBB
369 */
370static int ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
371{
372 u32 mask = flags ? flags : ~0U;
373 volatile u32 *reg;
374 u32 regval;
375 u32 val = 0;
376
377 /* ah->ah_mac_srev is not available at this point yet */
378 if (ah->ah_sc->devid >= AR5K_SREV_AR2315_R6) {
379 reg = (u32 *) AR5K_AR2315_RESET;
380 if (mask & AR5K_RESET_CTL_PCU)
381 val |= AR5K_AR2315_RESET_WMAC;
382 if (mask & AR5K_RESET_CTL_BASEBAND)
383 val |= AR5K_AR2315_RESET_BB_WARM;
384 } else {
385 reg = (u32 *) AR5K_AR5312_RESET;
386 if (to_platform_device(ah->ah_sc->dev)->id == 0) {
387 if (mask & AR5K_RESET_CTL_PCU)
388 val |= AR5K_AR5312_RESET_WMAC0;
389 if (mask & AR5K_RESET_CTL_BASEBAND)
390 val |= AR5K_AR5312_RESET_BB0_COLD |
391 AR5K_AR5312_RESET_BB0_WARM;
392 } else {
393 if (mask & AR5K_RESET_CTL_PCU)
394 val |= AR5K_AR5312_RESET_WMAC1;
395 if (mask & AR5K_RESET_CTL_BASEBAND)
396 val |= AR5K_AR5312_RESET_BB1_COLD |
397 AR5K_AR5312_RESET_BB1_WARM;
398 }
399 }
400
401 /* Put BB/MAC into reset */
402 regval = __raw_readl(reg);
403 __raw_writel(regval | val, reg);
404 regval = __raw_readl(reg);
405 udelay(100);
406
407 /* Bring BB/MAC out of reset */
408 __raw_writel(regval & ~val, reg);
409 regval = __raw_readl(reg);
410
411 /*
412 * Reset configuration register (for hw byte-swap). Note that this
413 * is only set for big endian. We do the necessary magic in
414 * AR5K_INIT_CFG.
415 */
416 if ((flags & AR5K_RESET_CTL_PCU) == 0)
417 ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
418
419 return 0;
420}
421
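/*
 * Sketch of the flag-to-platform-bit mapping ath5k_hw_wisoc_reset()
 * applies on an AR2315 class SoC, using the register bits added in
 * reg.h above; the real function also handles the dual-WMAC AR5312
 * layout and performs the read-modify-write reset pulse.
 */
static u32 example_ar2315_reset_bits(u32 flags)
{
	u32 val = 0;

	if (flags & AR5K_RESET_CTL_PCU)		/* reset the wireless MAC */
		val |= AR5K_AR2315_RESET_WMAC;
	if (flags & AR5K_RESET_CTL_BASEBAND)	/* warm reset the baseband */
		val |= AR5K_AR2315_RESET_BB_WARM;

	return val;
}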
422
423/*
239 * Sleep control 424 * Sleep control
240 */ 425 */
241static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode, 426static int ath5k_hw_set_power(struct ath5k_hw *ah, enum ath5k_power_mode mode,
@@ -334,6 +519,9 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
334 u32 bus_flags; 519 u32 bus_flags;
335 int ret; 520 int ret;
336 521
522 if (ath5k_get_bus_type(ah) == ATH_AHB)
523 return 0;
524
337 /* Make sure device is awake */ 525 /* Make sure device is awake */
338 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); 526 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
339 if (ret) { 527 if (ret) {
@@ -349,7 +537,7 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
 349 	 * we ignore that flag for PCI-E cards. On PCI cards 537 	 * we ignore that flag for PCI-E cards. On PCI cards
350 * this flag gets cleared after 64 PCI clocks. 538 * this flag gets cleared after 64 PCI clocks.
351 */ 539 */
352 bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI; 540 bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
353 541
354 if (ah->ah_version == AR5K_AR5210) { 542 if (ah->ah_version == AR5K_AR5210) {
355 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 543 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
@@ -378,7 +566,6 @@ int ath5k_hw_on_hold(struct ath5k_hw *ah)
378 566
379/* 567/*
380 * Bring up MAC + PHY Chips and program PLL 568 * Bring up MAC + PHY Chips and program PLL
381 * TODO: Half/Quarter rate support
382 */ 569 */
383int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial) 570int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
384{ 571{
@@ -390,11 +577,13 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
390 mode = 0; 577 mode = 0;
391 clock = 0; 578 clock = 0;
392 579
393 /* Wakeup the device */ 580 if ((ath5k_get_bus_type(ah) != ATH_AHB) || !initial) {
394 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0); 581 /* Wakeup the device */
395 if (ret) { 582 ret = ath5k_hw_set_power(ah, AR5K_PM_AWAKE, true, 0);
396 ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n"); 583 if (ret) {
397 return ret; 584 ATH5K_ERR(ah->ah_sc, "failed to wakeup the MAC Chip\n");
585 return ret;
586 }
398 } 587 }
399 588
400 /* 589 /*
@@ -405,7 +594,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
 405 	 * we ignore that flag for PCI-E cards. On PCI cards 594 	 * we ignore that flag for PCI-E cards. On PCI cards
406 * this flag gets cleared after 64 PCI clocks. 595 * this flag gets cleared after 64 PCI clocks.
407 */ 596 */
408 bus_flags = (pdev->is_pcie) ? 0 : AR5K_RESET_CTL_PCI; 597 bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
409 598
410 if (ah->ah_version == AR5K_AR5210) { 599 if (ah->ah_version == AR5K_AR5210) {
411 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 600 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
@@ -413,8 +602,12 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
413 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI); 602 AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
414 mdelay(2); 603 mdelay(2);
415 } else { 604 } else {
416 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU | 605 if (ath5k_get_bus_type(ah) == ATH_AHB)
417 AR5K_RESET_CTL_BASEBAND | bus_flags); 606 ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
607 AR5K_RESET_CTL_BASEBAND);
608 else
609 ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
610 AR5K_RESET_CTL_BASEBAND | bus_flags);
418 } 611 }
419 612
420 if (ret) { 613 if (ret) {
@@ -429,9 +622,15 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
429 return ret; 622 return ret;
430 } 623 }
431 624
 432 	/* ...clear reset control register and pull device out of 625 	/* ...reset configuration register on WiSoC ...
433 * warm reset */ 626 * ...clear reset control register and pull device out of
434 if (ath5k_hw_nic_reset(ah, 0)) { 627 * warm reset on others */
628 if (ath5k_get_bus_type(ah) == ATH_AHB)
629 ret = ath5k_hw_wisoc_reset(ah, 0);
630 else
631 ret = ath5k_hw_nic_reset(ah, 0);
632
633 if (ret) {
435 ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n"); 634 ATH5K_ERR(ah->ah_sc, "failed to warm reset the MAC Chip\n");
436 return -EIO; 635 return -EIO;
437 } 636 }
@@ -466,7 +665,8 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
466 * CCK headers) operation. We need to test 665 * CCK headers) operation. We need to test
467 * this, 5211 might support ofdm-only g after 666 * this, 5211 might support ofdm-only g after
468 * all, there are also initial register values 667 * all, there are also initial register values
469 * in the code for g mode (see initvals.c). */ 668 * in the code for g mode (see initvals.c).
669 */
470 if (ah->ah_version == AR5K_AR5211) 670 if (ah->ah_version == AR5K_AR5211)
471 mode |= AR5K_PHY_MODE_MOD_OFDM; 671 mode |= AR5K_PHY_MODE_MOD_OFDM;
472 else 672 else
@@ -479,6 +679,7 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
479 } else if (flags & CHANNEL_5GHZ) { 679 } else if (flags & CHANNEL_5GHZ) {
480 mode |= AR5K_PHY_MODE_FREQ_5GHZ; 680 mode |= AR5K_PHY_MODE_FREQ_5GHZ;
481 681
682 /* Different PLL setting for 5413 */
482 if (ah->ah_radio == AR5K_RF5413) 683 if (ah->ah_radio == AR5K_RF5413)
483 clock = AR5K_PHY_PLL_40MHZ_5413; 684 clock = AR5K_PHY_PLL_40MHZ_5413;
484 else 685 else
@@ -496,12 +697,29 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
496 return -EINVAL; 697 return -EINVAL;
497 } 698 }
498 699
499 if (flags & CHANNEL_TURBO) 700 /*XXX: Can bwmode be used with dynamic mode ?
500 turbo = AR5K_PHY_TURBO_MODE | AR5K_PHY_TURBO_SHORT; 701 * (I don't think it supports 44MHz) */
 702 		/* On 2425 initvals TURBO_SHORT is not present */
703 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
 704 			turbo = AR5K_PHY_TURBO_MODE |
 705 				((ah->ah_radio == AR5K_RF2425) ? 0 :
 706 				AR5K_PHY_TURBO_SHORT);
707 } else if (ah->ah_bwmode != AR5K_BWMODE_DEFAULT) {
708 if (ah->ah_radio == AR5K_RF5413) {
709 mode |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
710 AR5K_PHY_MODE_HALF_RATE :
711 AR5K_PHY_MODE_QUARTER_RATE;
712 } else if (ah->ah_version == AR5K_AR5212) {
713 clock |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
714 AR5K_PHY_PLL_HALF_RATE :
715 AR5K_PHY_PLL_QUARTER_RATE;
716 }
717 }
718
501 } else { /* Reset the device */ 719 } else { /* Reset the device */
502 720
503 /* ...enable Atheros turbo mode if requested */ 721 /* ...enable Atheros turbo mode if requested */
504 if (flags & CHANNEL_TURBO) 722 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
505 ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE, 723 ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
506 AR5K_PHY_TURBO); 724 AR5K_PHY_TURBO);
507 } 725 }
@@ -522,107 +740,10 @@ int ath5k_hw_nic_wakeup(struct ath5k_hw *ah, int flags, bool initial)
522 return 0; 740 return 0;
523} 741}
524 742
525/*
526 * If there is an external 32KHz crystal available, use it
527 * as ref. clock instead of 32/40MHz clock and baseband clocks
528 * to save power during sleep or restore normal 32/40MHz
529 * operation.
530 *
531 * XXX: When operating on 32KHz certain PHY registers (27 - 31,
532 * 123 - 127) require delay on access.
533 */
534static void ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
535{
536 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
537 u32 scal, spending, usec32;
538 743
539 /* Only set 32KHz settings if we have an external 744/**************************************\
540 * 32KHz crystal present */ 745* Post-initvals register modifications *
541 if ((AR5K_EEPROM_HAS32KHZCRYSTAL(ee->ee_misc1) || 746\**************************************/
542 AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(ee->ee_misc1)) &&
543 enable) {
544
545 /* 1 usec/cycle */
546 AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, 1);
547 /* Set up tsf increment on each cycle */
548 AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 61);
549
550 /* Set baseband sleep control registers
551 * and sleep control rate */
552 ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
553
554 if ((ah->ah_radio == AR5K_RF5112) ||
555 (ah->ah_radio == AR5K_RF5413) ||
556 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
557 spending = 0x14;
558 else
559 spending = 0x18;
560 ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
561
562 if ((ah->ah_radio == AR5K_RF5112) ||
563 (ah->ah_radio == AR5K_RF5413) ||
564 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
565 ath5k_hw_reg_write(ah, 0x26, AR5K_PHY_SLMT);
566 ath5k_hw_reg_write(ah, 0x0d, AR5K_PHY_SCAL);
567 ath5k_hw_reg_write(ah, 0x07, AR5K_PHY_SCLOCK);
568 ath5k_hw_reg_write(ah, 0x3f, AR5K_PHY_SDELAY);
569 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
570 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x02);
571 } else {
572 ath5k_hw_reg_write(ah, 0x0a, AR5K_PHY_SLMT);
573 ath5k_hw_reg_write(ah, 0x0c, AR5K_PHY_SCAL);
574 ath5k_hw_reg_write(ah, 0x03, AR5K_PHY_SCLOCK);
575 ath5k_hw_reg_write(ah, 0x20, AR5K_PHY_SDELAY);
576 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
577 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x03);
578 }
579
580 /* Enable sleep clock operation */
581 AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG,
582 AR5K_PCICFG_SLEEP_CLOCK_EN);
583
584 } else {
585
586 /* Disable sleep clock operation and
587 * restore default parameters */
588 AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
589 AR5K_PCICFG_SLEEP_CLOCK_EN);
590
591 AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
592 AR5K_PCICFG_SLEEP_CLOCK_RATE, 0);
593
594 ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
595 ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
596
597 if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
598 scal = AR5K_PHY_SCAL_32MHZ_2417;
599 else if (ee->ee_is_hb63)
600 scal = AR5K_PHY_SCAL_32MHZ_HB63;
601 else
602 scal = AR5K_PHY_SCAL_32MHZ;
603 ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
604
605 ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
606 ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
607
608 if ((ah->ah_radio == AR5K_RF5112) ||
609 (ah->ah_radio == AR5K_RF5413) ||
610 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
611 spending = 0x14;
612 else
613 spending = 0x18;
614 ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
615
616 if ((ah->ah_radio == AR5K_RF5112) ||
617 (ah->ah_radio == AR5K_RF5413))
618 usec32 = 39;
619 else
620 usec32 = 31;
621 AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, usec32);
622
623 AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
624 }
625}
626 747
627/* TODO: Half/Quarter rate */ 748/* TODO: Half/Quarter rate */
628static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah, 749static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
@@ -663,22 +784,10 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
663 AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG, 784 AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
664 AR5K_TXCFG_DCU_DBL_BUF_DIS); 785 AR5K_TXCFG_DCU_DBL_BUF_DIS);
665 786
666 /* Set DAC/ADC delays */
667 if (ah->ah_version == AR5K_AR5212) {
668 u32 scal;
669 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
670 if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
671 scal = AR5K_PHY_SCAL_32MHZ_2417;
672 else if (ee->ee_is_hb63)
673 scal = AR5K_PHY_SCAL_32MHZ_HB63;
674 else
675 scal = AR5K_PHY_SCAL_32MHZ;
676 ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
677 }
678
679 /* Set fast ADC */ 787 /* Set fast ADC */
680 if ((ah->ah_radio == AR5K_RF5413) || 788 if ((ah->ah_radio == AR5K_RF5413) ||
681 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) { 789 (ah->ah_radio == AR5K_RF2317) ||
790 (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
682 u32 fast_adc = true; 791 u32 fast_adc = true;
683 792
684 if (channel->center_freq == 2462 || 793 if (channel->center_freq == 2462 ||
@@ -706,33 +815,68 @@ static void ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
706 } 815 }
707 816
708 if (ah->ah_mac_srev < AR5K_SREV_AR5211) { 817 if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
709 u32 usec_reg;
710 /* 5311 has different tx/rx latency masks
 711 		 * from 5211, since we treat 5311 the same
712 * as 5211 when setting initvals, shift
713 * values here to their proper locations */
714 usec_reg = ath5k_hw_reg_read(ah, AR5K_USEC_5211);
715 ath5k_hw_reg_write(ah, usec_reg & (AR5K_USEC_1 |
716 AR5K_USEC_32 |
717 AR5K_USEC_TX_LATENCY_5211 |
718 AR5K_REG_SM(29,
719 AR5K_USEC_RX_LATENCY_5210)),
720 AR5K_USEC_5211);
721 /* Clear QCU/DCU clock gating register */ 818 /* Clear QCU/DCU clock gating register */
722 ath5k_hw_reg_write(ah, 0, AR5K_QCUDCU_CLKGT); 819 ath5k_hw_reg_write(ah, 0, AR5K_QCUDCU_CLKGT);
723 /* Set DAC/ADC delays */ 820 /* Set DAC/ADC delays */
724 ath5k_hw_reg_write(ah, 0x08, AR5K_PHY_SCAL); 821 ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ_5311,
822 AR5K_PHY_SCAL);
725 /* Enable PCU FIFO corruption ECO */ 823 /* Enable PCU FIFO corruption ECO */
726 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211, 824 AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
727 AR5K_DIAG_SW_ECO_ENABLE); 825 AR5K_DIAG_SW_ECO_ENABLE);
728 } 826 }
827
828 if (ah->ah_bwmode) {
829 /* Increase PHY switch and AGC settling time
830 * on turbo mode (ath5k_hw_commit_eeprom_settings
831 * will override settling time if available) */
832 if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
833
834 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
835 AR5K_PHY_SETTLING_AGC,
836 AR5K_AGC_SETTLING_TURBO);
837
838 /* XXX: Initvals indicate we only increase
839 * switch time on AR5212, 5211 and 5210
840 * only change agc time (bug?) */
841 if (ah->ah_version == AR5K_AR5212)
842 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
843 AR5K_PHY_SETTLING_SWITCH,
844 AR5K_SWITCH_SETTLING_TURBO);
845
846 if (ah->ah_version == AR5K_AR5210) {
847 /* Set Frame Control Register */
848 ath5k_hw_reg_write(ah,
849 (AR5K_PHY_FRAME_CTL_INI |
850 AR5K_PHY_TURBO_MODE |
851 AR5K_PHY_TURBO_SHORT | 0x2020),
852 AR5K_PHY_FRAME_CTL_5210);
853 }
854 /* On 5413 PHY force window length for half/quarter rate*/
855 } else if ((ah->ah_mac_srev >= AR5K_SREV_AR5424) &&
856 (ah->ah_mac_srev <= AR5K_SREV_AR5414)) {
857 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL_5211,
858 AR5K_PHY_FRAME_CTL_WIN_LEN,
859 3);
860 }
861 } else if (ah->ah_version == AR5K_AR5210) {
862 /* Set Frame Control Register for normal operation */
863 ath5k_hw_reg_write(ah, (AR5K_PHY_FRAME_CTL_INI | 0x1020),
864 AR5K_PHY_FRAME_CTL_5210);
865 }
729} 866}
730 867
731static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah, 868static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
732 struct ieee80211_channel *channel, u8 ee_mode) 869 struct ieee80211_channel *channel)
733{ 870{
734 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom; 871 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
735 s16 cck_ofdm_pwr_delta; 872 s16 cck_ofdm_pwr_delta;
873 u8 ee_mode;
874
875 /* TODO: Add support for AR5210 EEPROM */
876 if (ah->ah_version == AR5K_AR5210)
877 return;
878
879 ee_mode = ath5k_eeprom_mode_from_channel(channel);
736 880
737 /* Adjust power delta for channel 14 */ 881 /* Adjust power delta for channel 14 */
738 if (channel->center_freq == 2484) 882 if (channel->center_freq == 2484)
@@ -772,7 +916,7 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
772 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]), 916 AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
773 AR5K_PHY_NFTHRES); 917 AR5K_PHY_NFTHRES);
774 918
775 if ((channel->hw_value & CHANNEL_TURBO) && 919 if ((ah->ah_bwmode == AR5K_BWMODE_40MHZ) &&
776 (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0)) { 920 (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0)) {
777 /* Switch settling time (Turbo) */ 921 /* Switch settling time (Turbo) */
778 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING, 922 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
@@ -870,143 +1014,172 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
870 ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE); 1014 ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
871} 1015}
872 1016
873/* 1017
874 * Main reset function 1018/*********************\
875 */ 1019* Main reset function *
1020\*********************/
1021
876int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, 1022int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
877 struct ieee80211_channel *channel, bool change_channel) 1023 struct ieee80211_channel *channel, bool fast, bool skip_pcu)
878{ 1024{
879 struct ath_common *common = ath5k_hw_common(ah); 1025 u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
880 u32 s_seq[10], s_led[3], staid1_flags, tsf_up, tsf_lo; 1026 u8 mode;
881 u32 phy_tst1;
882 u8 mode, freq, ee_mode;
883 int i, ret; 1027 int i, ret;
884 1028
885 ee_mode = 0;
886 staid1_flags = 0;
887 tsf_up = 0; 1029 tsf_up = 0;
888 tsf_lo = 0; 1030 tsf_lo = 0;
889 freq = 0;
890 mode = 0; 1031 mode = 0;
891 1032
892 /* 1033 /*
893 * Save some registers before a reset 1034 * Sanity check for fast flag
1035 * Fast channel change only available
1036 * on AR2413/AR5413.
894 */ 1037 */
895 /*DCU/Antenna selection not available on 5210*/ 1038 if (fast && (ah->ah_radio != AR5K_RF2413) &&
896 if (ah->ah_version != AR5K_AR5210) { 1039 (ah->ah_radio != AR5K_RF5413))
1040 fast = 0;
897 1041
898 switch (channel->hw_value & CHANNEL_MODES) { 1042 /* Disable sleep clock operation
899 case CHANNEL_A: 1043 * to avoid register access delay on certain
900 mode = AR5K_MODE_11A; 1044 * PHY registers */
901 freq = AR5K_INI_RFGAIN_5GHZ; 1045 if (ah->ah_version == AR5K_AR5212)
902 ee_mode = AR5K_EEPROM_MODE_11A; 1046 ath5k_hw_set_sleep_clock(ah, false);
903 break; 1047
904 case CHANNEL_G: 1048 /*
905 mode = AR5K_MODE_11G; 1049 * Stop PCU
906 freq = AR5K_INI_RFGAIN_2GHZ; 1050 */
907 ee_mode = AR5K_EEPROM_MODE_11G; 1051 ath5k_hw_stop_rx_pcu(ah);
908 break; 1052
909 case CHANNEL_B: 1053 /*
910 mode = AR5K_MODE_11B; 1054 * Stop DMA
911 freq = AR5K_INI_RFGAIN_2GHZ; 1055 *
912 ee_mode = AR5K_EEPROM_MODE_11B; 1056 * Note: If DMA didn't stop continue
913 break; 1057 * since only a reset will fix it.
914 case CHANNEL_T: 1058 */
915 mode = AR5K_MODE_11A_TURBO; 1059 ret = ath5k_hw_dma_stop(ah);
916 freq = AR5K_INI_RFGAIN_5GHZ; 1060
917 ee_mode = AR5K_EEPROM_MODE_11A; 1061 /* RF Bus grant won't work if we have pending
918 break; 1062 * frames */
919 case CHANNEL_TG: 1063 if (ret && fast) {
920 if (ah->ah_version == AR5K_AR5211) { 1064 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
921 ATH5K_ERR(ah->ah_sc, 1065 "DMA didn't stop, falling back to normal reset\n");
922 "TurboG mode not available on 5211"); 1066 fast = 0;
923 return -EINVAL; 1067 /* Non fatal, just continue with
924 } 1068 * normal reset */
925 mode = AR5K_MODE_11G_TURBO; 1069 ret = 0;
926 freq = AR5K_INI_RFGAIN_2GHZ; 1070 }
927 ee_mode = AR5K_EEPROM_MODE_11G; 1071
928 break; 1072 switch (channel->hw_value & CHANNEL_MODES) {
929 case CHANNEL_XR: 1073 case CHANNEL_A:
930 if (ah->ah_version == AR5K_AR5211) { 1074 mode = AR5K_MODE_11A;
931 ATH5K_ERR(ah->ah_sc, 1075 break;
932 "XR mode not available on 5211"); 1076 case CHANNEL_G:
933 return -EINVAL; 1077
934 } 1078 if (ah->ah_version <= AR5K_AR5211) {
935 mode = AR5K_MODE_XR;
936 freq = AR5K_INI_RFGAIN_5GHZ;
937 ee_mode = AR5K_EEPROM_MODE_11A;
938 break;
939 default:
940 ATH5K_ERR(ah->ah_sc, 1079 ATH5K_ERR(ah->ah_sc,
941 "invalid channel: %d\n", channel->center_freq); 1080 "G mode not available on 5210/5211");
942 return -EINVAL; 1081 return -EINVAL;
943 } 1082 }
944 1083
945 if (change_channel) { 1084 mode = AR5K_MODE_11G;
946 /* 1085 break;
947 * Save frame sequence count 1086 case CHANNEL_B:
948 * For revs. after Oahu, only save
949 * seq num for DCU 0 (Global seq num)
950 */
951 if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
952
953 for (i = 0; i < 10; i++)
954 s_seq[i] = ath5k_hw_reg_read(ah,
955 AR5K_QUEUE_DCU_SEQNUM(i));
956 1087
957 } else { 1088 if (ah->ah_version < AR5K_AR5211) {
958 s_seq[0] = ath5k_hw_reg_read(ah, 1089 ATH5K_ERR(ah->ah_sc,
959 AR5K_QUEUE_DCU_SEQNUM(0)); 1090 "B mode not available on 5210");
960 } 1091 return -EINVAL;
1092 }
961 1093
962 /* TSF accelerates on AR5211 during reset 1094 mode = AR5K_MODE_11B;
963 * As a workaround save it here and restore 1095 break;
964 * it later so that it's back in time after 1096 case CHANNEL_XR:
965 * reset. This way it'll get re-synced on the 1097 if (ah->ah_version == AR5K_AR5211) {
966 * next beacon without breaking ad-hoc. 1098 ATH5K_ERR(ah->ah_sc,
967 * 1099 "XR mode not available on 5211");
968 * On AR5212 TSF is almost preserved across a 1100 return -EINVAL;
969 * reset so it stays back in time anyway and
970 * we don't have to save/restore it.
971 *
972 * XXX: Since this breaks power saving we have
973 * to disable power saving until we receive the
974 * next beacon, so we can resync beacon timers */
975 if (ah->ah_version == AR5K_AR5211) {
976 tsf_up = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
977 tsf_lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
978 }
979 } 1101 }
1102 mode = AR5K_MODE_XR;
1103 break;
1104 default:
1105 ATH5K_ERR(ah->ah_sc,
1106 "invalid channel: %d\n", channel->center_freq);
1107 return -EINVAL;
1108 }
980 1109
981 if (ah->ah_version == AR5K_AR5212) { 1110 /*
982 /* Restore normal 32/40MHz clock operation 1111 * If driver requested fast channel change and DMA has stopped
983 * to avoid register access delay on certain 1112 * go on. If it fails continue with a normal reset.
984 * PHY registers */ 1113 */
985 ath5k_hw_set_sleep_clock(ah, false); 1114 if (fast) {
1115 ret = ath5k_hw_phy_init(ah, channel, mode, true);
1116 if (ret) {
1117 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_RESET,
1118 "fast chan change failed, falling back to normal reset\n");
1119 /* Non fatal, can happen eg.
1120 * on mode change */
1121 ret = 0;
1122 } else
1123 return 0;
1124 }
986 1125
987 /* Since we are going to write rf buffer 1126 /*
988 * check if we have any pending gain_F 1127 * Save some registers before a reset
989 * optimization settings */ 1128 */
990 if (change_channel && ah->ah_rf_banks != NULL) 1129 if (ah->ah_version != AR5K_AR5210) {
991 ath5k_hw_gainf_calibrate(ah); 1130 /*
1131 * Save frame sequence count
1132 * For revs. after Oahu, only save
1133 * seq num for DCU 0 (Global seq num)
1134 */
1135 if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
1136
1137 for (i = 0; i < 10; i++)
1138 s_seq[i] = ath5k_hw_reg_read(ah,
1139 AR5K_QUEUE_DCU_SEQNUM(i));
1140
1141 } else {
1142 s_seq[0] = ath5k_hw_reg_read(ah,
1143 AR5K_QUEUE_DCU_SEQNUM(0));
1144 }
1145
1146 /* TSF accelerates on AR5211 during reset
1147 * As a workaround save it here and restore
1148 * it later so that it's back in time after
1149 * reset. This way it'll get re-synced on the
1150 * next beacon without breaking ad-hoc.
1151 *
1152 * On AR5212 TSF is almost preserved across a
1153 * reset so it stays back in time anyway and
1154 * we don't have to save/restore it.
1155 *
1156 * XXX: Since this breaks power saving we have
1157 * to disable power saving until we receive the
1158 * next beacon, so we can resync beacon timers */
1159 if (ah->ah_version == AR5K_AR5211) {
1160 tsf_up = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
1161 tsf_lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
992 } 1162 }
993 } 1163 }
994 1164
1165
995 /*GPIOs*/ 1166 /*GPIOs*/
996 s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) & 1167 s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) &
997 AR5K_PCICFG_LEDSTATE; 1168 AR5K_PCICFG_LEDSTATE;
998 s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR); 1169 s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
999 s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO); 1170 s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
1000 1171
1001 /* AR5K_STA_ID1 flags, only preserve antenna 1172
1002 * settings and ack/cts rate mode */ 1173 /*
1003 staid1_flags = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 1174 * Since we are going to write rf buffer
1004 (AR5K_STA_ID1_DEFAULT_ANTENNA | 1175 * check if we have any pending gain_F
1005 AR5K_STA_ID1_DESC_ANTENNA | 1176 * optimization settings
1006 AR5K_STA_ID1_RTS_DEF_ANTENNA | 1177 */
1007 AR5K_STA_ID1_ACKCTS_6MB | 1178 if (ah->ah_version == AR5K_AR5212 &&
1008 AR5K_STA_ID1_BASE_RATE_11B | 1179 (ah->ah_radio <= AR5K_RF5112)) {
1009 AR5K_STA_ID1_SELFGEN_DEF_ANT); 1180 if (!fast && ah->ah_rf_banks != NULL)
1181 ath5k_hw_gainf_calibrate(ah);
1182 }
1010 1183
1011 /* Wakeup the device */ 1184 /* Wakeup the device */
1012 ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false); 1185 ret = ath5k_hw_nic_wakeup(ah, channel->hw_value, false);
@@ -1021,121 +1194,42 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1021 AR5K_PHY(0)); 1194 AR5K_PHY(0));
1022 1195
1023 /* Write initial settings */ 1196 /* Write initial settings */
1024 ret = ath5k_hw_write_initvals(ah, mode, change_channel); 1197 ret = ath5k_hw_write_initvals(ah, mode, skip_pcu);
1025 if (ret) 1198 if (ret)
1026 return ret; 1199 return ret;
1027 1200
1201 /* Initialize core clock settings */
1202 ath5k_hw_init_core_clock(ah);
1203
1028 /* 1204 /*
1029 * 5211/5212 Specific 1205 * Tweak initval settings for revised
1206 * chipsets and add some more config
1207 * bits
1030 */ 1208 */
1031 if (ah->ah_version != AR5K_AR5210) { 1209 ath5k_hw_tweak_initval_settings(ah, channel);
1032
1033 /*
1034 * Write initial RF gain settings
1035 * This should work for both 5111/5112
1036 */
1037 ret = ath5k_hw_rfgain_init(ah, freq);
1038 if (ret)
1039 return ret;
1040
1041 mdelay(1);
1042
1043 /*
1044 * Tweak initval settings for revised
1045 * chipsets and add some more config
1046 * bits
1047 */
1048 ath5k_hw_tweak_initval_settings(ah, channel);
1049
1050 /*
1051 * Set TX power
1052 */
1053 ret = ath5k_hw_txpower(ah, channel, ee_mode,
1054 ah->ah_txpower.txp_max_pwr / 2);
1055 if (ret)
1056 return ret;
1057 1210
1058 /* Write rate duration table only on AR5212 and if 1211 /* Commit values from EEPROM */
1059 * virtual interface has already been brought up 1212 ath5k_hw_commit_eeprom_settings(ah, channel);
1060 * XXX: rethink this after new mode changes to
1061 * mac80211 are integrated */
1062 if (ah->ah_version == AR5K_AR5212 &&
1063 ah->ah_sc->nvifs)
1064 ath5k_hw_write_rate_duration(ah, mode);
1065 1213
1066 /*
1067 * Write RF buffer
1068 */
1069 ret = ath5k_hw_rfregs_init(ah, channel, mode);
1070 if (ret)
1071 return ret;
1072
1073
1074 /* Write OFDM timings on 5212*/
1075 if (ah->ah_version == AR5K_AR5212 &&
1076 channel->hw_value & CHANNEL_OFDM) {
1077
1078 ret = ath5k_hw_write_ofdm_timings(ah, channel);
1079 if (ret)
1080 return ret;
1081
1082 /* Spur info is available only from EEPROM versions
1083 * greater than 5.3, but the EEPROM routines will use
1084 * static values for older versions */
1085 if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
1086 ath5k_hw_set_spur_mitigation_filter(ah,
1087 channel);
1088 }
1089
1090 /*Enable/disable 802.11b mode on 5111
1091 (enable 2111 frequency converter + CCK)*/
1092 if (ah->ah_radio == AR5K_RF5111) {
1093 if (mode == AR5K_MODE_11B)
1094 AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
1095 AR5K_TXCFG_B_MODE);
1096 else
1097 AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
1098 AR5K_TXCFG_B_MODE);
1099 }
1100
1101 /* Commit values from EEPROM */
1102 ath5k_hw_commit_eeprom_settings(ah, channel, ee_mode);
1103
1104 } else {
1105 /*
1106 * For 5210 we do all initialization using
1107 * initvals, so we don't have to modify
1108 * any settings (5210 also only supports
1109 * a/aturbo modes)
1110 */
1111 mdelay(1);
1112 /* Disable phy and wait */
1113 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
1114 mdelay(1);
1115 }
1116 1214
1117 /* 1215 /*
1118 * Restore saved values 1216 * Restore saved values
1119 */ 1217 */
1120 1218
1121 /*DCU/Antenna selection not available on 5210*/ 1219 /* Seqnum, TSF */
1122 if (ah->ah_version != AR5K_AR5210) { 1220 if (ah->ah_version != AR5K_AR5210) {
1221 if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
1222 for (i = 0; i < 10; i++)
1223 ath5k_hw_reg_write(ah, s_seq[i],
1224 AR5K_QUEUE_DCU_SEQNUM(i));
1225 } else {
1226 ath5k_hw_reg_write(ah, s_seq[0],
1227 AR5K_QUEUE_DCU_SEQNUM(0));
1228 }
1123 1229
1124 if (change_channel) { 1230 if (ah->ah_version == AR5K_AR5211) {
1125 if (ah->ah_mac_srev < AR5K_SREV_AR5211) { 1231 ath5k_hw_reg_write(ah, tsf_up, AR5K_TSF_U32);
1126 for (i = 0; i < 10; i++) 1232 ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
1127 ath5k_hw_reg_write(ah, s_seq[i],
1128 AR5K_QUEUE_DCU_SEQNUM(i));
1129 } else {
1130 ath5k_hw_reg_write(ah, s_seq[0],
1131 AR5K_QUEUE_DCU_SEQNUM(0));
1132 }
1133
1134
1135 if (ah->ah_version == AR5K_AR5211) {
1136 ath5k_hw_reg_write(ah, tsf_up, AR5K_TSF_U32);
1137 ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
1138 }
1139 } 1233 }
1140 } 1234 }
1141 1235
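[Editor's aside] The restore above mirrors the save done before the wakeup: per-queue DCU sequence numbers (all ten queues on MACs older than AR5211, only queue 0 afterwards) and, on AR5211, the TSF, which is exposed as two 32-bit registers. A hedged note on that last point: if the full counter were needed, the natural composition would be something like

	/* Sketch, not taken from this patch: AR5K_TSF_U32 holds the
	 * upper and AR5K_TSF_L32 the lower half of the 64-bit TSF */
	u64 tsf = ((u64)tsf_up << 32) | tsf_lo;

A real read helper would additionally have to guard against the low word wrapping between the two register reads, which a one-shot sketch like this does not.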
@@ -1146,203 +1240,34 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1146 ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR); 1240 ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
1147 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO); 1241 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
1148 1242
1149 /* Restore sta_id flags and preserve our mac address*/
1150 ath5k_hw_reg_write(ah,
1151 get_unaligned_le32(common->macaddr),
1152 AR5K_STA_ID0);
1153 ath5k_hw_reg_write(ah,
1154 staid1_flags | get_unaligned_le16(common->macaddr + 4),
1155 AR5K_STA_ID1);
1156
1157
1158 /* 1243 /*
1159 * Configure PCU 1244 * Initialize PCU
1160 */ 1245 */
1161 1246 ath5k_hw_pcu_init(ah, op_mode, mode);
1162 /* Restore bssid and bssid mask */
1163 ath5k_hw_set_bssid(ah);
1164
1165 /* Set PCU config */
1166 ath5k_hw_set_opmode(ah, op_mode);
1167
1168 /* Clear any pending interrupts
1169 * PISR/SISR Not available on 5210 */
1170 if (ah->ah_version != AR5K_AR5210)
1171 ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
1172
1173 /* Set RSSI/BRSSI thresholds
1174 *
1175 * Note: If we decide to set this value
1176 * dynamically, keep in mind that when AR5K_RSSI_THR
1177 * register is read, it might return 0x40 if we haven't
1178 * written anything to it. Also, BMISS RSSI threshold is zeroed.
1179 * So doing a save/restore procedure here isn't the right
1180 * choice. Instead, store it in ath5k_hw */
1181 ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
1182 AR5K_TUNE_BMISS_THRES <<
1183 AR5K_RSSI_THR_BMISS_S),
1184 AR5K_RSSI_THR);
1185
1186 /* MIC QoS support */
1187 if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
1188 ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
1189 ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
1190 }
1191
1192 /* QoS NOACK Policy */
1193 if (ah->ah_version == AR5K_AR5212) {
1194 ath5k_hw_reg_write(ah,
1195 AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
1196 AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
1197 AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
1198 AR5K_QOS_NOACK);
1199 }
1200
1201 1247
1202 /* 1248 /*
1203 * Configure PHY 1249 * Initialize PHY
1204 */ 1250 */
1205 1251 ret = ath5k_hw_phy_init(ah, channel, mode, false);
1206 /* Set channel on PHY */ 1252 if (ret) {
1207 ret = ath5k_hw_channel(ah, channel); 1253 ATH5K_ERR(ah->ah_sc,
1208 if (ret) 1254 "failed to initialize PHY (%i) !\n", ret);
1209 return ret; 1255 return ret;
1210
1211 /*
1212 * Enable the PHY and wait until completion
1213 * This includes BaseBand and Synthesizer
1214 * activation.
1215 */
1216 ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
1217
1218 /*
1219 * On 5211+ read activation -> rx delay
1220 * and use it.
1221 *
1222 * TODO: Half/quarter rate support
1223 */
1224 if (ah->ah_version != AR5K_AR5210) {
1225 u32 delay;
1226 delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
1227 AR5K_PHY_RX_DELAY_M;
1228 delay = (channel->hw_value & CHANNEL_CCK) ?
1229 ((delay << 2) / 22) : (delay / 10);
1230
1231 udelay(100 + (2 * delay));
1232 } else {
1233 mdelay(1);
1234 } 1256 }
1235 1257
1236 /* 1258 /*
1237 * Perform ADC test to see if baseband is ready
1238 * Set TX hold and check ADC test register
1239 */
1240 phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
1241 ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
1242 for (i = 0; i <= 20; i++) {
1243 if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
1244 break;
1245 udelay(200);
1246 }
1247 ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
1248
1249 /*
1250 * Start automatic gain control calibration
1251 *
1252 * During AGC calibration RX path is re-routed to
1253 * a power detector so we don't receive anything.
1254 *
1255 * This method is used to calibrate some static offsets
1256 * used together with on-the fly I/Q calibration (the
1257 * one performed via ath5k_hw_phy_calibrate), which doesn't
1258 * interrupt rx path.
1259 *
1260 * While rx path is re-routed to the power detector we also
1261 * start a noise floor calibration to measure the
1262 * card's noise floor (the noise we measure when we are not
1263 * transmitting or receiving anything).
1264 *
1265 * If we are in a noisy environment, AGC calibration may time
1266 * out and/or noise floor calibration might timeout.
1267 */
1268 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1269 AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
1270
1271 /* At the same time start I/Q calibration for QAM constellation
1272 * -no need for CCK- */
1273 ah->ah_calibration = false;
1274 if (!(mode == AR5K_MODE_11B)) {
1275 ah->ah_calibration = true;
1276 AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
1277 AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
1278 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
1279 AR5K_PHY_IQ_RUN);
1280 }
1281
1282 /* Wait for gain calibration to finish (we check for I/Q calibration
1283 * during ath5k_phy_calibrate) */
1284 if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
1285 AR5K_PHY_AGCCTL_CAL, 0, false)) {
1286 ATH5K_ERR(ah->ah_sc, "gain calibration timeout (%uMHz)\n",
1287 channel->center_freq);
1288 }
1289
1290 /* Restore antenna mode */
1291 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
1292
1293 /* Restore slot time and ACK timeouts */
1294 if (ah->ah_coverage_class > 0)
1295 ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
1296
1297 /*
1298 * Configure QCUs/DCUs 1259 * Configure QCUs/DCUs
1299 */ 1260 */
1261 ret = ath5k_hw_init_queues(ah);
1262 if (ret)
1263 return ret;
1300 1264
1301 /* TODO: HW Compression support for data queues */
1302 /* TODO: Burst prefetch for data queues */
1303
1304 /*
1305 * Reset queues and start beacon timers at the end of the reset routine
1306 * This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
1307 * Note: If we want we can assign multiple qcus on one dcu.
1308 */
1309 for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
1310 ret = ath5k_hw_reset_tx_queue(ah, i);
1311 if (ret) {
1312 ATH5K_ERR(ah->ah_sc,
1313 "failed to reset TX queue #%d\n", i);
1314 return ret;
1315 }
1316 }
1317
1318
1319 /*
1320 * Configure DMA/Interrupts
1321 */
1322 1265
1323 /* 1266 /*
1324 * Set Rx/Tx DMA Configuration 1267 * Initialize DMA/Interrupts
1325 *
1326 * Set standard DMA size (128). Note that
1327 * a DMA size of 512 causes rx overruns and tx errors
1328 * on pci-e cards (tested on 5424 but since rx overruns
1329 * also occur on 5416/5418 with madwifi we set 128
1330 * for all PCI-E cards to be safe).
1331 *
1332 * XXX: need to check 5210 for this
1333 * TODO: Check out tx triger level, it's always 64 on dumps but I
1334 * guess we can tweak it and see how it goes ;-)
1335 */ 1268 */
1336 if (ah->ah_version != AR5K_AR5210) { 1269 ath5k_hw_dma_init(ah);
1337 AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
1338 AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
1339 AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
1340 AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
1341 }
1342 1270
1343 /* Pre-enable interrupts on 5211/5212*/
1344 if (ah->ah_version != AR5K_AR5210)
1345 ath5k_hw_set_imr(ah, ah->ah_imr);
1346 1271
1347 /* Enable 32KHz clock function for AR5212+ chips 1272 /* Enable 32KHz clock function for AR5212+ chips
1348 * Set clocks to 32KHz operation and use an 1273 * Set clocks to 32KHz operation and use an
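[Editor's aside] Taken together, the reset.c hunks above replace several pages of inline configuration with four helpers: PCU setup (operating mode, BSSID/mask, RSSI/BMISS thresholds, MIC QoS), PHY setup (channel programming, PHY activation, AGC/NF and I/Q calibration), queue setup, and DMA/interrupt setup. Read column-wise, the tail of ath5k_hw_reset() now reduces to roughly the following; the calls and error handling are copied from the added right-hand lines, while the comments only summarise what the deleted left-hand lines used to do inline (exactly how that work is split among the helpers is not visible in this diff):

	/* PCU: opmode, BSSID/mask, RSSI thresholds, MIC QoS, ... */
	ath5k_hw_pcu_init(ah, op_mode, mode);

	/* PHY: channel, activation delay, AGC/NF and I/Q calibration */
	ret = ath5k_hw_phy_init(ah, channel, mode, false);
	if (ret) {
		ATH5K_ERR(ah->ah_sc,
			"failed to initialize PHY (%i) !\n", ret);
		return ret;
	}

	/* QCUs/DCUs: per-queue reset, 1:1 QCU<->DCU mapping */
	ret = ath5k_hw_init_queues(ah);
	if (ret)
		return ret;

	/* DMA: RX/TX DMA burst sizes, interrupt setup */
	ath5k_hw_dma_init(ah);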
diff --git a/drivers/net/wireless/ath/ath5k/rfbuffer.h b/drivers/net/wireless/ath/ath5k/rfbuffer.h
index 3ac4cff4239d..16b67e84906d 100644
--- a/drivers/net/wireless/ath/ath5k/rfbuffer.h
+++ b/drivers/net/wireless/ath/ath5k/rfbuffer.h
@@ -51,7 +51,7 @@
51struct ath5k_ini_rfbuffer { 51struct ath5k_ini_rfbuffer {
52 u8 rfb_bank; /* RF Bank number */ 52 u8 rfb_bank; /* RF Bank number */
53 u16 rfb_ctrl_register; /* RF Buffer control register */ 53 u16 rfb_ctrl_register; /* RF Buffer control register */
54 u32 rfb_mode_data[5]; /* RF Buffer data for each mode */ 54 u32 rfb_mode_data[3]; /* RF Buffer data for each mode */
55}; 55};
56 56
57/* 57/*
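[Editor's aside] This first rfbuffer.h hunk is the key to all of the table rewrites that follow: rfb_mode_data shrinks from five per-mode words to three because the separate aTurbo/gTurbo columns are dropped, presumably so that turbo can be toggled through the new BANK 2 RF_TURBO field added below rather than kept as duplicate columns. After the hunk the structure reads as follows, and the three remaining columns line up with the "A/XR  B  G" header comments added to each table; the enum is only a hypothetical naming of those column indices, not something this patch defines:

struct ath5k_ini_rfbuffer {
	u8	rfb_bank;		/* RF Bank number */
	u16	rfb_ctrl_register;	/* RF Buffer control register */
	u32	rfb_mode_data[3];	/* RF Buffer data for each mode */
};

/* Hypothetical column names, for illustration only */
enum { RFB_MODE_A_XR = 0, RFB_MODE_B = 1, RFB_MODE_G = 2 };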
@@ -79,8 +79,10 @@ struct ath5k_rf_reg {
79 * life easier by using an index for each register 79 * life easier by using an index for each register
80 * instead of a full rfb_field */ 80 * instead of a full rfb_field */
81enum ath5k_rf_regs_idx { 81enum ath5k_rf_regs_idx {
82 /* BANK 2 */
83 AR5K_RF_TURBO = 0,
82 /* BANK 6 */ 84 /* BANK 6 */
83 AR5K_RF_OB_2GHZ = 0, 85 AR5K_RF_OB_2GHZ,
84 AR5K_RF_OB_5GHZ, 86 AR5K_RF_OB_5GHZ,
85 AR5K_RF_DB_2GHZ, 87 AR5K_RF_DB_2GHZ,
86 AR5K_RF_DB_5GHZ, 88 AR5K_RF_DB_5GHZ,
@@ -134,6 +136,9 @@ enum ath5k_rf_regs_idx {
134* RF5111 (Sombrero) * 136* RF5111 (Sombrero) *
135\*******************/ 137\*******************/
136 138
139/* BANK 2 len pos col */
140#define AR5K_RF5111_RF_TURBO { 1, 3, 0 }
141
137/* BANK 6 len pos col */ 142/* BANK 6 len pos col */
138#define AR5K_RF5111_OB_2GHZ { 3, 119, 0 } 143#define AR5K_RF5111_OB_2GHZ { 3, 119, 0 }
139#define AR5K_RF5111_DB_2GHZ { 3, 122, 0 } 144#define AR5K_RF5111_DB_2GHZ { 3, 122, 0 }
@@ -158,6 +163,7 @@ enum ath5k_rf_regs_idx {
158#define AR5K_RF5111_MAX_TIME { 2, 49, 0 } 163#define AR5K_RF5111_MAX_TIME { 2, 49, 0 }
159 164
160static const struct ath5k_rf_reg rf_regs_5111[] = { 165static const struct ath5k_rf_reg rf_regs_5111[] = {
166 {2, AR5K_RF_TURBO, AR5K_RF5111_RF_TURBO},
161 {6, AR5K_RF_OB_2GHZ, AR5K_RF5111_OB_2GHZ}, 167 {6, AR5K_RF_OB_2GHZ, AR5K_RF5111_OB_2GHZ},
162 {6, AR5K_RF_DB_2GHZ, AR5K_RF5111_DB_2GHZ}, 168 {6, AR5K_RF_DB_2GHZ, AR5K_RF5111_DB_2GHZ},
163 {6, AR5K_RF_OB_5GHZ, AR5K_RF5111_OB_5GHZ}, 169 {6, AR5K_RF_OB_5GHZ, AR5K_RF5111_OB_5GHZ},
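[Editor's aside] Each rf_regs_* entry above ties a bank number and a symbolic field index to a {len, pos, col} descriptor; for RF5111 the new turbo switch is a single bit at position 3 of bank 2. The struct ath5k_rf_reg members and the buffer-manipulation helpers are not part of this diff, so the following is only a guess at how such a descriptor might be applied to a bank's bit image; the sketch_ names and kernel-style types are invented for the illustration:

struct sketch_rf_field { u8 len, pos, col; };	/* mirrors the len/pos/col triples */

/* Write 'val' into a field of a bank image held as 32-bit words (guess) */
static void sketch_rf_buf_write_field(u32 *bank, struct sketch_rf_field f, u32 val)
{
	u8 i;

	for (i = 0; i < f.len; i++) {
		u32 bit = f.pos + i;

		if (val & (1U << i))
			bank[bit >> 5] |= 1U << (bit & 31);
		else
			bank[bit >> 5] &= ~(1U << (bit & 31));
	}
}

Whatever the real helper looks like, the point of the new {2, AR5K_RF_TURBO, ...} rows appears to be that a single bank-2 bit now selects turbo operation, which is what lets the tables below drop their aTurbo/gTurbo columns.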
@@ -177,97 +183,52 @@ static const struct ath5k_rf_reg rf_regs_5111[] = {
177 183
178/* Default mode specific settings */ 184/* Default mode specific settings */
179static const struct ath5k_ini_rfbuffer rfb_5111[] = { 185static const struct ath5k_ini_rfbuffer rfb_5111[] = {
180 { 0, 0x989c, 186 /* BANK / C.R. A/XR B G */
181 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 187 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
182 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 188 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
183 { 0, 0x989c, 189 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
184 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 190 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
185 { 0, 0x989c, 191 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
186 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 192 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
187 { 0, 0x989c, 193 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
188 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 194 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
189 { 0, 0x989c, 195 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
190 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 196 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
191 { 0, 0x989c, 197 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
192 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 198 { 0, 0x989c, { 0x00380000, 0x00380000, 0x00380000 } },
193 { 0, 0x989c, 199 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
194 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 200 { 0, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
195 { 0, 0x989c, 201 { 0, 0x989c, { 0x00000000, 0x000000c0, 0x00000080 } },
196 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 202 { 0, 0x989c, { 0x000400f9, 0x000400ff, 0x000400fd } },
197 { 0, 0x989c, 203 { 0, 0x98d4, { 0x00000000, 0x00000004, 0x00000004 } },
198 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 204 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
199 { 0, 0x989c, 205 { 2, 0x98d4, { 0x00000010, 0x00000010, 0x00000010 } },
200 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 206 { 3, 0x98d8, { 0x00601068, 0x00601068, 0x00601068 } },
201 { 0, 0x989c, 207 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
202 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 208 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
203 { 0, 0x989c, 209 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
204 { 0x00380000, 0x00380000, 0x00380000, 0x00380000, 0x00380000 } }, 210 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
205 { 0, 0x989c, 211 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
206 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 212 { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
207 { 0, 0x989c, 213 { 6, 0x989c, { 0x04000000, 0x04000000, 0x04000000 } },
208 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 214 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
209 { 0, 0x989c, 215 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
210 { 0x00000000, 0x00000000, 0x000000c0, 0x00000080, 0x00000080 } }, 216 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
211 { 0, 0x989c, 217 { 6, 0x989c, { 0x00000000, 0x0a000000, 0x00000000 } },
212 { 0x000400f9, 0x000400f9, 0x000400ff, 0x000400fd, 0x000400fd } }, 218 { 6, 0x989c, { 0x003800c0, 0x023800c0, 0x003800c0 } },
213 { 0, 0x98d4, 219 { 6, 0x989c, { 0x00020006, 0x00000006, 0x00020006 } },
214 { 0x00000000, 0x00000000, 0x00000004, 0x00000004, 0x00000004 } }, 220 { 6, 0x989c, { 0x00000089, 0x00000089, 0x00000089 } },
215 { 1, 0x98d4, 221 { 6, 0x989c, { 0x000000a0, 0x000000a0, 0x000000a0 } },
216 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 222 { 6, 0x989c, { 0x00040007, 0x00040007, 0x00040007 } },
217 { 2, 0x98d4, 223 { 6, 0x98d4, { 0x0000001a, 0x0000001a, 0x0000001a } },
218 { 0x00000010, 0x00000014, 0x00000010, 0x00000010, 0x00000014 } }, 224 { 7, 0x989c, { 0x00000040, 0x00000040, 0x00000040 } },
219 { 3, 0x98d8, 225 { 7, 0x989c, { 0x00000010, 0x00000010, 0x00000010 } },
220 { 0x00601068, 0x00601068, 0x00601068, 0x00601068, 0x00601068 } }, 226 { 7, 0x989c, { 0x00000008, 0x00000008, 0x00000008 } },
221 { 6, 0x989c, 227 { 7, 0x989c, { 0x0000004f, 0x0000004f, 0x0000004f } },
222 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 228 { 7, 0x989c, { 0x000000f1, 0x00000061, 0x000000f1 } },
223 { 6, 0x989c, 229 { 7, 0x989c, { 0x0000904f, 0x0000904c, 0x0000904f } },
224 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 230 { 7, 0x989c, { 0x0000125a, 0x0000129a, 0x0000125a } },
225 { 6, 0x989c, 231 { 7, 0x98cc, { 0x0000000e, 0x0000000f, 0x0000000e } },
226 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
227 { 6, 0x989c,
228 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
229 { 6, 0x989c,
230 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
231 { 6, 0x989c,
232 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } },
233 { 6, 0x989c,
234 { 0x04000000, 0x04000000, 0x04000000, 0x04000000, 0x04000000 } },
235 { 6, 0x989c,
236 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
237 { 6, 0x989c,
238 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
239 { 6, 0x989c,
240 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
241 { 6, 0x989c,
242 { 0x00000000, 0x00000000, 0x0a000000, 0x00000000, 0x00000000 } },
243 { 6, 0x989c,
244 { 0x003800c0, 0x00380080, 0x023800c0, 0x003800c0, 0x003800c0 } },
245 { 6, 0x989c,
246 { 0x00020006, 0x00020006, 0x00000006, 0x00020006, 0x00020006 } },
247 { 6, 0x989c,
248 { 0x00000089, 0x00000089, 0x00000089, 0x00000089, 0x00000089 } },
249 { 6, 0x989c,
250 { 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0, 0x000000a0 } },
251 { 6, 0x989c,
252 { 0x00040007, 0x00040007, 0x00040007, 0x00040007, 0x00040007 } },
253 { 6, 0x98d4,
254 { 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a, 0x0000001a } },
255 { 7, 0x989c,
256 { 0x00000040, 0x00000048, 0x00000040, 0x00000040, 0x00000040 } },
257 { 7, 0x989c,
258 { 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 } },
259 { 7, 0x989c,
260 { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } },
261 { 7, 0x989c,
262 { 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f, 0x0000004f } },
263 { 7, 0x989c,
264 { 0x000000f1, 0x000000f1, 0x00000061, 0x000000f1, 0x000000f1 } },
265 { 7, 0x989c,
266 { 0x0000904f, 0x0000904f, 0x0000904c, 0x0000904f, 0x0000904f } },
267 { 7, 0x989c,
268 { 0x0000125a, 0x0000125a, 0x0000129a, 0x0000125a, 0x0000125a } },
269 { 7, 0x98cc,
270 { 0x0000000e, 0x0000000e, 0x0000000f, 0x0000000e, 0x0000000e } },
271}; 232};
272 233
273 234
@@ -276,6 +237,9 @@ static const struct ath5k_ini_rfbuffer rfb_5111[] = {
276* RF5112/RF2112 (Derby) * 237* RF5112/RF2112 (Derby) *
277\***********************/ 238\***********************/
278 239
240/* BANK 2 (Common) len pos col */
241#define AR5K_RF5112X_RF_TURBO { 1, 1, 2 }
242
279/* BANK 7 (Common) len pos col */ 243/* BANK 7 (Common) len pos col */
280#define AR5K_RF5112X_GAIN_I { 6, 14, 0 } 244#define AR5K_RF5112X_GAIN_I { 6, 14, 0 }
281#define AR5K_RF5112X_MIXVGA_OVR { 1, 36, 0 } 245#define AR5K_RF5112X_MIXVGA_OVR { 1, 36, 0 }
@@ -307,6 +271,7 @@ static const struct ath5k_ini_rfbuffer rfb_5111[] = {
307#define AR5K_RF5112_PWD(_n) { 1, (302 - _n), 3 } 271#define AR5K_RF5112_PWD(_n) { 1, (302 - _n), 3 }
308 272
309static const struct ath5k_rf_reg rf_regs_5112[] = { 273static const struct ath5k_rf_reg rf_regs_5112[] = {
274 {2, AR5K_RF_TURBO, AR5K_RF5112X_RF_TURBO},
310 {6, AR5K_RF_OB_2GHZ, AR5K_RF5112_OB_2GHZ}, 275 {6, AR5K_RF_OB_2GHZ, AR5K_RF5112_OB_2GHZ},
311 {6, AR5K_RF_DB_2GHZ, AR5K_RF5112_DB_2GHZ}, 276 {6, AR5K_RF_DB_2GHZ, AR5K_RF5112_DB_2GHZ},
312 {6, AR5K_RF_OB_5GHZ, AR5K_RF5112_OB_5GHZ}, 277 {6, AR5K_RF_OB_5GHZ, AR5K_RF5112_OB_5GHZ},
@@ -335,115 +300,61 @@ static const struct ath5k_rf_reg rf_regs_5112[] = {
335 300
336/* Default mode specific settings */ 301/* Default mode specific settings */
337static const struct ath5k_ini_rfbuffer rfb_5112[] = { 302static const struct ath5k_ini_rfbuffer rfb_5112[] = {
338 { 1, 0x98d4, 303 /* BANK / C.R. A/XR B G */
339 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 304 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
340 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 305 { 2, 0x98d0, { 0x03060408, 0x03060408, 0x03060408 } },
341 { 2, 0x98d0, 306 { 3, 0x98dc, { 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0 } },
342 { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } }, 307 { 6, 0x989c, { 0x00a00000, 0x00a00000, 0x00a00000 } },
343 { 3, 0x98dc, 308 { 6, 0x989c, { 0x000a0000, 0x000a0000, 0x000a0000 } },
344 { 0x00a0c0c0, 0x00a0c0c0, 0x00e0c0c0, 0x00e0c0c0, 0x00e0c0c0 } }, 309 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
345 { 6, 0x989c, 310 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
346 { 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000, 0x00a00000 } }, 311 { 6, 0x989c, { 0x00660000, 0x00660000, 0x00660000 } },
347 { 6, 0x989c, 312 { 6, 0x989c, { 0x00db0000, 0x00db0000, 0x00db0000 } },
348 { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } }, 313 { 6, 0x989c, { 0x00f10000, 0x00f10000, 0x00f10000 } },
349 { 6, 0x989c, 314 { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
350 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 315 { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
351 { 6, 0x989c, 316 { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
352 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 317 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
353 { 6, 0x989c, 318 { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
354 { 0x00660000, 0x00660000, 0x00660000, 0x00660000, 0x00660000 } }, 319 { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
355 { 6, 0x989c, 320 { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
356 { 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000, 0x00db0000 } }, 321 { 6, 0x989c, { 0x008b0000, 0x008b0000, 0x008b0000 } },
357 { 6, 0x989c, 322 { 6, 0x989c, { 0x00600000, 0x00600000, 0x00600000 } },
358 { 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000, 0x00f10000 } }, 323 { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
359 { 6, 0x989c, 324 { 6, 0x989c, { 0x00840000, 0x00840000, 0x00840000 } },
360 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } }, 325 { 6, 0x989c, { 0x00640000, 0x00640000, 0x00640000 } },
361 { 6, 0x989c, 326 { 6, 0x989c, { 0x00200000, 0x00200000, 0x00200000 } },
362 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } }, 327 { 6, 0x989c, { 0x00240000, 0x00240000, 0x00240000 } },
363 { 6, 0x989c, 328 { 6, 0x989c, { 0x00250000, 0x00250000, 0x00250000 } },
364 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } }, 329 { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
365 { 6, 0x989c, 330 { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
366 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 331 { 6, 0x989c, { 0x00510000, 0x00510000, 0x00510000 } },
367 { 6, 0x989c, 332 { 6, 0x989c, { 0x1c040000, 0x1c040000, 0x1c040000 } },
368 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } }, 333 { 6, 0x989c, { 0x000a0000, 0x000a0000, 0x000a0000 } },
369 { 6, 0x989c, 334 { 6, 0x989c, { 0x00a10000, 0x00a10000, 0x00a10000 } },
370 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, 335 { 6, 0x989c, { 0x00400000, 0x00400000, 0x00400000 } },
371 { 6, 0x989c, 336 { 6, 0x989c, { 0x03090000, 0x03090000, 0x03090000 } },
372 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, 337 { 6, 0x989c, { 0x06000000, 0x06000000, 0x06000000 } },
373 { 6, 0x989c, 338 { 6, 0x989c, { 0x000000b0, 0x000000a8, 0x000000a8 } },
374 { 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000, 0x008b0000 } }, 339 { 6, 0x989c, { 0x0000002e, 0x0000002e, 0x0000002e } },
375 { 6, 0x989c, 340 { 6, 0x989c, { 0x006c4a41, 0x006c4af1, 0x006c4a61 } },
376 { 0x00600000, 0x00600000, 0x00600000, 0x00600000, 0x00600000 } }, 341 { 6, 0x989c, { 0x0050892a, 0x0050892b, 0x0050892b } },
377 { 6, 0x989c, 342 { 6, 0x989c, { 0x00842400, 0x00842400, 0x00842400 } },
378 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } }, 343 { 6, 0x989c, { 0x00c69200, 0x00c69200, 0x00c69200 } },
379 { 6, 0x989c, 344 { 6, 0x98d0, { 0x0002000c, 0x0002000c, 0x0002000c } },
380 { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } }, 345 { 7, 0x989c, { 0x00000094, 0x00000094, 0x00000094 } },
381 { 6, 0x989c, 346 { 7, 0x989c, { 0x00000091, 0x00000091, 0x00000091 } },
382 { 0x00640000, 0x00640000, 0x00640000, 0x00640000, 0x00640000 } }, 347 { 7, 0x989c, { 0x0000000a, 0x00000012, 0x00000012 } },
383 { 6, 0x989c, 348 { 7, 0x989c, { 0x00000080, 0x00000080, 0x00000080 } },
384 { 0x00200000, 0x00200000, 0x00200000, 0x00200000, 0x00200000 } }, 349 { 7, 0x989c, { 0x000000c1, 0x000000c1, 0x000000c1 } },
385 { 6, 0x989c, 350 { 7, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
386 { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } }, 351 { 7, 0x989c, { 0x000000f0, 0x000000f0, 0x000000f0 } },
387 { 6, 0x989c, 352 { 7, 0x989c, { 0x00000022, 0x00000022, 0x00000022 } },
388 { 0x00250000, 0x00250000, 0x00250000, 0x00250000, 0x00250000 } }, 353 { 7, 0x989c, { 0x00000092, 0x00000092, 0x00000092 } },
389 { 6, 0x989c, 354 { 7, 0x989c, { 0x000000d4, 0x000000d4, 0x000000d4 } },
390 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } }, 355 { 7, 0x989c, { 0x000014cc, 0x000014cc, 0x000014cc } },
391 { 6, 0x989c, 356 { 7, 0x989c, { 0x0000048c, 0x0000048c, 0x0000048c } },
392 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } }, 357 { 7, 0x98c4, { 0x00000003, 0x00000003, 0x00000003 } },
393 { 6, 0x989c,
394 { 0x00510000, 0x00510000, 0x00510000, 0x00510000, 0x00510000 } },
395 { 6, 0x989c,
396 { 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000, 0x1c040000 } },
397 { 6, 0x989c,
398 { 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000, 0x000a0000 } },
399 { 6, 0x989c,
400 { 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000, 0x00a10000 } },
401 { 6, 0x989c,
402 { 0x00400000, 0x00400000, 0x00400000, 0x00400000, 0x00400000 } },
403 { 6, 0x989c,
404 { 0x03090000, 0x03090000, 0x03090000, 0x03090000, 0x03090000 } },
405 { 6, 0x989c,
406 { 0x06000000, 0x06000000, 0x06000000, 0x06000000, 0x06000000 } },
407 { 6, 0x989c,
408 { 0x000000b0, 0x000000b0, 0x000000a8, 0x000000a8, 0x000000a8 } },
409 { 6, 0x989c,
410 { 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e, 0x0000002e } },
411 { 6, 0x989c,
412 { 0x006c4a41, 0x006c4a41, 0x006c4af1, 0x006c4a61, 0x006c4a61 } },
413 { 6, 0x989c,
414 { 0x0050892a, 0x0050892a, 0x0050892b, 0x0050892b, 0x0050892b } },
415 { 6, 0x989c,
416 { 0x00842400, 0x00842400, 0x00842400, 0x00842400, 0x00842400 } },
417 { 6, 0x989c,
418 { 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200, 0x00c69200 } },
419 { 6, 0x98d0,
420 { 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c, 0x0002000c } },
421 { 7, 0x989c,
422 { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } },
423 { 7, 0x989c,
424 { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } },
425 { 7, 0x989c,
426 { 0x0000000a, 0x0000000a, 0x00000012, 0x00000012, 0x00000012 } },
427 { 7, 0x989c,
428 { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } },
429 { 7, 0x989c,
430 { 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1, 0x000000c1 } },
431 { 7, 0x989c,
432 { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
433 { 7, 0x989c,
434 { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } },
435 { 7, 0x989c,
436 { 0x00000022, 0x00000022, 0x00000022, 0x00000022, 0x00000022 } },
437 { 7, 0x989c,
438 { 0x00000092, 0x00000092, 0x00000092, 0x00000092, 0x00000092 } },
439 { 7, 0x989c,
440 { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } },
441 { 7, 0x989c,
442 { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } },
443 { 7, 0x989c,
444 { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } },
445 { 7, 0x98c4,
446 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
447}; 358};
448 359
449/* RFX112A (Derby 2) */ 360/* RFX112A (Derby 2) */
@@ -477,6 +388,7 @@ static const struct ath5k_ini_rfbuffer rfb_5112[] = {
477#define AR5K_RF5112A_XB5_LVL { 2, 3, 3 } 388#define AR5K_RF5112A_XB5_LVL { 2, 3, 3 }
478 389
479static const struct ath5k_rf_reg rf_regs_5112a[] = { 390static const struct ath5k_rf_reg rf_regs_5112a[] = {
391 {2, AR5K_RF_TURBO, AR5K_RF5112X_RF_TURBO},
480 {6, AR5K_RF_OB_2GHZ, AR5K_RF5112A_OB_2GHZ}, 392 {6, AR5K_RF_OB_2GHZ, AR5K_RF5112A_OB_2GHZ},
481 {6, AR5K_RF_DB_2GHZ, AR5K_RF5112A_DB_2GHZ}, 393 {6, AR5K_RF_DB_2GHZ, AR5K_RF5112A_DB_2GHZ},
482 {6, AR5K_RF_OB_5GHZ, AR5K_RF5112A_OB_5GHZ}, 394 {6, AR5K_RF_OB_5GHZ, AR5K_RF5112A_OB_5GHZ},
@@ -515,119 +427,63 @@ static const struct ath5k_rf_reg rf_regs_5112a[] = {
515 427
516/* Default mode specific settings */ 428/* Default mode specific settings */
517static const struct ath5k_ini_rfbuffer rfb_5112a[] = { 429static const struct ath5k_ini_rfbuffer rfb_5112a[] = {
518 { 1, 0x98d4, 430 /* BANK / C.R. A/XR B G */
519 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 431 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
520 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 432 { 2, 0x98d0, { 0x03060408, 0x03060408, 0x03060408 } },
521 { 2, 0x98d0, 433 { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
522 { 0x03060408, 0x03070408, 0x03060408, 0x03060408, 0x03070408 } }, 434 { 6, 0x989c, { 0x0f000000, 0x0f000000, 0x0f000000 } },
523 { 3, 0x98dc, 435 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
524 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, 436 { 6, 0x989c, { 0x00800000, 0x00800000, 0x00800000 } },
525 { 6, 0x989c, 437 { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
526 { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } }, 438 { 6, 0x989c, { 0x00010000, 0x00010000, 0x00010000 } },
527 { 6, 0x989c, 439 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
528 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 440 { 6, 0x989c, { 0x00180000, 0x00180000, 0x00180000 } },
529 { 6, 0x989c, 441 { 6, 0x989c, { 0x00600000, 0x006e0000, 0x006e0000 } },
530 { 0x00800000, 0x00800000, 0x00800000, 0x00800000, 0x00800000 } }, 442 { 6, 0x989c, { 0x00c70000, 0x00c70000, 0x00c70000 } },
531 { 6, 0x989c, 443 { 6, 0x989c, { 0x004b0000, 0x004b0000, 0x004b0000 } },
532 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, 444 { 6, 0x989c, { 0x04480000, 0x04480000, 0x04480000 } },
533 { 6, 0x989c, 445 { 6, 0x989c, { 0x004c0000, 0x004c0000, 0x004c0000 } },
534 { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } }, 446 { 6, 0x989c, { 0x00e40000, 0x00e40000, 0x00e40000 } },
535 { 6, 0x989c, 447 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
536 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 448 { 6, 0x989c, { 0x00fc0000, 0x00fc0000, 0x00fc0000 } },
537 { 6, 0x989c, 449 { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
538 { 0x00180000, 0x00180000, 0x00180000, 0x00180000, 0x00180000 } }, 450 { 6, 0x989c, { 0x043f0000, 0x043f0000, 0x043f0000 } },
539 { 6, 0x989c, 451 { 6, 0x989c, { 0x000c0000, 0x000c0000, 0x000c0000 } },
540 { 0x00600000, 0x00600000, 0x006e0000, 0x006e0000, 0x006e0000 } }, 452 { 6, 0x989c, { 0x02190000, 0x02190000, 0x02190000 } },
541 { 6, 0x989c, 453 { 6, 0x989c, { 0x00240000, 0x00240000, 0x00240000 } },
542 { 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000, 0x00c70000 } }, 454 { 6, 0x989c, { 0x00b40000, 0x00b40000, 0x00b40000 } },
543 { 6, 0x989c, 455 { 6, 0x989c, { 0x00990000, 0x00990000, 0x00990000 } },
544 { 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000, 0x004b0000 } }, 456 { 6, 0x989c, { 0x00500000, 0x00500000, 0x00500000 } },
545 { 6, 0x989c, 457 { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
546 { 0x04480000, 0x04480000, 0x04480000, 0x04480000, 0x04480000 } }, 458 { 6, 0x989c, { 0x00120000, 0x00120000, 0x00120000 } },
547 { 6, 0x989c, 459 { 6, 0x989c, { 0xc0320000, 0xc0320000, 0xc0320000 } },
548 { 0x004c0000, 0x004c0000, 0x004c0000, 0x004c0000, 0x004c0000 } }, 460 { 6, 0x989c, { 0x01740000, 0x01740000, 0x01740000 } },
549 { 6, 0x989c, 461 { 6, 0x989c, { 0x00110000, 0x00110000, 0x00110000 } },
550 { 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000, 0x00e40000 } }, 462 { 6, 0x989c, { 0x86280000, 0x86280000, 0x86280000 } },
551 { 6, 0x989c, 463 { 6, 0x989c, { 0x31840000, 0x31840000, 0x31840000 } },
552 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 464 { 6, 0x989c, { 0x00f20080, 0x00f20080, 0x00f20080 } },
553 { 6, 0x989c, 465 { 6, 0x989c, { 0x00270019, 0x00270019, 0x00270019 } },
554 { 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000, 0x00fc0000 } }, 466 { 6, 0x989c, { 0x00000003, 0x00000003, 0x00000003 } },
555 { 6, 0x989c, 467 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
556 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, 468 { 6, 0x989c, { 0x000000b2, 0x000000b2, 0x000000b2 } },
557 { 6, 0x989c, 469 { 6, 0x989c, { 0x00b02084, 0x00b02084, 0x00b02084 } },
558 { 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000, 0x043f0000 } }, 470 { 6, 0x989c, { 0x004125a4, 0x004125a4, 0x004125a4 } },
559 { 6, 0x989c, 471 { 6, 0x989c, { 0x00119220, 0x00119220, 0x00119220 } },
560 { 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000, 0x000c0000 } }, 472 { 6, 0x989c, { 0x001a4800, 0x001a4800, 0x001a4800 } },
561 { 6, 0x989c, 473 { 6, 0x98d8, { 0x000b0230, 0x000b0230, 0x000b0230 } },
562 { 0x02190000, 0x02190000, 0x02190000, 0x02190000, 0x02190000 } }, 474 { 7, 0x989c, { 0x00000094, 0x00000094, 0x00000094 } },
563 { 6, 0x989c, 475 { 7, 0x989c, { 0x00000091, 0x00000091, 0x00000091 } },
564 { 0x00240000, 0x00240000, 0x00240000, 0x00240000, 0x00240000 } }, 476 { 7, 0x989c, { 0x00000012, 0x00000012, 0x00000012 } },
565 { 6, 0x989c, 477 { 7, 0x989c, { 0x00000080, 0x00000080, 0x00000080 } },
566 { 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000, 0x00b40000 } }, 478 { 7, 0x989c, { 0x000000d9, 0x000000d9, 0x000000d9 } },
567 { 6, 0x989c, 479 { 7, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
568 { 0x00990000, 0x00990000, 0x00990000, 0x00990000, 0x00990000 } }, 480 { 7, 0x989c, { 0x000000f0, 0x000000f0, 0x000000f0 } },
569 { 6, 0x989c, 481 { 7, 0x989c, { 0x000000a2, 0x000000a2, 0x000000a2 } },
570 { 0x00500000, 0x00500000, 0x00500000, 0x00500000, 0x00500000 } }, 482 { 7, 0x989c, { 0x00000052, 0x00000052, 0x00000052 } },
571 { 6, 0x989c, 483 { 7, 0x989c, { 0x000000d4, 0x000000d4, 0x000000d4 } },
572 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, 484 { 7, 0x989c, { 0x000014cc, 0x000014cc, 0x000014cc } },
573 { 6, 0x989c, 485 { 7, 0x989c, { 0x0000048c, 0x0000048c, 0x0000048c } },
574 { 0x00120000, 0x00120000, 0x00120000, 0x00120000, 0x00120000 } }, 486 { 7, 0x98c4, { 0x00000003, 0x00000003, 0x00000003 } },
575 { 6, 0x989c,
576 { 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000, 0xc0320000 } },
577 { 6, 0x989c,
578 { 0x01740000, 0x01740000, 0x01740000, 0x01740000, 0x01740000 } },
579 { 6, 0x989c,
580 { 0x00110000, 0x00110000, 0x00110000, 0x00110000, 0x00110000 } },
581 { 6, 0x989c,
582 { 0x86280000, 0x86280000, 0x86280000, 0x86280000, 0x86280000 } },
583 { 6, 0x989c,
584 { 0x31840000, 0x31840000, 0x31840000, 0x31840000, 0x31840000 } },
585 { 6, 0x989c,
586 { 0x00f20080, 0x00f20080, 0x00f20080, 0x00f20080, 0x00f20080 } },
587 { 6, 0x989c,
588 { 0x00270019, 0x00270019, 0x00270019, 0x00270019, 0x00270019 } },
589 { 6, 0x989c,
590 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
591 { 6, 0x989c,
592 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
593 { 6, 0x989c,
594 { 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2, 0x000000b2 } },
595 { 6, 0x989c,
596 { 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084, 0x00b02084 } },
597 { 6, 0x989c,
598 { 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4, 0x004125a4 } },
599 { 6, 0x989c,
600 { 0x00119220, 0x00119220, 0x00119220, 0x00119220, 0x00119220 } },
601 { 6, 0x989c,
602 { 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800, 0x001a4800 } },
603 { 6, 0x98d8,
604 { 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230, 0x000b0230 } },
605 { 7, 0x989c,
606 { 0x00000094, 0x00000094, 0x00000094, 0x00000094, 0x00000094 } },
607 { 7, 0x989c,
608 { 0x00000091, 0x00000091, 0x00000091, 0x00000091, 0x00000091 } },
609 { 7, 0x989c,
610 { 0x00000012, 0x00000012, 0x00000012, 0x00000012, 0x00000012 } },
611 { 7, 0x989c,
612 { 0x00000080, 0x00000080, 0x00000080, 0x00000080, 0x00000080 } },
613 { 7, 0x989c,
614 { 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9, 0x000000d9 } },
615 { 7, 0x989c,
616 { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
617 { 7, 0x989c,
618 { 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0, 0x000000f0 } },
619 { 7, 0x989c,
620 { 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2, 0x000000a2 } },
621 { 7, 0x989c,
622 { 0x00000052, 0x00000052, 0x00000052, 0x00000052, 0x00000052 } },
623 { 7, 0x989c,
624 { 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4, 0x000000d4 } },
625 { 7, 0x989c,
626 { 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc, 0x000014cc } },
627 { 7, 0x989c,
628 { 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c, 0x0000048c } },
629 { 7, 0x98c4,
630 { 0x00000003, 0x00000003, 0x00000003, 0x00000003, 0x00000003 } },
631}; 487};
632 488
633 489
@@ -636,11 +492,15 @@ static const struct ath5k_ini_rfbuffer rfb_5112a[] = {
636* RF2413 (Griffin) * 492* RF2413 (Griffin) *
637\******************/ 493\******************/
638 494
495/* BANK 2 len pos col */
496#define AR5K_RF2413_RF_TURBO { 1, 1, 2 }
497
639/* BANK 6 len pos col */ 498/* BANK 6 len pos col */
640#define AR5K_RF2413_OB_2GHZ { 3, 168, 0 } 499#define AR5K_RF2413_OB_2GHZ { 3, 168, 0 }
641#define AR5K_RF2413_DB_2GHZ { 3, 165, 0 } 500#define AR5K_RF2413_DB_2GHZ { 3, 165, 0 }
642 501
643static const struct ath5k_rf_reg rf_regs_2413[] = { 502static const struct ath5k_rf_reg rf_regs_2413[] = {
503 {2, AR5K_RF_TURBO, AR5K_RF2413_RF_TURBO},
644 {6, AR5K_RF_OB_2GHZ, AR5K_RF2413_OB_2GHZ}, 504 {6, AR5K_RF_OB_2GHZ, AR5K_RF2413_OB_2GHZ},
645 {6, AR5K_RF_DB_2GHZ, AR5K_RF2413_DB_2GHZ}, 505 {6, AR5K_RF_DB_2GHZ, AR5K_RF2413_DB_2GHZ},
646}; 506};
@@ -649,73 +509,40 @@ static const struct ath5k_rf_reg rf_regs_2413[] = {
649 * XXX: a/aTurbo ??? 509 * XXX: a/aTurbo ???
650 */ 510 */
651static const struct ath5k_ini_rfbuffer rfb_2413[] = { 511static const struct ath5k_ini_rfbuffer rfb_2413[] = {
652 { 1, 0x98d4, 512 /* BANK / C.R. A/XR B G */
653 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 513 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
654 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 514 { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
655 { 2, 0x98d0, 515 { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
656 { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } }, 516 { 6, 0x989c, { 0xf0000000, 0xf0000000, 0xf0000000 } },
657 { 3, 0x98dc, 517 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
658 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, 518 { 6, 0x989c, { 0x03000000, 0x03000000, 0x03000000 } },
659 { 6, 0x989c, 519 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
660 { 0xf0000000, 0xf0000000, 0xf0000000, 0xf0000000, 0xf0000000 } }, 520 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
661 { 6, 0x989c, 521 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
662 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 522 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
663 { 6, 0x989c, 523 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
664 { 0x03000000, 0x03000000, 0x03000000, 0x03000000, 0x03000000 } }, 524 { 6, 0x989c, { 0x40400000, 0x40400000, 0x40400000 } },
665 { 6, 0x989c, 525 { 6, 0x989c, { 0x65050000, 0x65050000, 0x65050000 } },
666 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 526 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
667 { 6, 0x989c, 527 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
668 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 528 { 6, 0x989c, { 0x00420000, 0x00420000, 0x00420000 } },
669 { 6, 0x989c, 529 { 6, 0x989c, { 0x00b50000, 0x00b50000, 0x00b50000 } },
670 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 530 { 6, 0x989c, { 0x00030000, 0x00030000, 0x00030000 } },
671 { 6, 0x989c, 531 { 6, 0x989c, { 0x00f70000, 0x00f70000, 0x00f70000 } },
672 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 532 { 6, 0x989c, { 0x009d0000, 0x009d0000, 0x009d0000 } },
673 { 6, 0x989c, 533 { 6, 0x989c, { 0x00220000, 0x00220000, 0x00220000 } },
674 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 534 { 6, 0x989c, { 0x04220000, 0x04220000, 0x04220000 } },
675 { 6, 0x989c, 535 { 6, 0x989c, { 0x00230018, 0x00230018, 0x00230018 } },
676 { 0x40400000, 0x40400000, 0x40400000, 0x40400000, 0x40400000 } }, 536 { 6, 0x989c, { 0x00280000, 0x00280060, 0x00280060 } },
677 { 6, 0x989c, 537 { 6, 0x989c, { 0x005000c0, 0x005000c3, 0x005000c3 } },
678 { 0x65050000, 0x65050000, 0x65050000, 0x65050000, 0x65050000 } }, 538 { 6, 0x989c, { 0x0004007f, 0x0004007f, 0x0004007f } },
679 { 6, 0x989c, 539 { 6, 0x989c, { 0x00000458, 0x00000458, 0x00000458 } },
680 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 540 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
681 { 6, 0x989c, 541 { 6, 0x989c, { 0x0000c000, 0x0000c000, 0x0000c000 } },
682 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 542 { 6, 0x98d8, { 0x00400230, 0x00400230, 0x00400230 } },
683 { 6, 0x989c, 543 { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
684 { 0x00420000, 0x00420000, 0x00420000, 0x00420000, 0x00420000 } }, 544 { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
685 { 6, 0x989c, 545 { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
686 { 0x00b50000, 0x00b50000, 0x00b50000, 0x00b50000, 0x00b50000 } },
687 { 6, 0x989c,
688 { 0x00030000, 0x00030000, 0x00030000, 0x00030000, 0x00030000 } },
689 { 6, 0x989c,
690 { 0x00f70000, 0x00f70000, 0x00f70000, 0x00f70000, 0x00f70000 } },
691 { 6, 0x989c,
692 { 0x009d0000, 0x009d0000, 0x009d0000, 0x009d0000, 0x009d0000 } },
693 { 6, 0x989c,
694 { 0x00220000, 0x00220000, 0x00220000, 0x00220000, 0x00220000 } },
695 { 6, 0x989c,
696 { 0x04220000, 0x04220000, 0x04220000, 0x04220000, 0x04220000 } },
697 { 6, 0x989c,
698 { 0x00230018, 0x00230018, 0x00230018, 0x00230018, 0x00230018 } },
699 { 6, 0x989c,
700 { 0x00280000, 0x00280000, 0x00280060, 0x00280060, 0x00280060 } },
701 { 6, 0x989c,
702 { 0x005000c0, 0x005000c0, 0x005000c3, 0x005000c3, 0x005000c3 } },
703 { 6, 0x989c,
704 { 0x0004007f, 0x0004007f, 0x0004007f, 0x0004007f, 0x0004007f } },
705 { 6, 0x989c,
706 { 0x00000458, 0x00000458, 0x00000458, 0x00000458, 0x00000458 } },
707 { 6, 0x989c,
708 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
709 { 6, 0x989c,
710 { 0x0000c000, 0x0000c000, 0x0000c000, 0x0000c000, 0x0000c000 } },
711 { 6, 0x98d8,
712 { 0x00400230, 0x00400230, 0x00400230, 0x00400230, 0x00400230 } },
713 { 7, 0x989c,
714 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
715 { 7, 0x989c,
716 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
717 { 7, 0x98cc,
718 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
719}; 546};
720 547
721 548
@@ -724,88 +551,57 @@ static const struct ath5k_ini_rfbuffer rfb_2413[] = {
724* RF2315/RF2316 (Cobra SoC) * 551* RF2315/RF2316 (Cobra SoC) *
725\***************************/ 552\***************************/
726 553
554/* BANK 2 len pos col */
555#define AR5K_RF2316_RF_TURBO { 1, 1, 2 }
556
727/* BANK 6 len pos col */ 557/* BANK 6 len pos col */
728#define AR5K_RF2316_OB_2GHZ { 3, 178, 0 } 558#define AR5K_RF2316_OB_2GHZ { 3, 178, 0 }
729#define AR5K_RF2316_DB_2GHZ { 3, 175, 0 } 559#define AR5K_RF2316_DB_2GHZ { 3, 175, 0 }
730 560
731static const struct ath5k_rf_reg rf_regs_2316[] = { 561static const struct ath5k_rf_reg rf_regs_2316[] = {
562 {2, AR5K_RF_TURBO, AR5K_RF2316_RF_TURBO},
732 {6, AR5K_RF_OB_2GHZ, AR5K_RF2316_OB_2GHZ}, 563 {6, AR5K_RF_OB_2GHZ, AR5K_RF2316_OB_2GHZ},
733 {6, AR5K_RF_DB_2GHZ, AR5K_RF2316_DB_2GHZ}, 564 {6, AR5K_RF_DB_2GHZ, AR5K_RF2316_DB_2GHZ},
734}; 565};
735 566
736/* Default mode specific settings */ 567/* Default mode specific settings */
737static const struct ath5k_ini_rfbuffer rfb_2316[] = { 568static const struct ath5k_ini_rfbuffer rfb_2316[] = {
738 { 1, 0x98d4, 569 /* BANK / C.R. A/XR B G */
739 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 570 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
740 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 571 { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
741 { 2, 0x98d0, 572 { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
742 { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } }, 573 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
743 { 3, 0x98dc, 574 { 6, 0x989c, { 0xc0000000, 0xc0000000, 0xc0000000 } },
744 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, 575 { 6, 0x989c, { 0x0f000000, 0x0f000000, 0x0f000000 } },
745 { 6, 0x989c, 576 { 6, 0x989c, { 0x02000000, 0x02000000, 0x02000000 } },
746 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 577 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
747 { 6, 0x989c, 578 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
748 { 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000, 0xc0000000 } }, 579 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
749 { 6, 0x989c, 580 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
750 { 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000, 0x0f000000 } }, 581 { 6, 0x989c, { 0xf8000000, 0xf8000000, 0xf8000000 } },
751 { 6, 0x989c, 582 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
752 { 0x02000000, 0x02000000, 0x02000000, 0x02000000, 0x02000000 } }, 583 { 6, 0x989c, { 0x95150000, 0x95150000, 0x95150000 } },
753 { 6, 0x989c, 584 { 6, 0x989c, { 0xc1000000, 0xc1000000, 0xc1000000 } },
754 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 585 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
755 { 6, 0x989c, 586 { 6, 0x989c, { 0x00080000, 0x00080000, 0x00080000 } },
756 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 587 { 6, 0x989c, { 0x00d50000, 0x00d50000, 0x00d50000 } },
757 { 6, 0x989c, 588 { 6, 0x989c, { 0x000e0000, 0x000e0000, 0x000e0000 } },
758 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 589 { 6, 0x989c, { 0x00dc0000, 0x00dc0000, 0x00dc0000 } },
759 { 6, 0x989c, 590 { 6, 0x989c, { 0x00770000, 0x00770000, 0x00770000 } },
760 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 591 { 6, 0x989c, { 0x008a0000, 0x008a0000, 0x008a0000 } },
761 { 6, 0x989c, 592 { 6, 0x989c, { 0x10880000, 0x10880000, 0x10880000 } },
762 { 0xf8000000, 0xf8000000, 0xf8000000, 0xf8000000, 0xf8000000 } }, 593 { 6, 0x989c, { 0x008c0060, 0x008c0060, 0x008c0060 } },
763 { 6, 0x989c, 594 { 6, 0x989c, { 0x00a00000, 0x00a00080, 0x00a00080 } },
764 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 595 { 6, 0x989c, { 0x00400000, 0x0040000d, 0x0040000d } },
765 { 6, 0x989c, 596 { 6, 0x989c, { 0x00110400, 0x00110400, 0x00110400 } },
766 { 0x95150000, 0x95150000, 0x95150000, 0x95150000, 0x95150000 } }, 597 { 6, 0x989c, { 0x00000060, 0x00000060, 0x00000060 } },
767 { 6, 0x989c, 598 { 6, 0x989c, { 0x00000001, 0x00000001, 0x00000001 } },
768 { 0xc1000000, 0xc1000000, 0xc1000000, 0xc1000000, 0xc1000000 } }, 599 { 6, 0x989c, { 0x00000b00, 0x00000b00, 0x00000b00 } },
769 { 6, 0x989c, 600 { 6, 0x989c, { 0x00000be8, 0x00000be8, 0x00000be8 } },
770 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 601 { 6, 0x98c0, { 0x00010000, 0x00010000, 0x00010000 } },
771 { 6, 0x989c, 602 { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
772 { 0x00080000, 0x00080000, 0x00080000, 0x00080000, 0x00080000 } }, 603 { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
773 { 6, 0x989c, 604 { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
774 { 0x00d50000, 0x00d50000, 0x00d50000, 0x00d50000, 0x00d50000 } },
775 { 6, 0x989c,
776 { 0x000e0000, 0x000e0000, 0x000e0000, 0x000e0000, 0x000e0000 } },
777 { 6, 0x989c,
778 { 0x00dc0000, 0x00dc0000, 0x00dc0000, 0x00dc0000, 0x00dc0000 } },
779 { 6, 0x989c,
780 { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } },
781 { 6, 0x989c,
782 { 0x008a0000, 0x008a0000, 0x008a0000, 0x008a0000, 0x008a0000 } },
783 { 6, 0x989c,
784 { 0x10880000, 0x10880000, 0x10880000, 0x10880000, 0x10880000 } },
785 { 6, 0x989c,
786 { 0x008c0060, 0x008c0060, 0x008c0060, 0x008c0060, 0x008c0060 } },
787 { 6, 0x989c,
788 { 0x00a00000, 0x00a00000, 0x00a00080, 0x00a00080, 0x00a00080 } },
789 { 6, 0x989c,
790 { 0x00400000, 0x00400000, 0x0040000d, 0x0040000d, 0x0040000d } },
791 { 6, 0x989c,
792 { 0x00110400, 0x00110400, 0x00110400, 0x00110400, 0x00110400 } },
793 { 6, 0x989c,
794 { 0x00000060, 0x00000060, 0x00000060, 0x00000060, 0x00000060 } },
795 { 6, 0x989c,
796 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
797 { 6, 0x989c,
798 { 0x00000b00, 0x00000b00, 0x00000b00, 0x00000b00, 0x00000b00 } },
799 { 6, 0x989c,
800 { 0x00000be8, 0x00000be8, 0x00000be8, 0x00000be8, 0x00000be8 } },
801 { 6, 0x98c0,
802 { 0x00010000, 0x00010000, 0x00010000, 0x00010000, 0x00010000 } },
803 { 7, 0x989c,
804 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
805 { 7, 0x989c,
806 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
807 { 7, 0x98cc,
808 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
809}; 605};
810 606
811 607
@@ -835,93 +631,50 @@ static const struct ath5k_rf_reg rf_regs_5413[] = {
835 631
836/* Default mode specific settings */ 632/* Default mode specific settings */
837static const struct ath5k_ini_rfbuffer rfb_5413[] = { 633static const struct ath5k_ini_rfbuffer rfb_5413[] = {
838 { 1, 0x98d4, 634 /* BANK / C.R. A/XR B G */
839 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 635 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
840 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 636 { 2, 0x98d0, { 0x00000008, 0x00000008, 0x00000008 } },
841 { 2, 0x98d0, 637 { 3, 0x98dc, { 0x00a000c0, 0x00e000c0, 0x00e000c0 } },
842 { 0x00000008, 0x00000008, 0x00000008, 0x00000008, 0x00000008 } }, 638 { 6, 0x989c, { 0x33000000, 0x33000000, 0x33000000 } },
843 { 3, 0x98dc, 639 { 6, 0x989c, { 0x01000000, 0x01000000, 0x01000000 } },
844 { 0x00a000c0, 0x00a000c0, 0x00e000c0, 0x00e000c0, 0x00e000c0 } }, 640 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
845 { 6, 0x989c, 641 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
846 { 0x33000000, 0x33000000, 0x33000000, 0x33000000, 0x33000000 } }, 642 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
847 { 6, 0x989c, 643 { 6, 0x989c, { 0x1f000000, 0x1f000000, 0x1f000000 } },
848 { 0x01000000, 0x01000000, 0x01000000, 0x01000000, 0x01000000 } }, 644 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
849 { 6, 0x989c, 645 { 6, 0x989c, { 0x00b80000, 0x00b80000, 0x00b80000 } },
850 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 646 { 6, 0x989c, { 0x00b70000, 0x00b70000, 0x00b70000 } },
851 { 6, 0x989c, 647 { 6, 0x989c, { 0x00840000, 0x00840000, 0x00840000 } },
852 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 648 { 6, 0x989c, { 0x00980000, 0x00980000, 0x00980000 } },
853 { 6, 0x989c, 649 { 6, 0x989c, { 0x00c00000, 0x00c00000, 0x00c00000 } },
854 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 650 { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
855 { 6, 0x989c, 651 { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
856 { 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000, 0x1f000000 } }, 652 { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
857 { 6, 0x989c, 653 { 6, 0x989c, { 0x00ff0000, 0x00ff0000, 0x00ff0000 } },
858 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 654 { 6, 0x989c, { 0x00d70000, 0x00d70000, 0x00d70000 } },
859 { 6, 0x989c, 655 { 6, 0x989c, { 0x00610000, 0x00610000, 0x00610000 } },
860 { 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000, 0x00b80000 } }, 656 { 6, 0x989c, { 0x00fe0000, 0x00fe0000, 0x00fe0000 } },
861 { 6, 0x989c, 657 { 6, 0x989c, { 0x00de0000, 0x00de0000, 0x00de0000 } },
862 { 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000, 0x00b70000 } }, 658 { 6, 0x989c, { 0x007f0000, 0x007f0000, 0x007f0000 } },
863 { 6, 0x989c, 659 { 6, 0x989c, { 0x043d0000, 0x043d0000, 0x043d0000 } },
864 { 0x00840000, 0x00840000, 0x00840000, 0x00840000, 0x00840000 } }, 660 { 6, 0x989c, { 0x00770000, 0x00770000, 0x00770000 } },
865 { 6, 0x989c, 661 { 6, 0x989c, { 0x00440000, 0x00440000, 0x00440000 } },
866 { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } }, 662 { 6, 0x989c, { 0x00980000, 0x00980000, 0x00980000 } },
867 { 6, 0x989c, 663 { 6, 0x989c, { 0x00100080, 0x00100080, 0x00100080 } },
868 { 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000, 0x00c00000 } }, 664 { 6, 0x989c, { 0x0005c034, 0x0005c034, 0x0005c034 } },
869 { 6, 0x989c, 665 { 6, 0x989c, { 0x003100f0, 0x003100f0, 0x003100f0 } },
870 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, 666 { 6, 0x989c, { 0x000c011f, 0x000c011f, 0x000c011f } },
871 { 6, 0x989c, 667 { 6, 0x989c, { 0x00510040, 0x00510040, 0x00510040 } },
872 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, 668 { 6, 0x989c, { 0x005000da, 0x005000da, 0x005000da } },
873 { 6, 0x989c, 669 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
874 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, 670 { 6, 0x989c, { 0x00004044, 0x00004044, 0x00004044 } },
875 { 6, 0x989c, 671 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
876 { 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000, 0x00ff0000 } }, 672 { 6, 0x989c, { 0x000060c0, 0x000060c0, 0x000060c0 } },
877 { 6, 0x989c, 673 { 6, 0x989c, { 0x00002c00, 0x00003600, 0x00003600 } },
878 { 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000, 0x00d70000 } }, 674 { 6, 0x98c8, { 0x00000403, 0x00040403, 0x00040403 } },
879 { 6, 0x989c, 675 { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
880 { 0x00610000, 0x00610000, 0x00610000, 0x00610000, 0x00610000 } }, 676 { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
881 { 6, 0x989c, 677 { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
882 { 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000, 0x00fe0000 } },
883 { 6, 0x989c,
884 { 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000, 0x00de0000 } },
885 { 6, 0x989c,
886 { 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000, 0x007f0000 } },
887 { 6, 0x989c,
888 { 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000, 0x043d0000 } },
889 { 6, 0x989c,
890 { 0x00770000, 0x00770000, 0x00770000, 0x00770000, 0x00770000 } },
891 { 6, 0x989c,
892 { 0x00440000, 0x00440000, 0x00440000, 0x00440000, 0x00440000 } },
893 { 6, 0x989c,
894 { 0x00980000, 0x00980000, 0x00980000, 0x00980000, 0x00980000 } },
895 { 6, 0x989c,
896 { 0x00100080, 0x00100080, 0x00100080, 0x00100080, 0x00100080 } },
897 { 6, 0x989c,
898 { 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034, 0x0005c034 } },
899 { 6, 0x989c,
900 { 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0, 0x003100f0 } },
901 { 6, 0x989c,
902 { 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f, 0x000c011f } },
903 { 6, 0x989c,
904 { 0x00510040, 0x00510040, 0x00510040, 0x00510040, 0x00510040 } },
905 { 6, 0x989c,
906 { 0x005000da, 0x005000da, 0x005000da, 0x005000da, 0x005000da } },
907 { 6, 0x989c,
908 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
909 { 6, 0x989c,
910 { 0x00004044, 0x00004044, 0x00004044, 0x00004044, 0x00004044 } },
911 { 6, 0x989c,
912 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
913 { 6, 0x989c,
914 { 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0, 0x000060c0 } },
915 { 6, 0x989c,
916 { 0x00002c00, 0x00002c00, 0x00003600, 0x00003600, 0x00002c00 } },
917 { 6, 0x98c8,
918 { 0x00000403, 0x00000403, 0x00040403, 0x00040403, 0x00040403 } },
919 { 7, 0x989c,
920 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
921 { 7, 0x989c,
922 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
923 { 7, 0x98cc,
924 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
925}; 678};
926 679
927 680
@@ -931,92 +684,59 @@ static const struct ath5k_ini_rfbuffer rfb_5413[] = {
931* AR2317 (Spider SoC) * 684* AR2317 (Spider SoC) *
932\***************************/ 685\***************************/
933 686
687/* BANK 2 len pos col */
688#define AR5K_RF2425_RF_TURBO { 1, 1, 2 }
689
934/* BANK 6 len pos col */ 690/* BANK 6 len pos col */
935#define AR5K_RF2425_OB_2GHZ { 3, 193, 0 } 691#define AR5K_RF2425_OB_2GHZ { 3, 193, 0 }
936#define AR5K_RF2425_DB_2GHZ { 3, 190, 0 } 692#define AR5K_RF2425_DB_2GHZ { 3, 190, 0 }
937 693
938static const struct ath5k_rf_reg rf_regs_2425[] = { 694static const struct ath5k_rf_reg rf_regs_2425[] = {
695 {2, AR5K_RF_TURBO, AR5K_RF2425_RF_TURBO},
939 {6, AR5K_RF_OB_2GHZ, AR5K_RF2425_OB_2GHZ}, 696 {6, AR5K_RF_OB_2GHZ, AR5K_RF2425_OB_2GHZ},
940 {6, AR5K_RF_DB_2GHZ, AR5K_RF2425_DB_2GHZ}, 697 {6, AR5K_RF_DB_2GHZ, AR5K_RF2425_DB_2GHZ},
941}; 698};
942 699
943/* Default mode specific settings 700/* Default mode specific settings
944 * XXX: a/aTurbo ?
945 */ 701 */
946static const struct ath5k_ini_rfbuffer rfb_2425[] = { 702static const struct ath5k_ini_rfbuffer rfb_2425[] = {
947 { 1, 0x98d4, 703 /* BANK / C.R. A/XR B G */
948 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 704 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
949 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 705 { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
950 { 2, 0x98d0, 706 { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
951 { 0x02001408, 0x02001408, 0x02001408, 0x02001408, 0x02001408 } }, 707 { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
952 { 3, 0x98dc, 708 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
953 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, 709 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
954 { 6, 0x989c, 710 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
955 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } }, 711 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
956 { 6, 0x989c, 712 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
957 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 713 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
958 { 6, 0x989c, 714 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
959 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 715 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
960 { 6, 0x989c, 716 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
961 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 717 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
962 { 6, 0x989c, 718 { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
963 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 719 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
964 { 6, 0x989c, 720 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
965 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 721 { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
966 { 6, 0x989c, 722 { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
967 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 723 { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
968 { 6, 0x989c, 724 { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
969 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 725 { 6, 0x989c, { 0x00e70000, 0x00e70000, 0x00e70000 } },
970 { 6, 0x989c, 726 { 6, 0x989c, { 0x00140000, 0x00140000, 0x00140000 } },
971 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 727 { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
972 { 6, 0x989c, 728 { 6, 0x989c, { 0x0007001a, 0x0007001a, 0x0007001a } },
973 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 729 { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
974 { 6, 0x989c, 730 { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
975 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 731 { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
976 { 6, 0x989c, 732 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
977 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, 733 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
978 { 6, 0x989c, 734 { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
979 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 735 { 6, 0x989c, { 0x00001688, 0x00001688, 0x00001688 } },
980 { 6, 0x989c, 736 { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
981 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 737 { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
982 { 6, 0x989c, 738 { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
983 { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } }, 739 { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
984 { 6, 0x989c,
985 { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
986 { 6, 0x989c,
987 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
988 { 6, 0x989c,
989 { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
990 { 6, 0x989c,
991 { 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000 } },
992 { 6, 0x989c,
993 { 0x00140000, 0x00140000, 0x00140000, 0x00140000, 0x00140000 } },
994 { 6, 0x989c,
995 { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
996 { 6, 0x989c,
997 { 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a } },
998 { 6, 0x989c,
999 { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
1000 { 6, 0x989c,
1001 { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
1002 { 6, 0x989c,
1003 { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
1004 { 6, 0x989c,
1005 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1006 { 6, 0x989c,
1007 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1008 { 6, 0x989c,
1009 { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
1010 { 6, 0x989c,
1011 { 0x00001688, 0x00001688, 0x00001688, 0x00001688, 0x00001688 } },
1012 { 6, 0x98c4,
1013 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
1014 { 7, 0x989c,
1015 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
1016 { 7, 0x989c,
1017 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
1018 { 7, 0x98cc,
1019 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
1020}; 740};
1021 741
1022/* 742/*
@@ -1024,158 +744,85 @@ static const struct ath5k_ini_rfbuffer rfb_2425[] = {
1024 * bank modification and get rid of this 744 * bank modification and get rid of this
1025 */ 745 */
1026static const struct ath5k_ini_rfbuffer rfb_2317[] = { 746static const struct ath5k_ini_rfbuffer rfb_2317[] = {
1027 { 1, 0x98d4, 747 /* BANK / C.R. A/XR B G */
1028 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 748 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
1029 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 749 { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
1030 { 2, 0x98d0, 750 { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
1031 { 0x02001408, 0x02011408, 0x02001408, 0x02001408, 0x02011408 } }, 751 { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
1032 { 3, 0x98dc, 752 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1033 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, 753 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1034 { 6, 0x989c, 754 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1035 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } }, 755 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1036 { 6, 0x989c, 756 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1037 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 757 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1038 { 6, 0x989c, 758 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1039 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 759 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1040 { 6, 0x989c, 760 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1041 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 761 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1042 { 6, 0x989c, 762 { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
1043 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 763 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1044 { 6, 0x989c, 764 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1045 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 765 { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
1046 { 6, 0x989c, 766 { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
1047 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 767 { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
1048 { 6, 0x989c, 768 { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
1049 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 769 { 6, 0x989c, { 0x00e70000, 0x00e70000, 0x00e70000 } },
1050 { 6, 0x989c, 770 { 6, 0x989c, { 0x00140100, 0x00140100, 0x00140100 } },
1051 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 771 { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
1052 { 6, 0x989c, 772 { 6, 0x989c, { 0x0007001a, 0x0007001a, 0x0007001a } },
1053 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 773 { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
1054 { 6, 0x989c, 774 { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
1055 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 775 { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
1056 { 6, 0x989c, 776 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1057 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, 777 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1058 { 6, 0x989c, 778 { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
1059 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 779 { 6, 0x989c, { 0x00009688, 0x00009688, 0x00009688 } },
1060 { 6, 0x989c, 780 { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
1061 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 781 { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
1062 { 6, 0x989c, 782 { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
1063 { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } }, 783 { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
1064 { 6, 0x989c,
1065 { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
1066 { 6, 0x989c,
1067 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
1068 { 6, 0x989c,
1069 { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
1070 { 6, 0x989c,
1071 { 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000, 0x00e70000 } },
1072 { 6, 0x989c,
1073 { 0x00140100, 0x00140100, 0x00140100, 0x00140100, 0x00140100 } },
1074 { 6, 0x989c,
1075 { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
1076 { 6, 0x989c,
1077 { 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a, 0x0007001a } },
1078 { 6, 0x989c,
1079 { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
1080 { 6, 0x989c,
1081 { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
1082 { 6, 0x989c,
1083 { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
1084 { 6, 0x989c,
1085 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1086 { 6, 0x989c,
1087 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1088 { 6, 0x989c,
1089 { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
1090 { 6, 0x989c,
1091 { 0x00009688, 0x00009688, 0x00009688, 0x00009688, 0x00009688 } },
1092 { 6, 0x98c4,
1093 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
1094 { 7, 0x989c,
1095 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
1096 { 7, 0x989c,
1097 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
1098 { 7, 0x98cc,
1099 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
1100}; 784};
1101 785
1102/* 786/*
1103 * TODO: Handle the few differences with swan during 787 * TODO: Handle the few differences with swan during
1104 * bank modification and get rid of this 788 * bank modification and get rid of this
1105 * XXX: a/aTurbo ?
1106 */ 789 */
1107static const struct ath5k_ini_rfbuffer rfb_2417[] = { 790static const struct ath5k_ini_rfbuffer rfb_2417[] = {
1108 { 1, 0x98d4, 791 /* BANK / C.R. A/XR B G */
1109 /* mode a/XR mode aTurbo mode b mode g mode gTurbo */ 792 { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
1110 { 0x00000020, 0x00000020, 0x00000020, 0x00000020, 0x00000020 } }, 793 { 2, 0x98d0, { 0x02001408, 0x02001408, 0x02001408 } },
1111 { 2, 0x98d0, 794 { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
1112 { 0x02001408, 0x02001408, 0x02001408, 0x02001408, 0x02001408 } }, 795 { 6, 0x989c, { 0x10000000, 0x10000000, 0x10000000 } },
1113 { 3, 0x98dc, 796 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1114 { 0x00a020c0, 0x00a020c0, 0x00e020c0, 0x00e020c0, 0x00e020c0 } }, 797 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1115 { 6, 0x989c, 798 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1116 { 0x10000000, 0x10000000, 0x10000000, 0x10000000, 0x10000000 } }, 799 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1117 { 6, 0x989c, 800 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1118 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 801 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1119 { 6, 0x989c, 802 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1120 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 803 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1121 { 6, 0x989c, 804 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1122 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 805 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1123 { 6, 0x989c, 806 { 6, 0x989c, { 0x002a0000, 0x002a0000, 0x002a0000 } },
1124 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 807 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1125 { 6, 0x989c, 808 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1126 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 809 { 6, 0x989c, { 0x00100000, 0x00100000, 0x00100000 } },
1127 { 6, 0x989c, 810 { 6, 0x989c, { 0x00020000, 0x00020000, 0x00020000 } },
1128 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 811 { 6, 0x989c, { 0x00730000, 0x00730000, 0x00730000 } },
1129 { 6, 0x989c, 812 { 6, 0x989c, { 0x00f80000, 0x00f80000, 0x00f80000 } },
1130 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 813 { 6, 0x989c, { 0x00e70000, 0x80e70000, 0x80e70000 } },
1131 { 6, 0x989c, 814 { 6, 0x989c, { 0x00140000, 0x00140000, 0x00140000 } },
1132 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 815 { 6, 0x989c, { 0x00910040, 0x00910040, 0x00910040 } },
1133 { 6, 0x989c, 816 { 6, 0x989c, { 0x0007001a, 0x0207001a, 0x0207001a } },
1134 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 817 { 6, 0x989c, { 0x00410000, 0x00410000, 0x00410000 } },
1135 { 6, 0x989c, 818 { 6, 0x989c, { 0x00810000, 0x00810060, 0x00810060 } },
1136 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 819 { 6, 0x989c, { 0x00020800, 0x00020803, 0x00020803 } },
1137 { 6, 0x989c, 820 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1138 { 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000, 0x002a0000 } }, 821 { 6, 0x989c, { 0x00000000, 0x00000000, 0x00000000 } },
1139 { 6, 0x989c, 822 { 6, 0x989c, { 0x00001660, 0x00001660, 0x00001660 } },
1140 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 823 { 6, 0x989c, { 0x00001688, 0x00001688, 0x00001688 } },
1141 { 6, 0x989c, 824 { 6, 0x98c4, { 0x00000001, 0x00000001, 0x00000001 } },
1142 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } }, 825 { 7, 0x989c, { 0x00006400, 0x00006400, 0x00006400 } },
1143 { 6, 0x989c, 826 { 7, 0x989c, { 0x00000800, 0x00000800, 0x00000800 } },
1144 { 0x00100000, 0x00100000, 0x00100000, 0x00100000, 0x00100000 } }, 827 { 7, 0x98cc, { 0x0000000e, 0x0000000e, 0x0000000e } },
1145 { 6, 0x989c,
1146 { 0x00020000, 0x00020000, 0x00020000, 0x00020000, 0x00020000 } },
1147 { 6, 0x989c,
1148 { 0x00730000, 0x00730000, 0x00730000, 0x00730000, 0x00730000 } },
1149 { 6, 0x989c,
1150 { 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000, 0x00f80000 } },
1151 { 6, 0x989c,
1152 { 0x00e70000, 0x00e70000, 0x80e70000, 0x80e70000, 0x00e70000 } },
1153 { 6, 0x989c,
1154 { 0x00140000, 0x00140000, 0x00140000, 0x00140000, 0x00140000 } },
1155 { 6, 0x989c,
1156 { 0x00910040, 0x00910040, 0x00910040, 0x00910040, 0x00910040 } },
1157 { 6, 0x989c,
1158 { 0x0007001a, 0x0007001a, 0x0207001a, 0x0207001a, 0x0007001a } },
1159 { 6, 0x989c,
1160 { 0x00410000, 0x00410000, 0x00410000, 0x00410000, 0x00410000 } },
1161 { 6, 0x989c,
1162 { 0x00810000, 0x00810000, 0x00810060, 0x00810060, 0x00810060 } },
1163 { 6, 0x989c,
1164 { 0x00020800, 0x00020800, 0x00020803, 0x00020803, 0x00020803 } },
1165 { 6, 0x989c,
1166 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1167 { 6, 0x989c,
1168 { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 } },
1169 { 6, 0x989c,
1170 { 0x00001660, 0x00001660, 0x00001660, 0x00001660, 0x00001660 } },
1171 { 6, 0x989c,
1172 { 0x00001688, 0x00001688, 0x00001688, 0x00001688, 0x00001688 } },
1173 { 6, 0x98c4,
1174 { 0x00000001, 0x00000001, 0x00000001, 0x00000001, 0x00000001 } },
1175 { 7, 0x989c,
1176 { 0x00006400, 0x00006400, 0x00006400, 0x00006400, 0x00006400 } },
1177 { 7, 0x989c,
1178 { 0x00000800, 0x00000800, 0x00000800, 0x00000800, 0x00000800 } },
1179 { 7, 0x98cc,
1180 { 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e, 0x0000000e } },
1181}; 828};
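
The ath5k rfbuffer tables above shrink from five per-mode columns (a/XR, aTurbo, b, g, gTurbo) to three (A/XR, B, G): the turbo columns only ever duplicated their base-mode values, so each entry now carries one word per remaining mode plus its bank number and control-register offset. A minimal sketch of how such a three-column table could be indexed is shown below; the struct, field and enum names are illustrative assumptions for this sketch, not the driver's actual definitions.

    #include <stdint.h>

    enum example_mode { EX_MODE_A_XR = 0, EX_MODE_B, EX_MODE_G, EX_MODE_MAX };

    struct example_rfbuffer {
            uint8_t  bank;                    /* RF bank the word belongs to */
            uint16_t ctrl;                    /* control register offset, e.g. 0x989c */
            uint32_t mode_data[EX_MODE_MAX];  /* one value per PHY-mode column */
    };

    static const struct example_rfbuffer example_rfb[] = {
            { 1, 0x98d4, { 0x00000020, 0x00000020, 0x00000020 } },
            { 3, 0x98dc, { 0x00a020c0, 0x00e020c0, 0x00e020c0 } },
    };

    /* Select the per-mode word for one table entry. */
    static uint32_t example_rfb_value(const struct example_rfbuffer *e,
                                      enum example_mode mode)
    {
            return e->mode_data[mode];
    }

With the turbo columns gone, whatever still differs between base and turbo operation presumably has to be applied when the RF banks are modified at channel-set time rather than baked into these static tables, which is consistent with the TODO comments kept above.
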
diff --git a/drivers/net/wireless/ath/ath5k/sysfs.c b/drivers/net/wireless/ath/ath5k/sysfs.c
index 90757de7bf59..929c68cdf8ab 100644
--- a/drivers/net/wireless/ath/ath5k/sysfs.c
+++ b/drivers/net/wireless/ath/ath5k/sysfs.c
@@ -95,7 +95,7 @@ static struct attribute_group ath5k_attribute_group_ani = {
95int 95int
96ath5k_sysfs_register(struct ath5k_softc *sc) 96ath5k_sysfs_register(struct ath5k_softc *sc)
97{ 97{
98 struct device *dev = &sc->pdev->dev; 98 struct device *dev = sc->dev;
99 int err; 99 int err;
100 100
101 err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani); 101 err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
@@ -110,7 +110,7 @@ ath5k_sysfs_register(struct ath5k_softc *sc)
110void 110void
111ath5k_sysfs_unregister(struct ath5k_softc *sc) 111ath5k_sysfs_unregister(struct ath5k_softc *sc)
112{ 112{
113 struct device *dev = &sc->pdev->dev; 113 struct device *dev = sc->dev;
114 114
115 sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani); 115 sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
116} 116}
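
The sysfs helpers above now take the struct device from sc->dev instead of reaching through sc->pdev, which keeps this code bus-agnostic: whichever front end probes the hardware fills in the generic device pointer once. A hedged sketch of the idea only; the type and field names below are made up for illustration and are not the driver's real layout.

    #include <linux/device.h>      /* struct device */

    /* Bus-agnostic driver state: common code only ever uses sc->dev. */
    struct example_softc {
            struct device *dev;    /* filled in once by the probing front end */
            /* ... remaining driver state ... */
    };

    /* A PCI front end would set       sc->dev = &pdev->dev;
     * a platform/AHB front end        sc->dev = &platform_device->dev;
     * so helpers like the sysfs code never need to know which bus it is. */
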
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 1a984b02e9e5..25a6e4417cdb 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -35,10 +35,9 @@ static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
35 35
36 pdata = (struct ath9k_platform_data *) pdev->dev.platform_data; 36 pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
37 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) { 37 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
38 ath_print(common, ATH_DBG_FATAL, 38 ath_err(common,
39 "%s: flash read failed, offset %08x " 39 "%s: flash read failed, offset %08x is out of range\n",
40 "is out of range\n", 40 __func__, off);
41 __func__, off);
42 return false; 41 return false;
43 } 42 }
44 43
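
From here on the patch converts ath_print() calls to level-specific ath_err()/ath_dbg() helpers and joins the previously split format strings onto single lines, so the full message stays greppable in the source. A rough sketch of the general wrapper pattern follows; the ex_* names are invented for this example and are not the ath9k API.

    #include <stdarg.h>
    #include <stdio.h>

    enum ex_level { EX_LVL_ERR, EX_LVL_DBG };

    struct ex_common { const char *tag; unsigned int debug_mask; };

    static void ex_log(struct ex_common *c, enum ex_level lvl, const char *fmt, ...)
    {
            va_list ap;

            if (lvl == EX_LVL_DBG && !c->debug_mask)
                    return;                 /* debug messages can be masked off */
            fprintf(stderr, "%s: ", c->tag);
            va_start(ap, fmt);
            vfprintf(stderr, fmt, ap);
            va_end(ap);
    }

    #define ex_err(c, fmt, ...) ex_log(c, EX_LVL_ERR, fmt, ##__VA_ARGS__)
    #define ex_dbg(c, fmt, ...) ex_log(c, EX_LVL_DBG, fmt, ##__VA_ARGS__)

The caller then chooses severity by name (ex_err vs. ex_dbg) rather than passing a debug category alongside every message, which is what the ath_err()/ath_dbg() conversions below achieve.
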
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 63ccb39cdcd4..2e31c775351f 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -135,8 +135,8 @@ static void ath9k_ani_restart(struct ath_hw *ah)
135 cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high; 135 cck_base = AR_PHY_COUNTMAX - ah->config.cck_trig_high;
136 } 136 }
137 137
138 ath_print(common, ATH_DBG_ANI, 138 ath_dbg(common, ATH_DBG_ANI,
139 "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base); 139 "Writing ofdmbase=%u cckbase=%u\n", ofdm_base, cck_base);
140 140
141 ENABLE_REGWRITE_BUFFER(ah); 141 ENABLE_REGWRITE_BUFFER(ah);
142 142
@@ -267,11 +267,11 @@ static void ath9k_hw_set_ofdm_nil(struct ath_hw *ah, u8 immunityLevel)
267 267
268 aniState->noiseFloor = BEACON_RSSI(ah); 268 aniState->noiseFloor = BEACON_RSSI(ah);
269 269
270 ath_print(common, ATH_DBG_ANI, 270 ath_dbg(common, ATH_DBG_ANI,
271 "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", 271 "**** ofdmlevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
272 aniState->ofdmNoiseImmunityLevel, 272 aniState->ofdmNoiseImmunityLevel,
273 immunityLevel, aniState->noiseFloor, 273 immunityLevel, aniState->noiseFloor,
274 aniState->rssiThrLow, aniState->rssiThrHigh); 274 aniState->rssiThrLow, aniState->rssiThrHigh);
275 275
276 aniState->ofdmNoiseImmunityLevel = immunityLevel; 276 aniState->ofdmNoiseImmunityLevel = immunityLevel;
277 277
@@ -334,11 +334,11 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
334 const struct ani_cck_level_entry *entry_cck; 334 const struct ani_cck_level_entry *entry_cck;
335 335
336 aniState->noiseFloor = BEACON_RSSI(ah); 336 aniState->noiseFloor = BEACON_RSSI(ah);
337 ath_print(common, ATH_DBG_ANI, 337 ath_dbg(common, ATH_DBG_ANI,
338 "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n", 338 "**** ccklevel %d=>%d, rssi=%d[lo=%d hi=%d]\n",
339 aniState->cckNoiseImmunityLevel, immunityLevel, 339 aniState->cckNoiseImmunityLevel, immunityLevel,
340 aniState->noiseFloor, aniState->rssiThrLow, 340 aniState->noiseFloor, aniState->rssiThrLow,
341 aniState->rssiThrHigh); 341 aniState->rssiThrHigh);
342 342
343 if ((ah->opmode == NL80211_IFTYPE_STATION || 343 if ((ah->opmode == NL80211_IFTYPE_STATION ||
344 ah->opmode == NL80211_IFTYPE_ADHOC) && 344 ah->opmode == NL80211_IFTYPE_ADHOC) &&
@@ -358,7 +358,7 @@ static void ath9k_hw_set_cck_nil(struct ath_hw *ah, u_int8_t immunityLevel)
358 entry_cck->fir_step_level); 358 entry_cck->fir_step_level);
359 359
360 /* Skip MRC CCK for pre AR9003 families */ 360 /* Skip MRC CCK for pre AR9003 families */
361 if (!AR_SREV_9300_20_OR_LATER(ah)) 361 if (!AR_SREV_9300_20_OR_LATER(ah) || AR_SREV_9485(ah))
362 return; 362 return;
363 363
364 if (aniState->mrcCCKOff == entry_cck->mrc_cck_on) 364 if (aniState->mrcCCKOff == entry_cck->mrc_cck_on)
@@ -478,8 +478,8 @@ static void ath9k_ani_reset_old(struct ath_hw *ah, bool is_scanning)
478 478
479 if (ah->opmode != NL80211_IFTYPE_STATION 479 if (ah->opmode != NL80211_IFTYPE_STATION
480 && ah->opmode != NL80211_IFTYPE_ADHOC) { 480 && ah->opmode != NL80211_IFTYPE_ADHOC) {
481 ath_print(common, ATH_DBG_ANI, 481 ath_dbg(common, ATH_DBG_ANI,
482 "Reset ANI state opmode %u\n", ah->opmode); 482 "Reset ANI state opmode %u\n", ah->opmode);
483 ah->stats.ast_ani_reset++; 483 ah->stats.ast_ani_reset++;
484 484
485 if (ah->opmode == NL80211_IFTYPE_AP) { 485 if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -584,16 +584,14 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
584 ATH9K_ANI_OFDM_DEF_LEVEL || 584 ATH9K_ANI_OFDM_DEF_LEVEL ||
585 aniState->cckNoiseImmunityLevel != 585 aniState->cckNoiseImmunityLevel !=
586 ATH9K_ANI_CCK_DEF_LEVEL) { 586 ATH9K_ANI_CCK_DEF_LEVEL) {
587 ath_print(common, ATH_DBG_ANI, 587 ath_dbg(common, ATH_DBG_ANI,
588 "Restore defaults: opmode %u " 588 "Restore defaults: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
589 "chan %d Mhz/0x%x is_scanning=%d " 589 ah->opmode,
590 "ofdm:%d cck:%d\n", 590 chan->channel,
591 ah->opmode, 591 chan->channelFlags,
592 chan->channel, 592 is_scanning,
593 chan->channelFlags, 593 aniState->ofdmNoiseImmunityLevel,
594 is_scanning, 594 aniState->cckNoiseImmunityLevel);
595 aniState->ofdmNoiseImmunityLevel,
596 aniState->cckNoiseImmunityLevel);
597 595
598 ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL); 596 ath9k_hw_set_ofdm_nil(ah, ATH9K_ANI_OFDM_DEF_LEVEL);
599 ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL); 597 ath9k_hw_set_cck_nil(ah, ATH9K_ANI_CCK_DEF_LEVEL);
@@ -602,16 +600,14 @@ void ath9k_ani_reset(struct ath_hw *ah, bool is_scanning)
602 /* 600 /*
603 * restore historical levels for this channel 601 * restore historical levels for this channel
604 */ 602 */
605 ath_print(common, ATH_DBG_ANI, 603 ath_dbg(common, ATH_DBG_ANI,
606 "Restore history: opmode %u " 604 "Restore history: opmode %u chan %d Mhz/0x%x is_scanning=%d ofdm:%d cck:%d\n",
607 "chan %d Mhz/0x%x is_scanning=%d " 605 ah->opmode,
608 "ofdm:%d cck:%d\n", 606 chan->channel,
609 ah->opmode, 607 chan->channelFlags,
610 chan->channel, 608 is_scanning,
611 chan->channelFlags, 609 aniState->ofdmNoiseImmunityLevel,
612 is_scanning, 610 aniState->cckNoiseImmunityLevel);
613 aniState->ofdmNoiseImmunityLevel,
614 aniState->cckNoiseImmunityLevel);
615 611
616 ath9k_hw_set_ofdm_nil(ah, 612 ath9k_hw_set_ofdm_nil(ah,
617 aniState->ofdmNoiseImmunityLevel); 613 aniState->ofdmNoiseImmunityLevel);
@@ -666,19 +662,17 @@ static bool ath9k_hw_ani_read_counters(struct ath_hw *ah)
666 662
667 if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) { 663 if (!use_new_ani(ah) && (phyCnt1 < ofdm_base || phyCnt2 < cck_base)) {
668 if (phyCnt1 < ofdm_base) { 664 if (phyCnt1 < ofdm_base) {
669 ath_print(common, ATH_DBG_ANI, 665 ath_dbg(common, ATH_DBG_ANI,
670 "phyCnt1 0x%x, resetting " 666 "phyCnt1 0x%x, resetting counter value to 0x%x\n",
671 "counter value to 0x%x\n", 667 phyCnt1, ofdm_base);
672 phyCnt1, ofdm_base);
673 REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base); 668 REG_WRITE(ah, AR_PHY_ERR_1, ofdm_base);
674 REG_WRITE(ah, AR_PHY_ERR_MASK_1, 669 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
675 AR_PHY_ERR_OFDM_TIMING); 670 AR_PHY_ERR_OFDM_TIMING);
676 } 671 }
677 if (phyCnt2 < cck_base) { 672 if (phyCnt2 < cck_base) {
678 ath_print(common, ATH_DBG_ANI, 673 ath_dbg(common, ATH_DBG_ANI,
679 "phyCnt2 0x%x, resetting " 674 "phyCnt2 0x%x, resetting counter value to 0x%x\n",
680 "counter value to 0x%x\n", 675 phyCnt2, cck_base);
681 phyCnt2, cck_base);
682 REG_WRITE(ah, AR_PHY_ERR_2, cck_base); 676 REG_WRITE(ah, AR_PHY_ERR_2, cck_base);
683 REG_WRITE(ah, AR_PHY_ERR_MASK_2, 677 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
684 AR_PHY_ERR_CCK_TIMING); 678 AR_PHY_ERR_CCK_TIMING);
@@ -719,13 +713,12 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah, struct ath9k_channel *chan)
719 cckPhyErrRate = aniState->cckPhyErrCount * 1000 / 713 cckPhyErrRate = aniState->cckPhyErrCount * 1000 /
720 aniState->listenTime; 714 aniState->listenTime;
721 715
722 ath_print(common, ATH_DBG_ANI, 716 ath_dbg(common, ATH_DBG_ANI,
723 "listenTime=%d OFDM:%d errs=%d/s CCK:%d " 717 "listenTime=%d OFDM:%d errs=%d/s CCK:%d errs=%d/s ofdm_turn=%d\n",
724 "errs=%d/s ofdm_turn=%d\n", 718 aniState->listenTime,
725 aniState->listenTime, 719 aniState->ofdmNoiseImmunityLevel,
726 aniState->ofdmNoiseImmunityLevel, 720 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel,
727 ofdmPhyErrRate, aniState->cckNoiseImmunityLevel, 721 cckPhyErrRate, aniState->ofdmsTurn);
728 cckPhyErrRate, aniState->ofdmsTurn);
729 722
730 if (aniState->listenTime > 5 * ah->aniperiod) { 723 if (aniState->listenTime > 5 * ah->aniperiod) {
731 if (ofdmPhyErrRate <= ah->config.ofdm_trig_low && 724 if (ofdmPhyErrRate <= ah->config.ofdm_trig_low &&
@@ -755,7 +748,7 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
755{ 748{
756 struct ath_common *common = ath9k_hw_common(ah); 749 struct ath_common *common = ath9k_hw_common(ah);
757 750
758 ath_print(common, ATH_DBG_ANI, "Enable MIB counters\n"); 751 ath_dbg(common, ATH_DBG_ANI, "Enable MIB counters\n");
759 752
760 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 753 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
761 754
@@ -777,7 +770,7 @@ void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
777{ 770{
778 struct ath_common *common = ath9k_hw_common(ah); 771 struct ath_common *common = ath9k_hw_common(ah);
779 772
780 ath_print(common, ATH_DBG_ANI, "Disable MIB counters\n"); 773 ath_dbg(common, ATH_DBG_ANI, "Disable MIB counters\n");
781 774
782 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC); 775 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
783 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 776 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
@@ -834,10 +827,10 @@ void ath9k_hw_ani_setup(struct ath_hw *ah)
834{ 827{
835 int i; 828 int i;
836 829
837 const int totalSizeDesired[] = { -55, -55, -55, -55, -62 }; 830 static const int totalSizeDesired[] = { -55, -55, -55, -55, -62 };
838 const int coarseHigh[] = { -14, -14, -14, -14, -12 }; 831 static const int coarseHigh[] = { -14, -14, -14, -14, -12 };
839 const int coarseLow[] = { -64, -64, -64, -64, -70 }; 832 static const int coarseLow[] = { -64, -64, -64, -64, -70 };
840 const int firpwr[] = { -78, -78, -78, -78, -80 }; 833 static const int firpwr[] = { -78, -78, -78, -78, -80 };
841 834
842 for (i = 0; i < 5; i++) { 835 for (i = 0; i < 5; i++) {
843 ah->totalSizeDesired[i] = totalSizeDesired[i]; 836 ah->totalSizeDesired[i] = totalSizeDesired[i];
@@ -852,7 +845,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
852 struct ath_common *common = ath9k_hw_common(ah); 845 struct ath_common *common = ath9k_hw_common(ah);
853 int i; 846 int i;
854 847
855 ath_print(common, ATH_DBG_ANI, "Initialize ANI\n"); 848 ath_dbg(common, ATH_DBG_ANI, "Initialize ANI\n");
856 849
857 if (use_new_ani(ah)) { 850 if (use_new_ani(ah)) {
858 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW; 851 ah->config.ofdm_trig_high = ATH9K_ANI_OFDM_TRIG_HIGH_NEW;
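
Several hunks above (ath9k_hw_ani_setup) and more in ar5008_phy.c below add `static` to small const lookup tables. Without `static`, each of those initialised arrays is an automatic object rebuilt on the stack every time the function runs; with it, a single copy lives in read-only data. A minimal sketch of the resulting pattern, using an invented function name:

    /* One copy of the table in rodata; nothing is re-initialised per call. */
    static int lookup_with_static_table(int level)
    {
            static const int firstep[] = { 0, 4, 8 };

            if (level < 0 || level >= (int)(sizeof(firstep) / sizeof(firstep[0])))
                    return -1;              /* out-of-range level */
            return firstep[level];
    }
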
diff --git a/drivers/net/wireless/ath/ath9k/ar5008_phy.c b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
index ea9f4497f58c..ffcf44a4058b 100644
--- a/drivers/net/wireless/ath/ath9k/ar5008_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar5008_phy.c
@@ -130,9 +130,8 @@ static void ar5008_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
130 /* pre-reverse this field */ 130 /* pre-reverse this field */
131 tmp_reg = ath9k_hw_reverse_bits(new_bias, 3); 131 tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
132 132
133 ath_print(common, ATH_DBG_CONFIG, 133 ath_dbg(common, ATH_DBG_CONFIG, "Force rf_pwd_icsyndiv to %1d on %4d\n",
134 "Force rf_pwd_icsyndiv to %1d on %4d\n", 134 new_bias, synth_freq);
135 new_bias, synth_freq);
136 135
137 /* swizzle rf_pwd_icsyndiv */ 136 /* swizzle rf_pwd_icsyndiv */
138 ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3); 137 ar5008_hw_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
@@ -173,8 +172,7 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
173 channelSel = ((freq - 704) * 2 - 3040) / 10; 172 channelSel = ((freq - 704) * 2 - 3040) / 10;
174 bModeSynth = 1; 173 bModeSynth = 1;
175 } else { 174 } else {
176 ath_print(common, ATH_DBG_FATAL, 175 ath_err(common, "Invalid channel %u MHz\n", freq);
177 "Invalid channel %u MHz\n", freq);
178 return -EINVAL; 176 return -EINVAL;
179 } 177 }
180 178
@@ -206,8 +204,7 @@ static int ar5008_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
206 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8); 204 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
207 aModeRefSel = ath9k_hw_reverse_bits(1, 2); 205 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
208 } else { 206 } else {
209 ath_print(common, ATH_DBG_FATAL, 207 ath_err(common, "Invalid channel %u MHz\n", freq);
210 "Invalid channel %u MHz\n", freq);
211 return -EINVAL; 208 return -EINVAL;
212 } 209 }
213 210
@@ -244,13 +241,15 @@ static void ar5008_hw_spur_mitigate(struct ath_hw *ah,
244 int upper, lower, cur_vit_mask; 241 int upper, lower, cur_vit_mask;
245 int tmp, new; 242 int tmp, new;
246 int i; 243 int i;
247 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, 244 static int pilot_mask_reg[4] = {
248 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 245 AR_PHY_TIMING7, AR_PHY_TIMING8,
246 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
249 }; 247 };
250 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, 248 static int chan_mask_reg[4] = {
251 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 249 AR_PHY_TIMING9, AR_PHY_TIMING10,
250 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
252 }; 251 };
253 int inc[4] = { 0, 100, 0, 0 }; 252 static int inc[4] = { 0, 100, 0, 0 };
254 253
255 int8_t mask_m[123]; 254 int8_t mask_m[123];
256 int8_t mask_p[123]; 255 int8_t mask_p[123];
@@ -446,8 +445,7 @@ static int ar5008_hw_rf_alloc_ext_banks(struct ath_hw *ah)
446#define ATH_ALLOC_BANK(bank, size) do { \ 445#define ATH_ALLOC_BANK(bank, size) do { \
447 bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \ 446 bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
448 if (!bank) { \ 447 if (!bank) { \
449 ath_print(common, ATH_DBG_FATAL, \ 448 ath_err(common, "Cannot allocate RF banks\n"); \
450 "Cannot allocate RF banks\n"); \
451 return -ENOMEM; \ 449 return -ENOMEM; \
452 } \ 450 } \
453 } while (0); 451 } while (0);
@@ -873,12 +871,11 @@ static int ar5008_hw_process_ini(struct ath_hw *ah,
873 channel->max_antenna_gain * 2, 871 channel->max_antenna_gain * 2,
874 channel->max_power * 2, 872 channel->max_power * 2,
875 min((u32) MAX_RATE_POWER, 873 min((u32) MAX_RATE_POWER,
876 (u32) regulatory->power_limit)); 874 (u32) regulatory->power_limit), false);
877 875
878 /* Write analog registers */ 876 /* Write analog registers */
879 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { 877 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
880 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 878 ath_err(ath9k_hw_common(ah), "ar5416SetRfRegs failed\n");
881 "ar5416SetRfRegs failed\n");
882 return -EIO; 879 return -EIO;
883 } 880 }
884 881
@@ -964,18 +961,6 @@ static void ar5008_hw_rfbus_done(struct ath_hw *ah)
964 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); 961 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
965} 962}
966 963
967static void ar5008_hw_enable_rfkill(struct ath_hw *ah)
968{
969 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
970 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
971
972 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
973 AR_GPIO_INPUT_MUX2_RFSILENT);
974
975 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
976 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
977}
978
979static void ar5008_restore_chainmask(struct ath_hw *ah) 964static void ar5008_restore_chainmask(struct ath_hw *ah)
980{ 965{
981 int rx_chainmask = ah->rxchainmask; 966 int rx_chainmask = ah->rxchainmask;
@@ -1056,10 +1041,9 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1056 u32 level = param; 1041 u32 level = param;
1057 1042
1058 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) { 1043 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
1059 ath_print(common, ATH_DBG_ANI, 1044 ath_dbg(common, ATH_DBG_ANI,
1060 "level out of range (%u > %u)\n", 1045 "level out of range (%u > %zu)\n",
1061 level, 1046 level, ARRAY_SIZE(ah->totalSizeDesired));
1062 (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
1063 return false; 1047 return false;
1064 } 1048 }
1065 1049
@@ -1084,12 +1068,12 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1084 break; 1068 break;
1085 } 1069 }
1086 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{ 1070 case ATH9K_ANI_OFDM_WEAK_SIGNAL_DETECTION:{
1087 const int m1ThreshLow[] = { 127, 50 }; 1071 static const int m1ThreshLow[] = { 127, 50 };
1088 const int m2ThreshLow[] = { 127, 40 }; 1072 static const int m2ThreshLow[] = { 127, 40 };
1089 const int m1Thresh[] = { 127, 0x4d }; 1073 static const int m1Thresh[] = { 127, 0x4d };
1090 const int m2Thresh[] = { 127, 0x40 }; 1074 static const int m2Thresh[] = { 127, 0x40 };
1091 const int m2CountThr[] = { 31, 16 }; 1075 static const int m2CountThr[] = { 31, 16 };
1092 const int m2CountThrLow[] = { 63, 48 }; 1076 static const int m2CountThrLow[] = { 63, 48 };
1093 u32 on = param ? 1 : 0; 1077 u32 on = param ? 1 : 0;
1094 1078
1095 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW, 1079 REG_RMW_FIELD(ah, AR_PHY_SFCORR_LOW,
@@ -1141,7 +1125,7 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1141 break; 1125 break;
1142 } 1126 }
1143 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{ 1127 case ATH9K_ANI_CCK_WEAK_SIGNAL_THR:{
1144 const int weakSigThrCck[] = { 8, 6 }; 1128 static const int weakSigThrCck[] = { 8, 6 };
1145 u32 high = param ? 1 : 0; 1129 u32 high = param ? 1 : 0;
1146 1130
1147 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, 1131 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT,
@@ -1157,14 +1141,13 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1157 break; 1141 break;
1158 } 1142 }
1159 case ATH9K_ANI_FIRSTEP_LEVEL:{ 1143 case ATH9K_ANI_FIRSTEP_LEVEL:{
1160 const int firstep[] = { 0, 4, 8 }; 1144 static const int firstep[] = { 0, 4, 8 };
1161 u32 level = param; 1145 u32 level = param;
1162 1146
1163 if (level >= ARRAY_SIZE(firstep)) { 1147 if (level >= ARRAY_SIZE(firstep)) {
1164 ath_print(common, ATH_DBG_ANI, 1148 ath_dbg(common, ATH_DBG_ANI,
1165 "level out of range (%u > %u)\n", 1149 "level out of range (%u > %zu)\n",
1166 level, 1150 level, ARRAY_SIZE(firstep));
1167 (unsigned) ARRAY_SIZE(firstep));
1168 return false; 1151 return false;
1169 } 1152 }
1170 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 1153 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
@@ -1178,14 +1161,13 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1178 break; 1161 break;
1179 } 1162 }
1180 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{ 1163 case ATH9K_ANI_SPUR_IMMUNITY_LEVEL:{
1181 const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 }; 1164 static const int cycpwrThr1[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
1182 u32 level = param; 1165 u32 level = param;
1183 1166
1184 if (level >= ARRAY_SIZE(cycpwrThr1)) { 1167 if (level >= ARRAY_SIZE(cycpwrThr1)) {
1185 ath_print(common, ATH_DBG_ANI, 1168 ath_dbg(common, ATH_DBG_ANI,
1186 "level out of range (%u > %u)\n", 1169 "level out of range (%u > %zu)\n",
1187 level, 1170 level, ARRAY_SIZE(cycpwrThr1));
1188 (unsigned) ARRAY_SIZE(cycpwrThr1));
1189 return false; 1171 return false;
1190 } 1172 }
1191 REG_RMW_FIELD(ah, AR_PHY_TIMING5, 1173 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
@@ -1201,25 +1183,22 @@ static bool ar5008_hw_ani_control_old(struct ath_hw *ah,
1201 case ATH9K_ANI_PRESENT: 1183 case ATH9K_ANI_PRESENT:
1202 break; 1184 break;
1203 default: 1185 default:
1204 ath_print(common, ATH_DBG_ANI, 1186 ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
1205 "invalid cmd %u\n", cmd);
1206 return false; 1187 return false;
1207 } 1188 }
1208 1189
1209 ath_print(common, ATH_DBG_ANI, "ANI parameters:\n"); 1190 ath_dbg(common, ATH_DBG_ANI, "ANI parameters:\n");
1210 ath_print(common, ATH_DBG_ANI, 1191 ath_dbg(common, ATH_DBG_ANI,
1211 "noiseImmunityLevel=%d, spurImmunityLevel=%d, " 1192 "noiseImmunityLevel=%d, spurImmunityLevel=%d, ofdmWeakSigDetectOff=%d\n",
1212 "ofdmWeakSigDetectOff=%d\n", 1193 aniState->noiseImmunityLevel,
1213 aniState->noiseImmunityLevel, 1194 aniState->spurImmunityLevel,
1214 aniState->spurImmunityLevel, 1195 !aniState->ofdmWeakSigDetectOff);
1215 !aniState->ofdmWeakSigDetectOff); 1196 ath_dbg(common, ATH_DBG_ANI,
1216 ath_print(common, ATH_DBG_ANI, 1197 "cckWeakSigThreshold=%d, firstepLevel=%d, listenTime=%d\n",
1217 "cckWeakSigThreshold=%d, " 1198 aniState->cckWeakSigThreshold,
1218 "firstepLevel=%d, listenTime=%d\n", 1199 aniState->firstepLevel,
1219 aniState->cckWeakSigThreshold, 1200 aniState->listenTime);
1220 aniState->firstepLevel, 1201 ath_dbg(common, ATH_DBG_ANI,
1221 aniState->listenTime);
1222 ath_print(common, ATH_DBG_ANI,
1223 "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", 1202 "ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
1224 aniState->ofdmPhyErrCount, 1203 aniState->ofdmPhyErrCount,
1225 aniState->cckPhyErrCount); 1204 aniState->cckPhyErrCount);
@@ -1304,12 +1283,12 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1304 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 1283 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
1305 1284
1306 if (!on != aniState->ofdmWeakSigDetectOff) { 1285 if (!on != aniState->ofdmWeakSigDetectOff) {
1307 ath_print(common, ATH_DBG_ANI, 1286 ath_dbg(common, ATH_DBG_ANI,
1308 "** ch %d: ofdm weak signal: %s=>%s\n", 1287 "** ch %d: ofdm weak signal: %s=>%s\n",
1309 chan->channel, 1288 chan->channel,
1310 !aniState->ofdmWeakSigDetectOff ? 1289 !aniState->ofdmWeakSigDetectOff ?
1311 "on" : "off", 1290 "on" : "off",
1312 on ? "on" : "off"); 1291 on ? "on" : "off");
1313 if (on) 1292 if (on)
1314 ah->stats.ast_ani_ofdmon++; 1293 ah->stats.ast_ani_ofdmon++;
1315 else 1294 else
@@ -1322,11 +1301,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1322 u32 level = param; 1301 u32 level = param;
1323 1302
1324 if (level >= ARRAY_SIZE(firstep_table)) { 1303 if (level >= ARRAY_SIZE(firstep_table)) {
1325 ath_print(common, ATH_DBG_ANI, 1304 ath_dbg(common, ATH_DBG_ANI,
1326 "ATH9K_ANI_FIRSTEP_LEVEL: level " 1305 "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
1327 "out of range (%u > %u)\n", 1306 level, ARRAY_SIZE(firstep_table));
1328 level,
1329 (unsigned) ARRAY_SIZE(firstep_table));
1330 return false; 1307 return false;
1331 } 1308 }
1332 1309
@@ -1361,24 +1338,22 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1361 AR_PHY_FIND_SIG_FIRSTEP_LOW, value2); 1338 AR_PHY_FIND_SIG_FIRSTEP_LOW, value2);
1362 1339
1363 if (level != aniState->firstepLevel) { 1340 if (level != aniState->firstepLevel) {
1364 ath_print(common, ATH_DBG_ANI, 1341 ath_dbg(common, ATH_DBG_ANI,
1365 "** ch %d: level %d=>%d[def:%d] " 1342 "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
1366 "firstep[level]=%d ini=%d\n", 1343 chan->channel,
1367 chan->channel, 1344 aniState->firstepLevel,
1368 aniState->firstepLevel, 1345 level,
1369 level, 1346 ATH9K_ANI_FIRSTEP_LVL_NEW,
1370 ATH9K_ANI_FIRSTEP_LVL_NEW, 1347 value,
1371 value, 1348 aniState->iniDef.firstep);
1372 aniState->iniDef.firstep); 1349 ath_dbg(common, ATH_DBG_ANI,
1373 ath_print(common, ATH_DBG_ANI, 1350 "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
1374 "** ch %d: level %d=>%d[def:%d] " 1351 chan->channel,
1375 "firstep_low[level]=%d ini=%d\n", 1352 aniState->firstepLevel,
1376 chan->channel, 1353 level,
1377 aniState->firstepLevel, 1354 ATH9K_ANI_FIRSTEP_LVL_NEW,
1378 level, 1355 value2,
1379 ATH9K_ANI_FIRSTEP_LVL_NEW, 1356 aniState->iniDef.firstepLow);
1380 value2,
1381 aniState->iniDef.firstepLow);
1382 if (level > aniState->firstepLevel) 1357 if (level > aniState->firstepLevel)
1383 ah->stats.ast_ani_stepup++; 1358 ah->stats.ast_ani_stepup++;
1384 else if (level < aniState->firstepLevel) 1359 else if (level < aniState->firstepLevel)
@@ -1391,11 +1366,9 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1391 u32 level = param; 1366 u32 level = param;
1392 1367
1393 if (level >= ARRAY_SIZE(cycpwrThr1_table)) { 1368 if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
1394 ath_print(common, ATH_DBG_ANI, 1369 ath_dbg(common, ATH_DBG_ANI,
1395 "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level " 1370 "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
1396 "out of range (%u > %u)\n", 1371 level, ARRAY_SIZE(cycpwrThr1_table));
1397 level,
1398 (unsigned) ARRAY_SIZE(cycpwrThr1_table));
1399 return false; 1372 return false;
1400 } 1373 }
1401 /* 1374 /*
@@ -1429,24 +1402,22 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1429 AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2); 1402 AR_PHY_EXT_TIMING5_CYCPWR_THR1, value2);
1430 1403
1431 if (level != aniState->spurImmunityLevel) { 1404 if (level != aniState->spurImmunityLevel) {
1432 ath_print(common, ATH_DBG_ANI, 1405 ath_dbg(common, ATH_DBG_ANI,
1433 "** ch %d: level %d=>%d[def:%d] " 1406 "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
1434 "cycpwrThr1[level]=%d ini=%d\n", 1407 chan->channel,
1435 chan->channel, 1408 aniState->spurImmunityLevel,
1436 aniState->spurImmunityLevel, 1409 level,
1437 level, 1410 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
1438 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 1411 value,
1439 value, 1412 aniState->iniDef.cycpwrThr1);
1440 aniState->iniDef.cycpwrThr1); 1413 ath_dbg(common, ATH_DBG_ANI,
1441 ath_print(common, ATH_DBG_ANI, 1414 "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
1442 "** ch %d: level %d=>%d[def:%d] " 1415 chan->channel,
1443 "cycpwrThr1Ext[level]=%d ini=%d\n", 1416 aniState->spurImmunityLevel,
1444 chan->channel, 1417 level,
1445 aniState->spurImmunityLevel, 1418 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
1446 level, 1419 value2,
1447 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 1420 aniState->iniDef.cycpwrThr1Ext);
1448 value2,
1449 aniState->iniDef.cycpwrThr1Ext);
1450 if (level > aniState->spurImmunityLevel) 1421 if (level > aniState->spurImmunityLevel)
1451 ah->stats.ast_ani_spurup++; 1422 ah->stats.ast_ani_spurup++;
1452 else if (level < aniState->spurImmunityLevel) 1423 else if (level < aniState->spurImmunityLevel)
@@ -1465,22 +1436,19 @@ static bool ar5008_hw_ani_control_new(struct ath_hw *ah,
1465 case ATH9K_ANI_PRESENT: 1436 case ATH9K_ANI_PRESENT:
1466 break; 1437 break;
1467 default: 1438 default:
1468 ath_print(common, ATH_DBG_ANI, 1439 ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
1469 "invalid cmd %u\n", cmd);
1470 return false; 1440 return false;
1471 } 1441 }
1472 1442
1473 ath_print(common, ATH_DBG_ANI, 1443 ath_dbg(common, ATH_DBG_ANI,
1474 "ANI parameters: SI=%d, ofdmWS=%s FS=%d " 1444 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
1475 "MRCcck=%s listenTime=%d " 1445 aniState->spurImmunityLevel,
1476 "ofdmErrs=%d cckErrs=%d\n", 1446 !aniState->ofdmWeakSigDetectOff ? "on" : "off",
1477 aniState->spurImmunityLevel, 1447 aniState->firstepLevel,
1478 !aniState->ofdmWeakSigDetectOff ? "on" : "off", 1448 !aniState->mrcCCKOff ? "on" : "off",
1479 aniState->firstepLevel, 1449 aniState->listenTime,
1480 !aniState->mrcCCKOff ? "on" : "off", 1450 aniState->ofdmPhyErrCount,
1481 aniState->listenTime, 1451 aniState->cckPhyErrCount);
1482 aniState->ofdmPhyErrCount,
1483 aniState->cckPhyErrCount);
1484 return true; 1452 return true;
1485} 1453}
1486 1454
@@ -1490,25 +1458,25 @@ static void ar5008_hw_do_getnf(struct ath_hw *ah,
1490 int16_t nf; 1458 int16_t nf;
1491 1459
1492 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR); 1460 nf = MS(REG_READ(ah, AR_PHY_CCA), AR_PHY_MINCCA_PWR);
1493 nfarray[0] = sign_extend(nf, 9); 1461 nfarray[0] = sign_extend32(nf, 8);
1494 1462
1495 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR); 1463 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR_PHY_CH1_MINCCA_PWR);
1496 nfarray[1] = sign_extend(nf, 9); 1464 nfarray[1] = sign_extend32(nf, 8);
1497 1465
1498 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR); 1466 nf = MS(REG_READ(ah, AR_PHY_CH2_CCA), AR_PHY_CH2_MINCCA_PWR);
1499 nfarray[2] = sign_extend(nf, 9); 1467 nfarray[2] = sign_extend32(nf, 8);
1500 1468
1501 if (!IS_CHAN_HT40(ah->curchan)) 1469 if (!IS_CHAN_HT40(ah->curchan))
1502 return; 1470 return;
1503 1471
1504 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); 1472 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
1505 nfarray[3] = sign_extend(nf, 9); 1473 nfarray[3] = sign_extend32(nf, 8);
1506 1474
1507 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR); 1475 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR_PHY_CH1_EXT_MINCCA_PWR);
1508 nfarray[4] = sign_extend(nf, 9); 1476 nfarray[4] = sign_extend32(nf, 8);
1509 1477
1510 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR); 1478 nf = MS(REG_READ(ah, AR_PHY_CH2_EXT_CCA), AR_PHY_CH2_EXT_MINCCA_PWR);
1511 nfarray[5] = sign_extend(nf, 9); 1479 nfarray[5] = sign_extend32(nf, 8);
1512} 1480}
1513 1481
1514/* 1482/*
@@ -1526,13 +1494,12 @@ static void ar5008_hw_ani_cache_ini_regs(struct ath_hw *ah)
1526 1494
1527 iniDef = &aniState->iniDef; 1495 iniDef = &aniState->iniDef;
1528 1496
1529 ath_print(common, ATH_DBG_ANI, 1497 ath_dbg(common, ATH_DBG_ANI, "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
1530 "ver %d.%d opmode %u chan %d Mhz/0x%x\n", 1498 ah->hw_version.macVersion,
1531 ah->hw_version.macVersion, 1499 ah->hw_version.macRev,
1532 ah->hw_version.macRev, 1500 ah->opmode,
1533 ah->opmode, 1501 chan->channel,
1534 chan->channel, 1502 chan->channelFlags);
1535 chan->channelFlags);
1536 1503
1537 val = REG_READ(ah, AR_PHY_SFCORR); 1504 val = REG_READ(ah, AR_PHY_SFCORR);
1538 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH); 1505 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1579,10 +1546,55 @@ static void ar5008_hw_set_nf_limits(struct ath_hw *ah)
1579 ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ; 1546 ah->nf_5g.nominal = AR_PHY_CCA_NOM_VAL_5416_5GHZ;
1580} 1547}
1581 1548
1549static void ar5008_hw_set_radar_params(struct ath_hw *ah,
1550 struct ath_hw_radar_conf *conf)
1551{
1552 u32 radar_0 = 0, radar_1 = 0;
1553
1554 if (!conf) {
1555 REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
1556 return;
1557 }
1558
1559 radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
1560 radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
1561 radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
1562 radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
1563 radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
1564 radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
1565
1566 radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
1567 radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
1568 radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
1569 radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
1570 radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
1571
1572 REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
1573 REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
1574 if (conf->ext_channel)
1575 REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
1576 else
1577 REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
1578}
1579
1580static void ar5008_hw_set_radar_conf(struct ath_hw *ah)
1581{
1582 struct ath_hw_radar_conf *conf = &ah->radar_conf;
1583
1584 conf->fir_power = -33;
1585 conf->radar_rssi = 20;
1586 conf->pulse_height = 10;
1587 conf->pulse_rssi = 24;
1588 conf->pulse_inband = 15;
1589 conf->pulse_maxlen = 255;
1590 conf->pulse_inband_step = 12;
1591 conf->radar_inband = 8;
1592}
1593
1582void ar5008_hw_attach_phy_ops(struct ath_hw *ah) 1594void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1583{ 1595{
1584 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1596 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1585 const u32 ar5416_cca_regs[6] = { 1597 static const u32 ar5416_cca_regs[6] = {
1586 AR_PHY_CCA, 1598 AR_PHY_CCA,
1587 AR_PHY_CH1_CCA, 1599 AR_PHY_CH1_CCA,
1588 AR_PHY_CH2_CCA, 1600 AR_PHY_CH2_CCA,
@@ -1605,10 +1617,10 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1605 priv_ops->set_delta_slope = ar5008_hw_set_delta_slope; 1617 priv_ops->set_delta_slope = ar5008_hw_set_delta_slope;
1606 priv_ops->rfbus_req = ar5008_hw_rfbus_req; 1618 priv_ops->rfbus_req = ar5008_hw_rfbus_req;
1607 priv_ops->rfbus_done = ar5008_hw_rfbus_done; 1619 priv_ops->rfbus_done = ar5008_hw_rfbus_done;
1608 priv_ops->enable_rfkill = ar5008_hw_enable_rfkill;
1609 priv_ops->restore_chainmask = ar5008_restore_chainmask; 1620 priv_ops->restore_chainmask = ar5008_restore_chainmask;
1610 priv_ops->set_diversity = ar5008_set_diversity; 1621 priv_ops->set_diversity = ar5008_set_diversity;
1611 priv_ops->do_getnf = ar5008_hw_do_getnf; 1622 priv_ops->do_getnf = ar5008_hw_do_getnf;
1623 priv_ops->set_radar_params = ar5008_hw_set_radar_params;
1612 1624
1613 if (modparam_force_new_ani) { 1625 if (modparam_force_new_ani) {
1614 priv_ops->ani_control = ar5008_hw_ani_control_new; 1626 priv_ops->ani_control = ar5008_hw_ani_control_new;
@@ -1624,5 +1636,6 @@ void ar5008_hw_attach_phy_ops(struct ath_hw *ah)
1624 priv_ops->compute_pll_control = ar5008_hw_compute_pll_control; 1636 priv_ops->compute_pll_control = ar5008_hw_compute_pll_control;
1625 1637
1626 ar5008_hw_set_nf_limits(ah); 1638 ar5008_hw_set_nf_limits(ah);
1639 ar5008_hw_set_radar_conf(ah);
1627 memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs)); 1640 memcpy(ah->nf_regs, ar5416_cca_regs, sizeof(ah->nf_regs));
1628} 1641}
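
The do_getnf hunk above replaces the driver-local sign_extend(nf, 9) with the generic sign_extend32(nf, 8): the kernel helper takes the zero-based index of the sign bit rather than the field width, so the 9-bit MINCCA_PWR fields use index 8. A small hedged sketch of the operation itself (a generic illustration, not the kernel's implementation):

    #include <stdint.h>

    static int32_t example_sign_extend32(uint32_t value, unsigned int sign_bit_index)
    {
            uint32_t shift = 31 - sign_bit_index;

            /* Shift the sign bit up to bit 31, then arithmetic-shift back down. */
            return (int32_t)(value << shift) >> shift;
    }

    /* For a 9-bit field the sign bit is bit 8, so a raw reading of 0x1f3
     * (-13 in 9-bit two's complement) extends to -13:
     *     example_sign_extend32(0x1f3, 8) == -13
     */
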
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_calib.c b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
index 15f62cd0cc38..01880aa13e36 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_calib.c
@@ -39,18 +39,18 @@ static void ar9002_hw_setup_calibration(struct ath_hw *ah,
39 switch (currCal->calData->calType) { 39 switch (currCal->calData->calType) {
40 case IQ_MISMATCH_CAL: 40 case IQ_MISMATCH_CAL:
41 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); 41 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
42 ath_print(common, ATH_DBG_CALIBRATE, 42 ath_dbg(common, ATH_DBG_CALIBRATE,
43 "starting IQ Mismatch Calibration\n"); 43 "starting IQ Mismatch Calibration\n");
44 break; 44 break;
45 case ADC_GAIN_CAL: 45 case ADC_GAIN_CAL:
46 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN); 46 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
47 ath_print(common, ATH_DBG_CALIBRATE, 47 ath_dbg(common, ATH_DBG_CALIBRATE,
48 "starting ADC Gain Calibration\n"); 48 "starting ADC Gain Calibration\n");
49 break; 49 break;
50 case ADC_DC_CAL: 50 case ADC_DC_CAL:
51 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER); 51 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
52 ath_print(common, ATH_DBG_CALIBRATE, 52 ath_dbg(common, ATH_DBG_CALIBRATE,
53 "starting ADC DC Calibration\n"); 53 "starting ADC DC Calibration\n");
54 break; 54 break;
55 } 55 }
56 56
@@ -107,11 +107,11 @@ static void ar9002_hw_iqcal_collect(struct ath_hw *ah)
107 REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); 107 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
108 ah->totalIqCorrMeas[i] += 108 ah->totalIqCorrMeas[i] +=
109 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); 109 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
110 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, 110 ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
111 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", 111 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
112 ah->cal_samples, i, ah->totalPowerMeasI[i], 112 ah->cal_samples, i, ah->totalPowerMeasI[i],
113 ah->totalPowerMeasQ[i], 113 ah->totalPowerMeasQ[i],
114 ah->totalIqCorrMeas[i]); 114 ah->totalIqCorrMeas[i]);
115 } 115 }
116} 116}
117 117
@@ -129,14 +129,13 @@ static void ar9002_hw_adc_gaincal_collect(struct ath_hw *ah)
129 ah->totalAdcQEvenPhase[i] += 129 ah->totalAdcQEvenPhase[i] +=
130 REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); 130 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
131 131
132 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, 132 ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
133 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " 133 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
134 "oddq=0x%08x; evenq=0x%08x;\n", 134 ah->cal_samples, i,
135 ah->cal_samples, i, 135 ah->totalAdcIOddPhase[i],
136 ah->totalAdcIOddPhase[i], 136 ah->totalAdcIEvenPhase[i],
137 ah->totalAdcIEvenPhase[i], 137 ah->totalAdcQOddPhase[i],
138 ah->totalAdcQOddPhase[i], 138 ah->totalAdcQEvenPhase[i]);
139 ah->totalAdcQEvenPhase[i]);
140 } 139 }
141} 140}
142 141
@@ -154,14 +153,13 @@ static void ar9002_hw_adc_dccal_collect(struct ath_hw *ah)
154 ah->totalAdcDcOffsetQEvenPhase[i] += 153 ah->totalAdcDcOffsetQEvenPhase[i] +=
155 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); 154 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
156 155
157 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, 156 ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
158 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " 157 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; oddq=0x%08x; evenq=0x%08x;\n",
159 "oddq=0x%08x; evenq=0x%08x;\n", 158 ah->cal_samples, i,
160 ah->cal_samples, i, 159 ah->totalAdcDcOffsetIOddPhase[i],
161 ah->totalAdcDcOffsetIOddPhase[i], 160 ah->totalAdcDcOffsetIEvenPhase[i],
162 ah->totalAdcDcOffsetIEvenPhase[i], 161 ah->totalAdcDcOffsetQOddPhase[i],
163 ah->totalAdcDcOffsetQOddPhase[i], 162 ah->totalAdcDcOffsetQEvenPhase[i]);
164 ah->totalAdcDcOffsetQEvenPhase[i]);
165 } 163 }
166} 164}
167 165
@@ -178,13 +176,13 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
178 powerMeasQ = ah->totalPowerMeasQ[i]; 176 powerMeasQ = ah->totalPowerMeasQ[i];
179 iqCorrMeas = ah->totalIqCorrMeas[i]; 177 iqCorrMeas = ah->totalIqCorrMeas[i];
180 178
181 ath_print(common, ATH_DBG_CALIBRATE, 179 ath_dbg(common, ATH_DBG_CALIBRATE,
182 "Starting IQ Cal and Correction for Chain %d\n", 180 "Starting IQ Cal and Correction for Chain %d\n",
183 i); 181 i);
184 182
185 ath_print(common, ATH_DBG_CALIBRATE, 183 ath_dbg(common, ATH_DBG_CALIBRATE,
 186 "Original: Chn %diq_corr_meas = 0x%08x\n", 184 "Original: Chn %diq_corr_meas = 0x%08x\n",
187 i, ah->totalIqCorrMeas[i]); 185 i, ah->totalIqCorrMeas[i]);
188 186
189 iqCorrNeg = 0; 187 iqCorrNeg = 0;
190 188
@@ -193,12 +191,12 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
193 iqCorrNeg = 1; 191 iqCorrNeg = 1;
194 } 192 }
195 193
196 ath_print(common, ATH_DBG_CALIBRATE, 194 ath_dbg(common, ATH_DBG_CALIBRATE,
197 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); 195 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
198 ath_print(common, ATH_DBG_CALIBRATE, 196 ath_dbg(common, ATH_DBG_CALIBRATE,
199 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); 197 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
200 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", 198 ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
201 iqCorrNeg); 199 iqCorrNeg);
202 200
203 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128; 201 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
204 qCoffDenom = powerMeasQ / 64; 202 qCoffDenom = powerMeasQ / 64;
@@ -207,14 +205,14 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
207 (qCoffDenom != 0)) { 205 (qCoffDenom != 0)) {
208 iCoff = iqCorrMeas / iCoffDenom; 206 iCoff = iqCorrMeas / iCoffDenom;
209 qCoff = powerMeasI / qCoffDenom - 64; 207 qCoff = powerMeasI / qCoffDenom - 64;
210 ath_print(common, ATH_DBG_CALIBRATE, 208 ath_dbg(common, ATH_DBG_CALIBRATE,
211 "Chn %d iCoff = 0x%08x\n", i, iCoff); 209 "Chn %d iCoff = 0x%08x\n", i, iCoff);
212 ath_print(common, ATH_DBG_CALIBRATE, 210 ath_dbg(common, ATH_DBG_CALIBRATE,
213 "Chn %d qCoff = 0x%08x\n", i, qCoff); 211 "Chn %d qCoff = 0x%08x\n", i, qCoff);
214 212
215 iCoff = iCoff & 0x3f; 213 iCoff = iCoff & 0x3f;
216 ath_print(common, ATH_DBG_CALIBRATE, 214 ath_dbg(common, ATH_DBG_CALIBRATE,
217 "New: Chn %d iCoff = 0x%08x\n", i, iCoff); 215 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
218 if (iqCorrNeg == 0x0) 216 if (iqCorrNeg == 0x0)
219 iCoff = 0x40 - iCoff; 217 iCoff = 0x40 - iCoff;
220 218
@@ -223,9 +221,9 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
223 else if (qCoff <= -16) 221 else if (qCoff <= -16)
224 qCoff = -16; 222 qCoff = -16;
225 223
226 ath_print(common, ATH_DBG_CALIBRATE, 224 ath_dbg(common, ATH_DBG_CALIBRATE,
227 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", 225 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
228 i, iCoff, qCoff); 226 i, iCoff, qCoff);
229 227
230 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), 228 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
231 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, 229 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
@@ -233,9 +231,9 @@ static void ar9002_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
233 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), 231 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
234 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, 232 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
235 qCoff); 233 qCoff);
236 ath_print(common, ATH_DBG_CALIBRATE, 234 ath_dbg(common, ATH_DBG_CALIBRATE,
237 "IQ Cal and Correction done for Chain %d\n", 235 "IQ Cal and Correction done for Chain %d\n",
238 i); 236 i);
239 } 237 }
240 } 238 }
241 239
@@ -255,21 +253,21 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
255 qOddMeasOffset = ah->totalAdcQOddPhase[i]; 253 qOddMeasOffset = ah->totalAdcQOddPhase[i];
256 qEvenMeasOffset = ah->totalAdcQEvenPhase[i]; 254 qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
257 255
258 ath_print(common, ATH_DBG_CALIBRATE, 256 ath_dbg(common, ATH_DBG_CALIBRATE,
259 "Starting ADC Gain Cal for Chain %d\n", i); 257 "Starting ADC Gain Cal for Chain %d\n", i);
260 258
261 ath_print(common, ATH_DBG_CALIBRATE, 259 ath_dbg(common, ATH_DBG_CALIBRATE,
262 "Chn %d pwr_meas_odd_i = 0x%08x\n", i, 260 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
263 iOddMeasOffset); 261 iOddMeasOffset);
264 ath_print(common, ATH_DBG_CALIBRATE, 262 ath_dbg(common, ATH_DBG_CALIBRATE,
265 "Chn %d pwr_meas_even_i = 0x%08x\n", i, 263 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
266 iEvenMeasOffset); 264 iEvenMeasOffset);
267 ath_print(common, ATH_DBG_CALIBRATE, 265 ath_dbg(common, ATH_DBG_CALIBRATE,
268 "Chn %d pwr_meas_odd_q = 0x%08x\n", i, 266 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
269 qOddMeasOffset); 267 qOddMeasOffset);
270 ath_print(common, ATH_DBG_CALIBRATE, 268 ath_dbg(common, ATH_DBG_CALIBRATE,
271 "Chn %d pwr_meas_even_q = 0x%08x\n", i, 269 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
272 qEvenMeasOffset); 270 qEvenMeasOffset);
273 271
274 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) { 272 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
275 iGainMismatch = 273 iGainMismatch =
@@ -279,20 +277,20 @@ static void ar9002_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
279 ((qOddMeasOffset * 32) / 277 ((qOddMeasOffset * 32) /
280 qEvenMeasOffset) & 0x3f; 278 qEvenMeasOffset) & 0x3f;
281 279
282 ath_print(common, ATH_DBG_CALIBRATE, 280 ath_dbg(common, ATH_DBG_CALIBRATE,
283 "Chn %d gain_mismatch_i = 0x%08x\n", i, 281 "Chn %d gain_mismatch_i = 0x%08x\n", i,
284 iGainMismatch); 282 iGainMismatch);
285 ath_print(common, ATH_DBG_CALIBRATE, 283 ath_dbg(common, ATH_DBG_CALIBRATE,
286 "Chn %d gain_mismatch_q = 0x%08x\n", i, 284 "Chn %d gain_mismatch_q = 0x%08x\n", i,
287 qGainMismatch); 285 qGainMismatch);
288 286
289 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); 287 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
290 val &= 0xfffff000; 288 val &= 0xfffff000;
291 val |= (qGainMismatch) | (iGainMismatch << 6); 289 val |= (qGainMismatch) | (iGainMismatch << 6);
292 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); 290 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
293 291
294 ath_print(common, ATH_DBG_CALIBRATE, 292 ath_dbg(common, ATH_DBG_CALIBRATE,
295 "ADC Gain Cal done for Chain %d\n", i); 293 "ADC Gain Cal done for Chain %d\n", i);
296 } 294 }
297 } 295 }
298 296
@@ -317,41 +315,41 @@ static void ar9002_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
317 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i]; 315 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
318 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i]; 316 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
319 317
320 ath_print(common, ATH_DBG_CALIBRATE, 318 ath_dbg(common, ATH_DBG_CALIBRATE,
321 "Starting ADC DC Offset Cal for Chain %d\n", i); 319 "Starting ADC DC Offset Cal for Chain %d\n", i);
322 320
323 ath_print(common, ATH_DBG_CALIBRATE, 321 ath_dbg(common, ATH_DBG_CALIBRATE,
324 "Chn %d pwr_meas_odd_i = %d\n", i, 322 "Chn %d pwr_meas_odd_i = %d\n", i,
325 iOddMeasOffset); 323 iOddMeasOffset);
326 ath_print(common, ATH_DBG_CALIBRATE, 324 ath_dbg(common, ATH_DBG_CALIBRATE,
327 "Chn %d pwr_meas_even_i = %d\n", i, 325 "Chn %d pwr_meas_even_i = %d\n", i,
328 iEvenMeasOffset); 326 iEvenMeasOffset);
329 ath_print(common, ATH_DBG_CALIBRATE, 327 ath_dbg(common, ATH_DBG_CALIBRATE,
330 "Chn %d pwr_meas_odd_q = %d\n", i, 328 "Chn %d pwr_meas_odd_q = %d\n", i,
331 qOddMeasOffset); 329 qOddMeasOffset);
332 ath_print(common, ATH_DBG_CALIBRATE, 330 ath_dbg(common, ATH_DBG_CALIBRATE,
333 "Chn %d pwr_meas_even_q = %d\n", i, 331 "Chn %d pwr_meas_even_q = %d\n", i,
334 qEvenMeasOffset); 332 qEvenMeasOffset);
335 333
336 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) / 334 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
337 numSamples) & 0x1ff; 335 numSamples) & 0x1ff;
338 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) / 336 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
339 numSamples) & 0x1ff; 337 numSamples) & 0x1ff;
340 338
341 ath_print(common, ATH_DBG_CALIBRATE, 339 ath_dbg(common, ATH_DBG_CALIBRATE,
342 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i, 340 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
343 iDcMismatch); 341 iDcMismatch);
344 ath_print(common, ATH_DBG_CALIBRATE, 342 ath_dbg(common, ATH_DBG_CALIBRATE,
345 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i, 343 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
346 qDcMismatch); 344 qDcMismatch);
347 345
348 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); 346 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
349 val &= 0xc0000fff; 347 val &= 0xc0000fff;
350 val |= (qDcMismatch << 12) | (iDcMismatch << 21); 348 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
351 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); 349 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
352 350
353 ath_print(common, ATH_DBG_CALIBRATE, 351 ath_dbg(common, ATH_DBG_CALIBRATE,
354 "ADC DC Offset Cal done for Chain %d\n", i); 352 "ADC DC Offset Cal done for Chain %d\n", i);
355 } 353 }
356 354
357 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0), 355 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
@@ -540,7 +538,7 @@ static inline void ar9285_hw_pa_cal(struct ath_hw *ah, bool is_reset)
540 { 0x7838, 0 }, 538 { 0x7838, 0 },
541 }; 539 };
542 540
543 ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n"); 541 ath_dbg(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
544 542
545 /* PA CAL is not needed for high power solution */ 543 /* PA CAL is not needed for high power solution */
546 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 544 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -721,9 +719,8 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
721 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); 719 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
722 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, 720 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
723 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { 721 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
724 ath_print(common, ATH_DBG_CALIBRATE, "offset " 722 ath_dbg(common, ATH_DBG_CALIBRATE,
725 "calibration failed to complete in " 723 "offset calibration failed to complete in 1ms; noisy environment?\n");
726 "1ms; noisy ??\n");
727 return false; 724 return false;
728 } 725 }
729 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN); 726 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
@@ -736,8 +733,8 @@ static bool ar9285_hw_cl_cal(struct ath_hw *ah, struct ath9k_channel *chan)
736 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); 733 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
737 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 734 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
738 0, AH_WAIT_TIMEOUT)) { 735 0, AH_WAIT_TIMEOUT)) {
739 ath_print(common, ATH_DBG_CALIBRATE, "offset calibration " 736 ath_dbg(common, ATH_DBG_CALIBRATE,
740 "failed to complete in 1ms; noisy ??\n"); 737 "offset calibration failed to complete in 1ms; noisy environment?\n");
741 return false; 738 return false;
742 } 739 }
743 740
@@ -829,9 +826,8 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
829 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, 826 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
830 AR_PHY_AGC_CONTROL_CAL, 827 AR_PHY_AGC_CONTROL_CAL,
831 0, AH_WAIT_TIMEOUT)) { 828 0, AH_WAIT_TIMEOUT)) {
832 ath_print(common, ATH_DBG_CALIBRATE, 829 ath_dbg(common, ATH_DBG_CALIBRATE,
833 "offset calibration failed to " 830 "offset calibration failed to complete in 1ms; noisy environment?\n");
834 "complete in 1ms; noisy environment?\n");
835 return false; 831 return false;
836 } 832 }
837 833
@@ -866,19 +862,19 @@ static bool ar9002_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
866 862
867 INIT_CAL(&ah->adcgain_caldata); 863 INIT_CAL(&ah->adcgain_caldata);
868 INSERT_CAL(ah, &ah->adcgain_caldata); 864 INSERT_CAL(ah, &ah->adcgain_caldata);
869 ath_print(common, ATH_DBG_CALIBRATE, 865 ath_dbg(common, ATH_DBG_CALIBRATE,
870 "enabling ADC Gain Calibration.\n"); 866 "enabling ADC Gain Calibration.\n");
871 867
872 INIT_CAL(&ah->adcdc_caldata); 868 INIT_CAL(&ah->adcdc_caldata);
873 INSERT_CAL(ah, &ah->adcdc_caldata); 869 INSERT_CAL(ah, &ah->adcdc_caldata);
874 ath_print(common, ATH_DBG_CALIBRATE, 870 ath_dbg(common, ATH_DBG_CALIBRATE,
875 "enabling ADC DC Calibration.\n"); 871 "enabling ADC DC Calibration.\n");
876 } 872 }
877 873
878 INIT_CAL(&ah->iq_caldata); 874 INIT_CAL(&ah->iq_caldata);
879 INSERT_CAL(ah, &ah->iq_caldata); 875 INSERT_CAL(ah, &ah->iq_caldata);
880 ath_print(common, ATH_DBG_CALIBRATE, 876 ath_dbg(common, ATH_DBG_CALIBRATE,
881 "enabling IQ Calibration.\n"); 877 "enabling IQ Calibration.\n");
882 878
883 ah->cal_list_curr = ah->cal_list; 879 ah->cal_list_curr = ah->cal_list;
884 880
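Note: the ar9002 IQ-mismatch hunks above only reflow the ath_dbg() output; the per-chain arithmetic is unchanged. For reference, a standalone sketch of that arithmetic follows. It is not the driver's code: the 128/64 divisors, the "- 64" offset, the 6-bit mask and the lower qCoff clamp are copied from the visible context, while the upper qCoff clamp sits outside the hunk and is assumed.

#include <stdint.h>
#include <stdio.h>

static void iq_coeff_sketch(uint32_t power_meas_i, uint32_t power_meas_q,
			    int32_t iq_corr_meas)
{
	uint32_t i_denom, q_denom;
	int32_t i_coff, q_coff;
	int corr_neg = 0;

	/* Work with the magnitude of the I/Q correlation measurement. */
	if (iq_corr_meas < 0) {
		iq_corr_meas = -iq_corr_meas;
		corr_neg = 1;
	}

	i_denom = (power_meas_i / 2 + power_meas_q / 2) / 128;
	q_denom = power_meas_q / 64;

	/* The driver skips the chain when either denominator is zero. */
	if (i_denom == 0 || q_denom == 0)
		return;

	i_coff = (int32_t)(iq_corr_meas / i_denom);
	q_coff = (int32_t)(power_meas_i / q_denom) - 64;

	i_coff &= 0x3f;			/* 6-bit hardware field */
	if (!corr_neg)
		i_coff = 0x40 - i_coff;

	if (q_coff <= -16)		/* lower clamp from the hunk above; */
		q_coff = -16;		/* upper clamp not shown there      */

	printf("iCoff=%d qCoff=%d\n", i_coff, q_coff);
}

int main(void)
{
	iq_coeff_sketch(0x1000, 0x0f00, 0x200);
	return 0;
}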
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_hw.c b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
index 48261b7252d0..f8a7771faee2 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_hw.c
@@ -22,28 +22,10 @@
22 22
23int modparam_force_new_ani; 23int modparam_force_new_ani;
24module_param_named(force_new_ani, modparam_force_new_ani, int, 0444); 24module_param_named(force_new_ani, modparam_force_new_ani, int, 0444);
25MODULE_PARM_DESC(nohwcrypt, "Force new ANI for AR5008, AR9001, AR9002"); 25MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");
26 26
27/* General hardware code for the A5008/AR9001/AR9002 hadware families */ 27/* General hardware code for the A5008/AR9001/AR9002 hadware families */
28 28
29static bool ar9002_hw_macversion_supported(u32 macversion)
30{
31 switch (macversion) {
32 case AR_SREV_VERSION_5416_PCI:
33 case AR_SREV_VERSION_5416_PCIE:
34 case AR_SREV_VERSION_9160:
35 case AR_SREV_VERSION_9100:
36 case AR_SREV_VERSION_9280:
37 case AR_SREV_VERSION_9285:
38 case AR_SREV_VERSION_9287:
39 case AR_SREV_VERSION_9271:
40 return true;
41 default:
42 break;
43 }
44 return false;
45}
46
47static void ar9002_hw_init_mode_regs(struct ath_hw *ah) 29static void ar9002_hw_init_mode_regs(struct ath_hw *ah)
48{ 30{
49 if (AR_SREV_9271(ah)) { 31 if (AR_SREV_9271(ah)) {
@@ -494,9 +476,9 @@ int ar9002_hw_rf_claim(struct ath_hw *ah)
494 case AR_RAD2122_SREV_MAJOR: 476 case AR_RAD2122_SREV_MAJOR:
495 break; 477 break;
496 default: 478 default:
497 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 479 ath_err(ath9k_hw_common(ah),
498 "Radio Chip Rev 0x%02X not supported\n", 480 "Radio Chip Rev 0x%02X not supported\n",
499 val & AR_RADIO_SREV_MAJOR); 481 val & AR_RADIO_SREV_MAJOR);
500 return -EOPNOTSUPP; 482 return -EOPNOTSUPP;
501 } 483 }
502 484
@@ -565,7 +547,6 @@ void ar9002_hw_attach_ops(struct ath_hw *ah)
565 547
566 priv_ops->init_mode_regs = ar9002_hw_init_mode_regs; 548 priv_ops->init_mode_regs = ar9002_hw_init_mode_regs;
567 priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs; 549 priv_ops->init_mode_gain_regs = ar9002_hw_init_mode_gain_regs;
568 priv_ops->macversion_supported = ar9002_hw_macversion_supported;
569 550
570 ops->config_pci_powersave = ar9002_hw_configpcipowersave; 551 ops->config_pci_powersave = ar9002_hw_configpcipowersave;
571 552
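Aside: the MODULE_PARM_DESC fix above restores the required pairing between module_param_named() and its description, which previously referred to an unrelated "nohwcrypt" parameter. A minimal illustration of the pattern, as a fragment of a hypothetical module (placeholder variable name, not ath9k code):

#include <linux/module.h>

static int example_force_new_ani;	/* placeholder variable name */
module_param_named(force_new_ani, example_force_new_ani, int, 0444);
/* The description must be attached to the parameter name that is exposed. */
MODULE_PARM_DESC(force_new_ani, "Force new ANI for AR5008, AR9001, AR9002");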
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
index 50dda394f8be..399ab3bb299b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
@@ -90,13 +90,10 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
90 90
91 *masked = isr & ATH9K_INT_COMMON; 91 *masked = isr & ATH9K_INT_COMMON;
92 92
93 if (ah->config.rx_intr_mitigation) { 93 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM |
94 if (isr & (AR_ISR_RXMINTR | AR_ISR_RXINTM)) 94 AR_ISR_RXOK | AR_ISR_RXERR))
95 *masked |= ATH9K_INT_RX;
96 }
97
98 if (isr & (AR_ISR_RXOK | AR_ISR_RXERR))
99 *masked |= ATH9K_INT_RX; 95 *masked |= ATH9K_INT_RX;
96
100 if (isr & 97 if (isr &
101 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR | 98 (AR_ISR_TXOK | AR_ISR_TXDESC | AR_ISR_TXERR |
102 AR_ISR_TXEOL)) { 99 AR_ISR_TXEOL)) {
@@ -114,16 +111,8 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
114 } 111 }
115 112
116 if (isr & AR_ISR_RXORN) { 113 if (isr & AR_ISR_RXORN) {
117 ath_print(common, ATH_DBG_INTERRUPT, 114 ath_dbg(common, ATH_DBG_INTERRUPT,
118 "receive FIFO overrun interrupt\n"); 115 "receive FIFO overrun interrupt\n");
119 }
120
121 if (!AR_SREV_9100(ah)) {
122 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) {
123 u32 isr5 = REG_READ(ah, AR_ISR_S5_S);
124 if (isr5 & AR_ISR_S5_TIM_TIMER)
125 *masked |= ATH9K_INT_TIM_TIMER;
126 }
127 } 116 }
128 117
129 *masked |= mask2; 118 *masked |= mask2;
@@ -136,17 +125,18 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
136 u32 s5_s; 125 u32 s5_s;
137 126
138 s5_s = REG_READ(ah, AR_ISR_S5_S); 127 s5_s = REG_READ(ah, AR_ISR_S5_S);
139 if (isr & AR_ISR_GENTMR) { 128 ah->intr_gen_timer_trigger =
140 ah->intr_gen_timer_trigger =
141 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); 129 MS(s5_s, AR_ISR_S5_GENTIMER_TRIG);
142 130
143 ah->intr_gen_timer_thresh = 131 ah->intr_gen_timer_thresh =
144 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH); 132 MS(s5_s, AR_ISR_S5_GENTIMER_THRESH);
145 133
146 if (ah->intr_gen_timer_trigger) 134 if (ah->intr_gen_timer_trigger)
147 *masked |= ATH9K_INT_GENTIMER; 135 *masked |= ATH9K_INT_GENTIMER;
148 136
149 } 137 if ((s5_s & AR_ISR_S5_TIM_TIMER) &&
138 !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
139 *masked |= ATH9K_INT_TIM_TIMER;
150 } 140 }
151 141
152 if (sync_cause) { 142 if (sync_cause) {
@@ -157,25 +147,25 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
157 147
158 if (fatal_int) { 148 if (fatal_int) {
159 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { 149 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
160 ath_print(common, ATH_DBG_ANY, 150 ath_dbg(common, ATH_DBG_ANY,
161 "received PCI FATAL interrupt\n"); 151 "received PCI FATAL interrupt\n");
162 } 152 }
163 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { 153 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
164 ath_print(common, ATH_DBG_ANY, 154 ath_dbg(common, ATH_DBG_ANY,
165 "received PCI PERR interrupt\n"); 155 "received PCI PERR interrupt\n");
166 } 156 }
167 *masked |= ATH9K_INT_FATAL; 157 *masked |= ATH9K_INT_FATAL;
168 } 158 }
169 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 159 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
170 ath_print(common, ATH_DBG_INTERRUPT, 160 ath_dbg(common, ATH_DBG_INTERRUPT,
171 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n"); 161 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
172 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 162 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
173 REG_WRITE(ah, AR_RC, 0); 163 REG_WRITE(ah, AR_RC, 0);
174 *masked |= ATH9K_INT_FATAL; 164 *masked |= ATH9K_INT_FATAL;
175 } 165 }
176 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { 166 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
177 ath_print(common, ATH_DBG_INTERRUPT, 167 ath_dbg(common, ATH_DBG_INTERRUPT,
178 "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); 168 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
179 } 169 }
180 170
181 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); 171 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -218,77 +208,70 @@ static int ar9002_hw_proc_txdesc(struct ath_hw *ah, void *ds,
218 struct ath_tx_status *ts) 208 struct ath_tx_status *ts)
219{ 209{
220 struct ar5416_desc *ads = AR5416DESC(ds); 210 struct ar5416_desc *ads = AR5416DESC(ds);
211 u32 status;
221 212
222 if ((ads->ds_txstatus9 & AR_TxDone) == 0) 213 status = ACCESS_ONCE(ads->ds_txstatus9);
214 if ((status & AR_TxDone) == 0)
223 return -EINPROGRESS; 215 return -EINPROGRESS;
224 216
225 ts->ts_seqnum = MS(ads->ds_txstatus9, AR_SeqNum);
226 ts->ts_tstamp = ads->AR_SendTimestamp; 217 ts->ts_tstamp = ads->AR_SendTimestamp;
227 ts->ts_status = 0; 218 ts->ts_status = 0;
228 ts->ts_flags = 0; 219 ts->ts_flags = 0;
229 220
230 if (ads->ds_txstatus1 & AR_FrmXmitOK) 221 if (status & AR_TxOpExceeded)
222 ts->ts_status |= ATH9K_TXERR_XTXOP;
223 ts->tid = MS(status, AR_TxTid);
224 ts->ts_rateindex = MS(status, AR_FinalTxIdx);
225 ts->ts_seqnum = MS(status, AR_SeqNum);
226
227 status = ACCESS_ONCE(ads->ds_txstatus0);
228 ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
229 ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
230 ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
231 if (status & AR_TxBaStatus) {
232 ts->ts_flags |= ATH9K_TX_BA;
233 ts->ba_low = ads->AR_BaBitmapLow;
234 ts->ba_high = ads->AR_BaBitmapHigh;
235 }
236
237 status = ACCESS_ONCE(ads->ds_txstatus1);
238 if (status & AR_FrmXmitOK)
231 ts->ts_status |= ATH9K_TX_ACKED; 239 ts->ts_status |= ATH9K_TX_ACKED;
232 if (ads->ds_txstatus1 & AR_ExcessiveRetries) 240 else {
233 ts->ts_status |= ATH9K_TXERR_XRETRY; 241 if (status & AR_ExcessiveRetries)
234 if (ads->ds_txstatus1 & AR_Filtered) 242 ts->ts_status |= ATH9K_TXERR_XRETRY;
235 ts->ts_status |= ATH9K_TXERR_FILT; 243 if (status & AR_Filtered)
236 if (ads->ds_txstatus1 & AR_FIFOUnderrun) { 244 ts->ts_status |= ATH9K_TXERR_FILT;
237 ts->ts_status |= ATH9K_TXERR_FIFO; 245 if (status & AR_FIFOUnderrun) {
238 ath9k_hw_updatetxtriglevel(ah, true); 246 ts->ts_status |= ATH9K_TXERR_FIFO;
247 ath9k_hw_updatetxtriglevel(ah, true);
248 }
239 } 249 }
240 if (ads->ds_txstatus9 & AR_TxOpExceeded) 250 if (status & AR_TxTimerExpired)
241 ts->ts_status |= ATH9K_TXERR_XTXOP;
242 if (ads->ds_txstatus1 & AR_TxTimerExpired)
243 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED; 251 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
244 252 if (status & AR_DescCfgErr)
245 if (ads->ds_txstatus1 & AR_DescCfgErr)
246 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR; 253 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
247 if (ads->ds_txstatus1 & AR_TxDataUnderrun) { 254 if (status & AR_TxDataUnderrun) {
248 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN; 255 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
249 ath9k_hw_updatetxtriglevel(ah, true); 256 ath9k_hw_updatetxtriglevel(ah, true);
250 } 257 }
251 if (ads->ds_txstatus1 & AR_TxDelimUnderrun) { 258 if (status & AR_TxDelimUnderrun) {
252 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN; 259 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
253 ath9k_hw_updatetxtriglevel(ah, true); 260 ath9k_hw_updatetxtriglevel(ah, true);
254 } 261 }
255 if (ads->ds_txstatus0 & AR_TxBaStatus) { 262 ts->ts_shortretry = MS(status, AR_RTSFailCnt);
256 ts->ts_flags |= ATH9K_TX_BA; 263 ts->ts_longretry = MS(status, AR_DataFailCnt);
257 ts->ba_low = ads->AR_BaBitmapLow; 264 ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
258 ts->ba_high = ads->AR_BaBitmapHigh;
259 }
260 265
261 ts->ts_rateindex = MS(ads->ds_txstatus9, AR_FinalTxIdx); 266 status = ACCESS_ONCE(ads->ds_txstatus5);
262 switch (ts->ts_rateindex) { 267 ts->ts_rssi = MS(status, AR_TxRSSICombined);
263 case 0: 268 ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
264 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate0); 269 ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
265 break; 270 ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
266 case 1:
267 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate1);
268 break;
269 case 2:
270 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate2);
271 break;
272 case 3:
273 ts->ts_ratecode = MS(ads->ds_ctl3, AR_XmitRate3);
274 break;
275 }
276 271
277 ts->ts_rssi = MS(ads->ds_txstatus5, AR_TxRSSICombined);
278 ts->ts_rssi_ctl0 = MS(ads->ds_txstatus0, AR_TxRSSIAnt00);
279 ts->ts_rssi_ctl1 = MS(ads->ds_txstatus0, AR_TxRSSIAnt01);
280 ts->ts_rssi_ctl2 = MS(ads->ds_txstatus0, AR_TxRSSIAnt02);
281 ts->ts_rssi_ext0 = MS(ads->ds_txstatus5, AR_TxRSSIAnt10);
282 ts->ts_rssi_ext1 = MS(ads->ds_txstatus5, AR_TxRSSIAnt11);
283 ts->ts_rssi_ext2 = MS(ads->ds_txstatus5, AR_TxRSSIAnt12);
284 ts->evm0 = ads->AR_TxEVM0; 272 ts->evm0 = ads->AR_TxEVM0;
285 ts->evm1 = ads->AR_TxEVM1; 273 ts->evm1 = ads->AR_TxEVM1;
286 ts->evm2 = ads->AR_TxEVM2; 274 ts->evm2 = ads->AR_TxEVM2;
287 ts->ts_shortretry = MS(ads->ds_txstatus1, AR_RTSFailCnt);
288 ts->ts_longretry = MS(ads->ds_txstatus1, AR_DataFailCnt);
289 ts->ts_virtcol = MS(ads->ds_txstatus1, AR_VirtRetryCnt);
290 ts->tid = MS(ads->ds_txstatus9, AR_TxTid);
291 ts->ts_antenna = 0;
292 275
293 return 0; 276 return 0;
294} 277}
@@ -300,7 +283,6 @@ static void ar9002_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
300{ 283{
301 struct ar5416_desc *ads = AR5416DESC(ds); 284 struct ar5416_desc *ads = AR5416DESC(ds);
302 285
303 txPower += ah->txpower_indexoffset;
304 if (txPower > 63) 286 if (txPower > 63)
305 txPower = 63; 287 txPower = 63;
306 288
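Aside: ar9002_hw_proc_txdesc() above now snapshots each DMA-written status word through ACCESS_ONCE() before testing and decoding it, so the "done" check and the later field extractions see the same value. An illustrative, userspace-compilable sketch of the idiom; the structure and bit value are placeholders, not the driver's definitions:

#include <stdint.h>

#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))

struct fake_desc {			/* stand-in for ar5416_desc */
	uint32_t ds_txstatus9;
};

#define AR_TxDone	0x00000001	/* placeholder bit value */

static int proc_txdesc_sketch(struct fake_desc *ads)
{
	/* One volatile read; every later test uses the same snapshot. */
	uint32_t status = ACCESS_ONCE(ads->ds_txstatus9);

	if ((status & AR_TxDone) == 0)
		return -1;		/* descriptor still owned by hardware */

	/* ... decode the remaining fields from 'status' ... */
	return 0;
}

int main(void)
{
	struct fake_desc d = { .ds_txstatus9 = AR_TxDone };
	return proc_txdesc_sketch(&d);
}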
diff --git a/drivers/net/wireless/ath/ath9k/ar9002_phy.c b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
index c00cdc67b55b..7d68d61e406b 100644
--- a/drivers/net/wireless/ath/ath9k/ar9002_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9002_phy.c
@@ -175,13 +175,15 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
175 int upper, lower, cur_vit_mask; 175 int upper, lower, cur_vit_mask;
176 int tmp, newVal; 176 int tmp, newVal;
177 int i; 177 int i;
178 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8, 178 static const int pilot_mask_reg[4] = {
179 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60 179 AR_PHY_TIMING7, AR_PHY_TIMING8,
180 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
180 }; 181 };
181 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10, 182 static const int chan_mask_reg[4] = {
182 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60 183 AR_PHY_TIMING9, AR_PHY_TIMING10,
184 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
183 }; 185 };
184 int inc[4] = { 0, 100, 0, 0 }; 186 static const int inc[4] = { 0, 100, 0, 0 };
185 struct chan_centers centers; 187 struct chan_centers centers;
186 188
187 int8_t mask_m[123]; 189 int8_t mask_m[123];
@@ -201,13 +203,14 @@ static void ar9002_hw_spur_mitigate(struct ath_hw *ah,
201 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { 203 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
202 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz); 204 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
203 205
206 if (AR_NO_SPUR == cur_bb_spur)
207 break;
208
204 if (is2GHz) 209 if (is2GHz)
205 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ; 210 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
206 else 211 else
207 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ; 212 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
208 213
209 if (AR_NO_SPUR == cur_bb_spur)
210 break;
211 cur_bb_spur = cur_bb_spur - freq; 214 cur_bb_spur = cur_bb_spur - freq;
212 215
213 if (IS_CHAN_HT40(chan)) { 216 if (IS_CHAN_HT40(chan)) {
@@ -473,21 +476,21 @@ static void ar9002_hw_do_getnf(struct ath_hw *ah,
473 int16_t nf; 476 int16_t nf;
474 477
475 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR); 478 nf = MS(REG_READ(ah, AR_PHY_CCA), AR9280_PHY_MINCCA_PWR);
476 nfarray[0] = sign_extend(nf, 9); 479 nfarray[0] = sign_extend32(nf, 8);
477 480
478 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR); 481 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR9280_PHY_EXT_MINCCA_PWR);
479 if (IS_CHAN_HT40(ah->curchan)) 482 if (IS_CHAN_HT40(ah->curchan))
480 nfarray[3] = sign_extend(nf, 9); 483 nfarray[3] = sign_extend32(nf, 8);
481 484
482 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) 485 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
483 return; 486 return;
484 487
485 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR); 488 nf = MS(REG_READ(ah, AR_PHY_CH1_CCA), AR9280_PHY_CH1_MINCCA_PWR);
486 nfarray[1] = sign_extend(nf, 9); 489 nfarray[1] = sign_extend32(nf, 8);
487 490
488 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR); 491 nf = MS(REG_READ(ah, AR_PHY_CH1_EXT_CCA), AR9280_PHY_CH1_EXT_MINCCA_PWR);
489 if (IS_CHAN_HT40(ah->curchan)) 492 if (IS_CHAN_HT40(ah->curchan))
490 nfarray[4] = sign_extend(nf, 9); 493 nfarray[4] = sign_extend32(nf, 8);
491} 494}
492 495
493static void ar9002_hw_set_nf_limits(struct ath_hw *ah) 496static void ar9002_hw_set_nf_limits(struct ath_hw *ah)
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
index a14a5e43cf56..81f9cf294dec 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_2p2_initvals.h
@@ -34,9 +34,9 @@ static const u32 ar9300_2p2_radio_postamble[][5] = {
34 34
35static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = { 35static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 36 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
37 {0x0000a2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 37 {0x0000a2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800},
38 {0x0000a2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 38 {0x0000a2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000},
39 {0x0000a2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 39 {0x0000a2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000},
40 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 40 {0x0000a2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
41 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9}, 41 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d9, 0x000050d9},
42 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 42 {0x0000a500, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
@@ -56,21 +56,21 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
56 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24}, 56 {0x0000a538, 0x4702244a, 0x4702244a, 0x34000e24, 0x34000e24},
57 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640}, 57 {0x0000a53c, 0x4b02244c, 0x4b02244c, 0x38001640, 0x38001640},
58 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660}, 58 {0x0000a540, 0x4e02246c, 0x4e02246c, 0x3c001660, 0x3c001660},
59 {0x0000a544, 0x5302266c, 0x5302266c, 0x3f001861, 0x3f001861}, 59 {0x0000a544, 0x52022470, 0x52022470, 0x3f001861, 0x3f001861},
60 {0x0000a548, 0x5702286c, 0x5702286c, 0x43001a81, 0x43001a81}, 60 {0x0000a548, 0x55022490, 0x55022490, 0x43001a81, 0x43001a81},
61 {0x0000a54c, 0x5c02486b, 0x5c02486b, 0x47001a83, 0x47001a83}, 61 {0x0000a54c, 0x59022492, 0x59022492, 0x47001a83, 0x47001a83},
62 {0x0000a550, 0x61024a6c, 0x61024a6c, 0x4a001c84, 0x4a001c84}, 62 {0x0000a550, 0x5d022692, 0x5d022692, 0x4a001c84, 0x4a001c84},
63 {0x0000a554, 0x66026a6c, 0x66026a6c, 0x4e001ce3, 0x4e001ce3}, 63 {0x0000a554, 0x61022892, 0x61022892, 0x4e001ce3, 0x4e001ce3},
64 {0x0000a558, 0x6b026e6c, 0x6b026e6c, 0x52001ce5, 0x52001ce5}, 64 {0x0000a558, 0x65024890, 0x65024890, 0x52001ce5, 0x52001ce5},
65 {0x0000a55c, 0x7002708c, 0x7002708c, 0x56001ce9, 0x56001ce9}, 65 {0x0000a55c, 0x69024892, 0x69024892, 0x56001ce9, 0x56001ce9},
66 {0x0000a560, 0x7302b08a, 0x7302b08a, 0x5a001ceb, 0x5a001ceb}, 66 {0x0000a560, 0x6e024c92, 0x6e024c92, 0x5a001ceb, 0x5a001ceb},
67 {0x0000a564, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 67 {0x0000a564, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
68 {0x0000a568, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 68 {0x0000a568, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
69 {0x0000a56c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 69 {0x0000a56c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
70 {0x0000a570, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 70 {0x0000a570, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
71 {0x0000a574, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 71 {0x0000a574, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
72 {0x0000a578, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 72 {0x0000a578, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
73 {0x0000a57c, 0x7702b08c, 0x7702b08c, 0x5d001eec, 0x5d001eec}, 73 {0x0000a57c, 0x74026e92, 0x74026e92, 0x5d001eec, 0x5d001eec},
74 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000}, 74 {0x0000a580, 0x00800000, 0x00800000, 0x00800000, 0x00800000},
75 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002}, 75 {0x0000a584, 0x06800003, 0x06800003, 0x04800002, 0x04800002},
76 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004}, 76 {0x0000a588, 0x0a800020, 0x0a800020, 0x08800004, 0x08800004},
@@ -88,44 +88,44 @@ static const u32 ar9300Modes_lowest_ob_db_tx_gain_table_2p2[][5] = {
88 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24}, 88 {0x0000a5b8, 0x4782244a, 0x4782244a, 0x34800e24, 0x34800e24},
89 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640}, 89 {0x0000a5bc, 0x4b82244c, 0x4b82244c, 0x38801640, 0x38801640},
90 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660}, 90 {0x0000a5c0, 0x4e82246c, 0x4e82246c, 0x3c801660, 0x3c801660},
91 {0x0000a5c4, 0x5382266c, 0x5382266c, 0x3f801861, 0x3f801861}, 91 {0x0000a5c4, 0x52822470, 0x52822470, 0x3f801861, 0x3f801861},
92 {0x0000a5c8, 0x5782286c, 0x5782286c, 0x43801a81, 0x43801a81}, 92 {0x0000a5c8, 0x55822490, 0x55822490, 0x43801a81, 0x43801a81},
93 {0x0000a5cc, 0x5c82486b, 0x5c82486b, 0x47801a83, 0x47801a83}, 93 {0x0000a5cc, 0x59822492, 0x59822492, 0x47801a83, 0x47801a83},
94 {0x0000a5d0, 0x61824a6c, 0x61824a6c, 0x4a801c84, 0x4a801c84}, 94 {0x0000a5d0, 0x5d822692, 0x5d822692, 0x4a801c84, 0x4a801c84},
95 {0x0000a5d4, 0x66826a6c, 0x66826a6c, 0x4e801ce3, 0x4e801ce3}, 95 {0x0000a5d4, 0x61822892, 0x61822892, 0x4e801ce3, 0x4e801ce3},
96 {0x0000a5d8, 0x6b826e6c, 0x6b826e6c, 0x52801ce5, 0x52801ce5}, 96 {0x0000a5d8, 0x65824890, 0x65824890, 0x52801ce5, 0x52801ce5},
97 {0x0000a5dc, 0x7082708c, 0x7082708c, 0x56801ce9, 0x56801ce9}, 97 {0x0000a5dc, 0x69824892, 0x69824892, 0x56801ce9, 0x56801ce9},
98 {0x0000a5e0, 0x7382b08a, 0x7382b08a, 0x5a801ceb, 0x5a801ceb}, 98 {0x0000a5e0, 0x6e824c92, 0x6e824c92, 0x5a801ceb, 0x5a801ceb},
99 {0x0000a5e4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 99 {0x0000a5e4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
100 {0x0000a5e8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 100 {0x0000a5e8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
101 {0x0000a5ec, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 101 {0x0000a5ec, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
102 {0x0000a5f0, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 102 {0x0000a5f0, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
103 {0x0000a5f4, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 103 {0x0000a5f4, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
104 {0x0000a5f8, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 104 {0x0000a5f8, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
105 {0x0000a5fc, 0x7782b08c, 0x7782b08c, 0x5d801eec, 0x5d801eec}, 105 {0x0000a5fc, 0x74826e92, 0x74826e92, 0x5d801eec, 0x5d801eec},
106 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 106 {0x0000a600, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
107 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 107 {0x0000a604, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
108 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 108 {0x0000a608, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
109 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 109 {0x0000a60c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
110 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 110 {0x0000a610, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
111 {0x0000a614, 0x01404000, 0x01404000, 0x01404000, 0x01404000}, 111 {0x0000a614, 0x02004000, 0x02004000, 0x01404000, 0x01404000},
112 {0x0000a618, 0x01404501, 0x01404501, 0x01404501, 0x01404501}, 112 {0x0000a618, 0x02004801, 0x02004801, 0x01404501, 0x01404501},
113 {0x0000a61c, 0x02008802, 0x02008802, 0x02008501, 0x02008501}, 113 {0x0000a61c, 0x02808a02, 0x02808a02, 0x02008501, 0x02008501},
114 {0x0000a620, 0x0300cc03, 0x0300cc03, 0x0280ca03, 0x0280ca03}, 114 {0x0000a620, 0x0380ce03, 0x0380ce03, 0x0280ca03, 0x0280ca03},
115 {0x0000a624, 0x0300cc03, 0x0300cc03, 0x03010c04, 0x03010c04}, 115 {0x0000a624, 0x04411104, 0x04411104, 0x03010c04, 0x03010c04},
116 {0x0000a628, 0x0300cc03, 0x0300cc03, 0x04014c04, 0x04014c04}, 116 {0x0000a628, 0x04411104, 0x04411104, 0x04014c04, 0x04014c04},
117 {0x0000a62c, 0x03810c03, 0x03810c03, 0x04015005, 0x04015005}, 117 {0x0000a62c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
118 {0x0000a630, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005}, 118 {0x0000a630, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
119 {0x0000a634, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005}, 119 {0x0000a634, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
120 {0x0000a638, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005}, 120 {0x0000a638, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
121 {0x0000a63c, 0x03810e04, 0x03810e04, 0x04015005, 0x04015005}, 121 {0x0000a63c, 0x04411104, 0x04411104, 0x04015005, 0x04015005},
122 {0x0000b2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 122 {0x0000b2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800},
123 {0x0000b2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 123 {0x0000b2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000},
124 {0x0000b2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 124 {0x0000b2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000},
125 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 125 {0x0000b2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
126 {0x0000c2dc, 0x0380c7fc, 0x0380c7fc, 0x00637800, 0x00637800}, 126 {0x0000c2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800},
127 {0x0000c2e0, 0x0000f800, 0x0000f800, 0x03838000, 0x03838000}, 127 {0x0000c2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000},
128 {0x0000c2e4, 0x03ff0000, 0x03ff0000, 0x03fc0000, 0x03fc0000}, 128 {0x0000c2e4, 0x03fc0000, 0x03fc0000, 0x03fc0000, 0x03fc0000},
129 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 129 {0x0000c2e8, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
130 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4}, 130 {0x00016044, 0x012492d4, 0x012492d4, 0x012492d4, 0x012492d4},
131 {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001}, 131 {0x00016048, 0x62480001, 0x62480001, 0x62480001, 0x62480001},
@@ -638,6 +638,7 @@ static const u32 ar9300_2p2_baseband_postamble[][5] = {
638 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, 638 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
639 {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0}, 639 {0x0000a204, 0x000037c0, 0x000037c4, 0x000037c4, 0x000037c0},
640 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004}, 640 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
641 {0x0000a22c, 0x01026a2f, 0x01026a2f, 0x01026a2f, 0x01026a2f},
641 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b}, 642 {0x0000a230, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b},
642 {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff}, 643 {0x0000a234, 0x00000fff, 0x10000fff, 0x10000fff, 0x00000fff},
643 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018}, 644 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
@@ -680,7 +681,7 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
680 {0x0000981c, 0x00020028}, 681 {0x0000981c, 0x00020028},
681 {0x00009834, 0x6400a290}, 682 {0x00009834, 0x6400a290},
682 {0x00009838, 0x0108ecff}, 683 {0x00009838, 0x0108ecff},
683 {0x0000983c, 0x14750600}, 684 {0x0000983c, 0x0d000600},
684 {0x00009880, 0x201fff00}, 685 {0x00009880, 0x201fff00},
685 {0x00009884, 0x00001042}, 686 {0x00009884, 0x00001042},
686 {0x000098a4, 0x00200400}, 687 {0x000098a4, 0x00200400},
@@ -722,7 +723,6 @@ static const u32 ar9300_2p2_baseband_core[][2] = {
722 {0x0000a220, 0x00000000}, 723 {0x0000a220, 0x00000000},
723 {0x0000a224, 0x00000000}, 724 {0x0000a224, 0x00000000},
724 {0x0000a228, 0x10002310}, 725 {0x0000a228, 0x10002310},
725 {0x0000a22c, 0x01036a27},
726 {0x0000a23c, 0x00000000}, 726 {0x0000a23c, 0x00000000},
727 {0x0000a244, 0x0c000000}, 727 {0x0000a244, 0x0c000000},
728 {0x0000a2a0, 0x00000001}, 728 {0x0000a2a0, 0x00000001},
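Aside: the ar9300 initval tables above are plain lookup tables, each row being a register address followed by one value per operating mode (5G_HT20, 5G_HT40, 2G_HT40, 2G_HT20, as in the column comment). A hedged sketch of how such a table can be walked for one mode column; write_reg() and the loop are illustrative stand-ins, not the driver's actual initval loader:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

enum ini_column { COL_5G_HT20 = 1, COL_5G_HT40, COL_2G_HT40, COL_2G_HT20 };

static void write_reg(uint32_t addr, uint32_t val)
{
	printf("REG 0x%08x <- 0x%08x\n", addr, val);	/* stand-in */
}

static void apply_initvals(const uint32_t (*tbl)[5], size_t rows,
			   enum ini_column col)
{
	size_t i;

	for (i = 0; i < rows; i++)
		write_reg(tbl[i][0], tbl[i][col]);
}

int main(void)
{
	/* Two rows copied from the 2p2 lowest_ob_db tx gain table above. */
	static const uint32_t tbl[][5] = {
		{0x0000a2dc, 0x00033800, 0x00033800, 0x00637800, 0x00637800},
		{0x0000a2e0, 0x0003c000, 0x0003c000, 0x03838000, 0x03838000},
	};

	apply_initvals(tbl, 2, COL_2G_HT20);
	return 0;
}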
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_calib.c b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
index 9e6edffe0bd1..4a4cd88429c0 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_calib.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_calib.c
@@ -18,6 +18,16 @@
18#include "hw-ops.h" 18#include "hw-ops.h"
19#include "ar9003_phy.h" 19#include "ar9003_phy.h"
20 20
21#define MPASS 3
22#define MAX_MEASUREMENT 8
23#define MAX_DIFFERENCE 10
24
25struct coeff {
26 int mag_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MPASS];
27 int phs_coeff[AR9300_MAX_CHAINS][MAX_MEASUREMENT][MPASS];
28 int iqc_coeff[2];
29};
30
21enum ar9003_cal_types { 31enum ar9003_cal_types {
22 IQ_MISMATCH_CAL = BIT(0), 32 IQ_MISMATCH_CAL = BIT(0),
23 TEMP_COMP_CAL = BIT(1), 33 TEMP_COMP_CAL = BIT(1),
@@ -40,8 +50,8 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
40 currCal->calData->calCountMax); 50 currCal->calData->calCountMax);
41 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); 51 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
42 52
43 ath_print(common, ATH_DBG_CALIBRATE, 53 ath_dbg(common, ATH_DBG_CALIBRATE,
44 "starting IQ Mismatch Calibration\n"); 54 "starting IQ Mismatch Calibration\n");
45 55
46 /* Kick-off cal */ 56 /* Kick-off cal */
47 REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL); 57 REG_SET_BIT(ah, AR_PHY_TIMING4, AR_PHY_TIMING4_DO_CAL);
@@ -52,8 +62,8 @@ static void ar9003_hw_setup_calibration(struct ath_hw *ah,
52 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM, 62 REG_RMW_FIELD(ah, AR_PHY_65NM_CH0_THERM,
53 AR_PHY_65NM_CH0_THERM_START, 1); 63 AR_PHY_65NM_CH0_THERM_START, 1);
54 64
55 ath_print(common, ATH_DBG_CALIBRATE, 65 ath_dbg(common, ATH_DBG_CALIBRATE,
56 "starting Temperature Compensation Calibration\n"); 66 "starting Temperature Compensation Calibration\n");
57 break; 67 break;
58 } 68 }
59} 69}
@@ -181,11 +191,11 @@ static void ar9003_hw_iqcal_collect(struct ath_hw *ah)
181 REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); 191 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
182 ah->totalIqCorrMeas[i] += 192 ah->totalIqCorrMeas[i] +=
183 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); 193 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
184 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE, 194 ath_dbg(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
185 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", 195 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
186 ah->cal_samples, i, ah->totalPowerMeasI[i], 196 ah->cal_samples, i, ah->totalPowerMeasI[i],
187 ah->totalPowerMeasQ[i], 197 ah->totalPowerMeasQ[i],
188 ah->totalIqCorrMeas[i]); 198 ah->totalIqCorrMeas[i]);
189 } 199 }
190} 200}
191 201
@@ -196,7 +206,7 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
196 u32 qCoffDenom, iCoffDenom; 206 u32 qCoffDenom, iCoffDenom;
197 int32_t qCoff, iCoff; 207 int32_t qCoff, iCoff;
198 int iqCorrNeg, i; 208 int iqCorrNeg, i;
199 const u_int32_t offset_array[3] = { 209 static const u_int32_t offset_array[3] = {
200 AR_PHY_RX_IQCAL_CORR_B0, 210 AR_PHY_RX_IQCAL_CORR_B0,
201 AR_PHY_RX_IQCAL_CORR_B1, 211 AR_PHY_RX_IQCAL_CORR_B1,
202 AR_PHY_RX_IQCAL_CORR_B2, 212 AR_PHY_RX_IQCAL_CORR_B2,
@@ -207,13 +217,13 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
207 powerMeasQ = ah->totalPowerMeasQ[i]; 217 powerMeasQ = ah->totalPowerMeasQ[i];
208 iqCorrMeas = ah->totalIqCorrMeas[i]; 218 iqCorrMeas = ah->totalIqCorrMeas[i];
209 219
210 ath_print(common, ATH_DBG_CALIBRATE, 220 ath_dbg(common, ATH_DBG_CALIBRATE,
211 "Starting IQ Cal and Correction for Chain %d\n", 221 "Starting IQ Cal and Correction for Chain %d\n",
212 i); 222 i);
213 223
214 ath_print(common, ATH_DBG_CALIBRATE, 224 ath_dbg(common, ATH_DBG_CALIBRATE,
215 "Orignal: Chn %diq_corr_meas = 0x%08x\n", 225 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
216 i, ah->totalIqCorrMeas[i]); 226 i, ah->totalIqCorrMeas[i]);
217 227
218 iqCorrNeg = 0; 228 iqCorrNeg = 0;
219 229
@@ -222,12 +232,12 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
222 iqCorrNeg = 1; 232 iqCorrNeg = 1;
223 } 233 }
224 234
225 ath_print(common, ATH_DBG_CALIBRATE, 235 ath_dbg(common, ATH_DBG_CALIBRATE,
226 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); 236 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
227 ath_print(common, ATH_DBG_CALIBRATE, 237 ath_dbg(common, ATH_DBG_CALIBRATE,
228 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); 238 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
229 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", 239 ath_dbg(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
230 iqCorrNeg); 240 iqCorrNeg);
231 241
232 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256; 242 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 256;
233 qCoffDenom = powerMeasQ / 64; 243 qCoffDenom = powerMeasQ / 64;
@@ -235,10 +245,10 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
235 if ((iCoffDenom != 0) && (qCoffDenom != 0)) { 245 if ((iCoffDenom != 0) && (qCoffDenom != 0)) {
236 iCoff = iqCorrMeas / iCoffDenom; 246 iCoff = iqCorrMeas / iCoffDenom;
237 qCoff = powerMeasI / qCoffDenom - 64; 247 qCoff = powerMeasI / qCoffDenom - 64;
238 ath_print(common, ATH_DBG_CALIBRATE, 248 ath_dbg(common, ATH_DBG_CALIBRATE,
239 "Chn %d iCoff = 0x%08x\n", i, iCoff); 249 "Chn %d iCoff = 0x%08x\n", i, iCoff);
240 ath_print(common, ATH_DBG_CALIBRATE, 250 ath_dbg(common, ATH_DBG_CALIBRATE,
241 "Chn %d qCoff = 0x%08x\n", i, qCoff); 251 "Chn %d qCoff = 0x%08x\n", i, qCoff);
242 252
243 /* Force bounds on iCoff */ 253 /* Force bounds on iCoff */
244 if (iCoff >= 63) 254 if (iCoff >= 63)
@@ -259,14 +269,13 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
259 iCoff = iCoff & 0x7f; 269 iCoff = iCoff & 0x7f;
260 qCoff = qCoff & 0x7f; 270 qCoff = qCoff & 0x7f;
261 271
262 ath_print(common, ATH_DBG_CALIBRATE, 272 ath_dbg(common, ATH_DBG_CALIBRATE,
263 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", 273 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
264 i, iCoff, qCoff); 274 i, iCoff, qCoff);
265 ath_print(common, ATH_DBG_CALIBRATE, 275 ath_dbg(common, ATH_DBG_CALIBRATE,
266 "Register offset (0x%04x) " 276 "Register offset (0x%04x) before update = 0x%x\n",
267 "before update = 0x%x\n", 277 offset_array[i],
268 offset_array[i], 278 REG_READ(ah, offset_array[i]));
269 REG_READ(ah, offset_array[i]));
270 279
271 REG_RMW_FIELD(ah, offset_array[i], 280 REG_RMW_FIELD(ah, offset_array[i],
272 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF, 281 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
@@ -274,33 +283,29 @@ static void ar9003_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
274 REG_RMW_FIELD(ah, offset_array[i], 283 REG_RMW_FIELD(ah, offset_array[i],
275 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF, 284 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
276 qCoff); 285 qCoff);
277 ath_print(common, ATH_DBG_CALIBRATE, 286 ath_dbg(common, ATH_DBG_CALIBRATE,
278 "Register offset (0x%04x) QI COFF " 287 "Register offset (0x%04x) QI COFF (bitfields 0x%08x) after update = 0x%x\n",
279 "(bitfields 0x%08x) after update = 0x%x\n", 288 offset_array[i],
280 offset_array[i], 289 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF,
281 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_I_COFF, 290 REG_READ(ah, offset_array[i]));
282 REG_READ(ah, offset_array[i])); 291 ath_dbg(common, ATH_DBG_CALIBRATE,
283 ath_print(common, ATH_DBG_CALIBRATE, 292 "Register offset (0x%04x) QQ COFF (bitfields 0x%08x) after update = 0x%x\n",
284 "Register offset (0x%04x) QQ COFF " 293 offset_array[i],
285 "(bitfields 0x%08x) after update = 0x%x\n", 294 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF,
286 offset_array[i], 295 REG_READ(ah, offset_array[i]));
287 AR_PHY_RX_IQCAL_CORR_IQCORR_Q_Q_COFF, 296
288 REG_READ(ah, offset_array[i])); 297 ath_dbg(common, ATH_DBG_CALIBRATE,
289 298 "IQ Cal and Correction done for Chain %d\n", i);
290 ath_print(common, ATH_DBG_CALIBRATE,
291 "IQ Cal and Correction done for Chain %d\n",
292 i);
293 } 299 }
294 } 300 }
295 301
296 REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0, 302 REG_SET_BIT(ah, AR_PHY_RX_IQCAL_CORR_B0,
297 AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE); 303 AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE);
298 ath_print(common, ATH_DBG_CALIBRATE, 304 ath_dbg(common, ATH_DBG_CALIBRATE,
299 "IQ Cal and Correction (offset 0x%04x) enabled " 305 "IQ Cal and Correction (offset 0x%04x) enabled (bit position 0x%08x). New Value 0x%08x\n",
300 "(bit position 0x%08x). New Value 0x%08x\n", 306 (unsigned) (AR_PHY_RX_IQCAL_CORR_B0),
301 (unsigned) (AR_PHY_RX_IQCAL_CORR_B0), 307 AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE,
302 AR_PHY_RX_IQCAL_CORR_IQCORR_ENABLE, 308 REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
303 REG_READ(ah, AR_PHY_RX_IQCAL_CORR_B0));
304} 309}
305 310
306static const struct ath9k_percal_data iq_cal_single_sample = { 311static const struct ath9k_percal_data iq_cal_single_sample = {
@@ -340,7 +345,7 @@ static bool ar9003_hw_solve_iq_cal(struct ath_hw *ah,
340 f2 = (f1 * f1 + f3 * f3) / result_shift; 345 f2 = (f1 * f1 + f3 * f3) / result_shift;
341 346
342 if (!f2) { 347 if (!f2) {
343 ath_print(common, ATH_DBG_CALIBRATE, "Divide by 0\n"); 348 ath_dbg(common, ATH_DBG_CALIBRATE, "Divide by 0\n");
344 return false; 349 return false;
345 } 350 }
346 351
@@ -461,11 +466,14 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
461 466
462 if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) || 467 if ((i2_p_q2_a0_d0 == 0) || (i2_p_q2_a0_d1 == 0) ||
463 (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) { 468 (i2_p_q2_a1_d0 == 0) || (i2_p_q2_a1_d1 == 0)) {
464 ath_print(common, ATH_DBG_CALIBRATE, 469 ath_dbg(common, ATH_DBG_CALIBRATE,
465 "Divide by 0:\na0_d0=%d\n" 470 "Divide by 0:\n"
466 "a0_d1=%d\na2_d0=%d\na1_d1=%d\n", 471 "a0_d0=%d\n"
467 i2_p_q2_a0_d0, i2_p_q2_a0_d1, 472 "a0_d1=%d\n"
468 i2_p_q2_a1_d0, i2_p_q2_a1_d1); 473 "a2_d0=%d\n"
474 "a1_d1=%d\n",
475 i2_p_q2_a0_d0, i2_p_q2_a0_d1,
476 i2_p_q2_a1_d0, i2_p_q2_a1_d1);
469 return false; 477 return false;
470 } 478 }
471 479
@@ -498,9 +506,9 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
498 mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2); 506 mag2 = ar9003_hw_find_mag_approx(ah, cos_2phi_2, sin_2phi_2);
499 507
500 if ((mag1 == 0) || (mag2 == 0)) { 508 if ((mag1 == 0) || (mag2 == 0)) {
501 ath_print(common, ATH_DBG_CALIBRATE, 509 ath_dbg(common, ATH_DBG_CALIBRATE,
502 "Divide by 0: mag1=%d, mag2=%d\n", 510 "Divide by 0: mag1=%d, mag2=%d\n",
503 mag1, mag2); 511 mag1, mag2);
504 return false; 512 return false;
505 } 513 }
506 514
@@ -517,8 +525,8 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
517 mag_a0_d0, phs_a0_d0, 525 mag_a0_d0, phs_a0_d0,
518 mag_a1_d0, 526 mag_a1_d0,
519 phs_a1_d0, solved_eq)) { 527 phs_a1_d0, solved_eq)) {
520 ath_print(common, ATH_DBG_CALIBRATE, 528 ath_dbg(common, ATH_DBG_CALIBRATE,
521 "Call to ar9003_hw_solve_iq_cal() failed.\n"); 529 "Call to ar9003_hw_solve_iq_cal() failed.\n");
522 return false; 530 return false;
523 } 531 }
524 532
@@ -527,14 +535,14 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
527 mag_rx = solved_eq[2]; 535 mag_rx = solved_eq[2];
528 phs_rx = solved_eq[3]; 536 phs_rx = solved_eq[3];
529 537
530 ath_print(common, ATH_DBG_CALIBRATE, 538 ath_dbg(common, ATH_DBG_CALIBRATE,
531 "chain %d: mag mismatch=%d phase mismatch=%d\n", 539 "chain %d: mag mismatch=%d phase mismatch=%d\n",
532 chain_idx, mag_tx/res_scale, phs_tx/res_scale); 540 chain_idx, mag_tx/res_scale, phs_tx/res_scale);
533 541
534 if (res_scale == mag_tx) { 542 if (res_scale == mag_tx) {
535 ath_print(common, ATH_DBG_CALIBRATE, 543 ath_dbg(common, ATH_DBG_CALIBRATE,
536 "Divide by 0: mag_tx=%d, res_scale=%d\n", 544 "Divide by 0: mag_tx=%d, res_scale=%d\n",
537 mag_tx, res_scale); 545 mag_tx, res_scale);
538 return false; 546 return false;
539 } 547 }
540 548
@@ -545,9 +553,9 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
545 q_q_coff = (mag_corr_tx * 128 / res_scale); 553 q_q_coff = (mag_corr_tx * 128 / res_scale);
546 q_i_coff = (phs_corr_tx * 256 / res_scale); 554 q_i_coff = (phs_corr_tx * 256 / res_scale);
547 555
548 ath_print(common, ATH_DBG_CALIBRATE, 556 ath_dbg(common, ATH_DBG_CALIBRATE,
549 "tx chain %d: mag corr=%d phase corr=%d\n", 557 "tx chain %d: mag corr=%d phase corr=%d\n",
550 chain_idx, q_q_coff, q_i_coff); 558 chain_idx, q_q_coff, q_i_coff);
551 559
552 if (q_i_coff < -63) 560 if (q_i_coff < -63)
553 q_i_coff = -63; 561 q_i_coff = -63;
@@ -560,14 +568,14 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
560 568
561 iqc_coeff[0] = (q_q_coff * 128) + q_i_coff; 569 iqc_coeff[0] = (q_q_coff * 128) + q_i_coff;
562 570
563 ath_print(common, ATH_DBG_CALIBRATE, 571 ath_dbg(common, ATH_DBG_CALIBRATE,
564 "tx chain %d: iq corr coeff=%x\n", 572 "tx chain %d: iq corr coeff=%x\n",
565 chain_idx, iqc_coeff[0]); 573 chain_idx, iqc_coeff[0]);
566 574
567 if (-mag_rx == res_scale) { 575 if (-mag_rx == res_scale) {
568 ath_print(common, ATH_DBG_CALIBRATE, 576 ath_dbg(common, ATH_DBG_CALIBRATE,
569 "Divide by 0: mag_rx=%d, res_scale=%d\n", 577 "Divide by 0: mag_rx=%d, res_scale=%d\n",
570 mag_rx, res_scale); 578 mag_rx, res_scale);
571 return false; 579 return false;
572 } 580 }
573 581
@@ -578,9 +586,9 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
578 q_q_coff = (mag_corr_rx * 128 / res_scale); 586 q_q_coff = (mag_corr_rx * 128 / res_scale);
579 q_i_coff = (phs_corr_rx * 256 / res_scale); 587 q_i_coff = (phs_corr_rx * 256 / res_scale);
580 588
581 ath_print(common, ATH_DBG_CALIBRATE, 589 ath_dbg(common, ATH_DBG_CALIBRATE,
582 "rx chain %d: mag corr=%d phase corr=%d\n", 590 "rx chain %d: mag corr=%d phase corr=%d\n",
583 chain_idx, q_q_coff, q_i_coff); 591 chain_idx, q_q_coff, q_i_coff);
584 592
585 if (q_i_coff < -63) 593 if (q_i_coff < -63)
586 q_i_coff = -63; 594 q_i_coff = -63;
@@ -593,140 +601,367 @@ static bool ar9003_hw_calc_iq_corr(struct ath_hw *ah,
593 601
594 iqc_coeff[1] = (q_q_coff * 128) + q_i_coff; 602 iqc_coeff[1] = (q_q_coff * 128) + q_i_coff;
595 603
596 ath_print(common, ATH_DBG_CALIBRATE, 604 ath_dbg(common, ATH_DBG_CALIBRATE,
597 "rx chain %d: iq corr coeff=%x\n", 605 "rx chain %d: iq corr coeff=%x\n",
598 chain_idx, iqc_coeff[1]); 606 chain_idx, iqc_coeff[1]);
607
608 return true;
609}
610
611static bool ar9003_hw_compute_closest_pass_and_avg(int *mp_coeff, int *mp_avg)
612{
613 int diff[MPASS];
614
615 diff[0] = abs(mp_coeff[0] - mp_coeff[1]);
616 diff[1] = abs(mp_coeff[1] - mp_coeff[2]);
617 diff[2] = abs(mp_coeff[2] - mp_coeff[0]);
618
619 if (diff[0] > MAX_DIFFERENCE &&
620 diff[1] > MAX_DIFFERENCE &&
621 diff[2] > MAX_DIFFERENCE)
622 return false;
623
624 if (diff[0] <= diff[1] && diff[0] <= diff[2])
625 *mp_avg = (mp_coeff[0] + mp_coeff[1]) / 2;
626 else if (diff[1] <= diff[2])
627 *mp_avg = (mp_coeff[1] + mp_coeff[2]) / 2;
628 else
629 *mp_avg = (mp_coeff[2] + mp_coeff[0]) / 2;
599 630
600 return true; 631 return true;
601} 632}
602 633
634static void ar9003_hw_tx_iqcal_load_avg_2_passes(struct ath_hw *ah,
635 u8 num_chains,
636 struct coeff *coeff)
637{
638 struct ath_common *common = ath9k_hw_common(ah);
639 int i, im, nmeasurement;
640 int magnitude, phase;
641 u32 tx_corr_coeff[MAX_MEASUREMENT][AR9300_MAX_CHAINS];
642
643 memset(tx_corr_coeff, 0, sizeof(tx_corr_coeff));
644 for (i = 0; i < MAX_MEASUREMENT / 2; i++) {
645 tx_corr_coeff[i * 2][0] = tx_corr_coeff[(i * 2) + 1][0] =
646 AR_PHY_TX_IQCAL_CORR_COEFF_B0(i);
647 if (!AR_SREV_9485(ah)) {
648 tx_corr_coeff[i * 2][1] =
649 tx_corr_coeff[(i * 2) + 1][1] =
650 AR_PHY_TX_IQCAL_CORR_COEFF_B1(i);
651
652 tx_corr_coeff[i * 2][2] =
653 tx_corr_coeff[(i * 2) + 1][2] =
654 AR_PHY_TX_IQCAL_CORR_COEFF_B2(i);
655 }
656 }
657
658 /* Load the average of 2 passes */
659 for (i = 0; i < num_chains; i++) {
660 if (AR_SREV_9485(ah))
661 nmeasurement = REG_READ_FIELD(ah,
662 AR_PHY_TX_IQCAL_STATUS_B0_9485,
663 AR_PHY_CALIBRATED_GAINS_0);
664 else
665 nmeasurement = REG_READ_FIELD(ah,
666 AR_PHY_TX_IQCAL_STATUS_B0,
667 AR_PHY_CALIBRATED_GAINS_0);
668
669 if (nmeasurement > MAX_MEASUREMENT)
670 nmeasurement = MAX_MEASUREMENT;
671
672 for (im = 0; im < nmeasurement; im++) {
673 /*
674 * Determine which 2 passes are closest and compute avg
675 * magnitude
676 */
677 if (!ar9003_hw_compute_closest_pass_and_avg(coeff->mag_coeff[i][im],
678 &magnitude))
679 goto disable_txiqcal;
680
681 /*
682 * Determine which 2 passes are closest and compute avg
683 * phase
684 */
685 if (!ar9003_hw_compute_closest_pass_and_avg(coeff->phs_coeff[i][im],
686 &phase))
687 goto disable_txiqcal;
688
689 coeff->iqc_coeff[0] = (magnitude & 0x7f) |
690 ((phase & 0x7f) << 7);
691
692 if ((im % 2) == 0)
693 REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
694 AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE,
695 coeff->iqc_coeff[0]);
696 else
697 REG_RMW_FIELD(ah, tx_corr_coeff[im][i],
698 AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
699 coeff->iqc_coeff[0]);
700 }
701 }
702
703 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
704 AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
705 REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
706 AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
707
708 return;
709
710disable_txiqcal:
711 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
712 AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x0);
713 REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
714 AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x0);
715
716 ath_dbg(common, ATH_DBG_CALIBRATE, "TX IQ Cal disabled\n");
717}
718
 static void ar9003_hw_tx_iq_cal(struct ath_hw *ah)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
-	const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
+	static const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
 		AR_PHY_TX_IQCAL_STATUS_B0,
 		AR_PHY_TX_IQCAL_STATUS_B1,
 		AR_PHY_TX_IQCAL_STATUS_B2,
 	};
-	const u32 tx_corr_coeff[AR9300_MAX_CHAINS] = {
-		AR_PHY_TX_IQCAL_CORR_COEFF_01_B0,
-		AR_PHY_TX_IQCAL_CORR_COEFF_01_B1,
-		AR_PHY_TX_IQCAL_CORR_COEFF_01_B2,
-	};
-	const u32 rx_corr[AR9300_MAX_CHAINS] = {
-		AR_PHY_RX_IQCAL_CORR_B0,
-		AR_PHY_RX_IQCAL_CORR_B1,
-		AR_PHY_RX_IQCAL_CORR_B2,
-	};
-	const u_int32_t chan_info_tab[] = {
+	static const u32 chan_info_tab[] = {
 		AR_PHY_CHAN_INFO_TAB_0,
 		AR_PHY_CHAN_INFO_TAB_1,
 		AR_PHY_CHAN_INFO_TAB_2,
 	};
+	struct coeff coeff;
 	s32 iq_res[6];
-	s32 iqc_coeff[2];
-	s32 i, j;
-	u32 num_chains = 0;
+	s32 i, j, ip, im, nmeasurement;
+	u8 nchains = get_streams(common->tx_chainmask);
+
+	for (ip = 0; ip < MPASS; ip++) {
+		REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
+			      AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
+			      DELPT);
+		REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
+			      AR_PHY_TX_IQCAL_START_DO_CAL,
+			      AR_PHY_TX_IQCAL_START_DO_CAL);
+
+		if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
+				   AR_PHY_TX_IQCAL_START_DO_CAL,
+				   0, AH_WAIT_TIMEOUT)) {
+			ath_dbg(common, ATH_DBG_CALIBRATE,
+				"Tx IQ Cal not complete.\n");
+			goto TX_IQ_CAL_FAILED;
+		}
 
-	for (i = 0; i < AR9300_MAX_CHAINS; i++) {
-		if (ah->txchainmask & (1 << i))
-			num_chains++;
-	}
+		nmeasurement = REG_READ_FIELD(ah, AR_PHY_TX_IQCAL_STATUS_B0,
+					      AR_PHY_CALIBRATED_GAINS_0);
+		if (nmeasurement > MAX_MEASUREMENT)
+			nmeasurement = MAX_MEASUREMENT;
+
+		for (i = 0; i < nchains; i++) {
+			ath_dbg(common, ATH_DBG_CALIBRATE,
+				"Doing Tx IQ Cal for chain %d.\n", i);
+			for (im = 0; im < nmeasurement; im++) {
+				if (REG_READ(ah, txiqcal_status[i]) &
+				    AR_PHY_TX_IQCAL_STATUS_FAILED) {
+					ath_dbg(common, ATH_DBG_CALIBRATE,
+						"Tx IQ Cal failed for chain %d.\n", i);
+					goto TX_IQ_CAL_FAILED;
+				}
 
-	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1,
-		      AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT,
-		      DELPT);
-	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START,
-		      AR_PHY_TX_IQCAL_START_DO_CAL,
-		      AR_PHY_TX_IQCAL_START_DO_CAL);
+				for (j = 0; j < 3; j++) {
+					u8 idx = 2 * j,
+					   offset = 4 * (3 * im + j);
 
-	if (!ath9k_hw_wait(ah, AR_PHY_TX_IQCAL_START,
-			   AR_PHY_TX_IQCAL_START_DO_CAL,
-			   0, AH_WAIT_TIMEOUT)) {
-		ath_print(common, ATH_DBG_CALIBRATE,
-			  "Tx IQ Cal not complete.\n");
-		goto TX_IQ_CAL_FAILED;
-	}
+					REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+						      AR_PHY_CHAN_INFO_TAB_S2_READ,
+						      0);
 
-	for (i = 0; i < num_chains; i++) {
-		ath_print(common, ATH_DBG_CALIBRATE,
-			  "Doing Tx IQ Cal for chain %d.\n", i);
+					/* 32 bits */
+					iq_res[idx] = REG_READ(ah,
+							chan_info_tab[i] +
+							offset);
 
-		if (REG_READ(ah, txiqcal_status[i]) &
-		    AR_PHY_TX_IQCAL_STATUS_FAILED) {
-			ath_print(common, ATH_DBG_CALIBRATE,
-				  "Tx IQ Cal failed for chain %d.\n", i);
-			goto TX_IQ_CAL_FAILED;
-		}
+					REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
+						      AR_PHY_CHAN_INFO_TAB_S2_READ,
+						      1);
 
-		for (j = 0; j < 3; j++) {
-			u_int8_t idx = 2 * j,
-				 offset = 4 * j;
+					/* 16 bits */
+					iq_res[idx+1] = 0xffff & REG_READ(ah,
+							chan_info_tab[i] +
+							offset);
 
-			REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
-				      AR_PHY_CHAN_INFO_TAB_S2_READ, 0);
-
-			/* 32 bits */
-			iq_res[idx] = REG_READ(ah, chan_info_tab[i] + offset);
+					ath_dbg(common, ATH_DBG_CALIBRATE,
+						"IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
+						idx, iq_res[idx], idx+1, iq_res[idx+1]);
+				}
 
-			REG_RMW_FIELD(ah, AR_PHY_CHAN_INFO_MEMORY,
-				      AR_PHY_CHAN_INFO_TAB_S2_READ, 1);
+				if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
+							    coeff.iqc_coeff)) {
+					ath_dbg(common, ATH_DBG_CALIBRATE,
+						"Failed in calculation of IQ correction.\n");
+					goto TX_IQ_CAL_FAILED;
+				}
+				coeff.mag_coeff[i][im][ip] =
+					coeff.iqc_coeff[0] & 0x7f;
+				coeff.phs_coeff[i][im][ip] =
+					(coeff.iqc_coeff[0] >> 7) & 0x7f;
 
-			/* 16 bits */
-			iq_res[idx+1] = 0xffff & REG_READ(ah,
-							  chan_info_tab[i] +
-							  offset);
+				if (coeff.mag_coeff[i][im][ip] > 63)
+					coeff.mag_coeff[i][im][ip] -= 128;
+				if (coeff.phs_coeff[i][im][ip] > 63)
+					coeff.phs_coeff[i][im][ip] -= 128;
 
-			ath_print(common, ATH_DBG_CALIBRATE,
+			}
-				  "IQ RES[%d]=0x%x IQ_RES[%d]=0x%x\n",
-				  idx, iq_res[idx], idx+1, iq_res[idx+1]);
-		}
-
-		if (!ar9003_hw_calc_iq_corr(ah, i, iq_res, iqc_coeff)) {
-			ath_print(common, ATH_DBG_CALIBRATE,
-				  "Failed in calculation of IQ correction.\n");
-			goto TX_IQ_CAL_FAILED;
 		}
-
-		ath_print(common, ATH_DBG_CALIBRATE,
-			  "IQ_COEFF[0] = 0x%x IQ_COEFF[1] = 0x%x\n",
-			  iqc_coeff[0], iqc_coeff[1]);
-
-		REG_RMW_FIELD(ah, tx_corr_coeff[i],
-			      AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE,
-			      iqc_coeff[0]);
-		REG_RMW_FIELD(ah, rx_corr[i],
-			      AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_Q_COFF,
-			      iqc_coeff[1] >> 7);
-		REG_RMW_FIELD(ah, rx_corr[i],
-			      AR_PHY_RX_IQCAL_CORR_LOOPBACK_IQCORR_Q_I_COFF,
-			      iqc_coeff[1]);
 	}
 
-	REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_3,
-		      AR_PHY_TX_IQCAL_CONTROL_3_IQCORR_EN, 0x1);
-	REG_RMW_FIELD(ah, AR_PHY_RX_IQCAL_CORR_B0,
-		      AR_PHY_RX_IQCAL_CORR_B0_LOOPBACK_IQCORR_EN, 0x1);
+	ar9003_hw_tx_iqcal_load_avg_2_passes(ah, nchains, &coeff);
 
 	return;
 
 TX_IQ_CAL_FAILED:
-	ath_print(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
+	ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
822}
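/*
 * Illustrative sketch, not part of this patch: the mag/phs values captured
 * above are 7-bit two's-complement fields. The "if (x > 63) x -= 128" step
 * is ordinary sign extension of a 7-bit quantity, and the load function
 * earlier re-packs the averaged magnitude into bits [6:0] and the phase
 * into bits [13:7] of the correction word. Both steps in isolation
 * (helper names are hypothetical):
 */
#if 0	/* illustrative only, not driver code */
static s32 sign_extend_7bit(u32 field)
{
	s32 v = field & 0x7f;

	return (v > 63) ? v - 128 : v;	/* e.g. 0x7f -> -1, 0x3f -> 63 */
}

static u32 pack_mag_phs(s32 magnitude, s32 phase)
{
	return (magnitude & 0x7f) | ((phase & 0x7f) << 7);
}
#endif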
823
824static void ar9003_hw_tx_iq_cal_run(struct ath_hw *ah)
825{
826 u8 tx_gain_forced;
827
828 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_CONTROL_1_9485,
829 AR_PHY_TX_IQCAQL_CONTROL_1_IQCORR_I_Q_COFF_DELPT, DELPT);
830 tx_gain_forced = REG_READ_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
831 AR_PHY_TXGAIN_FORCE);
832 if (tx_gain_forced)
833 REG_RMW_FIELD(ah, AR_PHY_TX_FORCED_GAIN,
834 AR_PHY_TXGAIN_FORCE, 0);
835
836 REG_RMW_FIELD(ah, AR_PHY_TX_IQCAL_START_9485,
837 AR_PHY_TX_IQCAL_START_DO_CAL_9485, 1);
715} 838}
716 839
840static void ar9003_hw_tx_iq_cal_post_proc(struct ath_hw *ah)
841{
842 struct ath_common *common = ath9k_hw_common(ah);
843 const u32 txiqcal_status[AR9300_MAX_CHAINS] = {
844 AR_PHY_TX_IQCAL_STATUS_B0_9485,
845 AR_PHY_TX_IQCAL_STATUS_B1,
846 AR_PHY_TX_IQCAL_STATUS_B2,
847 };
848 const u_int32_t chan_info_tab[] = {
849 AR_PHY_CHAN_INFO_TAB_0,
850 AR_PHY_CHAN_INFO_TAB_1,
851 AR_PHY_CHAN_INFO_TAB_2,
852 };
853 struct coeff coeff;
854 s32 iq_res[6];
855 u8 num_chains = 0;
856 int i, ip, im, j;
857 int nmeasurement;
858
859 for (i = 0; i < AR9300_MAX_CHAINS; i++) {
860 if (ah->txchainmask & (1 << i))
861 num_chains++;
862 }
863
864 for (ip = 0; ip < MPASS; ip++) {
865 for (i = 0; i < num_chains; i++) {
866 nmeasurement = REG_READ_FIELD(ah,
867 AR_PHY_TX_IQCAL_STATUS_B0_9485,
868 AR_PHY_CALIBRATED_GAINS_0);
869 if (nmeasurement > MAX_MEASUREMENT)
870 nmeasurement = MAX_MEASUREMENT;
871
872 for (im = 0; im < nmeasurement; im++) {
873 ath_dbg(common, ATH_DBG_CALIBRATE,
874 "Doing Tx IQ Cal for chain %d.\n", i);
875
876 if (REG_READ(ah, txiqcal_status[i]) &
877 AR_PHY_TX_IQCAL_STATUS_FAILED) {
878 ath_dbg(common, ATH_DBG_CALIBRATE,
879 "Tx IQ Cal failed for chain %d.\n", i);
880 goto tx_iqcal_fail;
881 }
882
883 for (j = 0; j < 3; j++) {
884 u32 idx = 2 * j, offset = 4 * (3 * im + j);
885
886 REG_RMW_FIELD(ah,
887 AR_PHY_CHAN_INFO_MEMORY,
888 AR_PHY_CHAN_INFO_TAB_S2_READ,
889 0);
890
891 /* 32 bits */
892 iq_res[idx] = REG_READ(ah,
893 chan_info_tab[i] +
894 offset);
895
896 REG_RMW_FIELD(ah,
897 AR_PHY_CHAN_INFO_MEMORY,
898 AR_PHY_CHAN_INFO_TAB_S2_READ,
899 1);
900
901 /* 16 bits */
902 iq_res[idx + 1] = 0xffff & REG_READ(ah,
903 chan_info_tab[i] + offset);
904
905 ath_dbg(common, ATH_DBG_CALIBRATE,
906 "IQ RES[%d]=0x%x"
907 "IQ_RES[%d]=0x%x\n",
908 idx, iq_res[idx], idx + 1,
909 iq_res[idx + 1]);
910 }
911
912 if (!ar9003_hw_calc_iq_corr(ah, i, iq_res,
913 coeff.iqc_coeff)) {
914 ath_dbg(common, ATH_DBG_CALIBRATE,
915 "Failed in calculation of IQ correction.\n");
916 goto tx_iqcal_fail;
917 }
918
919 coeff.mag_coeff[i][im][ip] =
920 coeff.iqc_coeff[0] & 0x7f;
921 coeff.phs_coeff[i][im][ip] =
922 (coeff.iqc_coeff[0] >> 7) & 0x7f;
923
924 if (coeff.mag_coeff[i][im][ip] > 63)
925 coeff.mag_coeff[i][im][ip] -= 128;
926 if (coeff.phs_coeff[i][im][ip] > 63)
927 coeff.phs_coeff[i][im][ip] -= 128;
928 }
929 }
930 }
931 ar9003_hw_tx_iqcal_load_avg_2_passes(ah, num_chains, &coeff);
932
933 return;
934
935tx_iqcal_fail:
936 ath_dbg(common, ATH_DBG_CALIBRATE, "Tx IQ Cal failed\n");
937 return;
938}
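/*
 * Illustrative sketch, not part of this patch: ar9003_hw_tx_iq_cal() and the
 * AR9485 post-processing above read the measurement results the same way.
 * For measurement index im, three words are read from chan_info_tab[chain]
 * at offset = 4 * (3 * im + j) for j = 0..2; each word is read once whole
 * (32 bits) and once masked to 16 bits with CHAN_INFO_TAB_S2_READ toggled,
 * filling iq_res[0..5] for ar9003_hw_calc_iq_corr(). The indexing alone,
 * with the S2_READ toggling omitted (function name is hypothetical):
 */
#if 0	/* illustrative only, not driver code */
static void read_iq_results(struct ath_hw *ah, u32 base, int im, s32 iq_res[6])
{
	int j;

	for (j = 0; j < 3; j++) {
		u32 offset = 4 * (3 * im + j);

		iq_res[2 * j]     = REG_READ(ah, base + offset);          /* 32 bits */
		iq_res[2 * j + 1] = 0xffff & REG_READ(ah, base + offset); /* 16 bits */
	}
}
#endif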
 static bool ar9003_hw_init_cal(struct ath_hw *ah,
 			       struct ath9k_channel *chan)
 {
 	struct ath_common *common = ath9k_hw_common(ah);
+	int val;
 
-	/*
-	 * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain mode before
-	 * running AGC/TxIQ cals
-	 */
-	ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
+	val = REG_READ(ah, AR_ENT_OTP);
+	ath_dbg(common, ATH_DBG_CALIBRATE, "ath9k: AR_ENT_OTP 0x%x\n", val);
+
+	if (AR_SREV_9485(ah))
+		ar9003_hw_set_chain_masks(ah, 0x1, 0x1);
+	else if (val & AR_ENT_OTP_CHAIN2_DISABLE)
+		ar9003_hw_set_chain_masks(ah, 0x3, 0x3);
+	else
+		/*
+		 * 0x7 = 0b111 , AR9003 needs to be configured for 3-chain
+		 * mode before running AGC/TxIQ cals
+		 */
+		ar9003_hw_set_chain_masks(ah, 0x7, 0x7);
 
 	/* Do Tx IQ Calibration */
-	ar9003_hw_tx_iq_cal(ah);
+	if (AR_SREV_9485(ah))
+		ar9003_hw_tx_iq_cal_run(ah);
+	else
+		ar9003_hw_tx_iq_cal(ah);
+
 	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_DIS);
 	udelay(5);
 	REG_WRITE(ah, AR_PHY_ACTIVE, AR_PHY_ACTIVE_EN);
@@ -739,12 +974,14 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 	/* Poll for offset calibration complete */
 	if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
 			   0, AH_WAIT_TIMEOUT)) {
-		ath_print(common, ATH_DBG_CALIBRATE,
-			  "offset calibration failed to "
-			  "complete in 1ms; noisy environment?\n");
+		ath_dbg(common, ATH_DBG_CALIBRATE,
+			"offset calibration failed to complete in 1ms; noisy environment?\n");
 		return false;
 	}
 
+	if (AR_SREV_9485(ah))
+		ar9003_hw_tx_iq_cal_post_proc(ah);
+
 	/* Revert chainmasks to their original values before NF cal */
 	ar9003_hw_set_chain_masks(ah, ah->rxchainmask, ah->txchainmask);
 
@@ -757,15 +994,15 @@ static bool ar9003_hw_init_cal(struct ath_hw *ah,
 	if (ah->supp_cals & IQ_MISMATCH_CAL) {
 		INIT_CAL(&ah->iq_caldata);
 		INSERT_CAL(ah, &ah->iq_caldata);
-		ath_print(common, ATH_DBG_CALIBRATE,
-			  "enabling IQ Calibration.\n");
+		ath_dbg(common, ATH_DBG_CALIBRATE,
+			"enabling IQ Calibration.\n");
 	}
 
 	if (ah->supp_cals & TEMP_COMP_CAL) {
 		INIT_CAL(&ah->tempCompCalData);
 		INSERT_CAL(ah, &ah->tempCompCalData);
-		ath_print(common, ATH_DBG_CALIBRATE,
-			  "enabling Temperature Compensation Calibration.\n");
+		ath_dbg(common, ATH_DBG_CALIBRATE,
+			"enabling Temperature Compensation Calibration.\n");
 	}
 
 	/* Initialize current pointer to first element in list */
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index a7b82f0085d2..4819747fa4c3 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -22,12 +22,14 @@
 #define COMP_CKSUM_LEN 2
 
 #define AR_CH0_TOP (0x00016288)
-#define AR_CH0_TOP_XPABIASLVL (0x3)
+#define AR_CH0_TOP_XPABIASLVL (0x300)
 #define AR_CH0_TOP_XPABIASLVL_S (8)
 
 #define AR_CH0_THERM (0x00016290)
-#define AR_CH0_THERM_SPARE (0x3f)
-#define AR_CH0_THERM_SPARE_S (0)
+#define AR_CH0_THERM_XPABIASLVL_MSB 0x3
+#define AR_CH0_THERM_XPABIASLVL_MSB_S 0
+#define AR_CH0_THERM_XPASHORT2GND 0x4
+#define AR_CH0_THERM_XPASHORT2GND_S 2
 
 #define AR_SWITCH_TABLE_COM_ALL (0xffff)
 #define AR_SWITCH_TABLE_COM_ALL_S (0)
@@ -57,6 +59,12 @@
 
 #define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))
 
+#define EEPROM_DATA_LEN_9485 1088
+
+static int ar9003_hw_power_interpolate(int32_t x,
+				       int32_t *px, int32_t *py, u_int16_t np);
+
+
 static const struct ar9300_eeprom ar9300_default = {
 	.eepromVersion = 2,
 	.templateVersion = 2,
@@ -67,7 +75,7 @@ static const struct ar9300_eeprom ar9300_default = {
 	.regDmn = { LE16(0), LE16(0x1f) },
 	.txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
 	.opCapFlags = {
-		.opFlags = AR9300_OPFLAGS_11G | AR9300_OPFLAGS_11A,
+		.opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
 		.eepMisc = 0,
 	},
 	.rfSilent = 0,
@@ -146,13 +154,16 @@ static const struct ar9300_eeprom ar9300_default = {
 		.txEndToRxOn = 0x2,
 		.txFrameToXpaOn = 0xe,
 		.thresh62 = 28,
-		.papdRateMaskHt20 = LE32(0x80c080),
-		.papdRateMaskHt40 = LE32(0x80c080),
+		.papdRateMaskHt20 = LE32(0x0cf0e0e0),
+		.papdRateMaskHt40 = LE32(0x6cf0e0e0),
 		.futureModal = {
-			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-			0, 0, 0, 0, 0, 0, 0, 0
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		},
 	},
+	.base_ext1 = {
+		.ant_div_control = 0,
+		.future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
+	},
 	.calFreqPier2G = {
 		FREQ2FBIN(2412, 1),
 		FREQ2FBIN(2437, 1),
@@ -287,8 +298,7 @@ static const struct ar9300_eeprom ar9300_default = {
 			/* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
 			/* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
 			/* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
-			/* Data[11].ctlEdges[3].bChannel */
-			FREQ2FBIN(2462, 1),
+			/* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
 		}
 	},
 	.ctlPowerData_2G = {
@@ -346,13 +356,20 @@ static const struct ar9300_eeprom ar9300_default = {
 		.txEndToRxOn = 0x2,
 		.txFrameToXpaOn = 0xe,
 		.thresh62 = 28,
-		.papdRateMaskHt20 = LE32(0xf0e0e0),
-		.papdRateMaskHt40 = LE32(0xf0e0e0),
+		.papdRateMaskHt20 = LE32(0x0c80c080),
+		.papdRateMaskHt40 = LE32(0x0080c080),
 		.futureModal = {
-			0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
-			0, 0, 0, 0, 0, 0, 0, 0
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
 		},
 	},
+	.base_ext2 = {
+		.tempSlopeLow = 0,
+		.tempSlopeHigh = 0,
+		.xatten1DBLow = {0, 0, 0},
+		.xatten1MarginLow = {0, 0, 0},
+		.xatten1DBHigh = {0, 0, 0},
+		.xatten1MarginHigh = {0, 0, 0}
+	},
 	.calFreqPier5G = {
 		FREQ2FBIN(5180, 0),
 		FREQ2FBIN(5220, 0),
@@ -626,9 +643,2341 @@ static const struct ar9300_eeprom ar9300_default = {
 	}
 };
 
646static const struct ar9300_eeprom ar9300_x113 = {
647 .eepromVersion = 2,
648 .templateVersion = 6,
649 .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
650 .custData = {"x113-023-f0000"},
651 .baseEepHeader = {
652 .regDmn = { LE16(0), LE16(0x1f) },
653 .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
654 .opCapFlags = {
655 .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
656 .eepMisc = 0,
657 },
658 .rfSilent = 0,
659 .blueToothOptions = 0,
660 .deviceCap = 0,
661 .deviceType = 5, /* takes lower byte in eeprom location */
662 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
663 .params_for_tuning_caps = {0, 0},
664 .featureEnable = 0x0d,
665 /*
666 * bit0 - enable tx temp comp - disabled
667 * bit1 - enable tx volt comp - disabled
668 * bit2 - enable fastClock - enabled
669 * bit3 - enable doubling - enabled
670 * bit4 - enable internal regulator - disabled
671 * bit5 - enable pa predistortion - disabled
672 */
673 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
674 .eepromWriteEnableGpio = 6,
675 .wlanDisableGpio = 0,
676 .wlanLedGpio = 8,
677 .rxBandSelectGpio = 0xff,
678 .txrxgain = 0x21,
679 .swreg = 0,
680 },
681 .modalHeader2G = {
682 /* ar9300_modal_eep_header 2g */
683 /* 4 idle,t1,t2,b(4 bits per setting) */
684 .antCtrlCommon = LE32(0x110),
685 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
686 .antCtrlCommon2 = LE32(0x44444),
687
688 /*
689 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
690 * rx1, rx12, b (2 bits each)
691 */
692 .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
693
694 /*
695 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
696 * for ar9280 (0xa20c/b20c 5:0)
697 */
698 .xatten1DB = {0, 0, 0},
699
700 /*
701 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
702 * for ar9280 (0xa20c/b20c 16:12
703 */
704 .xatten1Margin = {0, 0, 0},
705 .tempSlope = 25,
706 .voltSlope = 0,
707
708 /*
709 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
710 * channels in usual fbin coding format
711 */
712 .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
713
714 /*
715 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
716 * if the register is per chain
717 */
718 .noiseFloorThreshCh = {-1, 0, 0},
719 .ob = {1, 1, 1},/* 3 chain */
720 .db_stage2 = {1, 1, 1}, /* 3 chain */
721 .db_stage3 = {0, 0, 0},
722 .db_stage4 = {0, 0, 0},
723 .xpaBiasLvl = 0,
724 .txFrameToDataStart = 0x0e,
725 .txFrameToPaOn = 0x0e,
726 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
727 .antennaGain = 0,
728 .switchSettling = 0x2c,
729 .adcDesiredSize = -30,
730 .txEndToXpaOff = 0,
731 .txEndToRxOn = 0x2,
732 .txFrameToXpaOn = 0xe,
733 .thresh62 = 28,
734 .papdRateMaskHt20 = LE32(0x0c80c080),
735 .papdRateMaskHt40 = LE32(0x0080c080),
736 .futureModal = {
737 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
738 },
739 },
740 .base_ext1 = {
741 .ant_div_control = 0,
742 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
743 },
744 .calFreqPier2G = {
745 FREQ2FBIN(2412, 1),
746 FREQ2FBIN(2437, 1),
747 FREQ2FBIN(2472, 1),
748 },
749 /* ar9300_cal_data_per_freq_op_loop 2g */
750 .calPierData2G = {
751 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
752 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
753 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
754 },
755 .calTarget_freqbin_Cck = {
756 FREQ2FBIN(2412, 1),
757 FREQ2FBIN(2472, 1),
758 },
759 .calTarget_freqbin_2G = {
760 FREQ2FBIN(2412, 1),
761 FREQ2FBIN(2437, 1),
762 FREQ2FBIN(2472, 1)
763 },
764 .calTarget_freqbin_2GHT20 = {
765 FREQ2FBIN(2412, 1),
766 FREQ2FBIN(2437, 1),
767 FREQ2FBIN(2472, 1)
768 },
769 .calTarget_freqbin_2GHT40 = {
770 FREQ2FBIN(2412, 1),
771 FREQ2FBIN(2437, 1),
772 FREQ2FBIN(2472, 1)
773 },
774 .calTargetPowerCck = {
775 /* 1L-5L,5S,11L,11S */
776 { {34, 34, 34, 34} },
777 { {34, 34, 34, 34} },
778 },
779 .calTargetPower2G = {
780 /* 6-24,36,48,54 */
781 { {34, 34, 32, 32} },
782 { {34, 34, 32, 32} },
783 { {34, 34, 32, 32} },
784 },
785 .calTargetPower2GHT20 = {
786 { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
787 { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
788 { {32, 32, 32, 32, 32, 28, 32, 32, 30, 28, 0, 0, 0, 0} },
789 },
790 .calTargetPower2GHT40 = {
791 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
792 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
793 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
794 },
795 .ctlIndex_2G = {
796 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
797 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
798 },
799 .ctl_freqbin_2G = {
800 {
801 FREQ2FBIN(2412, 1),
802 FREQ2FBIN(2417, 1),
803 FREQ2FBIN(2457, 1),
804 FREQ2FBIN(2462, 1)
805 },
806 {
807 FREQ2FBIN(2412, 1),
808 FREQ2FBIN(2417, 1),
809 FREQ2FBIN(2462, 1),
810 0xFF,
811 },
812
813 {
814 FREQ2FBIN(2412, 1),
815 FREQ2FBIN(2417, 1),
816 FREQ2FBIN(2462, 1),
817 0xFF,
818 },
819 {
820 FREQ2FBIN(2422, 1),
821 FREQ2FBIN(2427, 1),
822 FREQ2FBIN(2447, 1),
823 FREQ2FBIN(2452, 1)
824 },
825
826 {
827 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
828 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
829 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
830 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
831 },
832
833 {
834 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
835 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
836 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
837 0,
838 },
839
840 {
841 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
842 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
843 FREQ2FBIN(2472, 1),
844 0,
845 },
846
847 {
848 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
849 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
850 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
851 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
852 },
853
854 {
855 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
856 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
857 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
858 },
859
860 {
861 /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
862 /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
863 /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
864 0
865 },
866
867 {
868 /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
869 /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
870 /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
871 0
872 },
873
874 {
875 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
876 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
877 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
878 /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
879 }
880 },
881 .ctlPowerData_2G = {
882 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
883 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
884 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
885
886 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
887 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
888 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
889
890 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
891 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
892 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
893
894 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
895 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
896 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
897 },
898 .modalHeader5G = {
899 /* 4 idle,t1,t2,b (4 bits per setting) */
900 .antCtrlCommon = LE32(0x220),
901 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
902 .antCtrlCommon2 = LE32(0x11111),
903 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
904 .antCtrlChain = {
905 LE16(0x150), LE16(0x150), LE16(0x150),
906 },
907 /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
908 .xatten1DB = {0, 0, 0},
909
910 /*
911 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
912 * for merlin (0xa20c/b20c 16:12
913 */
914 .xatten1Margin = {0, 0, 0},
915 .tempSlope = 68,
916 .voltSlope = 0,
917 /* spurChans spur channels in usual fbin coding format */
918 .spurChans = {FREQ2FBIN(5500, 0), 0, 0, 0, 0},
919 /* noiseFloorThreshCh Check if the register is per chain */
920 .noiseFloorThreshCh = {-1, 0, 0},
921 .ob = {3, 3, 3}, /* 3 chain */
922 .db_stage2 = {3, 3, 3}, /* 3 chain */
923 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
924 .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
925 .xpaBiasLvl = 0,
926 .txFrameToDataStart = 0x0e,
927 .txFrameToPaOn = 0x0e,
928 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
929 .antennaGain = 0,
930 .switchSettling = 0x2d,
931 .adcDesiredSize = -30,
932 .txEndToXpaOff = 0,
933 .txEndToRxOn = 0x2,
934 .txFrameToXpaOn = 0xe,
935 .thresh62 = 28,
936 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
937 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
938 .futureModal = {
939 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
940 },
941 },
942 .base_ext2 = {
943 .tempSlopeLow = 72,
944 .tempSlopeHigh = 105,
945 .xatten1DBLow = {0, 0, 0},
946 .xatten1MarginLow = {0, 0, 0},
947 .xatten1DBHigh = {0, 0, 0},
948 .xatten1MarginHigh = {0, 0, 0}
949 },
950 .calFreqPier5G = {
951 FREQ2FBIN(5180, 0),
952 FREQ2FBIN(5240, 0),
953 FREQ2FBIN(5320, 0),
954 FREQ2FBIN(5400, 0),
955 FREQ2FBIN(5500, 0),
956 FREQ2FBIN(5600, 0),
957 FREQ2FBIN(5745, 0),
958 FREQ2FBIN(5785, 0)
959 },
960 .calPierData5G = {
961 {
962 {0, 0, 0, 0, 0},
963 {0, 0, 0, 0, 0},
964 {0, 0, 0, 0, 0},
965 {0, 0, 0, 0, 0},
966 {0, 0, 0, 0, 0},
967 {0, 0, 0, 0, 0},
968 {0, 0, 0, 0, 0},
969 {0, 0, 0, 0, 0},
970 },
971 {
972 {0, 0, 0, 0, 0},
973 {0, 0, 0, 0, 0},
974 {0, 0, 0, 0, 0},
975 {0, 0, 0, 0, 0},
976 {0, 0, 0, 0, 0},
977 {0, 0, 0, 0, 0},
978 {0, 0, 0, 0, 0},
979 {0, 0, 0, 0, 0},
980 },
981 {
982 {0, 0, 0, 0, 0},
983 {0, 0, 0, 0, 0},
984 {0, 0, 0, 0, 0},
985 {0, 0, 0, 0, 0},
986 {0, 0, 0, 0, 0},
987 {0, 0, 0, 0, 0},
988 {0, 0, 0, 0, 0},
989 {0, 0, 0, 0, 0},
990 },
991
992 },
993 .calTarget_freqbin_5G = {
994 FREQ2FBIN(5180, 0),
995 FREQ2FBIN(5220, 0),
996 FREQ2FBIN(5320, 0),
997 FREQ2FBIN(5400, 0),
998 FREQ2FBIN(5500, 0),
999 FREQ2FBIN(5600, 0),
1000 FREQ2FBIN(5745, 0),
1001 FREQ2FBIN(5785, 0)
1002 },
1003 .calTarget_freqbin_5GHT20 = {
1004 FREQ2FBIN(5180, 0),
1005 FREQ2FBIN(5240, 0),
1006 FREQ2FBIN(5320, 0),
1007 FREQ2FBIN(5400, 0),
1008 FREQ2FBIN(5500, 0),
1009 FREQ2FBIN(5700, 0),
1010 FREQ2FBIN(5745, 0),
1011 FREQ2FBIN(5825, 0)
1012 },
1013 .calTarget_freqbin_5GHT40 = {
1014 FREQ2FBIN(5190, 0),
1015 FREQ2FBIN(5230, 0),
1016 FREQ2FBIN(5320, 0),
1017 FREQ2FBIN(5410, 0),
1018 FREQ2FBIN(5510, 0),
1019 FREQ2FBIN(5670, 0),
1020 FREQ2FBIN(5755, 0),
1021 FREQ2FBIN(5825, 0)
1022 },
1023 .calTargetPower5G = {
1024 /* 6-24,36,48,54 */
1025 { {42, 40, 40, 34} },
1026 { {42, 40, 40, 34} },
1027 { {42, 40, 40, 34} },
1028 { {42, 40, 40, 34} },
1029 { {42, 40, 40, 34} },
1030 { {42, 40, 40, 34} },
1031 { {42, 40, 40, 34} },
1032 { {42, 40, 40, 34} },
1033 },
1034 .calTargetPower5GHT20 = {
1035 /*
1036 * 0_8_16,1-3_9-11_17-19,
1037 * 4,5,6,7,12,13,14,15,20,21,22,23
1038 */
1039 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1040 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1041 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1042 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1043 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1044 { {40, 40, 40, 40, 32, 28, 40, 40, 32, 28, 40, 40, 32, 20} },
1045 { {38, 38, 38, 38, 32, 28, 38, 38, 32, 28, 38, 38, 32, 26} },
1046 { {36, 36, 36, 36, 32, 28, 36, 36, 32, 28, 36, 36, 32, 26} },
1047 },
1048 .calTargetPower5GHT40 = {
1049 /*
1050 * 0_8_16,1-3_9-11_17-19,
1051 * 4,5,6,7,12,13,14,15,20,21,22,23
1052 */
1053 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1054 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1055 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1056 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1057 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1058 { {40, 40, 40, 38, 30, 26, 40, 40, 30, 26, 40, 40, 30, 24} },
1059 { {36, 36, 36, 36, 30, 26, 36, 36, 30, 26, 36, 36, 30, 24} },
1060 { {34, 34, 34, 34, 30, 26, 34, 34, 30, 26, 34, 34, 30, 24} },
1061 },
1062 .ctlIndex_5G = {
1063 0x10, 0x16, 0x18, 0x40, 0x46,
1064 0x48, 0x30, 0x36, 0x38
1065 },
1066 .ctl_freqbin_5G = {
1067 {
1068 /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1069 /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1070 /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
1071 /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1072 /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
1073 /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1074 /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1075 /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1076 },
1077 {
1078 /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1079 /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1080 /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
1081 /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1082 /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
1083 /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1084 /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1085 /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1086 },
1087
1088 {
1089 /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1090 /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
1091 /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
1092 /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
1093 /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
1094 /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
1095 /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
1096 /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
1097 },
1098
1099 {
1100 /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1101 /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
1102 /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
1103 /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
1104 /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
1105 /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1106 /* Data[3].ctlEdges[6].bChannel */ 0xFF,
1107 /* Data[3].ctlEdges[7].bChannel */ 0xFF,
1108 },
1109
1110 {
1111 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1112 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1113 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
1114 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
1115 /* Data[4].ctlEdges[4].bChannel */ 0xFF,
1116 /* Data[4].ctlEdges[5].bChannel */ 0xFF,
1117 /* Data[4].ctlEdges[6].bChannel */ 0xFF,
1118 /* Data[4].ctlEdges[7].bChannel */ 0xFF,
1119 },
1120
1121 {
1122 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1123 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
1124 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
1125 /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
1126 /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
1127 /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
1128 /* Data[5].ctlEdges[6].bChannel */ 0xFF,
1129 /* Data[5].ctlEdges[7].bChannel */ 0xFF
1130 },
1131
1132 {
1133 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1134 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
1135 /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
1136 /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
1137 /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
1138 /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
1139 /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
1140 /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
1141 },
1142
1143 {
1144 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1145 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1146 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
1147 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1148 /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
1149 /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1150 /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1151 /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1152 },
1153
1154 {
1155 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1156 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
1157 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
1158 /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
1159 /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
1160 /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
1161 /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
1162 /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
1163 }
1164 },
1165 .ctlPowerData_5G = {
1166 {
1167 {
1168 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1169 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1170 }
1171 },
1172 {
1173 {
1174 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1175 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1176 }
1177 },
1178 {
1179 {
1180 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1181 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1182 }
1183 },
1184 {
1185 {
1186 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1187 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1188 }
1189 },
1190 {
1191 {
1192 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1193 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1194 }
1195 },
1196 {
1197 {
1198 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1199 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1200 }
1201 },
1202 {
1203 {
1204 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1205 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1206 }
1207 },
1208 {
1209 {
1210 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1211 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1212 }
1213 },
1214 {
1215 {
1216 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
1217 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1218 }
1219 },
1220 }
1221};
1222
1223
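/*
 * Illustrative sketch, not part of this patch: the template tables above and
 * below store channels and CTL limits in compact form. CTL() is defined
 * earlier in this file as ((_tpower) | ((_flag) << 6)), so the low 6 bits
 * carry the target power and bit 6 the 0/1 flag used in these tables.
 * FREQ2FBIN() is defined in the ath9k headers, not in this hunk; assuming
 * the usual ath9k convention of fbin = freq - 2300 for 2.4 GHz and
 * (freq - 4800) / 5 for 5 GHz, the encodings can be undone as follows
 * (helper names are hypothetical):
 */
#if 0	/* illustrative only, not driver code */
static u16 fbin2freq(u8 fbin, bool is_2ghz)
{
	/* inverse of FREQ2FBIN under the assumed convention */
	return is_2ghz ? 2300 + fbin : 4800 + 5 * fbin;
}

static void ctl_unpack(u8 ctl, u8 *tpower, u8 *flag)
{
	*tpower = ctl & 0x3f;	/* low 6 bits: target power */
	*flag = (ctl >> 6) & 1;	/* bit 6: flag, 0 or 1 in these tables */
}
#endif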
1224static const struct ar9300_eeprom ar9300_h112 = {
1225 .eepromVersion = 2,
1226 .templateVersion = 3,
1227 .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
1228 .custData = {"h112-241-f0000"},
1229 .baseEepHeader = {
1230 .regDmn = { LE16(0), LE16(0x1f) },
1231 .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
1232 .opCapFlags = {
1233 .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
1234 .eepMisc = 0,
1235 },
1236 .rfSilent = 0,
1237 .blueToothOptions = 0,
1238 .deviceCap = 0,
1239 .deviceType = 5, /* takes lower byte in eeprom location */
1240 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
1241 .params_for_tuning_caps = {0, 0},
1242 .featureEnable = 0x0d,
1243 /*
1244 * bit0 - enable tx temp comp - disabled
1245 * bit1 - enable tx volt comp - disabled
1246 * bit2 - enable fastClock - enabled
1247 * bit3 - enable doubling - enabled
1248 * bit4 - enable internal regulator - disabled
1249 * bit5 - enable pa predistortion - disabled
1250 */
1251 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
1252 .eepromWriteEnableGpio = 6,
1253 .wlanDisableGpio = 0,
1254 .wlanLedGpio = 8,
1255 .rxBandSelectGpio = 0xff,
1256 .txrxgain = 0x10,
1257 .swreg = 0,
1258 },
1259 .modalHeader2G = {
1260 /* ar9300_modal_eep_header 2g */
1261 /* 4 idle,t1,t2,b(4 bits per setting) */
1262 .antCtrlCommon = LE32(0x110),
1263 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
1264 .antCtrlCommon2 = LE32(0x44444),
1265
1266 /*
1267 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
1268 * rx1, rx12, b (2 bits each)
1269 */
1270 .antCtrlChain = { LE16(0x150), LE16(0x150), LE16(0x150) },
1271
1272 /*
1273 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
1274 * for ar9280 (0xa20c/b20c 5:0)
1275 */
1276 .xatten1DB = {0, 0, 0},
1277
1278 /*
1279 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
1280 * for ar9280 (0xa20c/b20c 16:12
1281 */
1282 .xatten1Margin = {0, 0, 0},
1283 .tempSlope = 25,
1284 .voltSlope = 0,
1285
1286 /*
1287 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
1288 * channels in usual fbin coding format
1289 */
1290 .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
1291
1292 /*
1293 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
1294 * if the register is per chain
1295 */
1296 .noiseFloorThreshCh = {-1, 0, 0},
1297 .ob = {1, 1, 1},/* 3 chain */
1298 .db_stage2 = {1, 1, 1}, /* 3 chain */
1299 .db_stage3 = {0, 0, 0},
1300 .db_stage4 = {0, 0, 0},
1301 .xpaBiasLvl = 0,
1302 .txFrameToDataStart = 0x0e,
1303 .txFrameToPaOn = 0x0e,
1304 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
1305 .antennaGain = 0,
1306 .switchSettling = 0x2c,
1307 .adcDesiredSize = -30,
1308 .txEndToXpaOff = 0,
1309 .txEndToRxOn = 0x2,
1310 .txFrameToXpaOn = 0xe,
1311 .thresh62 = 28,
1312 .papdRateMaskHt20 = LE32(0x80c080),
1313 .papdRateMaskHt40 = LE32(0x80c080),
1314 .futureModal = {
1315 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1316 },
1317 },
1318 .base_ext1 = {
1319 .ant_div_control = 0,
1320 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1321 },
1322 .calFreqPier2G = {
1323 FREQ2FBIN(2412, 1),
1324 FREQ2FBIN(2437, 1),
1325 FREQ2FBIN(2472, 1),
1326 },
1327 /* ar9300_cal_data_per_freq_op_loop 2g */
1328 .calPierData2G = {
1329 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1330 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1331 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1332 },
1333 .calTarget_freqbin_Cck = {
1334 FREQ2FBIN(2412, 1),
1335 FREQ2FBIN(2484, 1),
1336 },
1337 .calTarget_freqbin_2G = {
1338 FREQ2FBIN(2412, 1),
1339 FREQ2FBIN(2437, 1),
1340 FREQ2FBIN(2472, 1)
1341 },
1342 .calTarget_freqbin_2GHT20 = {
1343 FREQ2FBIN(2412, 1),
1344 FREQ2FBIN(2437, 1),
1345 FREQ2FBIN(2472, 1)
1346 },
1347 .calTarget_freqbin_2GHT40 = {
1348 FREQ2FBIN(2412, 1),
1349 FREQ2FBIN(2437, 1),
1350 FREQ2FBIN(2472, 1)
1351 },
1352 .calTargetPowerCck = {
1353 /* 1L-5L,5S,11L,11S */
1354 { {34, 34, 34, 34} },
1355 { {34, 34, 34, 34} },
1356 },
1357 .calTargetPower2G = {
1358 /* 6-24,36,48,54 */
1359 { {34, 34, 32, 32} },
1360 { {34, 34, 32, 32} },
1361 { {34, 34, 32, 32} },
1362 },
1363 .calTargetPower2GHT20 = {
1364 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
1365 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
1366 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 28, 28, 28, 24} },
1367 },
1368 .calTargetPower2GHT40 = {
1369 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
1370 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
1371 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 26, 26, 26, 22} },
1372 },
1373 .ctlIndex_2G = {
1374 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
1375 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
1376 },
1377 .ctl_freqbin_2G = {
1378 {
1379 FREQ2FBIN(2412, 1),
1380 FREQ2FBIN(2417, 1),
1381 FREQ2FBIN(2457, 1),
1382 FREQ2FBIN(2462, 1)
1383 },
1384 {
1385 FREQ2FBIN(2412, 1),
1386 FREQ2FBIN(2417, 1),
1387 FREQ2FBIN(2462, 1),
1388 0xFF,
1389 },
1390
1391 {
1392 FREQ2FBIN(2412, 1),
1393 FREQ2FBIN(2417, 1),
1394 FREQ2FBIN(2462, 1),
1395 0xFF,
1396 },
1397 {
1398 FREQ2FBIN(2422, 1),
1399 FREQ2FBIN(2427, 1),
1400 FREQ2FBIN(2447, 1),
1401 FREQ2FBIN(2452, 1)
1402 },
1403
1404 {
1405 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1406 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1407 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1408 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
1409 },
1410
1411 {
1412 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1413 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1414 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1415 0,
1416 },
1417
1418 {
1419 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1420 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1421 FREQ2FBIN(2472, 1),
1422 0,
1423 },
1424
1425 {
1426 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
1427 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
1428 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
1429 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
1430 },
1431
1432 {
1433 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1434 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1435 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1436 },
1437
1438 {
1439 /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1440 /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1441 /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1442 0
1443 },
1444
1445 {
1446 /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
1447 /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
1448 /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
1449 0
1450 },
1451
1452 {
1453 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
1454 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
1455 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
1456 /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
1457 }
1458 },
1459 .ctlPowerData_2G = {
1460 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1461 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1462 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
1463
1464 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
1465 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1466 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1467
1468 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
1469 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1470 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1471
1472 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
1473 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
1474 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
1475 },
1476 .modalHeader5G = {
1477 /* 4 idle,t1,t2,b (4 bits per setting) */
1478 .antCtrlCommon = LE32(0x220),
1479 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
1480 .antCtrlCommon2 = LE32(0x44444),
1481 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
1482 .antCtrlChain = {
1483 LE16(0x150), LE16(0x150), LE16(0x150),
1484 },
1485 /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
1486 .xatten1DB = {0, 0, 0},
1487
1488 /*
1489 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
1490 * for merlin (0xa20c/b20c 16:12
1491 */
1492 .xatten1Margin = {0, 0, 0},
1493 .tempSlope = 45,
1494 .voltSlope = 0,
1495 /* spurChans spur channels in usual fbin coding format */
1496 .spurChans = {0, 0, 0, 0, 0},
1497 /* noiseFloorThreshCh Check if the register is per chain */
1498 .noiseFloorThreshCh = {-1, 0, 0},
1499 .ob = {3, 3, 3}, /* 3 chain */
1500 .db_stage2 = {3, 3, 3}, /* 3 chain */
1501 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
1502 .db_stage4 = {3, 3, 3}, /* don't exist for 2G */
1503 .xpaBiasLvl = 0,
1504 .txFrameToDataStart = 0x0e,
1505 .txFrameToPaOn = 0x0e,
1506 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
1507 .antennaGain = 0,
1508 .switchSettling = 0x2d,
1509 .adcDesiredSize = -30,
1510 .txEndToXpaOff = 0,
1511 .txEndToRxOn = 0x2,
1512 .txFrameToXpaOn = 0xe,
1513 .thresh62 = 28,
1514 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
1515 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
1516 .futureModal = {
1517 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1518 },
1519 },
1520 .base_ext2 = {
1521 .tempSlopeLow = 40,
1522 .tempSlopeHigh = 50,
1523 .xatten1DBLow = {0, 0, 0},
1524 .xatten1MarginLow = {0, 0, 0},
1525 .xatten1DBHigh = {0, 0, 0},
1526 .xatten1MarginHigh = {0, 0, 0}
1527 },
1528 .calFreqPier5G = {
1529 FREQ2FBIN(5180, 0),
1530 FREQ2FBIN(5220, 0),
1531 FREQ2FBIN(5320, 0),
1532 FREQ2FBIN(5400, 0),
1533 FREQ2FBIN(5500, 0),
1534 FREQ2FBIN(5600, 0),
1535 FREQ2FBIN(5700, 0),
1536 FREQ2FBIN(5825, 0)
1537 },
1538 .calPierData5G = {
1539 {
1540 {0, 0, 0, 0, 0},
1541 {0, 0, 0, 0, 0},
1542 {0, 0, 0, 0, 0},
1543 {0, 0, 0, 0, 0},
1544 {0, 0, 0, 0, 0},
1545 {0, 0, 0, 0, 0},
1546 {0, 0, 0, 0, 0},
1547 {0, 0, 0, 0, 0},
1548 },
1549 {
1550 {0, 0, 0, 0, 0},
1551 {0, 0, 0, 0, 0},
1552 {0, 0, 0, 0, 0},
1553 {0, 0, 0, 0, 0},
1554 {0, 0, 0, 0, 0},
1555 {0, 0, 0, 0, 0},
1556 {0, 0, 0, 0, 0},
1557 {0, 0, 0, 0, 0},
1558 },
1559 {
1560 {0, 0, 0, 0, 0},
1561 {0, 0, 0, 0, 0},
1562 {0, 0, 0, 0, 0},
1563 {0, 0, 0, 0, 0},
1564 {0, 0, 0, 0, 0},
1565 {0, 0, 0, 0, 0},
1566 {0, 0, 0, 0, 0},
1567 {0, 0, 0, 0, 0},
1568 },
1569
1570 },
1571 .calTarget_freqbin_5G = {
1572 FREQ2FBIN(5180, 0),
1573 FREQ2FBIN(5240, 0),
1574 FREQ2FBIN(5320, 0),
1575 FREQ2FBIN(5400, 0),
1576 FREQ2FBIN(5500, 0),
1577 FREQ2FBIN(5600, 0),
1578 FREQ2FBIN(5700, 0),
1579 FREQ2FBIN(5825, 0)
1580 },
1581 .calTarget_freqbin_5GHT20 = {
1582 FREQ2FBIN(5180, 0),
1583 FREQ2FBIN(5240, 0),
1584 FREQ2FBIN(5320, 0),
1585 FREQ2FBIN(5400, 0),
1586 FREQ2FBIN(5500, 0),
1587 FREQ2FBIN(5700, 0),
1588 FREQ2FBIN(5745, 0),
1589 FREQ2FBIN(5825, 0)
1590 },
1591 .calTarget_freqbin_5GHT40 = {
1592 FREQ2FBIN(5180, 0),
1593 FREQ2FBIN(5240, 0),
1594 FREQ2FBIN(5320, 0),
1595 FREQ2FBIN(5400, 0),
1596 FREQ2FBIN(5500, 0),
1597 FREQ2FBIN(5700, 0),
1598 FREQ2FBIN(5745, 0),
1599 FREQ2FBIN(5825, 0)
1600 },
1601 .calTargetPower5G = {
1602 /* 6-24,36,48,54 */
1603 { {30, 30, 28, 24} },
1604 { {30, 30, 28, 24} },
1605 { {30, 30, 28, 24} },
1606 { {30, 30, 28, 24} },
1607 { {30, 30, 28, 24} },
1608 { {30, 30, 28, 24} },
1609 { {30, 30, 28, 24} },
1610 { {30, 30, 28, 24} },
1611 },
1612 .calTargetPower5GHT20 = {
1613 /*
1614 * 0_8_16,1-3_9-11_17-19,
1615 * 4,5,6,7,12,13,14,15,20,21,22,23
1616 */
1617 { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
1618 { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 20, 20, 20, 16} },
1619 { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
1620 { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 18, 18, 18, 16} },
1621 { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
1622 { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 16, 16, 16, 14} },
1623 { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
1624 { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 14, 14, 14, 12} },
1625 },
1626 .calTargetPower5GHT40 = {
1627 /*
1628 * 0_8_16,1-3_9-11_17-19,
1629 * 4,5,6,7,12,13,14,15,20,21,22,23
1630 */
1631 { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
1632 { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 18, 18, 18, 14} },
1633 { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
1634 { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 16, 16, 16, 12} },
1635 { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
1636 { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 14, 14, 14, 10} },
1637 { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
1638 { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 12, 12, 12, 8} },
1639 },
1640 .ctlIndex_5G = {
1641 0x10, 0x16, 0x18, 0x40, 0x46,
1642 0x48, 0x30, 0x36, 0x38
1643 },
1644 .ctl_freqbin_5G = {
1645 {
1646 /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1647 /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1648 /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
1649 /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1650 /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
1651 /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1652 /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1653 /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1654 },
1655 {
1656 /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1657 /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1658 /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
1659 /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1660 /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
1661 /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1662 /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1663 /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1664 },
1665
1666 {
1667 /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1668 /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
1669 /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
1670 /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
1671 /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
1672 /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
1673 /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
1674 /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
1675 },
1676
1677 {
1678 /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1679 /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
1680 /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
1681 /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
1682 /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
1683 /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1684 /* Data[3].ctlEdges[6].bChannel */ 0xFF,
1685 /* Data[3].ctlEdges[7].bChannel */ 0xFF,
1686 },
1687
1688 {
1689 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1690 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1691 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
1692 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
1693 /* Data[4].ctlEdges[4].bChannel */ 0xFF,
1694 /* Data[4].ctlEdges[5].bChannel */ 0xFF,
1695 /* Data[4].ctlEdges[6].bChannel */ 0xFF,
1696 /* Data[4].ctlEdges[7].bChannel */ 0xFF,
1697 },
1698
1699 {
1700 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1701 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
1702 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
1703 /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
1704 /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
1705 /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
1706 /* Data[5].ctlEdges[6].bChannel */ 0xFF,
1707 /* Data[5].ctlEdges[7].bChannel */ 0xFF
1708 },
1709
1710 {
1711 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1712 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
1713 /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
1714 /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
1715 /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
1716 /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
1717 /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
1718 /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
1719 },
1720
1721 {
1722 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
1723 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
1724 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
1725 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
1726 /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
1727 /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
1728 /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
1729 /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
1730 },
1731
1732 {
1733 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
1734 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
1735 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
1736 /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
1737 /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
1738 /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
1739 /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
1740 /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
1741 }
1742 },
1743 .ctlPowerData_5G = {
1744 {
1745 {
1746 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1747 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1748 }
1749 },
1750 {
1751 {
1752 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1753 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1754 }
1755 },
1756 {
1757 {
1758 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1759 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1760 }
1761 },
1762 {
1763 {
1764 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1765 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1766 }
1767 },
1768 {
1769 {
1770 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1771 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1772 }
1773 },
1774 {
1775 {
1776 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1777 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
1778 }
1779 },
1780 {
1781 {
1782 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1783 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
1784 }
1785 },
1786 {
1787 {
1788 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1789 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
1790 }
1791 },
1792 {
1793 {
1794 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
1795 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
1796 }
1797 },
1798 }
1799};
1800
1801
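/*
 * Illustrative sketch, not part of this patch: per the comments in these
 * templates, each calTargetPower2G/5G row holds four entries covering the
 * legacy OFDM rate groups 6-24, 36, 48 and 54 Mbps, and the HT rows hold
 * fourteen entries for the MCS groups listed above them. A hypothetical
 * helper mapping a legacy rate to its column, just to make the grouping
 * explicit:
 */
#if 0	/* illustrative only, not driver code */
static int legacy_rate_to_tp_index(int mbps)
{
	switch (mbps) {
	case 6: case 9: case 12: case 18: case 24:
		return 0;	/* "6-24" group */
	case 36:
		return 1;
	case 48:
		return 2;
	case 54:
		return 3;
	default:
		return -1;	/* not a legacy OFDM rate */
	}
}
#endif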
1802static const struct ar9300_eeprom ar9300_x112 = {
1803 .eepromVersion = 2,
1804 .templateVersion = 5,
1805 .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
1806 .custData = {"x112-041-f0000"},
1807 .baseEepHeader = {
1808 .regDmn = { LE16(0), LE16(0x1f) },
1809 .txrxMask = 0x77, /* 4 bits tx and 4 bits rx */
1810 .opCapFlags = {
1811 .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
1812 .eepMisc = 0,
1813 },
1814 .rfSilent = 0,
1815 .blueToothOptions = 0,
1816 .deviceCap = 0,
1817 .deviceType = 5, /* takes lower byte in eeprom location */
1818 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
1819 .params_for_tuning_caps = {0, 0},
1820 .featureEnable = 0x0d,
1821 /*
1822 * bit0 - enable tx temp comp - disabled
1823 * bit1 - enable tx volt comp - disabled
1824 * bit2 - enable fastclock - enabled
1825 * bit3 - enable doubling - enabled
1826 * bit4 - enable internal regulator - disabled
1827 * bit5 - enable pa predistortion - disabled
1828 */
1829 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
1830 .eepromWriteEnableGpio = 6,
1831 .wlanDisableGpio = 0,
1832 .wlanLedGpio = 8,
1833 .rxBandSelectGpio = 0xff,
1834 .txrxgain = 0x0,
1835 .swreg = 0,
1836 },
1837 .modalHeader2G = {
1838 /* ar9300_modal_eep_header 2g */
1839 /* 4 idle,t1,t2,b(4 bits per setting) */
1840 .antCtrlCommon = LE32(0x110),
1841 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
1842 .antCtrlCommon2 = LE32(0x22222),
1843
1844 /*
1845 * antCtrlChain[ar9300_max_chains]; 6 idle, t, r,
1846 * rx1, rx12, b (2 bits each)
1847 */
1848 .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
1849
1850 /*
1851 * xatten1DB[AR9300_max_chains]; 3 xatten1_db
1852 * for ar9280 (0xa20c/b20c 5:0)
1853 */
1854 .xatten1DB = {0x1b, 0x1b, 0x1b},
1855
1856 /*
1857 * xatten1Margin[ar9300_max_chains]; 3 xatten1_margin
1858 * for ar9280 (0xa20c/b20c 16:12
1859 */
1860 .xatten1Margin = {0x15, 0x15, 0x15},
1861 .tempSlope = 50,
1862 .voltSlope = 0,
1863
1864 /*
1865 * spurChans[OSPrey_eeprom_modal_sPURS]; spur
1866 * channels in usual fbin coding format
1867 */
1868 .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
1869
1870 /*
1871 * noiseFloorThreshch[ar9300_max_cHAINS]; 3 Check
1872 * if the register is per chain
1873 */
1874 .noiseFloorThreshCh = {-1, 0, 0},
1875 .ob = {1, 1, 1},/* 3 chain */
1876 .db_stage2 = {1, 1, 1}, /* 3 chain */
1877 .db_stage3 = {0, 0, 0},
1878 .db_stage4 = {0, 0, 0},
1879 .xpaBiasLvl = 0,
1880 .txFrameToDataStart = 0x0e,
1881 .txFrameToPaOn = 0x0e,
1882 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
1883 .antennaGain = 0,
1884 .switchSettling = 0x2c,
1885 .adcDesiredSize = -30,
1886 .txEndToXpaOff = 0,
1887 .txEndToRxOn = 0x2,
1888 .txFrameToXpaOn = 0xe,
1889 .thresh62 = 28,
1890 .papdRateMaskHt20 = LE32(0x0c80c080),
1891 .papdRateMaskHt40 = LE32(0x0080c080),
1892 .futureModal = {
1893 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
1894 },
1895 },
1896 .base_ext1 = {
1897 .ant_div_control = 0,
1898 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
1899 },
1900 .calFreqPier2G = {
1901 FREQ2FBIN(2412, 1),
1902 FREQ2FBIN(2437, 1),
1903 FREQ2FBIN(2472, 1),
1904 },
1905 /* ar9300_cal_data_per_freq_op_loop 2g */
1906 .calPierData2G = {
1907 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1908 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1909 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
1910 },
1911 .calTarget_freqbin_Cck = {
1912 FREQ2FBIN(2412, 1),
1913 FREQ2FBIN(2472, 1),
1914 },
1915 .calTarget_freqbin_2G = {
1916 FREQ2FBIN(2412, 1),
1917 FREQ2FBIN(2437, 1),
1918 FREQ2FBIN(2472, 1)
1919 },
1920 .calTarget_freqbin_2GHT20 = {
1921 FREQ2FBIN(2412, 1),
1922 FREQ2FBIN(2437, 1),
1923 FREQ2FBIN(2472, 1)
1924 },
1925 .calTarget_freqbin_2GHT40 = {
1926 FREQ2FBIN(2412, 1),
1927 FREQ2FBIN(2437, 1),
1928 FREQ2FBIN(2472, 1)
1929 },
1930 .calTargetPowerCck = {
1931 /* 1L-5L,5S,11L,11s */
1932 { {38, 38, 38, 38} },
1933 { {38, 38, 38, 38} },
1934 },
1935 .calTargetPower2G = {
1936 /* 6-24,36,48,54 */
1937 { {38, 38, 36, 34} },
1938 { {38, 38, 36, 34} },
1939 { {38, 38, 34, 32} },
1940 },
1941 .calTargetPower2GHT20 = {
1942 { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
1943 { {36, 36, 36, 36, 36, 34, 36, 34, 32, 30, 30, 30, 28, 26} },
1944 { {36, 36, 36, 36, 36, 34, 34, 32, 30, 28, 28, 28, 28, 26} },
1945 },
1946 .calTargetPower2GHT40 = {
1947 { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
1948 { {36, 36, 36, 36, 34, 32, 34, 32, 30, 28, 28, 28, 28, 24} },
1949 { {36, 36, 36, 36, 34, 32, 32, 30, 28, 26, 26, 26, 26, 24} },
1950 },
1951 .ctlIndex_2G = {
1952 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
1953 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
1954 },
1955 .ctl_freqbin_2G = {
1956 {
1957 FREQ2FBIN(2412, 1),
1958 FREQ2FBIN(2417, 1),
1959 FREQ2FBIN(2457, 1),
1960 FREQ2FBIN(2462, 1)
1961 },
1962 {
1963 FREQ2FBIN(2412, 1),
1964 FREQ2FBIN(2417, 1),
1965 FREQ2FBIN(2462, 1),
1966 0xFF,
1967 },
1968
1969 {
1970 FREQ2FBIN(2412, 1),
1971 FREQ2FBIN(2417, 1),
1972 FREQ2FBIN(2462, 1),
1973 0xFF,
1974 },
1975 {
1976 FREQ2FBIN(2422, 1),
1977 FREQ2FBIN(2427, 1),
1978 FREQ2FBIN(2447, 1),
1979 FREQ2FBIN(2452, 1)
1980 },
1981
1982 {
1983 /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
1984 /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
1985 /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
1986 /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(2484, 1),
1987 },
1988
1989 {
1990 /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
1991 /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
1992 /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
1993 0,
1994 },
1995
1996 {
1997 /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
1998 /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
1999 FREQ2FBIN(2472, 1),
2000 0,
2001 },
2002
2003 {
2004 /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
2005 /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
2006 /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
2007 /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
2008 },
2009
2010 {
2011 /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
2012 /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
2013 /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
2014 },
2015
2016 {
2017 /* Data[9].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
2018 /* Data[9].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
2019 /* Data[9].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
2020 0
2021 },
2022
2023 {
2024 /* Data[10].ctledges[0].bchannel */ FREQ2FBIN(2412, 1),
2025 /* Data[10].ctledges[1].bchannel */ FREQ2FBIN(2417, 1),
2026 /* Data[10].ctledges[2].bchannel */ FREQ2FBIN(2472, 1),
2027 0
2028 },
2029
2030 {
2031 /* Data[11].ctledges[0].bchannel */ FREQ2FBIN(2422, 1),
2032 /* Data[11].ctledges[1].bchannel */ FREQ2FBIN(2427, 1),
2033 /* Data[11].ctledges[2].bchannel */ FREQ2FBIN(2447, 1),
2034 /* Data[11].ctledges[3].bchannel */ FREQ2FBIN(2462, 1),
2035 }
2036 },
2037 .ctlPowerData_2G = {
2038 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2039 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2040 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
2041
2042 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
2043 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2044 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2045
2046 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
2047 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2048 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2049
2050 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2051 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
2052 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
2053 },
2054 .modalHeader5G = {
2055 /* 4 idle,t1,t2,b (4 bits per setting) */
2056 .antCtrlCommon = LE32(0x110),
2057 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
2058 .antCtrlCommon2 = LE32(0x22222),
2059 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
2060 .antCtrlChain = {
2061 LE16(0x0), LE16(0x0), LE16(0x0),
2062 },
2063 /* xatten1DB 3 xatten1_db for ar9280 (0xa20c/b20c 5:0) */
2064 .xatten1DB = {0x13, 0x19, 0x17},
2065
2066 /*
2067 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
2068 * for merlin (0xa20c/b20c 16:12)
2069 */
2070 .xatten1Margin = {0x19, 0x19, 0x19},
2071 .tempSlope = 70,
2072 .voltSlope = 15,
2073 /* spurChans spur channels in usual fbin coding format */
2074 .spurChans = {0, 0, 0, 0, 0},
2075 /* noiseFloorThreshCh Check if the register is per chain */
2076 .noiseFloorThreshCh = {-1, 0, 0},
2077 .ob = {3, 3, 3}, /* 3 chain */
2078 .db_stage2 = {3, 3, 3}, /* 3 chain */
2079 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
2080 .db_stage4 = {3, 3, 3}, /* doesn't exist for 2G */
2081 .xpaBiasLvl = 0,
2082 .txFrameToDataStart = 0x0e,
2083 .txFrameToPaOn = 0x0e,
2084 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
2085 .antennaGain = 0,
2086 .switchSettling = 0x2d,
2087 .adcDesiredSize = -30,
2088 .txEndToXpaOff = 0,
2089 .txEndToRxOn = 0x2,
2090 .txFrameToXpaOn = 0xe,
2091 .thresh62 = 28,
2092 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
2093 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
2094 .futureModal = {
2095 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2096 },
2097 },
2098 .base_ext2 = {
2099 .tempSlopeLow = 72,
2100 .tempSlopeHigh = 105,
2101 .xatten1DBLow = {0x10, 0x14, 0x10},
2102 .xatten1MarginLow = {0x19, 0x19, 0x19},
2103 .xatten1DBHigh = {0x1d, 0x20, 0x24},
2104 .xatten1MarginHigh = {0x10, 0x10, 0x10}
2105 },
2106 .calFreqPier5G = {
2107 FREQ2FBIN(5180, 0),
2108 FREQ2FBIN(5220, 0),
2109 FREQ2FBIN(5320, 0),
2110 FREQ2FBIN(5400, 0),
2111 FREQ2FBIN(5500, 0),
2112 FREQ2FBIN(5600, 0),
2113 FREQ2FBIN(5700, 0),
2114 FREQ2FBIN(5785, 0)
2115 },
2116 .calPierData5G = {
2117 {
2118 {0, 0, 0, 0, 0},
2119 {0, 0, 0, 0, 0},
2120 {0, 0, 0, 0, 0},
2121 {0, 0, 0, 0, 0},
2122 {0, 0, 0, 0, 0},
2123 {0, 0, 0, 0, 0},
2124 {0, 0, 0, 0, 0},
2125 {0, 0, 0, 0, 0},
2126 },
2127 {
2128 {0, 0, 0, 0, 0},
2129 {0, 0, 0, 0, 0},
2130 {0, 0, 0, 0, 0},
2131 {0, 0, 0, 0, 0},
2132 {0, 0, 0, 0, 0},
2133 {0, 0, 0, 0, 0},
2134 {0, 0, 0, 0, 0},
2135 {0, 0, 0, 0, 0},
2136 },
2137 {
2138 {0, 0, 0, 0, 0},
2139 {0, 0, 0, 0, 0},
2140 {0, 0, 0, 0, 0},
2141 {0, 0, 0, 0, 0},
2142 {0, 0, 0, 0, 0},
2143 {0, 0, 0, 0, 0},
2144 {0, 0, 0, 0, 0},
2145 {0, 0, 0, 0, 0},
2146 },
2147
2148 },
2149 .calTarget_freqbin_5G = {
2150 FREQ2FBIN(5180, 0),
2151 FREQ2FBIN(5220, 0),
2152 FREQ2FBIN(5320, 0),
2153 FREQ2FBIN(5400, 0),
2154 FREQ2FBIN(5500, 0),
2155 FREQ2FBIN(5600, 0),
2156 FREQ2FBIN(5725, 0),
2157 FREQ2FBIN(5825, 0)
2158 },
2159 .calTarget_freqbin_5GHT20 = {
2160 FREQ2FBIN(5180, 0),
2161 FREQ2FBIN(5220, 0),
2162 FREQ2FBIN(5320, 0),
2163 FREQ2FBIN(5400, 0),
2164 FREQ2FBIN(5500, 0),
2165 FREQ2FBIN(5600, 0),
2166 FREQ2FBIN(5725, 0),
2167 FREQ2FBIN(5825, 0)
2168 },
2169 .calTarget_freqbin_5GHT40 = {
2170 FREQ2FBIN(5180, 0),
2171 FREQ2FBIN(5220, 0),
2172 FREQ2FBIN(5320, 0),
2173 FREQ2FBIN(5400, 0),
2174 FREQ2FBIN(5500, 0),
2175 FREQ2FBIN(5600, 0),
2176 FREQ2FBIN(5725, 0),
2177 FREQ2FBIN(5825, 0)
2178 },
2179 .calTargetPower5G = {
2180 /* 6-24,36,48,54 */
2181 { {32, 32, 28, 26} },
2182 { {32, 32, 28, 26} },
2183 { {32, 32, 28, 26} },
2184 { {32, 32, 26, 24} },
2185 { {32, 32, 26, 24} },
2186 { {32, 32, 24, 22} },
2187 { {30, 30, 24, 22} },
2188 { {30, 30, 24, 22} },
2189 },
2190 .calTargetPower5GHT20 = {
2191 /*
2192 * 0_8_16,1-3_9-11_17-19,
2193 * 4,5,6,7,12,13,14,15,20,21,22,23
2194 */
2195 { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
2196 { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
2197 { {32, 32, 32, 32, 28, 26, 32, 28, 26, 24, 24, 24, 22, 22} },
2198 { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 22, 22, 20, 20} },
2199 { {32, 32, 32, 32, 28, 26, 32, 26, 24, 22, 20, 18, 16, 16} },
2200 { {32, 32, 32, 32, 28, 26, 32, 24, 20, 16, 18, 16, 14, 14} },
2201 { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
2202 { {30, 30, 30, 30, 28, 26, 30, 24, 20, 16, 18, 16, 14, 14} },
2203 },
2204 .calTargetPower5GHT40 = {
2205 /*
2206 * 0_8_16,1-3_9-11_17-19,
2207 * 4,5,6,7,12,13,14,15,20,21,22,23
2208 */
2209 { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
2210 { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
2211 { {32, 32, 32, 30, 28, 26, 30, 28, 26, 24, 24, 24, 22, 22} },
2212 { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 22, 22, 20, 20} },
2213 { {32, 32, 32, 30, 28, 26, 30, 26, 24, 22, 20, 18, 16, 16} },
2214 { {32, 32, 32, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
2215 { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
2216 { {30, 30, 30, 30, 28, 26, 30, 22, 20, 16, 18, 16, 14, 14} },
2217 },
2218 .ctlIndex_5G = {
2219 0x10, 0x16, 0x18, 0x40, 0x46,
2220 0x48, 0x30, 0x36, 0x38
2221 },
2222 .ctl_freqbin_5G = {
2223 {
2224 /* Data[0].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2225 /* Data[0].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
2226 /* Data[0].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
2227 /* Data[0].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
2228 /* Data[0].ctledges[4].bchannel */ FREQ2FBIN(5600, 0),
2229 /* Data[0].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
2230 /* Data[0].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
2231 /* Data[0].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
2232 },
2233 {
2234 /* Data[1].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2235 /* Data[1].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
2236 /* Data[1].ctledges[2].bchannel */ FREQ2FBIN(5280, 0),
2237 /* Data[1].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
2238 /* Data[1].ctledges[4].bchannel */ FREQ2FBIN(5520, 0),
2239 /* Data[1].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
2240 /* Data[1].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
2241 /* Data[1].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
2242 },
2243
2244 {
2245 /* Data[2].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
2246 /* Data[2].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
2247 /* Data[2].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
2248 /* Data[2].ctledges[3].bchannel */ FREQ2FBIN(5310, 0),
2249 /* Data[2].ctledges[4].bchannel */ FREQ2FBIN(5510, 0),
2250 /* Data[2].ctledges[5].bchannel */ FREQ2FBIN(5550, 0),
2251 /* Data[2].ctledges[6].bchannel */ FREQ2FBIN(5670, 0),
2252 /* Data[2].ctledges[7].bchannel */ FREQ2FBIN(5755, 0)
2253 },
2254
2255 {
2256 /* Data[3].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2257 /* Data[3].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
2258 /* Data[3].ctledges[2].bchannel */ FREQ2FBIN(5260, 0),
2259 /* Data[3].ctledges[3].bchannel */ FREQ2FBIN(5320, 0),
2260 /* Data[3].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
2261 /* Data[3].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
2262 /* Data[3].ctledges[6].bchannel */ 0xFF,
2263 /* Data[3].ctledges[7].bchannel */ 0xFF,
2264 },
2265
2266 {
2267 /* Data[4].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2268 /* Data[4].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
2269 /* Data[4].ctledges[2].bchannel */ FREQ2FBIN(5500, 0),
2270 /* Data[4].ctledges[3].bchannel */ FREQ2FBIN(5700, 0),
2271 /* Data[4].ctledges[4].bchannel */ 0xFF,
2272 /* Data[4].ctledges[5].bchannel */ 0xFF,
2273 /* Data[4].ctledges[6].bchannel */ 0xFF,
2274 /* Data[4].ctledges[7].bchannel */ 0xFF,
2275 },
2276
2277 {
2278 /* Data[5].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
2279 /* Data[5].ctledges[1].bchannel */ FREQ2FBIN(5270, 0),
2280 /* Data[5].ctledges[2].bchannel */ FREQ2FBIN(5310, 0),
2281 /* Data[5].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
2282 /* Data[5].ctledges[4].bchannel */ FREQ2FBIN(5590, 0),
2283 /* Data[5].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
2284 /* Data[5].ctledges[6].bchannel */ 0xFF,
2285 /* Data[5].ctledges[7].bchannel */ 0xFF
2286 },
2287
2288 {
2289 /* Data[6].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2290 /* Data[6].ctledges[1].bchannel */ FREQ2FBIN(5200, 0),
2291 /* Data[6].ctledges[2].bchannel */ FREQ2FBIN(5220, 0),
2292 /* Data[6].ctledges[3].bchannel */ FREQ2FBIN(5260, 0),
2293 /* Data[6].ctledges[4].bchannel */ FREQ2FBIN(5500, 0),
2294 /* Data[6].ctledges[5].bchannel */ FREQ2FBIN(5600, 0),
2295 /* Data[6].ctledges[6].bchannel */ FREQ2FBIN(5700, 0),
2296 /* Data[6].ctledges[7].bchannel */ FREQ2FBIN(5745, 0)
2297 },
2298
2299 {
2300 /* Data[7].ctledges[0].bchannel */ FREQ2FBIN(5180, 0),
2301 /* Data[7].ctledges[1].bchannel */ FREQ2FBIN(5260, 0),
2302 /* Data[7].ctledges[2].bchannel */ FREQ2FBIN(5320, 0),
2303 /* Data[7].ctledges[3].bchannel */ FREQ2FBIN(5500, 0),
2304 /* Data[7].ctledges[4].bchannel */ FREQ2FBIN(5560, 0),
2305 /* Data[7].ctledges[5].bchannel */ FREQ2FBIN(5700, 0),
2306 /* Data[7].ctledges[6].bchannel */ FREQ2FBIN(5745, 0),
2307 /* Data[7].ctledges[7].bchannel */ FREQ2FBIN(5825, 0)
2308 },
2309
2310 {
2311 /* Data[8].ctledges[0].bchannel */ FREQ2FBIN(5190, 0),
2312 /* Data[8].ctledges[1].bchannel */ FREQ2FBIN(5230, 0),
2313 /* Data[8].ctledges[2].bchannel */ FREQ2FBIN(5270, 0),
2314 /* Data[8].ctledges[3].bchannel */ FREQ2FBIN(5510, 0),
2315 /* Data[8].ctledges[4].bchannel */ FREQ2FBIN(5550, 0),
2316 /* Data[8].ctledges[5].bchannel */ FREQ2FBIN(5670, 0),
2317 /* Data[8].ctledges[6].bchannel */ FREQ2FBIN(5755, 0),
2318 /* Data[8].ctledges[7].bchannel */ FREQ2FBIN(5795, 0)
2319 }
2320 },
2321 .ctlPowerData_5G = {
2322 {
2323 {
2324 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2325 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2326 }
2327 },
2328 {
2329 {
2330 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2331 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2332 }
2333 },
2334 {
2335 {
2336 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2337 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2338 }
2339 },
2340 {
2341 {
2342 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2343 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2344 }
2345 },
2346 {
2347 {
2348 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2349 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2350 }
2351 },
2352 {
2353 {
2354 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2355 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2356 }
2357 },
2358 {
2359 {
2360 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2361 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2362 }
2363 },
2364 {
2365 {
2366 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2367 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2368 }
2369 },
2370 {
2371 {
2372 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
2373 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2374 }
2375 },
2376 }
2377};
2378
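The calibration tables in these templates lean on a few helper macros that are defined near the top of ar9003_eeprom.c and are not part of this hunk. As a rough sketch of what they are assumed to expand to (an approximation, not quoted from the patch):

	#define FREQ2FBIN(x, y)     ((y) ? (x) - 2300 : ((x) - 4800) / 5) /* MHz -> frequency bin byte */
	#define CTL(_tpower, _flag) ((_tpower) | ((_flag) << 6))          /* CTL edge: power limit + flag bit */
	#define LE16(x)             cpu_to_le16(x)
	#define LE32(x)             cpu_to_le32(x)

Target power and CTL limits appear to be stored in half-dB steps, so a value like CTL(60, 1) in the tables would read as a 30 dBm limit with the edge flag set.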
2379static const struct ar9300_eeprom ar9300_h116 = {
2380 .eepromVersion = 2,
2381 .templateVersion = 4,
2382 .macAddr = {0x00, 0x03, 0x7f, 0x0, 0x0, 0x0},
2383 .custData = {"h116-041-f0000"},
2384 .baseEepHeader = {
2385 .regDmn = { LE16(0), LE16(0x1f) },
2386 .txrxMask = 0x33, /* 4 bits tx and 4 bits rx */
2387 .opCapFlags = {
2388 .opFlags = AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A,
2389 .eepMisc = 0,
2390 },
2391 .rfSilent = 0,
2392 .blueToothOptions = 0,
2393 .deviceCap = 0,
2394 .deviceType = 5, /* takes lower byte in eeprom location */
2395 .pwrTableOffset = AR9300_PWR_TABLE_OFFSET,
2396 .params_for_tuning_caps = {0, 0},
2397 .featureEnable = 0x0d,
2398 /*
2399 * bit0 - enable tx temp comp - enabled
2400 * bit1 - enable tx volt comp - disabled
2401 * bit2 - enable fastClock - enabled
2402 * bit3 - enable doubling - enabled
2403 * bit4 - enable internal regulator - disabled
2404 * bit5 - enable pa predistortion - disabled
2405 */
2406 .miscConfiguration = 0, /* bit0 - turn down drivestrength */
2407 .eepromWriteEnableGpio = 6,
2408 .wlanDisableGpio = 0,
2409 .wlanLedGpio = 8,
2410 .rxBandSelectGpio = 0xff,
2411 .txrxgain = 0x10,
2412 .swreg = 0,
2413 },
2414 .modalHeader2G = {
2415 /* ar9300_modal_eep_header 2g */
2416 /* 4 idle,t1,t2,b(4 bits per setting) */
2417 .antCtrlCommon = LE32(0x110),
2418 /* 4 ra1l1, ra2l1, ra1l2, ra2l2, ra12 */
2419 .antCtrlCommon2 = LE32(0x44444),
2420
2421 /*
2422 * antCtrlChain[AR9300_MAX_CHAINS]; 6 idle, t, r,
2423 * rx1, rx12, b (2 bits each)
2424 */
2425 .antCtrlChain = { LE16(0x10), LE16(0x10), LE16(0x10) },
2426
2427 /*
2428 * xatten1DB[AR9300_MAX_CHAINS]; 3 xatten1_db
2429 * for ar9280 (0xa20c/b20c 5:0)
2430 */
2431 .xatten1DB = {0x1f, 0x1f, 0x1f},
2432
2433 /*
2434 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
2435 * for ar9280 (0xa20c/b20c 16:12)
2436 */
2437 .xatten1Margin = {0x12, 0x12, 0x12},
2438 .tempSlope = 25,
2439 .voltSlope = 0,
2440
2441 /*
2442 * spurChans[OSPREY_EEPROM_MODAL_SPURS]; spur
2443 * channels in usual fbin coding format
2444 */
2445 .spurChans = {FREQ2FBIN(2464, 1), 0, 0, 0, 0},
2446
2447 /*
2448 * noiseFloorThreshCh[AR9300_MAX_CHAINS]; 3 Check
2449 * if the register is per chain
2450 */
2451 .noiseFloorThreshCh = {-1, 0, 0},
2452 .ob = {1, 1, 1},/* 3 chain */
2453 .db_stage2 = {1, 1, 1}, /* 3 chain */
2454 .db_stage3 = {0, 0, 0},
2455 .db_stage4 = {0, 0, 0},
2456 .xpaBiasLvl = 0,
2457 .txFrameToDataStart = 0x0e,
2458 .txFrameToPaOn = 0x0e,
2459 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
2460 .antennaGain = 0,
2461 .switchSettling = 0x2c,
2462 .adcDesiredSize = -30,
2463 .txEndToXpaOff = 0,
2464 .txEndToRxOn = 0x2,
2465 .txFrameToXpaOn = 0xe,
2466 .thresh62 = 28,
2467 .papdRateMaskHt20 = LE32(0x0c80c080),
2468 .papdRateMaskHt40 = LE32(0x0080c080),
2469 .futureModal = {
2470 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2471 },
2472 },
2473 .base_ext1 = {
2474 .ant_div_control = 0,
2475 .future = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
2476 },
2477 .calFreqPier2G = {
2478 FREQ2FBIN(2412, 1),
2479 FREQ2FBIN(2437, 1),
2480 FREQ2FBIN(2472, 1),
2481 },
2482 /* ar9300_cal_data_per_freq_op_loop 2g */
2483 .calPierData2G = {
2484 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
2485 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
2486 { {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 0, 0, 0, 0, 0} },
2487 },
2488 .calTarget_freqbin_Cck = {
2489 FREQ2FBIN(2412, 1),
2490 FREQ2FBIN(2472, 1),
2491 },
2492 .calTarget_freqbin_2G = {
2493 FREQ2FBIN(2412, 1),
2494 FREQ2FBIN(2437, 1),
2495 FREQ2FBIN(2472, 1)
2496 },
2497 .calTarget_freqbin_2GHT20 = {
2498 FREQ2FBIN(2412, 1),
2499 FREQ2FBIN(2437, 1),
2500 FREQ2FBIN(2472, 1)
2501 },
2502 .calTarget_freqbin_2GHT40 = {
2503 FREQ2FBIN(2412, 1),
2504 FREQ2FBIN(2437, 1),
2505 FREQ2FBIN(2472, 1)
2506 },
2507 .calTargetPowerCck = {
2508 /* 1L-5L,5S,11L,11S */
2509 { {34, 34, 34, 34} },
2510 { {34, 34, 34, 34} },
2511 },
2512 .calTargetPower2G = {
2513 /* 6-24,36,48,54 */
2514 { {34, 34, 32, 32} },
2515 { {34, 34, 32, 32} },
2516 { {34, 34, 32, 32} },
2517 },
2518 .calTargetPower2GHT20 = {
2519 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
2520 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
2521 { {32, 32, 32, 32, 32, 30, 32, 32, 30, 28, 0, 0, 0, 0} },
2522 },
2523 .calTargetPower2GHT40 = {
2524 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
2525 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
2526 { {30, 30, 30, 30, 30, 28, 30, 30, 28, 26, 0, 0, 0, 0} },
2527 },
2528 .ctlIndex_2G = {
2529 0x11, 0x12, 0x15, 0x17, 0x41, 0x42,
2530 0x45, 0x47, 0x31, 0x32, 0x35, 0x37,
2531 },
2532 .ctl_freqbin_2G = {
2533 {
2534 FREQ2FBIN(2412, 1),
2535 FREQ2FBIN(2417, 1),
2536 FREQ2FBIN(2457, 1),
2537 FREQ2FBIN(2462, 1)
2538 },
2539 {
2540 FREQ2FBIN(2412, 1),
2541 FREQ2FBIN(2417, 1),
2542 FREQ2FBIN(2462, 1),
2543 0xFF,
2544 },
2545
2546 {
2547 FREQ2FBIN(2412, 1),
2548 FREQ2FBIN(2417, 1),
2549 FREQ2FBIN(2462, 1),
2550 0xFF,
2551 },
2552 {
2553 FREQ2FBIN(2422, 1),
2554 FREQ2FBIN(2427, 1),
2555 FREQ2FBIN(2447, 1),
2556 FREQ2FBIN(2452, 1)
2557 },
2558
2559 {
2560 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2561 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2562 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2563 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(2484, 1),
2564 },
2565
2566 {
2567 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2568 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2569 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2570 0,
2571 },
2572
2573 {
2574 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2575 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2576 FREQ2FBIN(2472, 1),
2577 0,
2578 },
2579
2580 {
2581 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
2582 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
2583 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
2584 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
2585 },
2586
2587 {
2588 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2589 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2590 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2591 },
2592
2593 {
2594 /* Data[9].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2595 /* Data[9].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2596 /* Data[9].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2597 0
2598 },
2599
2600 {
2601 /* Data[10].ctlEdges[0].bChannel */ FREQ2FBIN(2412, 1),
2602 /* Data[10].ctlEdges[1].bChannel */ FREQ2FBIN(2417, 1),
2603 /* Data[10].ctlEdges[2].bChannel */ FREQ2FBIN(2472, 1),
2604 0
2605 },
2606
2607 {
2608 /* Data[11].ctlEdges[0].bChannel */ FREQ2FBIN(2422, 1),
2609 /* Data[11].ctlEdges[1].bChannel */ FREQ2FBIN(2427, 1),
2610 /* Data[11].ctlEdges[2].bChannel */ FREQ2FBIN(2447, 1),
2611 /* Data[11].ctlEdges[3].bChannel */ FREQ2FBIN(2462, 1),
2612 }
2613 },
2614 .ctlPowerData_2G = {
2615 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2616 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2617 { { CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 1) } },
2618
2619 { { CTL(60, 1), CTL(60, 0), CTL(0, 0), CTL(0, 0) } },
2620 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2621 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2622
2623 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0) } },
2624 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2625 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2626
2627 { { CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 0) } },
2628 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
2629 { { CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 1) } },
2630 },
2631 .modalHeader5G = {
2632 /* 4 idle,t1,t2,b (4 bits per setting) */
2633 .antCtrlCommon = LE32(0x220),
2634 /* 4 ra1l1, ra2l1, ra1l2,ra2l2,ra12 */
2635 .antCtrlCommon2 = LE32(0x44444),
2636 /* antCtrlChain 6 idle, t,r,rx1,rx12,b (2 bits each) */
2637 .antCtrlChain = {
2638 LE16(0x150), LE16(0x150), LE16(0x150),
2639 },
2640 /* xatten1DB 3 xatten1_db for AR9280 (0xa20c/b20c 5:0) */
2641 .xatten1DB = {0x19, 0x19, 0x19},
2642
2643 /*
2644 * xatten1Margin[AR9300_MAX_CHAINS]; 3 xatten1_margin
2645 * for merlin (0xa20c/b20c 16:12)
2646 */
2647 .xatten1Margin = {0x14, 0x14, 0x14},
2648 .tempSlope = 70,
2649 .voltSlope = 0,
2650 /* spurChans spur channels in usual fbin coding format */
2651 .spurChans = {0, 0, 0, 0, 0},
2652 /* noiseFloorThreshCh Check if the register is per chain */
2653 .noiseFloorThreshCh = {-1, 0, 0},
2654 .ob = {3, 3, 3}, /* 3 chain */
2655 .db_stage2 = {3, 3, 3}, /* 3 chain */
2656 .db_stage3 = {3, 3, 3}, /* doesn't exist for 2G */
2657 .db_stage4 = {3, 3, 3}, /* doesn't exist for 2G */
2658 .xpaBiasLvl = 0,
2659 .txFrameToDataStart = 0x0e,
2660 .txFrameToPaOn = 0x0e,
2661 .txClip = 3, /* 4 bits tx_clip, 4 bits dac_scale_cck */
2662 .antennaGain = 0,
2663 .switchSettling = 0x2d,
2664 .adcDesiredSize = -30,
2665 .txEndToXpaOff = 0,
2666 .txEndToRxOn = 0x2,
2667 .txFrameToXpaOn = 0xe,
2668 .thresh62 = 28,
2669 .papdRateMaskHt20 = LE32(0x0cf0e0e0),
2670 .papdRateMaskHt40 = LE32(0x6cf0e0e0),
2671 .futureModal = {
2672 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
2673 },
2674 },
2675 .base_ext2 = {
2676 .tempSlopeLow = 35,
2677 .tempSlopeHigh = 50,
2678 .xatten1DBLow = {0, 0, 0},
2679 .xatten1MarginLow = {0, 0, 0},
2680 .xatten1DBHigh = {0, 0, 0},
2681 .xatten1MarginHigh = {0, 0, 0}
2682 },
2683 .calFreqPier5G = {
2684 FREQ2FBIN(5180, 0),
2685 FREQ2FBIN(5220, 0),
2686 FREQ2FBIN(5320, 0),
2687 FREQ2FBIN(5400, 0),
2688 FREQ2FBIN(5500, 0),
2689 FREQ2FBIN(5600, 0),
2690 FREQ2FBIN(5700, 0),
2691 FREQ2FBIN(5785, 0)
2692 },
2693 .calPierData5G = {
2694 {
2695 {0, 0, 0, 0, 0},
2696 {0, 0, 0, 0, 0},
2697 {0, 0, 0, 0, 0},
2698 {0, 0, 0, 0, 0},
2699 {0, 0, 0, 0, 0},
2700 {0, 0, 0, 0, 0},
2701 {0, 0, 0, 0, 0},
2702 {0, 0, 0, 0, 0},
2703 },
2704 {
2705 {0, 0, 0, 0, 0},
2706 {0, 0, 0, 0, 0},
2707 {0, 0, 0, 0, 0},
2708 {0, 0, 0, 0, 0},
2709 {0, 0, 0, 0, 0},
2710 {0, 0, 0, 0, 0},
2711 {0, 0, 0, 0, 0},
2712 {0, 0, 0, 0, 0},
2713 },
2714 {
2715 {0, 0, 0, 0, 0},
2716 {0, 0, 0, 0, 0},
2717 {0, 0, 0, 0, 0},
2718 {0, 0, 0, 0, 0},
2719 {0, 0, 0, 0, 0},
2720 {0, 0, 0, 0, 0},
2721 {0, 0, 0, 0, 0},
2722 {0, 0, 0, 0, 0},
2723 },
2724
2725 },
2726 .calTarget_freqbin_5G = {
2727 FREQ2FBIN(5180, 0),
2728 FREQ2FBIN(5240, 0),
2729 FREQ2FBIN(5320, 0),
2730 FREQ2FBIN(5400, 0),
2731 FREQ2FBIN(5500, 0),
2732 FREQ2FBIN(5600, 0),
2733 FREQ2FBIN(5700, 0),
2734 FREQ2FBIN(5825, 0)
2735 },
2736 .calTarget_freqbin_5GHT20 = {
2737 FREQ2FBIN(5180, 0),
2738 FREQ2FBIN(5240, 0),
2739 FREQ2FBIN(5320, 0),
2740 FREQ2FBIN(5400, 0),
2741 FREQ2FBIN(5500, 0),
2742 FREQ2FBIN(5700, 0),
2743 FREQ2FBIN(5745, 0),
2744 FREQ2FBIN(5825, 0)
2745 },
2746 .calTarget_freqbin_5GHT40 = {
2747 FREQ2FBIN(5180, 0),
2748 FREQ2FBIN(5240, 0),
2749 FREQ2FBIN(5320, 0),
2750 FREQ2FBIN(5400, 0),
2751 FREQ2FBIN(5500, 0),
2752 FREQ2FBIN(5700, 0),
2753 FREQ2FBIN(5745, 0),
2754 FREQ2FBIN(5825, 0)
2755 },
2756 .calTargetPower5G = {
2757 /* 6-24,36,48,54 */
2758 { {30, 30, 28, 24} },
2759 { {30, 30, 28, 24} },
2760 { {30, 30, 28, 24} },
2761 { {30, 30, 28, 24} },
2762 { {30, 30, 28, 24} },
2763 { {30, 30, 28, 24} },
2764 { {30, 30, 28, 24} },
2765 { {30, 30, 28, 24} },
2766 },
2767 .calTargetPower5GHT20 = {
2768 /*
2769 * 0_8_16,1-3_9-11_17-19,
2770 * 4,5,6,7,12,13,14,15,20,21,22,23
2771 */
2772 { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
2773 { {30, 30, 30, 28, 24, 20, 30, 28, 24, 20, 0, 0, 0, 0} },
2774 { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
2775 { {30, 30, 30, 26, 22, 18, 30, 26, 22, 18, 0, 0, 0, 0} },
2776 { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
2777 { {30, 30, 30, 24, 20, 16, 30, 24, 20, 16, 0, 0, 0, 0} },
2778 { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
2779 { {30, 30, 30, 22, 18, 14, 30, 22, 18, 14, 0, 0, 0, 0} },
2780 },
2781 .calTargetPower5GHT40 = {
2782 /*
2783 * 0_8_16,1-3_9-11_17-19,
2784 * 4,5,6,7,12,13,14,15,20,21,22,23
2785 */
2786 { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
2787 { {28, 28, 28, 26, 22, 18, 28, 26, 22, 18, 0, 0, 0, 0} },
2788 { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
2789 { {28, 28, 28, 24, 20, 16, 28, 24, 20, 16, 0, 0, 0, 0} },
2790 { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
2791 { {28, 28, 28, 22, 18, 14, 28, 22, 18, 14, 0, 0, 0, 0} },
2792 { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
2793 { {28, 28, 28, 20, 16, 12, 28, 20, 16, 12, 0, 0, 0, 0} },
2794 },
2795 .ctlIndex_5G = {
2796 0x10, 0x16, 0x18, 0x40, 0x46,
2797 0x48, 0x30, 0x36, 0x38
2798 },
2799 .ctl_freqbin_5G = {
2800 {
2801 /* Data[0].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2802 /* Data[0].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
2803 /* Data[0].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
2804 /* Data[0].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
2805 /* Data[0].ctlEdges[4].bChannel */ FREQ2FBIN(5600, 0),
2806 /* Data[0].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
2807 /* Data[0].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
2808 /* Data[0].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
2809 },
2810 {
2811 /* Data[1].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2812 /* Data[1].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
2813 /* Data[1].ctlEdges[2].bChannel */ FREQ2FBIN(5280, 0),
2814 /* Data[1].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
2815 /* Data[1].ctlEdges[4].bChannel */ FREQ2FBIN(5520, 0),
2816 /* Data[1].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
2817 /* Data[1].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
2818 /* Data[1].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
2819 },
2820
2821 {
2822 /* Data[2].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
2823 /* Data[2].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
2824 /* Data[2].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
2825 /* Data[2].ctlEdges[3].bChannel */ FREQ2FBIN(5310, 0),
2826 /* Data[2].ctlEdges[4].bChannel */ FREQ2FBIN(5510, 0),
2827 /* Data[2].ctlEdges[5].bChannel */ FREQ2FBIN(5550, 0),
2828 /* Data[2].ctlEdges[6].bChannel */ FREQ2FBIN(5670, 0),
2829 /* Data[2].ctlEdges[7].bChannel */ FREQ2FBIN(5755, 0)
2830 },
2831
2832 {
2833 /* Data[3].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2834 /* Data[3].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
2835 /* Data[3].ctlEdges[2].bChannel */ FREQ2FBIN(5260, 0),
2836 /* Data[3].ctlEdges[3].bChannel */ FREQ2FBIN(5320, 0),
2837 /* Data[3].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
2838 /* Data[3].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
2839 /* Data[3].ctlEdges[6].bChannel */ 0xFF,
2840 /* Data[3].ctlEdges[7].bChannel */ 0xFF,
2841 },
2842
2843 {
2844 /* Data[4].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2845 /* Data[4].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
2846 /* Data[4].ctlEdges[2].bChannel */ FREQ2FBIN(5500, 0),
2847 /* Data[4].ctlEdges[3].bChannel */ FREQ2FBIN(5700, 0),
2848 /* Data[4].ctlEdges[4].bChannel */ 0xFF,
2849 /* Data[4].ctlEdges[5].bChannel */ 0xFF,
2850 /* Data[4].ctlEdges[6].bChannel */ 0xFF,
2851 /* Data[4].ctlEdges[7].bChannel */ 0xFF,
2852 },
2853
2854 {
2855 /* Data[5].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
2856 /* Data[5].ctlEdges[1].bChannel */ FREQ2FBIN(5270, 0),
2857 /* Data[5].ctlEdges[2].bChannel */ FREQ2FBIN(5310, 0),
2858 /* Data[5].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
2859 /* Data[5].ctlEdges[4].bChannel */ FREQ2FBIN(5590, 0),
2860 /* Data[5].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
2861 /* Data[5].ctlEdges[6].bChannel */ 0xFF,
2862 /* Data[5].ctlEdges[7].bChannel */ 0xFF
2863 },
2864
2865 {
2866 /* Data[6].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2867 /* Data[6].ctlEdges[1].bChannel */ FREQ2FBIN(5200, 0),
2868 /* Data[6].ctlEdges[2].bChannel */ FREQ2FBIN(5220, 0),
2869 /* Data[6].ctlEdges[3].bChannel */ FREQ2FBIN(5260, 0),
2870 /* Data[6].ctlEdges[4].bChannel */ FREQ2FBIN(5500, 0),
2871 /* Data[6].ctlEdges[5].bChannel */ FREQ2FBIN(5600, 0),
2872 /* Data[6].ctlEdges[6].bChannel */ FREQ2FBIN(5700, 0),
2873 /* Data[6].ctlEdges[7].bChannel */ FREQ2FBIN(5745, 0)
2874 },
2875
2876 {
2877 /* Data[7].ctlEdges[0].bChannel */ FREQ2FBIN(5180, 0),
2878 /* Data[7].ctlEdges[1].bChannel */ FREQ2FBIN(5260, 0),
2879 /* Data[7].ctlEdges[2].bChannel */ FREQ2FBIN(5320, 0),
2880 /* Data[7].ctlEdges[3].bChannel */ FREQ2FBIN(5500, 0),
2881 /* Data[7].ctlEdges[4].bChannel */ FREQ2FBIN(5560, 0),
2882 /* Data[7].ctlEdges[5].bChannel */ FREQ2FBIN(5700, 0),
2883 /* Data[7].ctlEdges[6].bChannel */ FREQ2FBIN(5745, 0),
2884 /* Data[7].ctlEdges[7].bChannel */ FREQ2FBIN(5825, 0)
2885 },
2886
2887 {
2888 /* Data[8].ctlEdges[0].bChannel */ FREQ2FBIN(5190, 0),
2889 /* Data[8].ctlEdges[1].bChannel */ FREQ2FBIN(5230, 0),
2890 /* Data[8].ctlEdges[2].bChannel */ FREQ2FBIN(5270, 0),
2891 /* Data[8].ctlEdges[3].bChannel */ FREQ2FBIN(5510, 0),
2892 /* Data[8].ctlEdges[4].bChannel */ FREQ2FBIN(5550, 0),
2893 /* Data[8].ctlEdges[5].bChannel */ FREQ2FBIN(5670, 0),
2894 /* Data[8].ctlEdges[6].bChannel */ FREQ2FBIN(5755, 0),
2895 /* Data[8].ctlEdges[7].bChannel */ FREQ2FBIN(5795, 0)
2896 }
2897 },
2898 .ctlPowerData_5G = {
2899 {
2900 {
2901 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2902 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2903 }
2904 },
2905 {
2906 {
2907 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2908 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2909 }
2910 },
2911 {
2912 {
2913 CTL(60, 0), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2914 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2915 }
2916 },
2917 {
2918 {
2919 CTL(60, 0), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2920 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2921 }
2922 },
2923 {
2924 {
2925 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2926 CTL(60, 0), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2927 }
2928 },
2929 {
2930 {
2931 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2932 CTL(60, 1), CTL(60, 0), CTL(60, 0), CTL(60, 0),
2933 }
2934 },
2935 {
2936 {
2937 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2938 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 1),
2939 }
2940 },
2941 {
2942 {
2943 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2944 CTL(60, 1), CTL(60, 1), CTL(60, 1), CTL(60, 0),
2945 }
2946 },
2947 {
2948 {
2949 CTL(60, 1), CTL(60, 0), CTL(60, 1), CTL(60, 1),
2950 CTL(60, 1), CTL(60, 1), CTL(60, 0), CTL(60, 1),
2951 }
2952 },
2953 }
2954};
2955
2956
2957static const struct ar9300_eeprom *ar9300_eep_templates[] = {
2958 &ar9300_default,
2959 &ar9300_x112,
2960 &ar9300_h116,
2961 &ar9300_h112,
2962 &ar9300_x113,
2963};
2964
2965static const struct ar9300_eeprom *ar9003_eeprom_struct_find_by_id(int id)
2966{
2967#define N_LOOP (sizeof(ar9300_eep_templates) / sizeof(ar9300_eep_templates[0]))
2968 int it;
2969
2970 for (it = 0; it < N_LOOP; it++)
2971 if (ar9300_eep_templates[it]->templateVersion == id)
2972 return ar9300_eep_templates[it];
2973 return NULL;
2974#undef N_LOOP
2975}
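A minimal usage sketch (hypothetical caller, not taken from the patch): a compressed EEPROM block records the templateVersion of the template it was diffed against, and the compression code further down looks that template up by id before applying the stored deltas:

	const struct ar9300_eeprom *ref = ar9003_eeprom_struct_find_by_id(4); /* 4 = the h116 template above */
	if (!ref)
		return -1; /* no such template, as in ar9300_compress_decision() below */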
2976
2977
629static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) 2978static u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
630{ 2979{
631 if (fbin == AR9300_BCHAN_UNUSED) 2980 if (fbin == AR5416_BCHAN_UNUSED)
632 return fbin; 2981 return fbin;
633 2982
634 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin)); 2983 return (u16) ((is2GHz) ? (2300 + fbin) : (4800 + 5 * fbin));
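As an illustrative cross-check of the fbin encoding used in the tables above (not part of the patch):

	/*
	 * 2 GHz: fbin = freq - 2300, so 2412 MHz <-> fbin 112 and
	 *        ath9k_hw_fbin2freq(112, true)  == 2300 + 112   == 2412
	 * 5 GHz: fbin = (freq - 4800) / 5, so 5180 MHz <-> fbin 76 and
	 *        ath9k_hw_fbin2freq(76, false)  == 4800 + 5 * 76 == 5180
	 */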
@@ -639,6 +2988,16 @@ static int ath9k_hw_ar9300_check_eeprom(struct ath_hw *ah)
639 return 0; 2988 return 0;
640} 2989}
641 2990
2991static int interpolate(int x, int xa, int xb, int ya, int yb)
2992{
2993 int bf, factor, plus;
2994
2995 bf = 2 * (yb - ya) * (x - xa) / (xb - xa);
2996 factor = bf / 2;
2997 plus = bf % 2;
2998 return ya + factor + plus;
2999}
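interpolate() performs the linear interpolation in integer arithmetic and rounds to the nearest whole step; an illustrative trace, assuming ordinary C truncating division:

	/*
	 * interpolate(5400, 5180, 5500, 70, 80):
	 *   bf     = 2 * (80 - 70) * (5400 - 5180) / (5500 - 5180) = 4400 / 320 = 13
	 *   factor = 13 / 2 = 6, plus = 13 % 2 = 1
	 *   result = 70 + 6 + 1 = 77   (exact value 76.875, rounded up)
	 */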
3000
642static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah, 3001static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
643 enum eeprom_param param) 3002 enum eeprom_param param)
644{ 3003{
@@ -676,6 +3035,10 @@ static u32 ath9k_hw_ar9300_get_eeprom(struct ath_hw *ah,
676 return le32_to_cpu(pBase->swreg); 3035 return le32_to_cpu(pBase->swreg);
677 case EEP_PAPRD: 3036 case EEP_PAPRD:
678 return !!(pBase->featureEnable & BIT(5)); 3037 return !!(pBase->featureEnable & BIT(5));
3038 case EEP_CHAIN_MASK_REDUCE:
3039 return (pBase->miscConfiguration >> 0x3) & 0x1;
3040 case EEP_ANT_DIV_CTL1:
3041 return le32_to_cpu(eep->base_ext1.ant_div_control);
679 default: 3042 default:
680 return 0; 3043 return 0;
681 } 3044 }
@@ -714,8 +3077,8 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
714 int i; 3077 int i;
715 3078
716 if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) { 3079 if ((address < 0) || ((address + count) / 2 > AR9300_EEPROM_SIZE - 1)) {
717 ath_print(common, ATH_DBG_EEPROM, 3080 ath_dbg(common, ATH_DBG_EEPROM,
718 "eeprom address not in range\n"); 3081 "eeprom address not in range\n");
719 return false; 3082 return false;
720 } 3083 }
721 3084
@@ -746,11 +3109,41 @@ static bool ar9300_read_eeprom(struct ath_hw *ah, int address, u8 *buffer,
746 return true; 3109 return true;
747 3110
748error: 3111error:
749 ath_print(common, ATH_DBG_EEPROM, 3112 ath_dbg(common, ATH_DBG_EEPROM,
750 "unable to read eeprom region at offset %d\n", address); 3113 "unable to read eeprom region at offset %d\n", address);
751 return false; 3114 return false;
752} 3115}
753 3116
3117static bool ar9300_otp_read_word(struct ath_hw *ah, int addr, u32 *data)
3118{
3119 REG_READ(ah, AR9300_OTP_BASE + (4 * addr));
3120
3121 if (!ath9k_hw_wait(ah, AR9300_OTP_STATUS, AR9300_OTP_STATUS_TYPE,
3122 AR9300_OTP_STATUS_VALID, 1000))
3123 return false;
3124
3125 *data = REG_READ(ah, AR9300_OTP_READ_DATA);
3126 return true;
3127}
3128
3129static bool ar9300_read_otp(struct ath_hw *ah, int address, u8 *buffer,
3130 int count)
3131{
3132 u32 data;
3133 int i;
3134
3135 for (i = 0; i < count; i++) {
3136 int offset = 8 * ((address - i) % 4);
3137 if (!ar9300_otp_read_word(ah, (address - i) / 4, &data))
3138 return false;
3139
3140 buffer[i] = (data >> offset) & 0xff;
3141 }
3142
3143 return true;
3144}
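An illustrative trace of the OTP addressing above (not from the patch): bytes are read from the given address downwards, each pulled out of the 32-bit OTP word that contains it:

	/*
	 * ar9300_read_otp(ah, 6, buf, 2):
	 *   i = 0: word (6 - 0) / 4 = 1, offset 8 * (6 % 4) = 16 -> buf[0] = bits 23:16 of word 1
	 *   i = 1: word (6 - 1) / 4 = 1, offset 8 * (5 % 4) = 8  -> buf[1] = bits 15:8  of word 1
	 */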
3145
3146
754static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference, 3147static void ar9300_comp_hdr_unpack(u8 *best, int *code, int *reference,
755 int *length, int *major, int *minor) 3148 int *length, int *major, int *minor)
756{ 3149{
@@ -801,17 +3194,15 @@ static bool ar9300_uncompress_block(struct ath_hw *ah,
801 length &= 0xff; 3194 length &= 0xff;
802 3195
803 if (length > 0 && spot >= 0 && spot+length <= mdataSize) { 3196 if (length > 0 && spot >= 0 && spot+length <= mdataSize) {
804 ath_print(common, ATH_DBG_EEPROM, 3197 ath_dbg(common, ATH_DBG_EEPROM,
805 "Restore at %d: spot=%d " 3198 "Restore at %d: spot=%d offset=%d length=%d\n",
806 "offset=%d length=%d\n", 3199 it, spot, offset, length);
807 it, spot, offset, length);
808 memcpy(&mptr[spot], &block[it+2], length); 3200 memcpy(&mptr[spot], &block[it+2], length);
809 spot += length; 3201 spot += length;
810 } else if (length > 0) { 3202 } else if (length > 0) {
811 ath_print(common, ATH_DBG_EEPROM, 3203 ath_dbg(common, ATH_DBG_EEPROM,
812 "Bad restore at %d: spot=%d " 3204 "Bad restore at %d: spot=%d offset=%d length=%d\n",
813 "offset=%d length=%d\n", 3205 it, spot, offset, length);
814 it, spot, offset, length);
815 return false; 3206 return false;
816 } 3207 }
817 } 3208 }
@@ -827,45 +3218,80 @@ static int ar9300_compress_decision(struct ath_hw *ah,
827{ 3218{
828 struct ath_common *common = ath9k_hw_common(ah); 3219 struct ath_common *common = ath9k_hw_common(ah);
829 u8 *dptr; 3220 u8 *dptr;
3221 const struct ar9300_eeprom *eep = NULL;
830 3222
831 switch (code) { 3223 switch (code) {
832 case _CompressNone: 3224 case _CompressNone:
833 if (length != mdata_size) { 3225 if (length != mdata_size) {
834 ath_print(common, ATH_DBG_EEPROM, 3226 ath_dbg(common, ATH_DBG_EEPROM,
835 "EEPROM structure size mismatch" 3227 "EEPROM structure size mismatch memory=%d eeprom=%d\n",
836 "memory=%d eeprom=%d\n", mdata_size, length); 3228 mdata_size, length);
837 return -1; 3229 return -1;
838 } 3230 }
839 memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length); 3231 memcpy(mptr, (u8 *) (word + COMP_HDR_LEN), length);
840 ath_print(common, ATH_DBG_EEPROM, "restored eeprom %d:" 3232 ath_dbg(common, ATH_DBG_EEPROM,
841 " uncompressed, length %d\n", it, length); 3233 "restored eeprom %d: uncompressed, length %d\n",
3234 it, length);
842 break; 3235 break;
843 case _CompressBlock: 3236 case _CompressBlock:
844 if (reference == 0) { 3237 if (reference == 0) {
845 dptr = mptr; 3238 dptr = mptr;
846 } else { 3239 } else {
847 if (reference != 2) { 3240 eep = ar9003_eeprom_struct_find_by_id(reference);
848 ath_print(common, ATH_DBG_EEPROM, 3241 if (eep == NULL) {
849 "cant find reference eeprom" 3242 ath_dbg(common, ATH_DBG_EEPROM,
850 "struct %d\n", reference); 3243 "cant find reference eeprom struct %d\n",
3244 reference);
851 return -1; 3245 return -1;
852 } 3246 }
853 memcpy(mptr, &ar9300_default, mdata_size); 3247 memcpy(mptr, eep, mdata_size);
854 } 3248 }
855 ath_print(common, ATH_DBG_EEPROM, 3249 ath_dbg(common, ATH_DBG_EEPROM,
856 "restore eeprom %d: block, reference %d," 3250 "restore eeprom %d: block, reference %d, length %d\n",
857 " length %d\n", it, reference, length); 3251 it, reference, length);
858 ar9300_uncompress_block(ah, mptr, mdata_size, 3252 ar9300_uncompress_block(ah, mptr, mdata_size,
859 (u8 *) (word + COMP_HDR_LEN), length); 3253 (u8 *) (word + COMP_HDR_LEN), length);
860 break; 3254 break;
861 default: 3255 default:
862 ath_print(common, ATH_DBG_EEPROM, "unknown compression" 3256 ath_dbg(common, ATH_DBG_EEPROM,
863 " code %d\n", code); 3257 "unknown compression code %d\n", code);
864 return -1; 3258 return -1;
865 } 3259 }
866 return 0; 3260 return 0;
867} 3261}
868 3262
3263typedef bool (*eeprom_read_op)(struct ath_hw *ah, int address, u8 *buffer,
3264 int count);
3265
3266static bool ar9300_check_header(void *data)
3267{
3268 u32 *word = data;
3269 return !(*word == 0 || *word == ~0);
3270}
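ar9300_check_header() simply rejects a first word that is all zeroes or all ones, i.e. blank or erased storage; sketched behaviour (illustrative only):

	/*
	 * ar9300_check_header("\x00\x00\x00\x00") -> false (blank)
	 * ar9300_check_header("\xff\xff\xff\xff") -> false (erased)
	 * any other 4-byte pattern                -> true  (candidate compression header)
	 */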
3271
3272static bool ar9300_check_eeprom_header(struct ath_hw *ah, eeprom_read_op read,
3273 int base_addr)
3274{
3275 u8 header[4];
3276
3277 if (!read(ah, base_addr, header, 4))
3278 return false;
3279
3280 return ar9300_check_header(header);
3281}
3282
3283static int ar9300_eeprom_restore_flash(struct ath_hw *ah, u8 *mptr,
3284 int mdata_size)
3285{
3286 struct ath_common *common = ath9k_hw_common(ah);
3287 u16 *data = (u16 *) mptr;
3288 int i;
3289
3290 for (i = 0; i < mdata_size / 2; i++, data++)
3291 ath9k_hw_nvram_read(common, i, data);
3292
3293 return 0;
3294}
869/* 3295/*
870 * Read the configuration data from the eeprom. 3296 * Read the configuration data from the eeprom.
871 * The data can be put in any specified memory buffer. 3297 * The data can be put in any specified memory buffer.
@@ -886,6 +3312,10 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
886 int it; 3312 int it;
887 u16 checksum, mchecksum; 3313 u16 checksum, mchecksum;
888 struct ath_common *common = ath9k_hw_common(ah); 3314 struct ath_common *common = ath9k_hw_common(ah);
3315 eeprom_read_op read;
3316
3317 if (ath9k_hw_use_flash(ah))
3318 return ar9300_eeprom_restore_flash(ah, mptr, mdata_size);
889 3319
890 word = kzalloc(2048, GFP_KERNEL); 3320 word = kzalloc(2048, GFP_KERNEL);
891 if (!word) 3321 if (!word)
@@ -893,43 +3323,73 @@ static int ar9300_eeprom_restore_internal(struct ath_hw *ah,
893 3323
894 memcpy(mptr, &ar9300_default, mdata_size); 3324 memcpy(mptr, &ar9300_default, mdata_size);
895 3325
3326 read = ar9300_read_eeprom;
3327 if (AR_SREV_9485(ah))
3328 cptr = AR9300_BASE_ADDR_4K;
3329 else
3330 cptr = AR9300_BASE_ADDR;
3331 ath_dbg(common, ATH_DBG_EEPROM,
3332 "Trying EEPROM access at Address 0x%04x\n", cptr);
3333 if (ar9300_check_eeprom_header(ah, read, cptr))
3334 goto found;
3335
3336 cptr = AR9300_BASE_ADDR_512;
3337 ath_dbg(common, ATH_DBG_EEPROM,
3338 "Trying EEPROM access at Address 0x%04x\n", cptr);
3339 if (ar9300_check_eeprom_header(ah, read, cptr))
3340 goto found;
3341
3342 read = ar9300_read_otp;
896 cptr = AR9300_BASE_ADDR; 3343 cptr = AR9300_BASE_ADDR;
3344 ath_dbg(common, ATH_DBG_EEPROM,
3345 "Trying OTP access at Address 0x%04x\n", cptr);
3346 if (ar9300_check_eeprom_header(ah, read, cptr))
3347 goto found;
3348
3349 cptr = AR9300_BASE_ADDR_512;
3350 ath_dbg(common, ATH_DBG_EEPROM,
3351 "Trying OTP access at Address 0x%04x\n", cptr);
3352 if (ar9300_check_eeprom_header(ah, read, cptr))
3353 goto found;
3354
3355 goto fail;
3356
3357found:
3358 ath_dbg(common, ATH_DBG_EEPROM, "Found valid EEPROM data\n");
3359
897 for (it = 0; it < MSTATE; it++) { 3360 for (it = 0; it < MSTATE; it++) {
898 if (!ar9300_read_eeprom(ah, cptr, word, COMP_HDR_LEN)) 3361 if (!read(ah, cptr, word, COMP_HDR_LEN))
899 goto fail; 3362 goto fail;
900 3363
901 if ((word[0] == 0 && word[1] == 0 && word[2] == 0 && 3364 if (!ar9300_check_header(word))
902 word[3] == 0) || (word[0] == 0xff && word[1] == 0xff
903 && word[2] == 0xff && word[3] == 0xff))
904 break; 3365 break;
905 3366
906 ar9300_comp_hdr_unpack(word, &code, &reference, 3367 ar9300_comp_hdr_unpack(word, &code, &reference,
907 &length, &major, &minor); 3368 &length, &major, &minor);
908 ath_print(common, ATH_DBG_EEPROM, 3369 ath_dbg(common, ATH_DBG_EEPROM,
909 "Found block at %x: code=%d ref=%d" 3370 "Found block at %x: code=%d ref=%d length=%d major=%d minor=%d\n",
910 "length=%d major=%d minor=%d\n", cptr, code, 3371 cptr, code, reference, length, major, minor);
911 reference, length, major, minor); 3372 if ((!AR_SREV_9485(ah) && length >= 1024) ||
912 if (length >= 1024) { 3373 (AR_SREV_9485(ah) && length > EEPROM_DATA_LEN_9485)) {
913 ath_print(common, ATH_DBG_EEPROM, 3374 ath_dbg(common, ATH_DBG_EEPROM,
914 "Skipping bad header\n"); 3375 "Skipping bad header\n");
915 cptr -= COMP_HDR_LEN; 3376 cptr -= COMP_HDR_LEN;
916 continue; 3377 continue;
917 } 3378 }
918 3379
919 osize = length; 3380 osize = length;
920 ar9300_read_eeprom(ah, cptr, word, 3381 read(ah, cptr, word, COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
921 COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
922 checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length); 3382 checksum = ar9300_comp_cksum(&word[COMP_HDR_LEN], length);
923 mchecksum = word[COMP_HDR_LEN + osize] | 3383 mchecksum = word[COMP_HDR_LEN + osize] |
924 (word[COMP_HDR_LEN + osize + 1] << 8); 3384 (word[COMP_HDR_LEN + osize + 1] << 8);
925 ath_print(common, ATH_DBG_EEPROM, 3385 ath_dbg(common, ATH_DBG_EEPROM,
926 "checksum %x %x\n", checksum, mchecksum); 3386 "checksum %x %x\n", checksum, mchecksum);
927 if (checksum == mchecksum) { 3387 if (checksum == mchecksum) {
928 ar9300_compress_decision(ah, it, code, reference, mptr, 3388 ar9300_compress_decision(ah, it, code, reference, mptr,
929 word, length, mdata_size); 3389 word, length, mdata_size);
930 } else { 3390 } else {
931 ath_print(common, ATH_DBG_EEPROM, 3391 ath_dbg(common, ATH_DBG_EEPROM,
932 "skipping block with bad checksum\n"); 3392 "skipping block with bad checksum\n");
933 } 3393 }
934 cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN); 3394 cptr -= (COMP_HDR_LEN + osize + COMP_CKSUM_LEN);
935 } 3395 }
@@ -970,18 +3430,6 @@ static int ath9k_hw_ar9300_get_eeprom_rev(struct ath_hw *ah)
970 return 0; 3430 return 0;
971} 3431}
972 3432
973static u8 ath9k_hw_ar9300_get_num_ant_config(struct ath_hw *ah,
974 enum ath9k_hal_freq_band freq_band)
975{
976 return 1;
977}
978
979static u32 ath9k_hw_ar9300_get_eeprom_antenna_cfg(struct ath_hw *ah,
980 struct ath9k_channel *chan)
981{
982 return -EINVAL;
983}
984
985static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz) 3433static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
986{ 3434{
987 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 3435 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
@@ -995,9 +3443,15 @@ static s32 ar9003_hw_xpa_bias_level_get(struct ath_hw *ah, bool is2ghz)
995static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz) 3443static void ar9003_hw_xpa_bias_level_apply(struct ath_hw *ah, bool is2ghz)
996{ 3444{
997 int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz); 3445 int bias = ar9003_hw_xpa_bias_level_get(ah, is2ghz);
998 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, (bias & 0x3)); 3446
999 REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_SPARE, 3447 if (AR_SREV_9485(ah))
1000 ((bias >> 2) & 0x3)); 3448 REG_RMW_FIELD(ah, AR_CH0_TOP2, AR_CH0_TOP2_XPABIASLVL, bias);
3449 else {
3450 REG_RMW_FIELD(ah, AR_CH0_TOP, AR_CH0_TOP_XPABIASLVL, bias);
3451 REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPABIASLVL_MSB,
3452 bias >> 2);
3453 REG_RMW_FIELD(ah, AR_CH0_THERM, AR_CH0_THERM_XPASHORT2GND, 1);
3454 }
1001} 3455}
1002 3456
1003static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz) 3457static u32 ar9003_hw_ant_ctrl_common_get(struct ath_hw *ah, bool is2ghz)
@@ -1052,11 +3506,25 @@ static void ar9003_hw_ant_ctrl_apply(struct ath_hw *ah, bool is2ghz)
1052 value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz); 3506 value = ar9003_hw_ant_ctrl_chain_get(ah, 0, is2ghz);
1053 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value); 3507 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_0, AR_SWITCH_TABLE_ALL, value);
1054 3508
1055 value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz); 3509 if (!AR_SREV_9485(ah)) {
1056 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL, value); 3510 value = ar9003_hw_ant_ctrl_chain_get(ah, 1, is2ghz);
3511 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_1, AR_SWITCH_TABLE_ALL,
3512 value);
1057 3513
1058 value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz); 3514 value = ar9003_hw_ant_ctrl_chain_get(ah, 2, is2ghz);
1059 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL, value); 3515 REG_RMW_FIELD(ah, AR_PHY_SWITCH_CHAIN_2, AR_SWITCH_TABLE_ALL,
3516 value);
3517 }
3518
3519 if (AR_SREV_9485(ah)) {
3520 value = ath9k_hw_ar9300_get_eeprom(ah, EEP_ANT_DIV_CTL1);
3521 REG_RMW_FIELD(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_CTRL_ALL,
3522 value);
3523 REG_RMW_FIELD(ah, AR_PHY_MC_GAIN_CTRL, AR_ANT_DIV_ENABLE,
3524 value >> 6);
3525 REG_RMW_FIELD(ah, AR_PHY_CCK_DETECT, AR_FAST_DIV_ENABLE,
3526 value >> 7);
3527 }
1060} 3528}
1061 3529
1062static void ar9003_hw_drive_strength_apply(struct ath_hw *ah) 3530static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
@@ -1100,28 +3568,177 @@ static void ar9003_hw_drive_strength_apply(struct ath_hw *ah)
1100 REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg); 3568 REG_WRITE(ah, AR_PHY_65NM_CH0_BIAS4, reg);
1101} 3569}
1102 3570
3571static u16 ar9003_hw_atten_chain_get(struct ath_hw *ah, int chain,
3572 struct ath9k_channel *chan)
3573{
3574 int f[3], t[3];
3575 u16 value;
3576 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
3577
3578 if (chain >= 0 && chain < 3) {
3579 if (IS_CHAN_2GHZ(chan))
3580 return eep->modalHeader2G.xatten1DB[chain];
3581 else if (eep->base_ext2.xatten1DBLow[chain] != 0) {
3582 t[0] = eep->base_ext2.xatten1DBLow[chain];
3583 f[0] = 5180;
3584 t[1] = eep->modalHeader5G.xatten1DB[chain];
3585 f[1] = 5500;
3586 t[2] = eep->base_ext2.xatten1DBHigh[chain];
3587 f[2] = 5785;
3588 value = ar9003_hw_power_interpolate((s32) chan->channel,
3589 f, t, 3);
3590 return value;
3591 } else
3592 return eep->modalHeader5G.xatten1DB[chain];
3593 }
3594
3595 return 0;
3596}
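For 5 GHz channels the attenuation comes from a three-point curve anchored at 5180, 5500 and 5785 MHz. Assuming ar9003_hw_power_interpolate() (defined outside this hunk) brackets the channel between the two nearest anchors, the first template above (xatten1DBLow[0] = 0x10, modalHeader5G.xatten1DB[0] = 0x13) would give a 5320 MHz channel roughly:

	/*
	 * interpolate(5320, 5180, 5500, 0x10, 0x13):
	 *   bf = 2 * 3 * 140 / 320 = 2, factor = 1, plus = 0
	 *   -> 0x10 + 1 = 0x11 (17), a value between the low and mid anchors
	 */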
3597
3598
3599static u16 ar9003_hw_atten_chain_get_margin(struct ath_hw *ah, int chain,
3600 struct ath9k_channel *chan)
3601{
3602 int f[3], t[3];
3603 u16 value;
3604 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
3605
3606 if (chain >= 0 && chain < 3) {
3607 if (IS_CHAN_2GHZ(chan))
3608 return eep->modalHeader2G.xatten1Margin[chain];
3609 else if (eep->base_ext2.xatten1MarginLow[chain] != 0) {
3610 t[0] = eep->base_ext2.xatten1MarginLow[chain];
3611 f[0] = 5180;
3612 t[1] = eep->modalHeader5G.xatten1Margin[chain];
3613 f[1] = 5500;
3614 t[2] = eep->base_ext2.xatten1MarginHigh[chain];
3615 f[2] = 5785;
3616 value = ar9003_hw_power_interpolate((s32) chan->channel,
3617 f, t, 3);
3618 return value;
3619 } else
3620 return eep->modalHeader5G.xatten1Margin[chain];
3621 }
3622
3623 return 0;
3624}
3625
3626static void ar9003_hw_atten_apply(struct ath_hw *ah, struct ath9k_channel *chan)
3627{
3628 int i;
3629 u16 value;
3630 unsigned long ext_atten_reg[3] = {AR_PHY_EXT_ATTEN_CTL_0,
3631 AR_PHY_EXT_ATTEN_CTL_1,
3632 AR_PHY_EXT_ATTEN_CTL_2,
3633 };
3634
3635 /* Test value. If 0 then attenuation is unused. Don't load anything. */
3636 for (i = 0; i < 3; i++) {
3637 value = ar9003_hw_atten_chain_get(ah, i, chan);
3638 REG_RMW_FIELD(ah, ext_atten_reg[i],
3639 AR_PHY_EXT_ATTEN_CTL_XATTEN1_DB, value);
3640
3641 value = ar9003_hw_atten_chain_get_margin(ah, i, chan);
3642 REG_RMW_FIELD(ah, ext_atten_reg[i],
3643 AR_PHY_EXT_ATTEN_CTL_XATTEN1_MARGIN, value);
3644 }
3645}
3646
3647static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
3648{
3649 int timeout = 100;
3650
3651 while (pmu_set != REG_READ(ah, pmu_reg)) {
3652 if (timeout-- == 0)
3653 return false;
3654 REG_WRITE(ah, pmu_reg, pmu_set);
3655 udelay(10);
3656 }
3657
3658 return true;
3659}
3660
1103static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) 3661static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
1104{ 3662{
1105 int internal_regulator = 3663 int internal_regulator =
1106 ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR); 3664 ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
1107 3665
1108 if (internal_regulator) { 3666 if (internal_regulator) {
1109 /* Internal regulator is ON. Write swreg register. */ 3667 if (AR_SREV_9485(ah)) {
1110 int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG); 3668 int reg_pmu_set;
1111 REG_WRITE(ah, AR_RTC_REG_CONTROL1, 3669
1112 REG_READ(ah, AR_RTC_REG_CONTROL1) & 3670 reg_pmu_set = REG_READ(ah, AR_PHY_PMU2) & ~AR_PHY_PMU2_PGM;
1113 (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM)); 3671 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
1114 REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg); 3672 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
1115 /* Set REG_CONTROL1.SWREG_PROGRAM */ 3673 return;
1116 REG_WRITE(ah, AR_RTC_REG_CONTROL1, 3674
1117 REG_READ(ah, 3675 reg_pmu_set = (5 << 1) | (7 << 4) | (1 << 8) |
1118 AR_RTC_REG_CONTROL1) | 3676 (7 << 14) | (6 << 17) | (1 << 20) |
1119 AR_RTC_REG_CONTROL1_SWREG_PROGRAM); 3677 (3 << 24) | (1 << 28);
3678
3679 REG_WRITE(ah, AR_PHY_PMU1, reg_pmu_set);
3680 if (!is_pmu_set(ah, AR_PHY_PMU1, reg_pmu_set))
3681 return;
3682
3683 reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0xFFC00000)
3684 | (4 << 26);
3685 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
3686 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
3687 return;
3688
3689 reg_pmu_set = (REG_READ(ah, AR_PHY_PMU2) & ~0x00200000)
3690 | (1 << 21);
3691 REG_WRITE(ah, AR_PHY_PMU2, reg_pmu_set);
3692 if (!is_pmu_set(ah, AR_PHY_PMU2, reg_pmu_set))
3693 return;
3694 } else {
3695 /* Internal regulator is ON. Write swreg register. */
3696 int swreg = ath9k_hw_ar9300_get_eeprom(ah, EEP_SWREG);
3697 REG_WRITE(ah, AR_RTC_REG_CONTROL1,
3698 REG_READ(ah, AR_RTC_REG_CONTROL1) &
3699 (~AR_RTC_REG_CONTROL1_SWREG_PROGRAM));
3700 REG_WRITE(ah, AR_RTC_REG_CONTROL0, swreg);
3701 /* Set REG_CONTROL1.SWREG_PROGRAM */
3702 REG_WRITE(ah, AR_RTC_REG_CONTROL1,
3703 REG_READ(ah,
3704 AR_RTC_REG_CONTROL1) |
3705 AR_RTC_REG_CONTROL1_SWREG_PROGRAM);
3706 }
1120 } else { 3707 } else {
1121 REG_WRITE(ah, AR_RTC_SLEEP_CLK, 3708 if (AR_SREV_9485(ah)) {
1122 (REG_READ(ah, 3709 REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0);
1123 AR_RTC_SLEEP_CLK) | 3710 while (REG_READ_FIELD(ah, AR_PHY_PMU2,
1124 AR_RTC_FORCE_SWREG_PRD)); 3711 AR_PHY_PMU2_PGM))
3712 udelay(10);
3713
3714 REG_RMW_FIELD(ah, AR_PHY_PMU1, AR_PHY_PMU1_PWD, 0x1);
3715 while (!REG_READ_FIELD(ah, AR_PHY_PMU1,
3716 AR_PHY_PMU1_PWD))
3717 udelay(10);
3718 REG_RMW_FIELD(ah, AR_PHY_PMU2, AR_PHY_PMU2_PGM, 0x1);
3719 while (!REG_READ_FIELD(ah, AR_PHY_PMU2,
3720 AR_PHY_PMU2_PGM))
3721 udelay(10);
3722 } else
3723 REG_WRITE(ah, AR_RTC_SLEEP_CLK,
3724 (REG_READ(ah,
3725 AR_RTC_SLEEP_CLK) |
3726 AR_RTC_FORCE_SWREG_PRD));
3727 }
3728
3729}
3730
3731static void ar9003_hw_apply_tuning_caps(struct ath_hw *ah)
3732{
3733 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
3734 u8 tuning_caps_param = eep->baseEepHeader.params_for_tuning_caps[0];
3735
3736 if (eep->baseEepHeader.featureEnable & 0x40) {
3737 tuning_caps_param &= 0x7f;
3738 REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPINDAC,
3739 tuning_caps_param);
3740 REG_RMW_FIELD(ah, AR_CH0_XTAL, AR_CH0_XTAL_CAPOUTDAC,
3741 tuning_caps_param);
1125 } 3742 }
1126} 3743}
1127 3744
@@ -1131,7 +3748,10 @@ static void ath9k_hw_ar9300_set_board_values(struct ath_hw *ah,
1131 ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan)); 3748 ar9003_hw_xpa_bias_level_apply(ah, IS_CHAN_2GHZ(chan));
1132 ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan)); 3749 ar9003_hw_ant_ctrl_apply(ah, IS_CHAN_2GHZ(chan));
1133 ar9003_hw_drive_strength_apply(ah); 3750 ar9003_hw_drive_strength_apply(ah);
3751 ar9003_hw_atten_apply(ah, chan);
1134 ar9003_hw_internal_regulator_apply(ah); 3752 ar9003_hw_internal_regulator_apply(ah);
3753 if (AR_SREV_9485(ah))
3754 ar9003_hw_apply_tuning_caps(ah);
1135} 3755}
1136 3756
1137static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah, 3757static void ath9k_hw_ar9300_set_addac(struct ath_hw *ah,
@@ -1192,7 +3812,7 @@ static int ar9003_hw_power_interpolate(int32_t x,
1192 if (hx == lx) 3812 if (hx == lx)
1193 y = ly; 3813 y = ly;
1194 else /* interpolate */ 3814 else /* interpolate */
1195 y = ly + (((x - lx) * (hy - ly)) / (hx - lx)); 3815 y = interpolate(x, lx, hx, ly, hy);
1196 } else /* only low is good, use it */ 3816 } else /* only low is good, use it */
1197 y = ly; 3817 y = ly;
1198 } else if (hhave) /* only high is good, use it */ 3818 } else if (hhave) /* only high is good, use it */
@@ -1561,22 +4181,9 @@ static void ar9003_hw_set_target_power_eeprom(struct ath_hw *ah, u16 freq,
1561 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq, 4181 ar9003_hw_eeprom_get_ht40_tgt_pwr(ah, HT_TARGET_RATE_23, freq,
1562 is2GHz) + ht40PowerIncForPdadc; 4182 is2GHz) + ht40PowerIncForPdadc;
1563 4183
1564 while (i < ar9300RateSize) { 4184 for (i = 0; i < ar9300RateSize; i++) {
1565 ath_print(common, ATH_DBG_EEPROM, 4185 ath_dbg(common, ATH_DBG_EEPROM,
1566 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 4186 "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
1567 i++;
1568
1569 ath_print(common, ATH_DBG_EEPROM,
1570 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
1571 i++;
1572
1573 ath_print(common, ATH_DBG_EEPROM,
1574 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]);
1575 i++;
1576
1577 ath_print(common, ATH_DBG_EEPROM,
1578 "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
1579 i++;
1580 } 4187 }
1581} 4188}
1582 4189
@@ -1595,18 +4202,17 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
1595 struct ath_common *common = ath9k_hw_common(ah); 4202 struct ath_common *common = ath9k_hw_common(ah);
1596 4203
1597 if (ichain >= AR9300_MAX_CHAINS) { 4204 if (ichain >= AR9300_MAX_CHAINS) {
1598 ath_print(common, ATH_DBG_EEPROM, 4205 ath_dbg(common, ATH_DBG_EEPROM,
1599 "Invalid chain index, must be less than %d\n", 4206 "Invalid chain index, must be less than %d\n",
1600 AR9300_MAX_CHAINS); 4207 AR9300_MAX_CHAINS);
1601 return -1; 4208 return -1;
1602 } 4209 }
1603 4210
1604 if (mode) { /* 5GHz */ 4211 if (mode) { /* 5GHz */
1605 if (ipier >= AR9300_NUM_5G_CAL_PIERS) { 4212 if (ipier >= AR9300_NUM_5G_CAL_PIERS) {
1606 ath_print(common, ATH_DBG_EEPROM, 4213 ath_dbg(common, ATH_DBG_EEPROM,
1607 "Invalid 5GHz cal pier index, must " 4214 "Invalid 5GHz cal pier index, must be less than %d\n",
1608 "be less than %d\n", 4215 AR9300_NUM_5G_CAL_PIERS);
1609 AR9300_NUM_5G_CAL_PIERS);
1610 return -1; 4216 return -1;
1611 } 4217 }
1612 pCalPier = &(eep->calFreqPier5G[ipier]); 4218 pCalPier = &(eep->calFreqPier5G[ipier]);
@@ -1614,9 +4220,9 @@ static int ar9003_hw_cal_pier_get(struct ath_hw *ah,
1614 is2GHz = 0; 4220 is2GHz = 0;
1615 } else { 4221 } else {
1616 if (ipier >= AR9300_NUM_2G_CAL_PIERS) { 4222 if (ipier >= AR9300_NUM_2G_CAL_PIERS) {
1617 ath_print(common, ATH_DBG_EEPROM, 4223 ath_dbg(common, ATH_DBG_EEPROM,
1618 "Invalid 2GHz cal pier index, must " 4224 "Invalid 2GHz cal pier index, must be less than %d\n",
1619 "be less than %d\n", AR9300_NUM_2G_CAL_PIERS); 4225 AR9300_NUM_2G_CAL_PIERS);
1620 return -1; 4226 return -1;
1621 } 4227 }
1622 4228
@@ -1640,27 +4246,32 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
1640{ 4246{
1641 int tempSlope = 0; 4247 int tempSlope = 0;
1642 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 4248 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4249 int f[3], t[3];
1643 4250
1644 REG_RMW(ah, AR_PHY_TPC_11_B0, 4251 REG_RMW(ah, AR_PHY_TPC_11_B0,
1645 (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), 4252 (correction[0] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
1646 AR_PHY_TPC_OLPC_GAIN_DELTA); 4253 AR_PHY_TPC_OLPC_GAIN_DELTA);
1647 REG_RMW(ah, AR_PHY_TPC_11_B1, 4254 if (ah->caps.tx_chainmask & BIT(1))
1648 (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), 4255 REG_RMW(ah, AR_PHY_TPC_11_B1,
1649 AR_PHY_TPC_OLPC_GAIN_DELTA); 4256 (correction[1] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
1650 REG_RMW(ah, AR_PHY_TPC_11_B2, 4257 AR_PHY_TPC_OLPC_GAIN_DELTA);
1651 (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S), 4258 if (ah->caps.tx_chainmask & BIT(2))
1652 AR_PHY_TPC_OLPC_GAIN_DELTA); 4259 REG_RMW(ah, AR_PHY_TPC_11_B2,
4260 (correction[2] << AR_PHY_TPC_OLPC_GAIN_DELTA_S),
4261 AR_PHY_TPC_OLPC_GAIN_DELTA);
1653 4262
1654 /* enable open loop power control on chip */ 4263 /* enable open loop power control on chip */
1655 REG_RMW(ah, AR_PHY_TPC_6_B0, 4264 REG_RMW(ah, AR_PHY_TPC_6_B0,
1656 (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S), 4265 (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
1657 AR_PHY_TPC_6_ERROR_EST_MODE); 4266 AR_PHY_TPC_6_ERROR_EST_MODE);
1658 REG_RMW(ah, AR_PHY_TPC_6_B1, 4267 if (ah->caps.tx_chainmask & BIT(1))
1659 (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S), 4268 REG_RMW(ah, AR_PHY_TPC_6_B1,
1660 AR_PHY_TPC_6_ERROR_EST_MODE); 4269 (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
1661 REG_RMW(ah, AR_PHY_TPC_6_B2, 4270 AR_PHY_TPC_6_ERROR_EST_MODE);
1662 (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S), 4271 if (ah->caps.tx_chainmask & BIT(2))
1663 AR_PHY_TPC_6_ERROR_EST_MODE); 4272 REG_RMW(ah, AR_PHY_TPC_6_B2,
4273 (3 << AR_PHY_TPC_6_ERROR_EST_MODE_S),
4274 AR_PHY_TPC_6_ERROR_EST_MODE);
1664 4275
1665 /* 4276 /*
1666 * enable temperature compensation 4277 * enable temperature compensation
@@ -1668,7 +4279,16 @@ static int ar9003_hw_power_control_override(struct ath_hw *ah,
1668 */ 4279 */
1669 if (frequency < 4000) 4280 if (frequency < 4000)
1670 tempSlope = eep->modalHeader2G.tempSlope; 4281 tempSlope = eep->modalHeader2G.tempSlope;
1671 else 4282 else if (eep->base_ext2.tempSlopeLow != 0) {
4283 t[0] = eep->base_ext2.tempSlopeLow;
4284 f[0] = 5180;
4285 t[1] = eep->modalHeader5G.tempSlope;
4286 f[1] = 5500;
4287 t[2] = eep->base_ext2.tempSlopeHigh;
4288 f[2] = 5785;
4289 tempSlope = ar9003_hw_power_interpolate((s32) frequency,
4290 f, t, 3);
4291 } else
1672 tempSlope = eep->modalHeader5G.tempSlope; 4292 tempSlope = eep->modalHeader5G.tempSlope;
1673 4293
1674 REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope); 4294 REG_RMW_FIELD(ah, AR_PHY_TPC_19, AR_PHY_TPC_19_ALPHA_THERM, tempSlope);
@@ -1756,11 +4376,11 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
1756 4376
1757 /* interpolate */ 4377 /* interpolate */
1758 for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) { 4378 for (ichain = 0; ichain < AR9300_MAX_CHAINS; ichain++) {
1759 ath_print(common, ATH_DBG_EEPROM, 4379 ath_dbg(common, ATH_DBG_EEPROM,
1760 "ch=%d f=%d low=%d %d h=%d %d\n", 4380 "ch=%d f=%d low=%d %d h=%d %d\n",
1761 ichain, frequency, lfrequency[ichain], 4381 ichain, frequency, lfrequency[ichain],
1762 lcorrection[ichain], hfrequency[ichain], 4382 lcorrection[ichain], hfrequency[ichain],
1763 hcorrection[ichain]); 4383 hcorrection[ichain]);
1764 /* they're the same, so just pick one */ 4384 /* they're the same, so just pick one */
1765 if (hfrequency[ichain] == lfrequency[ichain]) { 4385 if (hfrequency[ichain] == lfrequency[ichain]) {
1766 correction[ichain] = lcorrection[ichain]; 4386 correction[ichain] = lcorrection[ichain];
@@ -1772,25 +4392,23 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
1772 /* so is the high frequency, interpolate */ 4392 /* so is the high frequency, interpolate */
1773 if (hfrequency[ichain] - frequency < 1000) { 4393 if (hfrequency[ichain] - frequency < 1000) {
1774 4394
1775 correction[ichain] = lcorrection[ichain] + 4395 correction[ichain] = interpolate(frequency,
1776 (((frequency - lfrequency[ichain]) * 4396 lfrequency[ichain],
1777 (hcorrection[ichain] - 4397 hfrequency[ichain],
1778 lcorrection[ichain])) / 4398 lcorrection[ichain],
1779 (hfrequency[ichain] - lfrequency[ichain])); 4399 hcorrection[ichain]);
1780 4400
1781 temperature[ichain] = ltemperature[ichain] + 4401 temperature[ichain] = interpolate(frequency,
1782 (((frequency - lfrequency[ichain]) * 4402 lfrequency[ichain],
1783 (htemperature[ichain] - 4403 hfrequency[ichain],
1784 ltemperature[ichain])) / 4404 ltemperature[ichain],
1785 (hfrequency[ichain] - lfrequency[ichain])); 4405 htemperature[ichain]);
1786 4406
1787 voltage[ichain] = 4407 voltage[ichain] = interpolate(frequency,
1788 lvoltage[ichain] + 4408 lfrequency[ichain],
1789 (((frequency - 4409 hfrequency[ichain],
1790 lfrequency[ichain]) * (hvoltage[ichain] - 4410 lvoltage[ichain],
1791 lvoltage[ichain])) 4411 hvoltage[ichain]);
1792 / (hfrequency[ichain] -
1793 lfrequency[ichain]));
1794 } 4412 }
1795 /* only low is good, use it */ 4413 /* only low is good, use it */
1796 else { 4414 else {
@@ -1814,9 +4432,9 @@ static int ar9003_hw_calibration_apply(struct ath_hw *ah, int frequency)
1814 ar9003_hw_power_control_override(ah, frequency, correction, voltage, 4432 ar9003_hw_power_control_override(ah, frequency, correction, voltage,
1815 temperature); 4433 temperature);
1816 4434
1817 ath_print(common, ATH_DBG_EEPROM, 4435 ath_dbg(common, ATH_DBG_EEPROM,
1818 "for frequency=%d, calibration correction = %d %d %d\n", 4436 "for frequency=%d, calibration correction = %d %d %d\n",
1819 frequency, correction[0], correction[1], correction[2]); 4437 frequency, correction[0], correction[1], correction[2]);
1820 4438
1821 return 0; 4439 return 0;
1822} 4440}
@@ -1858,7 +4476,7 @@ static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
1858 return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]); 4476 return CTL_EDGE_TPOWER(ctl_5g[idx].ctlEdges[edge - 1]);
1859 } 4477 }
1860 4478
1861 return AR9300_MAX_RATE_POWER; 4479 return MAX_RATE_POWER;
1862} 4480}
1863 4481
1864/* 4482/*
@@ -1867,7 +4485,7 @@ static u16 ar9003_hw_get_indirect_edge_power(struct ar9300_eeprom *eep,
1867static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep, 4485static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
1868 u16 freq, int idx, bool is2GHz) 4486 u16 freq, int idx, bool is2GHz)
1869{ 4487{
1870 u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER; 4488 u16 twiceMaxEdgePower = MAX_RATE_POWER;
1871 u8 *ctl_freqbin = is2GHz ? 4489 u8 *ctl_freqbin = is2GHz ?
1872 &eep->ctl_freqbin_2G[idx][0] : 4490 &eep->ctl_freqbin_2G[idx][0] :
1873 &eep->ctl_freqbin_5G[idx][0]; 4491 &eep->ctl_freqbin_5G[idx][0];
@@ -1877,7 +4495,7 @@ static u16 ar9003_hw_get_max_edge_power(struct ar9300_eeprom *eep,
1877 4495
1878 /* Get the edge power */ 4496 /* Get the edge power */
1879 for (edge = 0; 4497 for (edge = 0;
1880 (edge < num_edges) && (ctl_freqbin[edge] != AR9300_BCHAN_UNUSED); 4498 (edge < num_edges) && (ctl_freqbin[edge] != AR5416_BCHAN_UNUSED);
1881 edge++) { 4499 edge++) {
1882 /* 4500 /*
1883 * If there's an exact channel match or an inband flag set 4501 * If there's an exact channel match or an inband flag set
@@ -1915,21 +4533,23 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
1915 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 4533 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1916 struct ath_common *common = ath9k_hw_common(ah); 4534 struct ath_common *common = ath9k_hw_common(ah);
1917 struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep; 4535 struct ar9300_eeprom *pEepData = &ah->eeprom.ar9300_eep;
1918 u16 twiceMaxEdgePower = AR9300_MAX_RATE_POWER; 4536 u16 twiceMaxEdgePower = MAX_RATE_POWER;
1919 static const u16 tpScaleReductionTable[5] = { 4537 static const u16 tpScaleReductionTable[5] = {
1920 0, 3, 6, 9, AR9300_MAX_RATE_POWER 4538 0, 3, 6, 9, MAX_RATE_POWER
1921 }; 4539 };
1922 int i; 4540 int i;
1923 int16_t twiceLargestAntenna; 4541 int16_t twiceLargestAntenna;
1924 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 4542 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
1925 u16 ctlModesFor11a[] = { 4543 static const u16 ctlModesFor11a[] = {
1926 CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 4544 CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
1927 }; 4545 };
1928 u16 ctlModesFor11g[] = { 4546 static const u16 ctlModesFor11g[] = {
1929 CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, 4547 CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT,
1930 CTL_11G_EXT, CTL_2GHT40 4548 CTL_11G_EXT, CTL_2GHT40
1931 }; 4549 };
1932 u16 numCtlModes, *pCtlMode, ctlMode, freq; 4550 u16 numCtlModes;
4551 const u16 *pCtlMode;
4552 u16 ctlMode, freq;
1933 struct chan_centers centers; 4553 struct chan_centers centers;
1934 u8 *ctlIndex; 4554 u8 *ctlIndex;
1935 u8 ctlNum; 4555 u8 ctlNum;
@@ -2019,11 +4639,10 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
2019 else 4639 else
2020 freq = centers.ctl_center; 4640 freq = centers.ctl_center;
2021 4641
2022 ath_print(common, ATH_DBG_REGULATORY, 4642 ath_dbg(common, ATH_DBG_REGULATORY,
2023 "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, " 4643 "LOOP-Mode ctlMode %d < %d, isHt40CtlMode %d, EXT_ADDITIVE %d\n",
2024 "EXT_ADDITIVE %d\n", 4644 ctlMode, numCtlModes, isHt40CtlMode,
2025 ctlMode, numCtlModes, isHt40CtlMode, 4645 (pCtlMode[ctlMode] & EXT_ADDITIVE));
2026 (pCtlMode[ctlMode] & EXT_ADDITIVE));
2027 4646
2028 /* walk through each CTL index stored in EEPROM */ 4647 /* walk through each CTL index stored in EEPROM */
2029 if (is2ghz) { 4648 if (is2ghz) {
@@ -2035,12 +4654,10 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
2035 } 4654 }
2036 4655
2037 for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) { 4656 for (i = 0; (i < ctlNum) && ctlIndex[i]; i++) {
2038 ath_print(common, ATH_DBG_REGULATORY, 4657 ath_dbg(common, ATH_DBG_REGULATORY,
2039 "LOOP-Ctlidx %d: cfgCtl 0x%2.2x " 4658 "LOOP-Ctlidx %d: cfgCtl 0x%2.2x pCtlMode 0x%2.2x ctlIndex 0x%2.2x chan %d\n",
2040 "pCtlMode 0x%2.2x ctlIndex 0x%2.2x " 4659 i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
2041 "chan %dn", 4660 chan->channel);
2042 i, cfgCtl, pCtlMode[ctlMode], ctlIndex[i],
2043 chan->channel);
2044 4661
2045 /* 4662 /*
2046 * compare test group from regulatory 4663 * compare test group from regulatory
@@ -2079,11 +4696,10 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
2079 4696
2080 minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); 4697 minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower);
2081 4698
2082 ath_print(common, ATH_DBG_REGULATORY, 4699 ath_dbg(common, ATH_DBG_REGULATORY,
2083 "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d " 4700 "SEL-Min ctlMode %d pCtlMode %d 2xMaxEdge %d sP %d minCtlPwr %d\n",
2084 "sP %d minCtlPwr %d\n", 4701 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower,
2085 ctlMode, pCtlMode[ctlMode], twiceMaxEdgePower, 4702 scaledPower, minCtlPower);
2086 scaledPower, minCtlPower);
2087 4703
2088 /* Apply ctl mode to correct target power set */ 4704 /* Apply ctl mode to correct target power set */
2089 switch (pCtlMode[ctlMode]) { 4705 switch (pCtlMode[ctlMode]) {
@@ -2130,40 +4746,101 @@ static void ar9003_hw_set_power_per_rate_table(struct ath_hw *ah,
2130 } /* end ctl mode checking */ 4746 } /* end ctl mode checking */
2131} 4747}
2132 4748
4749static inline u8 mcsidx_to_tgtpwridx(unsigned int mcs_idx, u8 base_pwridx)
4750{
4751 u8 mod_idx = mcs_idx % 8;
4752
4753 if (mod_idx <= 3)
4754 return mod_idx ? (base_pwridx + 1) : base_pwridx;
4755 else
4756 return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2;
4757}
4758
2133static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah, 4759static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
2134 struct ath9k_channel *chan, u16 cfgCtl, 4760 struct ath9k_channel *chan, u16 cfgCtl,
2135 u8 twiceAntennaReduction, 4761 u8 twiceAntennaReduction,
2136 u8 twiceMaxRegulatoryPower, 4762 u8 twiceMaxRegulatoryPower,
2137 u8 powerLimit) 4763 u8 powerLimit, bool test)
2138{ 4764{
4765 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
2139 struct ath_common *common = ath9k_hw_common(ah); 4766 struct ath_common *common = ath9k_hw_common(ah);
4767 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4768 struct ar9300_modal_eep_header *modal_hdr;
2140 u8 targetPowerValT2[ar9300RateSize]; 4769 u8 targetPowerValT2[ar9300RateSize];
2141 unsigned int i = 0; 4770 u8 target_power_val_t2_eep[ar9300RateSize];
4771 unsigned int i = 0, paprd_scale_factor = 0;
4772 u8 pwr_idx, min_pwridx = 0;
2142 4773
2143 ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2); 4774 ar9003_hw_set_target_power_eeprom(ah, chan->channel, targetPowerValT2);
4775
4776 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
4777 if (IS_CHAN_2GHZ(chan))
4778 modal_hdr = &eep->modalHeader2G;
4779 else
4780 modal_hdr = &eep->modalHeader5G;
4781
4782 ah->paprd_ratemask =
4783 le32_to_cpu(modal_hdr->papdRateMaskHt20) &
4784 AR9300_PAPRD_RATE_MASK;
4785
4786 ah->paprd_ratemask_ht40 =
4787 le32_to_cpu(modal_hdr->papdRateMaskHt40) &
4788 AR9300_PAPRD_RATE_MASK;
4789
4790 paprd_scale_factor = ar9003_get_paprd_scale_factor(ah, chan);
4791 min_pwridx = IS_CHAN_HT40(chan) ? ALL_TARGET_HT40_0_8_16 :
4792 ALL_TARGET_HT20_0_8_16;
4793
4794 if (!ah->paprd_table_write_done) {
4795 memcpy(target_power_val_t2_eep, targetPowerValT2,
4796 sizeof(targetPowerValT2));
4797 for (i = 0; i < 24; i++) {
4798 pwr_idx = mcsidx_to_tgtpwridx(i, min_pwridx);
4799 if (ah->paprd_ratemask & (1 << i)) {
4800 if (targetPowerValT2[pwr_idx] &&
4801 targetPowerValT2[pwr_idx] ==
4802 target_power_val_t2_eep[pwr_idx])
4803 targetPowerValT2[pwr_idx] -=
4804 paprd_scale_factor;
4805 }
4806 }
4807 }
4808 memcpy(target_power_val_t2_eep, targetPowerValT2,
4809 sizeof(targetPowerValT2));
4810 }
4811
2144 ar9003_hw_set_power_per_rate_table(ah, chan, 4812 ar9003_hw_set_power_per_rate_table(ah, chan,
2145 targetPowerValT2, cfgCtl, 4813 targetPowerValT2, cfgCtl,
2146 twiceAntennaReduction, 4814 twiceAntennaReduction,
2147 twiceMaxRegulatoryPower, 4815 twiceMaxRegulatoryPower,
2148 powerLimit); 4816 powerLimit);
2149 4817
2150 while (i < ar9300RateSize) { 4818 if (ah->eep_ops->get_eeprom(ah, EEP_PAPRD)) {
2151 ath_print(common, ATH_DBG_EEPROM, 4819 for (i = 0; i < ar9300RateSize; i++) {
2152 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 4820 if ((ah->paprd_ratemask & (1 << i)) &&
2153 i++; 4821 (abs(targetPowerValT2[i] -
2154 ath_print(common, ATH_DBG_EEPROM, 4822 target_power_val_t2_eep[i]) >
2155 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 4823 paprd_scale_factor)) {
2156 i++; 4824 ah->paprd_ratemask &= ~(1 << i);
2157 ath_print(common, ATH_DBG_EEPROM, 4825 ath_dbg(common, ATH_DBG_EEPROM,
2158 "TPC[%02d] 0x%08x ", i, targetPowerValT2[i]); 4826 "paprd disabled for mcs %d\n", i);
2159 i++; 4827 }
2160 ath_print(common, ATH_DBG_EEPROM, 4828 }
2161 "TPC[%02d] 0x%08x\n\n", i, targetPowerValT2[i]);
2162 i++;
2163 } 4829 }
2164 4830
2165 /* Write target power array to registers */ 4831 regulatory->max_power_level = 0;
2166 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2); 4832 for (i = 0; i < ar9300RateSize; i++) {
4833 if (targetPowerValT2[i] > regulatory->max_power_level)
4834 regulatory->max_power_level = targetPowerValT2[i];
4835 }
4836
4837 if (test)
4838 return;
4839
4840 for (i = 0; i < ar9300RateSize; i++) {
4841 ath_dbg(common, ATH_DBG_EEPROM,
4842 "TPC[%02d] 0x%08x\n", i, targetPowerValT2[i]);
4843 }
2167 4844
2168 /* 4845 /*
2169 * This is the TX power we send back to driver core, 4846 * This is the TX power we send back to driver core,
@@ -2183,8 +4860,24 @@ static void ath9k_hw_ar9300_set_txpower(struct ath_hw *ah,
2183 i = ALL_TARGET_HT20_0_8_16; /* ht20 */ 4860 i = ALL_TARGET_HT20_0_8_16; /* ht20 */
2184 4861
2185 ah->txpower_limit = targetPowerValT2[i]; 4862 ah->txpower_limit = targetPowerValT2[i];
4863 regulatory->max_power_level = targetPowerValT2[i];
2186 4864
4865 /* Write target power array to registers */
4866 ar9003_hw_tx_power_regwrite(ah, targetPowerValT2);
2187 ar9003_hw_calibration_apply(ah, chan->channel); 4867 ar9003_hw_calibration_apply(ah, chan->channel);
4868
4869 if (IS_CHAN_2GHZ(chan)) {
4870 if (IS_CHAN_HT40(chan))
4871 i = ALL_TARGET_HT40_0_8_16;
4872 else
4873 i = ALL_TARGET_HT20_0_8_16;
4874 } else {
4875 if (IS_CHAN_HT40(chan))
4876 i = ALL_TARGET_HT40_7;
4877 else
4878 i = ALL_TARGET_HT20_7;
4879 }
4880 ah->paprd_target_power = targetPowerValT2[i];
2188} 4881}
2189 4882
2190static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah, 4883static u16 ath9k_hw_ar9300_get_spur_channel(struct ath_hw *ah,
@@ -2207,14 +4900,43 @@ s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah)
2207 return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */ 4900 return (eep->baseEepHeader.txrxgain) & 0xf; /* bits 3:0 */
2208} 4901}
2209 4902
4903u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz)
4904{
4905 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4906
4907 if (is_2ghz)
4908 return eep->modalHeader2G.spurChans;
4909 else
4910 return eep->modalHeader5G.spurChans;
4911}
4912
4913unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
4914 struct ath9k_channel *chan)
4915{
4916 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
4917
4918 if (IS_CHAN_2GHZ(chan))
4919 return MS(le32_to_cpu(eep->modalHeader2G.papdRateMaskHt20),
4920 AR9300_PAPRD_SCALE_1);
4921 else {
4922 if (chan->channel >= 5700)
4923 return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt20),
4924 AR9300_PAPRD_SCALE_1);
4925 else if (chan->channel >= 5400)
4926 return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
4927 AR9300_PAPRD_SCALE_2);
4928 else
4929 return MS(le32_to_cpu(eep->modalHeader5G.papdRateMaskHt40),
4930 AR9300_PAPRD_SCALE_1);
4931 }
4932}
4933
2210const struct eeprom_ops eep_ar9300_ops = { 4934const struct eeprom_ops eep_ar9300_ops = {
2211 .check_eeprom = ath9k_hw_ar9300_check_eeprom, 4935 .check_eeprom = ath9k_hw_ar9300_check_eeprom,
2212 .get_eeprom = ath9k_hw_ar9300_get_eeprom, 4936 .get_eeprom = ath9k_hw_ar9300_get_eeprom,
2213 .fill_eeprom = ath9k_hw_ar9300_fill_eeprom, 4937 .fill_eeprom = ath9k_hw_ar9300_fill_eeprom,
2214 .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver, 4938 .get_eeprom_ver = ath9k_hw_ar9300_get_eeprom_ver,
2215 .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev, 4939 .get_eeprom_rev = ath9k_hw_ar9300_get_eeprom_rev,
2216 .get_num_ant_config = ath9k_hw_ar9300_get_num_ant_config,
2217 .get_eeprom_antenna_cfg = ath9k_hw_ar9300_get_eeprom_antenna_cfg,
2218 .set_board_values = ath9k_hw_ar9300_set_board_values, 4940 .set_board_values = ath9k_hw_ar9300_set_board_values,
2219 .set_addac = ath9k_hw_ar9300_set_addac, 4941 .set_addac = ath9k_hw_ar9300_set_addac,
2220 .set_txpower = ath9k_hw_ar9300_set_txpower, 4942 .set_txpower = ath9k_hw_ar9300_set_txpower,
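
As a cross-check on the mcsidx_to_tgtpwridx() helper added above, here is a small standalone harness that prints the MCS-to-target-power-index mapping it produces (a base index of 0 stands in for ALL_TARGET_HT20_0_8_16; this is an illustration, not driver code):

#include <stdio.h>

/* Same arithmetic as the helper introduced in the hunk above. */
static unsigned int mcsidx_to_tgtpwridx(unsigned int mcs_idx, unsigned int base_pwridx)
{
	unsigned int mod_idx = mcs_idx % 8;

	if (mod_idx <= 3)
		return mod_idx ? (base_pwridx + 1) : base_pwridx;
	return base_pwridx + 4 * (mcs_idx / 8) + mod_idx - 2;
}

int main(void)
{
	unsigned int mcs;

	/* With base 0: MCS 0/8/16 -> 0, MCS 1-3/9-11/17-19 -> 1,
	 * MCS 4-7 -> 2-5, MCS 12-15 -> 6-9, MCS 20-23 -> 10-13. */
	for (mcs = 0; mcs < 24; mcs++)
		printf("MCS %2u -> target power index %u\n",
		       mcs, mcsidx_to_tgtpwridx(mcs, 0));
	return 0;
}
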
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 655b3033396c..afb0b5ee1865 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -20,47 +20,22 @@
20/* #define AR9300_NUM_CTLS 21 */ 20/* #define AR9300_NUM_CTLS 21 */
21#define AR9300_NUM_CTLS_5G 9 21#define AR9300_NUM_CTLS_5G 9
22#define AR9300_NUM_CTLS_2G 12 22#define AR9300_NUM_CTLS_2G 12
23#define AR9300_CTL_MODE_M 0xF
24#define AR9300_NUM_BAND_EDGES_5G 8 23#define AR9300_NUM_BAND_EDGES_5G 8
25#define AR9300_NUM_BAND_EDGES_2G 4 24#define AR9300_NUM_BAND_EDGES_2G 4
26#define AR9300_NUM_PD_GAINS 4
27#define AR9300_PD_GAINS_IN_MASK 4
28#define AR9300_PD_GAIN_ICEPTS 5
29#define AR9300_EEPROM_MODAL_SPURS 5
30#define AR9300_MAX_RATE_POWER 63
31#define AR9300_NUM_PDADC_VALUES 128
32#define AR9300_NUM_RATES 16
33#define AR9300_BCHAN_UNUSED 0xFF
34#define AR9300_MAX_PWR_RANGE_IN_HALF_DB 64
35#define AR9300_OPFLAGS_11A 0x01
36#define AR9300_OPFLAGS_11G 0x02
37#define AR9300_OPFLAGS_5G_HT40 0x04
38#define AR9300_OPFLAGS_2G_HT40 0x08
39#define AR9300_OPFLAGS_5G_HT20 0x10
40#define AR9300_OPFLAGS_2G_HT20 0x20
41#define AR9300_EEPMISC_BIG_ENDIAN 0x01 25#define AR9300_EEPMISC_BIG_ENDIAN 0x01
42#define AR9300_EEPMISC_WOW 0x02 26#define AR9300_EEPMISC_WOW 0x02
43#define AR9300_CUSTOMER_DATA_SIZE 20 27#define AR9300_CUSTOMER_DATA_SIZE 20
44 28
45#define FREQ2FBIN(x, y) ((y) ? ((x) - 2300) : (((x) - 4800) / 5))
46#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x)) 29#define FBIN2FREQ(x, y) ((y) ? (2300 + x) : (4800 + 5 * x))
47#define AR9300_MAX_CHAINS 3 30#define AR9300_MAX_CHAINS 3
48#define AR9300_ANT_16S 25 31#define AR9300_ANT_16S 25
49#define AR9300_FUTURE_MODAL_SZ 6 32#define AR9300_FUTURE_MODAL_SZ 6
50 33
51#define AR9300_NUM_ANT_CHAIN_FIELDS 7 34#define AR9300_PAPRD_RATE_MASK 0x01ffffff
52#define AR9300_NUM_ANT_COMMON_FIELDS 4 35#define AR9300_PAPRD_SCALE_1 0x0e000000
53#define AR9300_SIZE_ANT_CHAIN_FIELD 3 36#define AR9300_PAPRD_SCALE_1_S 25
54#define AR9300_SIZE_ANT_COMMON_FIELD 4 37#define AR9300_PAPRD_SCALE_2 0x70000000
55#define AR9300_ANT_CHAIN_MASK 0x7 38#define AR9300_PAPRD_SCALE_2_S 28
56#define AR9300_ANT_COMMON_MASK 0xf
57#define AR9300_CHAIN_0_IDX 0
58#define AR9300_CHAIN_1_IDX 1
59#define AR9300_CHAIN_2_IDX 2
60
61#define AR928X_NUM_ANT_CHAIN_FIELDS 6
62#define AR928X_SIZE_ANT_CHAIN_FIELD 2
63#define AR928X_ANT_CHAIN_MASK 0x3
64 39
65/* Delta from which to start power to pdadc table */ 40/* Delta from which to start power to pdadc table */
66/* This offset is used in both open loop and closed loop power control 41/* This offset is used in both open loop and closed loop power control
@@ -71,14 +46,20 @@
71 */ 46 */
72#define AR9300_PWR_TABLE_OFFSET 0 47#define AR9300_PWR_TABLE_OFFSET 0
73 48
74/* enable flags for voltage and temp compensation */
75#define ENABLE_TEMP_COMPENSATION 0x01
76#define ENABLE_VOLT_COMPENSATION 0x02
77/* byte addressable */ 49/* byte addressable */
78#define AR9300_EEPROM_SIZE (16*1024) 50#define AR9300_EEPROM_SIZE (16*1024)
79#define FIXED_CCA_THRESHOLD 15
80 51
52#define AR9300_BASE_ADDR_4K 0xfff
81#define AR9300_BASE_ADDR 0x3ff 53#define AR9300_BASE_ADDR 0x3ff
54#define AR9300_BASE_ADDR_512 0x1ff
55
56#define AR9300_OTP_BASE 0x14000
57#define AR9300_OTP_STATUS 0x15f18
58#define AR9300_OTP_STATUS_TYPE 0x7
59#define AR9300_OTP_STATUS_VALID 0x4
60#define AR9300_OTP_STATUS_ACCESS_BUSY 0x2
61#define AR9300_OTP_STATUS_SM_BUSY 0x1
62#define AR9300_OTP_READ_DATA 0x15f1c
82 63
83enum targetPowerHTRates { 64enum targetPowerHTRates {
84 HT_TARGET_RATE_0_8_16, 65 HT_TARGET_RATE_0_8_16,
@@ -216,7 +197,7 @@ struct ar9300_modal_eep_header {
216 int8_t tempSlope; 197 int8_t tempSlope;
217 int8_t voltSlope; 198 int8_t voltSlope;
218 /* spur channels in usual fbin coding format */ 199 /* spur channels in usual fbin coding format */
219 u8 spurChans[AR9300_EEPROM_MODAL_SPURS]; 200 u8 spurChans[AR_EEPROM_MODAL_SPURS];
220 /* 3 Check if the register is per chain */ 201 /* 3 Check if the register is per chain */
221 int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS]; 202 int8_t noiseFloorThreshCh[AR9300_MAX_CHAINS];
222 u8 ob[AR9300_MAX_CHAINS]; 203 u8 ob[AR9300_MAX_CHAINS];
@@ -236,7 +217,7 @@ struct ar9300_modal_eep_header {
236 u8 thresh62; 217 u8 thresh62;
237 __le32 papdRateMaskHt20; 218 __le32 papdRateMaskHt20;
238 __le32 papdRateMaskHt40; 219 __le32 papdRateMaskHt40;
239 u8 futureModal[24]; 220 u8 futureModal[10];
240} __packed; 221} __packed;
241 222
242struct ar9300_cal_data_per_freq_op_loop { 223struct ar9300_cal_data_per_freq_op_loop {
@@ -269,6 +250,20 @@ struct cal_ctl_data_5g {
269 u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G]; 250 u8 ctlEdges[AR9300_NUM_BAND_EDGES_5G];
270} __packed; 251} __packed;
271 252
253struct ar9300_BaseExtension_1 {
254 u8 ant_div_control;
255 u8 future[13];
256} __packed;
257
258struct ar9300_BaseExtension_2 {
259 int8_t tempSlopeLow;
260 int8_t tempSlopeHigh;
261 u8 xatten1DBLow[AR9300_MAX_CHAINS];
262 u8 xatten1MarginLow[AR9300_MAX_CHAINS];
263 u8 xatten1DBHigh[AR9300_MAX_CHAINS];
264 u8 xatten1MarginHigh[AR9300_MAX_CHAINS];
265} __packed;
266
272struct ar9300_eeprom { 267struct ar9300_eeprom {
273 u8 eepromVersion; 268 u8 eepromVersion;
274 u8 templateVersion; 269 u8 templateVersion;
@@ -278,6 +273,7 @@ struct ar9300_eeprom {
278 struct ar9300_base_eep_hdr baseEepHeader; 273 struct ar9300_base_eep_hdr baseEepHeader;
279 274
280 struct ar9300_modal_eep_header modalHeader2G; 275 struct ar9300_modal_eep_header modalHeader2G;
276 struct ar9300_BaseExtension_1 base_ext1;
281 u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS]; 277 u8 calFreqPier2G[AR9300_NUM_2G_CAL_PIERS];
282 struct ar9300_cal_data_per_freq_op_loop 278 struct ar9300_cal_data_per_freq_op_loop
283 calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS]; 279 calPierData2G[AR9300_MAX_CHAINS][AR9300_NUM_2G_CAL_PIERS];
@@ -297,6 +293,7 @@ struct ar9300_eeprom {
297 u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G]; 293 u8 ctl_freqbin_2G[AR9300_NUM_CTLS_2G][AR9300_NUM_BAND_EDGES_2G];
298 struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G]; 294 struct cal_ctl_data_2g ctlPowerData_2G[AR9300_NUM_CTLS_2G];
299 struct ar9300_modal_eep_header modalHeader5G; 295 struct ar9300_modal_eep_header modalHeader5G;
296 struct ar9300_BaseExtension_2 base_ext2;
300 u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS]; 297 u8 calFreqPier5G[AR9300_NUM_5G_CAL_PIERS];
301 struct ar9300_cal_data_per_freq_op_loop 298 struct ar9300_cal_data_per_freq_op_loop
302 calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS]; 299 calPierData5G[AR9300_MAX_CHAINS][AR9300_NUM_5G_CAL_PIERS];
@@ -317,4 +314,8 @@ struct ar9300_eeprom {
317s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah); 314s32 ar9003_hw_get_tx_gain_idx(struct ath_hw *ah);
318s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah); 315s32 ar9003_hw_get_rx_gain_idx(struct ath_hw *ah);
319 316
317u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
318
319unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
320 struct ath9k_channel *chan);
320#endif 321#endif
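
The new AR9300_PAPRD_* constants above pack a 25-bit per-rate enable mask and two scale-factor fields into papdRateMaskHt20/Ht40. A standalone sketch of how the mask/shift pair is meant to be consumed (MS() here is a local stand-in for the ath driver's mask-shift accessor, so its exact definition is an assumption):

#include <stdint.h>

#define AR9300_PAPRD_RATE_MASK	0x01ffffff
#define AR9300_PAPRD_SCALE_1	0x0e000000
#define AR9300_PAPRD_SCALE_1_S	25

/* Local stand-in for the driver's mask-shift accessor. */
#define MS(val, field)		(((val) & field) >> field##_S)

/* Per-rate PAPRD enable bits: low 25 bits of papdRateMaskHt20. */
static uint32_t paprd_rate_mask(uint32_t papd_rate_mask_ht20)
{
	return papd_rate_mask_ht20 & AR9300_PAPRD_RATE_MASK;
}

/* 2 GHz PAPRD scale factor: bits 27:25 of the same word. */
static unsigned int paprd_scale_factor(uint32_t papd_rate_mask_ht20)
{
	return MS(papd_rate_mask_ht20, AR9300_PAPRD_SCALE_1);
}
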
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_hw.c b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
index c2a057156bfa..6137634e46ca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_hw.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_hw.c
@@ -17,20 +17,10 @@
17#include "hw.h" 17#include "hw.h"
18#include "ar9003_mac.h" 18#include "ar9003_mac.h"
19#include "ar9003_2p2_initvals.h" 19#include "ar9003_2p2_initvals.h"
20#include "ar9485_initvals.h"
20 21
21/* General hardware code for the AR9003 hadware family */ 22/* General hardware code for the AR9003 hadware family */
22 23
23static bool ar9003_hw_macversion_supported(u32 macversion)
24{
25 switch (macversion) {
26 case AR_SREV_VERSION_9300:
27 return true;
28 default:
29 break;
30 }
31 return false;
32}
33
34/* 24/*
35 * The AR9003 family uses a new INI format (pre, core, post 25 * The AR9003 family uses a new INI format (pre, core, post
36 * arrays per subsystem). This provides support for the 26 * arrays per subsystem). This provides support for the
@@ -38,72 +28,134 @@ static bool ar9003_hw_macversion_supported(u32 macversion)
38 */ 28 */
39static void ar9003_hw_init_mode_regs(struct ath_hw *ah) 29static void ar9003_hw_init_mode_regs(struct ath_hw *ah)
40{ 30{
41 /* mac */ 31 if (AR_SREV_9485(ah)) {
42 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0); 32 /* mac */
43 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE], 33 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
44 ar9300_2p2_mac_core, 34 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
45 ARRAY_SIZE(ar9300_2p2_mac_core), 2); 35 ar9485_1_0_mac_core,
46 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST], 36 ARRAY_SIZE(ar9485_1_0_mac_core), 2);
47 ar9300_2p2_mac_postamble, 37 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
48 ARRAY_SIZE(ar9300_2p2_mac_postamble), 5); 38 ar9485_1_0_mac_postamble,
49 39 ARRAY_SIZE(ar9485_1_0_mac_postamble), 5);
50 /* bb */ 40
51 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0); 41 /* bb */
52 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE], 42 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], ar9485_1_0,
53 ar9300_2p2_baseband_core, 43 ARRAY_SIZE(ar9485_1_0), 2);
54 ARRAY_SIZE(ar9300_2p2_baseband_core), 2); 44 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
55 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST], 45 ar9485_1_0_baseband_core,
56 ar9300_2p2_baseband_postamble, 46 ARRAY_SIZE(ar9485_1_0_baseband_core), 2);
57 ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5); 47 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
58 48 ar9485_1_0_baseband_postamble,
59 /* radio */ 49 ARRAY_SIZE(ar9485_1_0_baseband_postamble), 5);
60 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0); 50
61 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE], 51 /* radio */
62 ar9300_2p2_radio_core, 52 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
63 ARRAY_SIZE(ar9300_2p2_radio_core), 2); 53 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
64 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST], 54 ar9485_1_0_radio_core,
65 ar9300_2p2_radio_postamble, 55 ARRAY_SIZE(ar9485_1_0_radio_core), 2);
66 ARRAY_SIZE(ar9300_2p2_radio_postamble), 5); 56 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
67 57 ar9485_1_0_radio_postamble,
68 /* soc */ 58 ARRAY_SIZE(ar9485_1_0_radio_postamble), 2);
69 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE], 59
70 ar9300_2p2_soc_preamble, 60 /* soc */
71 ARRAY_SIZE(ar9300_2p2_soc_preamble), 2); 61 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
72 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0); 62 ar9485_1_0_soc_preamble,
73 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], 63 ARRAY_SIZE(ar9485_1_0_soc_preamble), 2);
74 ar9300_2p2_soc_postamble, 64 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
75 ARRAY_SIZE(ar9300_2p2_soc_postamble), 5); 65 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST], NULL, 0, 0);
76 66
77 /* rx/tx gain */ 67 /* rx/tx gain */
78 INIT_INI_ARRAY(&ah->iniModesRxGain, 68 INIT_INI_ARRAY(&ah->iniModesRxGain,
79 ar9300Common_rx_gain_table_2p2, 69 ar9485Common_rx_gain_1_0,
80 ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2); 70 ARRAY_SIZE(ar9485Common_rx_gain_1_0), 2);
81 INIT_INI_ARRAY(&ah->iniModesTxGain, 71 INIT_INI_ARRAY(&ah->iniModesTxGain,
82 ar9300Modes_lowest_ob_db_tx_gain_table_2p2, 72 ar9485Modes_lowest_ob_db_tx_gain_1_0,
83 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2), 73 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
84 5); 74 5);
85 75
86 /* Load PCIE SERDES settings from INI */ 76 /* Load PCIE SERDES settings from INI */
87 77
88 /* Awake Setting */ 78 /* Awake Setting */
89 79
90 INIT_INI_ARRAY(&ah->iniPcieSerdes, 80 INIT_INI_ARRAY(&ah->iniPcieSerdes,
91 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2, 81 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1,
92 ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2), 82 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1),
93 2); 83 2);
94 84
95 /* Sleep Setting */ 85 /* Sleep Setting */
96 86
97 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower, 87 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
98 ar9300PciePhy_clkreq_enable_L1_2p2, 88 ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1,
99 ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2), 89 ARRAY_SIZE(ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1),
100 2); 90 2);
101 91 } else {
102 /* Fast clock modal settings */ 92 /* mac */
103 INIT_INI_ARRAY(&ah->iniModesAdditional, 93 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_PRE], NULL, 0, 0);
104 ar9300Modes_fast_clock_2p2, 94 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_CORE],
105 ARRAY_SIZE(ar9300Modes_fast_clock_2p2), 95 ar9300_2p2_mac_core,
106 3); 96 ARRAY_SIZE(ar9300_2p2_mac_core), 2);
97 INIT_INI_ARRAY(&ah->iniMac[ATH_INI_POST],
98 ar9300_2p2_mac_postamble,
99 ARRAY_SIZE(ar9300_2p2_mac_postamble), 5);
100
101 /* bb */
102 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_PRE], NULL, 0, 0);
103 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_CORE],
104 ar9300_2p2_baseband_core,
105 ARRAY_SIZE(ar9300_2p2_baseband_core), 2);
106 INIT_INI_ARRAY(&ah->iniBB[ATH_INI_POST],
107 ar9300_2p2_baseband_postamble,
108 ARRAY_SIZE(ar9300_2p2_baseband_postamble), 5);
109
110 /* radio */
111 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_PRE], NULL, 0, 0);
112 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_CORE],
113 ar9300_2p2_radio_core,
114 ARRAY_SIZE(ar9300_2p2_radio_core), 2);
115 INIT_INI_ARRAY(&ah->iniRadio[ATH_INI_POST],
116 ar9300_2p2_radio_postamble,
117 ARRAY_SIZE(ar9300_2p2_radio_postamble), 5);
118
119 /* soc */
120 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_PRE],
121 ar9300_2p2_soc_preamble,
122 ARRAY_SIZE(ar9300_2p2_soc_preamble), 2);
123 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_CORE], NULL, 0, 0);
124 INIT_INI_ARRAY(&ah->iniSOC[ATH_INI_POST],
125 ar9300_2p2_soc_postamble,
126 ARRAY_SIZE(ar9300_2p2_soc_postamble), 5);
127
128 /* rx/tx gain */
129 INIT_INI_ARRAY(&ah->iniModesRxGain,
130 ar9300Common_rx_gain_table_2p2,
131 ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 2);
132 INIT_INI_ARRAY(&ah->iniModesTxGain,
133 ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
134 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
135 5);
136
137 /* Load PCIE SERDES settings from INI */
138
139 /* Awake Setting */
140
141 INIT_INI_ARRAY(&ah->iniPcieSerdes,
142 ar9300PciePhy_pll_on_clkreq_disable_L1_2p2,
143 ARRAY_SIZE(ar9300PciePhy_pll_on_clkreq_disable_L1_2p2),
144 2);
145
146 /* Sleep Setting */
147
148 INIT_INI_ARRAY(&ah->iniPcieSerdesLowPower,
149 ar9300PciePhy_clkreq_enable_L1_2p2,
150 ARRAY_SIZE(ar9300PciePhy_clkreq_enable_L1_2p2),
151 2);
152
153 /* Fast clock modal settings */
154 INIT_INI_ARRAY(&ah->iniModesAdditional,
155 ar9300Modes_fast_clock_2p2,
156 ARRAY_SIZE(ar9300Modes_fast_clock_2p2),
157 3);
158 }
107} 159}
108 160
109static void ar9003_tx_gain_table_apply(struct ath_hw *ah) 161static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
@@ -111,22 +163,52 @@ static void ar9003_tx_gain_table_apply(struct ath_hw *ah)
111 switch (ar9003_hw_get_tx_gain_idx(ah)) { 163 switch (ar9003_hw_get_tx_gain_idx(ah)) {
112 case 0: 164 case 0:
113 default: 165 default:
114 INIT_INI_ARRAY(&ah->iniModesTxGain, 166 if (AR_SREV_9485(ah))
115 ar9300Modes_lowest_ob_db_tx_gain_table_2p2, 167 INIT_INI_ARRAY(&ah->iniModesTxGain,
116 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2), 168 ar9485Modes_lowest_ob_db_tx_gain_1_0,
117 5); 169 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
170 5);
171 else
172 INIT_INI_ARRAY(&ah->iniModesTxGain,
173 ar9300Modes_lowest_ob_db_tx_gain_table_2p2,
174 ARRAY_SIZE(ar9300Modes_lowest_ob_db_tx_gain_table_2p2),
175 5);
118 break; 176 break;
119 case 1: 177 case 1:
120 INIT_INI_ARRAY(&ah->iniModesTxGain, 178 if (AR_SREV_9485(ah))
121 ar9300Modes_high_ob_db_tx_gain_table_2p2, 179 INIT_INI_ARRAY(&ah->iniModesTxGain,
122 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2), 180 ar9485Modes_high_ob_db_tx_gain_1_0,
123 5); 181 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
182 5);
183 else
184 INIT_INI_ARRAY(&ah->iniModesTxGain,
185 ar9300Modes_high_ob_db_tx_gain_table_2p2,
186 ARRAY_SIZE(ar9300Modes_high_ob_db_tx_gain_table_2p2),
187 5);
124 break; 188 break;
125 case 2: 189 case 2:
126 INIT_INI_ARRAY(&ah->iniModesTxGain, 190 if (AR_SREV_9485(ah))
127 ar9300Modes_low_ob_db_tx_gain_table_2p2, 191 INIT_INI_ARRAY(&ah->iniModesTxGain,
128 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2), 192 ar9485Modes_low_ob_db_tx_gain_1_0,
129 5); 193 ARRAY_SIZE(ar9485Modes_lowest_ob_db_tx_gain_1_0),
194 5);
195 else
196 INIT_INI_ARRAY(&ah->iniModesTxGain,
197 ar9300Modes_low_ob_db_tx_gain_table_2p2,
198 ARRAY_SIZE(ar9300Modes_low_ob_db_tx_gain_table_2p2),
199 5);
200 break;
201 case 3:
202 if (AR_SREV_9485(ah))
203 INIT_INI_ARRAY(&ah->iniModesTxGain,
204 ar9485Modes_high_power_tx_gain_1_0,
205 ARRAY_SIZE(ar9485Modes_high_power_tx_gain_1_0),
206 5);
207 else
208 INIT_INI_ARRAY(&ah->iniModesTxGain,
209 ar9300Modes_high_power_tx_gain_table_2p2,
210 ARRAY_SIZE(ar9300Modes_high_power_tx_gain_table_2p2),
211 5);
130 break; 212 break;
131 } 213 }
132} 214}
@@ -136,16 +218,28 @@ static void ar9003_rx_gain_table_apply(struct ath_hw *ah)
136 switch (ar9003_hw_get_rx_gain_idx(ah)) { 218 switch (ar9003_hw_get_rx_gain_idx(ah)) {
137 case 0: 219 case 0:
138 default: 220 default:
139 INIT_INI_ARRAY(&ah->iniModesRxGain, 221 if (AR_SREV_9485(ah))
140 ar9300Common_rx_gain_table_2p2, 222 INIT_INI_ARRAY(&ah->iniModesRxGain,
141 ARRAY_SIZE(ar9300Common_rx_gain_table_2p2), 223 ar9485Common_rx_gain_1_0,
142 2); 224 ARRAY_SIZE(ar9485Common_rx_gain_1_0),
225 2);
226 else
227 INIT_INI_ARRAY(&ah->iniModesRxGain,
228 ar9300Common_rx_gain_table_2p2,
229 ARRAY_SIZE(ar9300Common_rx_gain_table_2p2),
230 2);
143 break; 231 break;
144 case 1: 232 case 1:
145 INIT_INI_ARRAY(&ah->iniModesRxGain, 233 if (AR_SREV_9485(ah))
146 ar9300Common_wo_xlna_rx_gain_table_2p2, 234 INIT_INI_ARRAY(&ah->iniModesRxGain,
147 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2), 235 ar9485Common_wo_xlna_rx_gain_1_0,
148 2); 236 ARRAY_SIZE(ar9485Common_wo_xlna_rx_gain_1_0),
237 2);
238 else
239 INIT_INI_ARRAY(&ah->iniModesRxGain,
240 ar9300Common_wo_xlna_rx_gain_table_2p2,
241 ARRAY_SIZE(ar9300Common_wo_xlna_rx_gain_table_2p2),
242 2);
149 break; 243 break;
150 } 244 }
151} 245}
@@ -216,7 +310,6 @@ void ar9003_hw_attach_ops(struct ath_hw *ah)
216 310
217 priv_ops->init_mode_regs = ar9003_hw_init_mode_regs; 311 priv_ops->init_mode_regs = ar9003_hw_init_mode_regs;
218 priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs; 312 priv_ops->init_mode_gain_regs = ar9003_hw_init_mode_gain_regs;
219 priv_ops->macversion_supported = ar9003_hw_macversion_supported;
220 313
221 ops->config_pci_powersave = ar9003_hw_configpcipowersave; 314 ops->config_pci_powersave = ar9003_hw_configpcipowersave;
222 315
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
index 3b424ca1ba84..4ceddbbdfcee 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
@@ -182,8 +182,8 @@ static bool ar9003_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked)
182 } 182 }
183 183
184 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) 184 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT)
185 ath_print(common, ATH_DBG_INTERRUPT, 185 ath_dbg(common, ATH_DBG_INTERRUPT,
186 "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); 186 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
187 187
188 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); 188 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
189 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR); 189 (void) REG_READ(ah, AR_INTR_SYNC_CAUSE_CLR);
@@ -237,73 +237,76 @@ static int ar9003_hw_proc_txdesc(struct ath_hw *ah, void *ds,
237 struct ath_tx_status *ts) 237 struct ath_tx_status *ts)
238{ 238{
239 struct ar9003_txs *ads; 239 struct ar9003_txs *ads;
240 u32 status;
240 241
241 ads = &ah->ts_ring[ah->ts_tail]; 242 ads = &ah->ts_ring[ah->ts_tail];
242 243
243 if ((ads->status8 & AR_TxDone) == 0) 244 status = ACCESS_ONCE(ads->status8);
245 if ((status & AR_TxDone) == 0)
244 return -EINPROGRESS; 246 return -EINPROGRESS;
245 247
246 ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size; 248 ah->ts_tail = (ah->ts_tail + 1) % ah->ts_size;
247 249
248 if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) || 250 if ((MS(ads->ds_info, AR_DescId) != ATHEROS_VENDOR_ID) ||
249 (MS(ads->ds_info, AR_TxRxDesc) != 1)) { 251 (MS(ads->ds_info, AR_TxRxDesc) != 1)) {
250 ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT, 252 ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
251 "Tx Descriptor error %x\n", ads->ds_info); 253 "Tx Descriptor error %x\n", ads->ds_info);
252 memset(ads, 0, sizeof(*ads)); 254 memset(ads, 0, sizeof(*ads));
253 return -EIO; 255 return -EIO;
254 } 256 }
255 257
258 if (status & AR_TxOpExceeded)
259 ts->ts_status |= ATH9K_TXERR_XTXOP;
260 ts->ts_rateindex = MS(status, AR_FinalTxIdx);
261 ts->ts_seqnum = MS(status, AR_SeqNum);
262 ts->tid = MS(status, AR_TxTid);
263
256 ts->qid = MS(ads->ds_info, AR_TxQcuNum); 264 ts->qid = MS(ads->ds_info, AR_TxQcuNum);
257 ts->desc_id = MS(ads->status1, AR_TxDescId); 265 ts->desc_id = MS(ads->status1, AR_TxDescId);
258 ts->ts_seqnum = MS(ads->status8, AR_SeqNum);
259 ts->ts_tstamp = ads->status4; 266 ts->ts_tstamp = ads->status4;
260 ts->ts_status = 0; 267 ts->ts_status = 0;
261 ts->ts_flags = 0; 268 ts->ts_flags = 0;
262 269
263 if (ads->status3 & AR_ExcessiveRetries) 270 status = ACCESS_ONCE(ads->status2);
271 ts->ts_rssi_ctl0 = MS(status, AR_TxRSSIAnt00);
272 ts->ts_rssi_ctl1 = MS(status, AR_TxRSSIAnt01);
273 ts->ts_rssi_ctl2 = MS(status, AR_TxRSSIAnt02);
274 if (status & AR_TxBaStatus) {
275 ts->ts_flags |= ATH9K_TX_BA;
276 ts->ba_low = ads->status5;
277 ts->ba_high = ads->status6;
278 }
279
280 status = ACCESS_ONCE(ads->status3);
281 if (status & AR_ExcessiveRetries)
264 ts->ts_status |= ATH9K_TXERR_XRETRY; 282 ts->ts_status |= ATH9K_TXERR_XRETRY;
265 if (ads->status3 & AR_Filtered) 283 if (status & AR_Filtered)
266 ts->ts_status |= ATH9K_TXERR_FILT; 284 ts->ts_status |= ATH9K_TXERR_FILT;
267 if (ads->status3 & AR_FIFOUnderrun) { 285 if (status & AR_FIFOUnderrun) {
268 ts->ts_status |= ATH9K_TXERR_FIFO; 286 ts->ts_status |= ATH9K_TXERR_FIFO;
269 ath9k_hw_updatetxtriglevel(ah, true); 287 ath9k_hw_updatetxtriglevel(ah, true);
270 } 288 }
271 if (ads->status8 & AR_TxOpExceeded) 289 if (status & AR_TxTimerExpired)
272 ts->ts_status |= ATH9K_TXERR_XTXOP;
273 if (ads->status3 & AR_TxTimerExpired)
274 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED; 290 ts->ts_status |= ATH9K_TXERR_TIMER_EXPIRED;
275 291 if (status & AR_DescCfgErr)
276 if (ads->status3 & AR_DescCfgErr)
277 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR; 292 ts->ts_flags |= ATH9K_TX_DESC_CFG_ERR;
278 if (ads->status3 & AR_TxDataUnderrun) { 293 if (status & AR_TxDataUnderrun) {
279 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN; 294 ts->ts_flags |= ATH9K_TX_DATA_UNDERRUN;
280 ath9k_hw_updatetxtriglevel(ah, true); 295 ath9k_hw_updatetxtriglevel(ah, true);
281 } 296 }
282 if (ads->status3 & AR_TxDelimUnderrun) { 297 if (status & AR_TxDelimUnderrun) {
283 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN; 298 ts->ts_flags |= ATH9K_TX_DELIM_UNDERRUN;
284 ath9k_hw_updatetxtriglevel(ah, true); 299 ath9k_hw_updatetxtriglevel(ah, true);
285 } 300 }
286 if (ads->status2 & AR_TxBaStatus) { 301 ts->ts_shortretry = MS(status, AR_RTSFailCnt);
287 ts->ts_flags |= ATH9K_TX_BA; 302 ts->ts_longretry = MS(status, AR_DataFailCnt);
288 ts->ba_low = ads->status5; 303 ts->ts_virtcol = MS(status, AR_VirtRetryCnt);
289 ts->ba_high = ads->status6;
290 }
291 304
292 ts->ts_rateindex = MS(ads->status8, AR_FinalTxIdx); 305 status = ACCESS_ONCE(ads->status7);
293 306 ts->ts_rssi = MS(status, AR_TxRSSICombined);
294 ts->ts_rssi = MS(ads->status7, AR_TxRSSICombined); 307 ts->ts_rssi_ext0 = MS(status, AR_TxRSSIAnt10);
295 ts->ts_rssi_ctl0 = MS(ads->status2, AR_TxRSSIAnt00); 308 ts->ts_rssi_ext1 = MS(status, AR_TxRSSIAnt11);
296 ts->ts_rssi_ctl1 = MS(ads->status2, AR_TxRSSIAnt01); 309 ts->ts_rssi_ext2 = MS(status, AR_TxRSSIAnt12);
297 ts->ts_rssi_ctl2 = MS(ads->status2, AR_TxRSSIAnt02);
298 ts->ts_rssi_ext0 = MS(ads->status7, AR_TxRSSIAnt10);
299 ts->ts_rssi_ext1 = MS(ads->status7, AR_TxRSSIAnt11);
300 ts->ts_rssi_ext2 = MS(ads->status7, AR_TxRSSIAnt12);
301 ts->ts_shortretry = MS(ads->status3, AR_RTSFailCnt);
302 ts->ts_longretry = MS(ads->status3, AR_DataFailCnt);
303 ts->ts_virtcol = MS(ads->status3, AR_VirtRetryCnt);
304 ts->ts_antenna = 0;
305
306 ts->tid = MS(ads->status8, AR_TxTid);
307 310
308 memset(ads, 0, sizeof(*ads)); 311 memset(ads, 0, sizeof(*ads));
309 312
@@ -319,7 +322,6 @@ static void ar9003_hw_set11n_txdesc(struct ath_hw *ah, void *ds,
319 if (txpower > ah->txpower_limit) 322 if (txpower > ah->txpower_limit)
320 txpower = ah->txpower_limit; 323 txpower = ah->txpower_limit;
321 324
322 txpower += ah->txpower_indexoffset;
323 if (txpower > 63) 325 if (txpower > 63)
324 txpower = 63; 326 txpower = 63;
325 327
@@ -407,12 +409,36 @@ static void ar9003_hw_set11n_ratescenario(struct ath_hw *ah, void *ds,
407static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds, 409static void ar9003_hw_set11n_aggr_first(struct ath_hw *ah, void *ds,
408 u32 aggrLen) 410 u32 aggrLen)
409{ 411{
412#define FIRST_DESC_NDELIMS 60
410 struct ar9003_txc *ads = (struct ar9003_txc *) ds; 413 struct ar9003_txc *ads = (struct ar9003_txc *) ds;
411 414
412 ads->ctl12 |= (AR_IsAggr | AR_MoreAggr); 415 ads->ctl12 |= (AR_IsAggr | AR_MoreAggr);
413 416
414 ads->ctl17 &= ~AR_AggrLen; 417 if (ah->ent_mode & AR_ENT_OTP_MPSD) {
415 ads->ctl17 |= SM(aggrLen, AR_AggrLen); 418 u32 ctl17, ndelim;
419 /*
420 * Add delimiter when using RTS/CTS with aggregation
421 * and non enterprise AR9003 card
422 */
423 ctl17 = ads->ctl17;
424 ndelim = MS(ctl17, AR_PadDelim);
425
426 if (ndelim < FIRST_DESC_NDELIMS) {
427 aggrLen += (FIRST_DESC_NDELIMS - ndelim) * 4;
428 ndelim = FIRST_DESC_NDELIMS;
429 }
430
431 ctl17 &= ~AR_AggrLen;
432 ctl17 |= SM(aggrLen, AR_AggrLen);
433
434 ctl17 &= ~AR_PadDelim;
435 ctl17 |= SM(ndelim, AR_PadDelim);
436
437 ads->ctl17 = ctl17;
438 } else {
439 ads->ctl17 &= ~AR_AggrLen;
440 ads->ctl17 |= SM(aggrLen, AR_AggrLen);
441 }
416} 442}
417 443
418static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds, 444static void ar9003_hw_set11n_aggr_middle(struct ath_hw *ah, void *ds,
@@ -587,9 +613,9 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
587 * possibly be reviewing the last subframe. AR_CRCErr 613 * possibly be reviewing the last subframe. AR_CRCErr
588 * is the CRC of the actual data. 614 * is the CRC of the actual data.
589 */ 615 */
590 if (rxsp->status11 & AR_CRCErr) { 616 if (rxsp->status11 & AR_CRCErr)
591 rxs->rs_status |= ATH9K_RXERR_CRC; 617 rxs->rs_status |= ATH9K_RXERR_CRC;
592 } else if (rxsp->status11 & AR_PHYErr) { 618 if (rxsp->status11 & AR_PHYErr) {
593 phyerr = MS(rxsp->status11, AR_PHYErrCode); 619 phyerr = MS(rxsp->status11, AR_PHYErrCode);
594 /* 620 /*
595 * If we reach a point here where AR_PostDelimCRCErr is 621 * If we reach a point here where AR_PostDelimCRCErr is
@@ -612,11 +638,12 @@ int ath9k_hw_process_rxdesc_edma(struct ath_hw *ah, struct ath_rx_status *rxs,
612 rxs->rs_phyerr = phyerr; 638 rxs->rs_phyerr = phyerr;
613 } 639 }
614 640
615 } else if (rxsp->status11 & AR_DecryptCRCErr) { 641 }
642 if (rxsp->status11 & AR_DecryptCRCErr)
616 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 643 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
617 } else if (rxsp->status11 & AR_MichaelErr) { 644 if (rxsp->status11 & AR_MichaelErr)
618 rxs->rs_status |= ATH9K_RXERR_MIC; 645 rxs->rs_status |= ATH9K_RXERR_MIC;
619 } else if (rxsp->status11 & AR_KeyMiss) 646 if (rxsp->status11 & AR_KeyMiss)
620 rxs->rs_status |= ATH9K_RXERR_DECRYPT; 647 rxs->rs_status |= ATH9K_RXERR_DECRYPT;
621 } 648 }
622 649
@@ -631,10 +658,10 @@ void ath9k_hw_reset_txstatus_ring(struct ath_hw *ah)
631 memset((void *) ah->ts_ring, 0, 658 memset((void *) ah->ts_ring, 0,
632 ah->ts_size * sizeof(struct ar9003_txs)); 659 ah->ts_size * sizeof(struct ar9003_txs));
633 660
634 ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT, 661 ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
635 "TS Start 0x%x End 0x%x Virt %p, Size %d\n", 662 "TS Start 0x%x End 0x%x Virt %p, Size %d\n",
636 ah->ts_paddr_start, ah->ts_paddr_end, 663 ah->ts_paddr_start, ah->ts_paddr_end,
637 ah->ts_ring, ah->ts_size); 664 ah->ts_ring, ah->ts_size);
638 665
639 REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start); 666 REG_WRITE(ah, AR_Q_STATUS_RING_START, ah->ts_paddr_start);
640 REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end); 667 REG_WRITE(ah, AR_Q_STATUS_RING_END, ah->ts_paddr_end);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.h b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
index 9f2cea70a840..45cc7e80436c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.h
@@ -65,7 +65,7 @@ struct ar9003_rxs {
65 u32 status9; 65 u32 status9;
66 u32 status10; 66 u32 status10;
67 u32 status11; 67 u32 status11;
68} __packed; 68} __packed __aligned(4);
69 69
70/* Transmit Control Descriptor */ 70/* Transmit Control Descriptor */
71struct ar9003_txc { 71struct ar9003_txc {
@@ -93,7 +93,7 @@ struct ar9003_txc {
93 u32 ctl21; /* DMA control 21 */ 93 u32 ctl21; /* DMA control 21 */
94 u32 ctl22; /* DMA control 22 */ 94 u32 ctl22; /* DMA control 22 */
95 u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */ 95 u32 pad[9]; /* pad to cache line (128 bytes/32 dwords) */
96} __packed; 96} __packed __aligned(4);
97 97
98struct ar9003_txs { 98struct ar9003_txs {
99 u32 ds_info; 99 u32 ds_info;
@@ -105,7 +105,7 @@ struct ar9003_txs {
105 u32 status6; 105 u32 status6;
106 u32 status7; 106 u32 status7;
107 u32 status8; 107 u32 status8;
108} __packed; 108} __packed __aligned(4);
109 109
110void ar9003_hw_attach_mac_ops(struct ath_hw *hw); 110void ar9003_hw_attach_mac_ops(struct ath_hw *hw);
111void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size); 111void ath9k_hw_set_rx_bufsize(struct ath_hw *ah, u16 buf_size);
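
The tx-status rework above reads each descriptor status word exactly once via ACCESS_ONCE() and derives every flag from that local snapshot, since the hardware can still be DMA-writing the ring entry. A minimal illustration of the pattern outside the kernel (the READ_STATUS_ONCE macro and the bit values below are placeholders, not the real register layout):

#include <stdint.h>

/* Placeholder for the kernel's ACCESS_ONCE(): force a single volatile load. */
#define READ_STATUS_ONCE(x)	(*(volatile const uint32_t *)&(x))

#define TX_DONE		0x00000001u	/* placeholder bit positions */
#define TX_OP_EXCEEDED	0x00000002u

struct tx_status_desc {
	uint32_t status8;	/* written by hardware via DMA */
};

/* Returns 1 when the descriptor is complete; flags come from the snapshot. */
static int tx_status_done(const struct tx_status_desc *ads, int *op_exceeded)
{
	uint32_t status = READ_STATUS_ONCE(ads->status8);

	if (!(status & TX_DONE))
		return 0;

	*op_exceeded = !!(status & TX_OP_EXCEEDED);
	return 1;
}
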
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
index 716db414c258..356d2fd78822 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_paprd.c
@@ -19,45 +19,124 @@
19 19
20void ar9003_paprd_enable(struct ath_hw *ah, bool val) 20void ar9003_paprd_enable(struct ath_hw *ah, bool val)
21{ 21{
22 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
23 struct ath9k_channel *chan = ah->curchan;
24
25 if (val) {
26 ah->paprd_table_write_done = true;
27
28 ah->eep_ops->set_txpower(ah, chan,
29 ath9k_regd_get_ctl(regulatory, chan),
30 chan->chan->max_antenna_gain * 2,
31 chan->chan->max_power * 2,
32 min((u32) MAX_RATE_POWER,
33 (u32) regulatory->power_limit), false);
34 }
35
22 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0, 36 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B0,
23 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val); 37 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
24 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1, 38 if (ah->caps.tx_chainmask & BIT(1))
25 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val); 39 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B1,
26 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2, 40 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
27 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val); 41 if (ah->caps.tx_chainmask & BIT(2))
42 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL0_B2,
43 AR_PHY_PAPRD_CTRL0_PAPRD_ENABLE, !!val);
28} 44}
29EXPORT_SYMBOL(ar9003_paprd_enable); 45EXPORT_SYMBOL(ar9003_paprd_enable);
30 46
31static void ar9003_paprd_setup_single_table(struct ath_hw *ah) 47static int ar9003_get_training_power_2g(struct ath_hw *ah)
48{
49 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
50 struct ar9300_modal_eep_header *hdr = &eep->modalHeader2G;
51 unsigned int power, scale, delta;
52
53 scale = MS(le32_to_cpu(hdr->papdRateMaskHt20), AR9300_PAPRD_SCALE_1);
54 power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
55 AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
56
57 delta = abs((int) ah->paprd_target_power - (int) power);
58 if (delta > scale)
59 return -1;
60
61 if (delta < 4)
62 power -= 4 - delta;
63
64 return power;
65}
66
67static int ar9003_get_training_power_5g(struct ath_hw *ah)
32{ 68{
69 struct ath_common *common = ath9k_hw_common(ah);
33 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep; 70 struct ar9300_eeprom *eep = &ah->eeprom.ar9300_eep;
34 struct ar9300_modal_eep_header *hdr; 71 struct ar9300_modal_eep_header *hdr = &eep->modalHeader5G;
35 const u32 ctrl0[3] = { 72 struct ath9k_channel *chan = ah->curchan;
73 unsigned int power, scale, delta;
74
75 if (chan->channel >= 5700)
76 scale = MS(le32_to_cpu(hdr->papdRateMaskHt20),
77 AR9300_PAPRD_SCALE_1);
78 else if (chan->channel >= 5400)
79 scale = MS(le32_to_cpu(hdr->papdRateMaskHt40),
80 AR9300_PAPRD_SCALE_2);
81 else
82 scale = MS(le32_to_cpu(hdr->papdRateMaskHt40),
83 AR9300_PAPRD_SCALE_1);
84
85 if (IS_CHAN_HT40(chan))
86 power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE8,
87 AR_PHY_POWERTX_RATE8_POWERTXHT40_5);
88 else
89 power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE6,
90 AR_PHY_POWERTX_RATE6_POWERTXHT20_5);
91
92 power += scale;
93 delta = abs((int) ah->paprd_target_power - (int) power);
94 if (delta > scale)
95 return -1;
96
97 power += 2 * get_streams(common->tx_chainmask);
98 return power;
99}
100
101static int ar9003_paprd_setup_single_table(struct ath_hw *ah)
102{
103 struct ath_common *common = ath9k_hw_common(ah);
104 static const u32 ctrl0[3] = {
36 AR_PHY_PAPRD_CTRL0_B0, 105 AR_PHY_PAPRD_CTRL0_B0,
37 AR_PHY_PAPRD_CTRL0_B1, 106 AR_PHY_PAPRD_CTRL0_B1,
38 AR_PHY_PAPRD_CTRL0_B2 107 AR_PHY_PAPRD_CTRL0_B2
39 }; 108 };
40 const u32 ctrl1[3] = { 109 static const u32 ctrl1[3] = {
41 AR_PHY_PAPRD_CTRL1_B0, 110 AR_PHY_PAPRD_CTRL1_B0,
42 AR_PHY_PAPRD_CTRL1_B1, 111 AR_PHY_PAPRD_CTRL1_B1,
43 AR_PHY_PAPRD_CTRL1_B2 112 AR_PHY_PAPRD_CTRL1_B2
44 }; 113 };
45 u32 am_mask, ht40_mask; 114 int training_power;
46 int i; 115 int i;
47 116
48 if (ah->curchan && IS_CHAN_5GHZ(ah->curchan)) 117 if (IS_CHAN_2GHZ(ah->curchan))
49 hdr = &eep->modalHeader5G; 118 training_power = ar9003_get_training_power_2g(ah);
50 else 119 else
51 hdr = &eep->modalHeader2G; 120 training_power = ar9003_get_training_power_5g(ah);
52
53 am_mask = le32_to_cpu(hdr->papdRateMaskHt20);
54 ht40_mask = le32_to_cpu(hdr->papdRateMaskHt40);
55 121
56 REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK, am_mask); 122 if (training_power < 0) {
57 REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK, am_mask); 123 ath_dbg(common, ATH_DBG_CALIBRATE,
58 REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK, ht40_mask); 124 "PAPRD target power delta out of range");
59 125 return -ERANGE;
60 for (i = 0; i < 3; i++) { 126 }
127 ah->paprd_training_power = training_power;
128 ath_dbg(common, ATH_DBG_CALIBRATE,
129 "Training power: %d, Target power: %d\n",
130 ah->paprd_training_power, ah->paprd_target_power);
131
132 REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2AM, AR_PHY_PAPRD_AM2AM_MASK,
133 ah->paprd_ratemask);
134 REG_RMW_FIELD(ah, AR_PHY_PAPRD_AM2PM, AR_PHY_PAPRD_AM2PM_MASK,
135 ah->paprd_ratemask);
136 REG_RMW_FIELD(ah, AR_PHY_PAPRD_HT40, AR_PHY_PAPRD_HT40_MASK,
137 ah->paprd_ratemask_ht40);
138
139 for (i = 0; i < ah->caps.max_txchains; i++) {
61 REG_RMW_FIELD(ah, ctrl0[i], 140 REG_RMW_FIELD(ah, ctrl0[i],
62 AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1); 141 AR_PHY_PAPRD_CTRL0_USE_SINGLE_TABLE_MASK, 1);
63 REG_RMW_FIELD(ah, ctrl1[i], 142 REG_RMW_FIELD(ah, ctrl1[i],
@@ -102,8 +181,14 @@ static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
102 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7); 181 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_NUM_CORR_STAGES, 7);
103 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 182 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
104 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1); 183 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_MIN_LOOPBACK_DEL, 1);
105 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 184 if (AR_SREV_9485(ah))
106 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP, -6); 185 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
186 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
187 -3);
188 else
189 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
190 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP,
191 -6);
107 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3, 192 REG_RMW_FIELD(ah, AR_PHY_PAPRD_TRAINER_CNTL3,
108 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE, 193 AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE,
109 -15); 194 -15);
@@ -132,6 +217,7 @@ static void ar9003_paprd_setup_single_table(struct ath_hw *ah)
132 AR_PHY_PAPRD_PRE_POST_SCALING, 185706); 217 AR_PHY_PAPRD_PRE_POST_SCALING, 185706);
133 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0, 218 REG_RMW_FIELD(ah, AR_PHY_PAPRD_PRE_POST_SCALE_7_B0,
134 AR_PHY_PAPRD_PRE_POST_SCALING, 175487); 219 AR_PHY_PAPRD_PRE_POST_SCALING, 175487);
220 return 0;
135} 221}
136 222
137static void ar9003_paprd_get_gain_table(struct ath_hw *ah) 223static void ar9003_paprd_get_gain_table(struct ath_hw *ah)
@@ -586,15 +672,10 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
586{ 672{
587 u32 *paprd_table_val = caldata->pa_table[chain]; 673 u32 *paprd_table_val = caldata->pa_table[chain];
588 u32 small_signal_gain = caldata->small_signal_gain[chain]; 674 u32 small_signal_gain = caldata->small_signal_gain[chain];
589 u32 training_power; 675 u32 training_power = ah->paprd_training_power;
590 u32 reg = 0; 676 u32 reg = 0;
591 int i; 677 int i;
592 678
593 training_power =
594 REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
595 AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
596 training_power -= 4;
597
598 if (chain == 0) 679 if (chain == 0)
599 reg = AR_PHY_PAPRD_MEM_TAB_B0; 680 reg = AR_PHY_PAPRD_MEM_TAB_B0;
600 else if (chain == 1) 681 else if (chain == 1)
@@ -620,26 +701,22 @@ void ar9003_paprd_populate_single_table(struct ath_hw *ah,
620 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL, 701 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
621 training_power); 702 training_power);
622 703
623 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1, 704 if (ah->caps.tx_chainmask & BIT(1))
624 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL, 705 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B1,
625 training_power); 706 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
707 training_power);
626 708
627 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2, 709 if (ah->caps.tx_chainmask & BIT(2))
628 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL, 710 REG_RMW_FIELD(ah, AR_PHY_PAPRD_CTRL1_B2,
629 training_power); 711 AR_PHY_PAPRD_CTRL1_PAPRD_POWER_AT_AM2AM_CAL,
712 training_power);
630} 713}
631EXPORT_SYMBOL(ar9003_paprd_populate_single_table); 714EXPORT_SYMBOL(ar9003_paprd_populate_single_table);
632 715
633int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain) 716int ar9003_paprd_setup_gain_table(struct ath_hw *ah, int chain)
634{ 717{
635
636 unsigned int i, desired_gain, gain_index; 718 unsigned int i, desired_gain, gain_index;
637 unsigned int train_power; 719 unsigned int train_power = ah->paprd_training_power;
638
639 train_power = REG_READ_FIELD(ah, AR_PHY_POWERTX_RATE5,
640 AR_PHY_POWERTX_RATE5_POWERTXHT20_0);
641
642 train_power = train_power - 4;
643 720
644 desired_gain = ar9003_get_desired_gain(ah, chain, train_power); 721 desired_gain = ar9003_get_desired_gain(ah, chain, train_power);
645 722
@@ -705,7 +782,12 @@ EXPORT_SYMBOL(ar9003_paprd_create_curve);
705 782
706int ar9003_paprd_init_table(struct ath_hw *ah) 783int ar9003_paprd_init_table(struct ath_hw *ah)
707{ 784{
708 ar9003_paprd_setup_single_table(ah); 785 int ret;
786
787 ret = ar9003_paprd_setup_single_table(ah);
788 if (ret < 0)
789 return ret;
790
709 ar9003_paprd_get_gain_table(ah); 791 ar9003_paprd_get_gain_table(ah);
710 return 0; 792 return 0;
711} 793}
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 669b777729b3..8d60f4f09acc 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -75,7 +75,10 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
75 freq = centers.synth_center; 75 freq = centers.synth_center;
76 76
77 if (freq < 4800) { /* 2 GHz, fractional mode */ 77 if (freq < 4800) { /* 2 GHz, fractional mode */
78 channelSel = CHANSEL_2G(freq); 78 if (AR_SREV_9485(ah))
79 channelSel = CHANSEL_2G_9485(freq);
80 else
81 channelSel = CHANSEL_2G(freq);
79 /* Set to 2G mode */ 82 /* Set to 2G mode */
80 bMode = 1; 83 bMode = 1;
81 } else { 84 } else {
@@ -128,24 +131,53 @@ static int ar9003_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
128static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah, 131static void ar9003_hw_spur_mitigate_mrc_cck(struct ath_hw *ah,
129 struct ath9k_channel *chan) 132 struct ath9k_channel *chan)
130{ 133{
131 u32 spur_freq[4] = { 2420, 2440, 2464, 2480 }; 134 static const u32 spur_freq[4] = { 2420, 2440, 2464, 2480 };
132 int cur_bb_spur, negative = 0, cck_spur_freq; 135 int cur_bb_spur, negative = 0, cck_spur_freq;
133 int i; 136 int i;
137 int range, max_spur_cnts, synth_freq;
138 u8 *spur_fbin_ptr = NULL;
134 139
135 /* 140 /*
136 * Need to verify range +/- 10 MHz in control channel, otherwise spur 141 * Need to verify range +/- 10 MHz in control channel, otherwise spur
137 * is out-of-band and can be ignored. 142 * is out-of-band and can be ignored.
138 */ 143 */
139 144
140 for (i = 0; i < 4; i++) { 145 if (AR_SREV_9485(ah)) {
146 spur_fbin_ptr = ar9003_get_spur_chan_ptr(ah,
147 IS_CHAN_2GHZ(chan));
148 if (spur_fbin_ptr[0] == 0) /* No spur */
149 return;
150 max_spur_cnts = 5;
151 if (IS_CHAN_HT40(chan)) {
152 range = 19;
153 if (REG_READ_FIELD(ah, AR_PHY_GEN_CTRL,
154 AR_PHY_GC_DYN2040_PRI_CH) == 0)
155 synth_freq = chan->channel + 10;
156 else
157 synth_freq = chan->channel - 10;
158 } else {
159 range = 10;
160 synth_freq = chan->channel;
161 }
162 } else {
163 range = 10;
164 max_spur_cnts = 4;
165 synth_freq = chan->channel;
166 }
167
168 for (i = 0; i < max_spur_cnts; i++) {
141 negative = 0; 169 negative = 0;
142 cur_bb_spur = spur_freq[i] - chan->channel; 170 if (AR_SREV_9485(ah))
171 cur_bb_spur = FBIN2FREQ(spur_fbin_ptr[i],
172 IS_CHAN_2GHZ(chan)) - synth_freq;
173 else
174 cur_bb_spur = spur_freq[i] - synth_freq;
143 175
144 if (cur_bb_spur < 0) { 176 if (cur_bb_spur < 0) {
145 negative = 1; 177 negative = 1;
146 cur_bb_spur = -cur_bb_spur; 178 cur_bb_spur = -cur_bb_spur;
147 } 179 }
148 if (cur_bb_spur < 10) { 180 if (cur_bb_spur < range) {
149 cck_spur_freq = (int)((cur_bb_spur << 19) / 11); 181 cck_spur_freq = (int)((cur_bb_spur << 19) / 11);
150 182
151 if (negative == 1) 183 if (negative == 1)
@@ -487,7 +519,11 @@ void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx)
487 break; 519 break;
488 } 520 }
489 521
490 REG_WRITE(ah, AR_SELFGEN_MASK, tx); 522 if ((ah->caps.hw_caps & ATH9K_HW_CAP_APM) && (tx == 0x7))
523 REG_WRITE(ah, AR_SELFGEN_MASK, 0x3);
524 else
525 REG_WRITE(ah, AR_SELFGEN_MASK, tx);
526
491 if (tx == 0x5) { 527 if (tx == 0x5) {
492 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP, 528 REG_SET_BIT(ah, AR_PHY_ANALOG_SWAP,
493 AR_PHY_SWAP_ALT_CHAIN); 529 AR_PHY_SWAP_ALT_CHAIN);
@@ -542,10 +578,7 @@ static void ar9003_hw_prog_ini(struct ath_hw *ah,
542 u32 reg = INI_RA(iniArr, i, 0); 578 u32 reg = INI_RA(iniArr, i, 0);
543 u32 val = INI_RA(iniArr, i, column); 579 u32 val = INI_RA(iniArr, i, column);
544 580
545 if (reg >= 0x16000 && reg < 0x17000) 581 REG_WRITE(ah, reg, val);
546 ath9k_hw_analog_shift_regwrite(ah, reg, val);
547 else
548 REG_WRITE(ah, reg, val);
549 582
550 DO_DELAY(regWrites); 583 DO_DELAY(regWrites);
551 } 584 }
@@ -614,7 +647,7 @@ static int ar9003_hw_process_ini(struct ath_hw *ah,
614 channel->max_antenna_gain * 2, 647 channel->max_antenna_gain * 2,
615 channel->max_power * 2, 648 channel->max_power * 2,
616 min((u32) MAX_RATE_POWER, 649 min((u32) MAX_RATE_POWER,
617 (u32) regulatory->power_limit)); 650 (u32) regulatory->power_limit), false);
618 651
619 return 0; 652 return 0;
620} 653}
@@ -712,28 +745,6 @@ static void ar9003_hw_rfbus_done(struct ath_hw *ah)
712 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0); 745 REG_WRITE(ah, AR_PHY_RFBUS_REQ, 0);
713} 746}
714 747
715/*
716 * Set the interrupt and GPIO values so the ISR can disable RF
717 * on a switch signal. Assumes GPIO port and interrupt polarity
718 * are set prior to call.
719 */
720static void ar9003_hw_enable_rfkill(struct ath_hw *ah)
721{
722 /* Connect rfsilent_bb_l to baseband */
723 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
724 AR_GPIO_INPUT_EN_VAL_RFSILENT_BB);
725 /* Set input mux for rfsilent_bb_l to GPIO #0 */
726 REG_CLR_BIT(ah, AR_GPIO_INPUT_MUX2,
727 AR_GPIO_INPUT_MUX2_RFSILENT);
728
729 /*
730 * Configure the desired GPIO port for input and
731 * enable baseband rf silence.
732 */
733 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
734 REG_SET_BIT(ah, AR_PHY_TEST, RFSILENT_BB);
735}
736
737static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value) 748static void ar9003_hw_set_diversity(struct ath_hw *ah, bool value)
738{ 749{
739 u32 v = REG_READ(ah, AR_PHY_CCK_DETECT); 750 u32 v = REG_READ(ah, AR_PHY_CCK_DETECT);
@@ -820,12 +831,12 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
820 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW); 831 AR_PHY_SFCORR_LOW_USE_SELF_CORR_LOW);
821 832
822 if (!on != aniState->ofdmWeakSigDetectOff) { 833 if (!on != aniState->ofdmWeakSigDetectOff) {
823 ath_print(common, ATH_DBG_ANI, 834 ath_dbg(common, ATH_DBG_ANI,
824 "** ch %d: ofdm weak signal: %s=>%s\n", 835 "** ch %d: ofdm weak signal: %s=>%s\n",
825 chan->channel, 836 chan->channel,
826 !aniState->ofdmWeakSigDetectOff ? 837 !aniState->ofdmWeakSigDetectOff ?
827 "on" : "off", 838 "on" : "off",
828 on ? "on" : "off"); 839 on ? "on" : "off");
829 if (on) 840 if (on)
830 ah->stats.ast_ani_ofdmon++; 841 ah->stats.ast_ani_ofdmon++;
831 else 842 else
@@ -838,11 +849,9 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
838 u32 level = param; 849 u32 level = param;
839 850
840 if (level >= ARRAY_SIZE(firstep_table)) { 851 if (level >= ARRAY_SIZE(firstep_table)) {
841 ath_print(common, ATH_DBG_ANI, 852 ath_dbg(common, ATH_DBG_ANI,
842 "ATH9K_ANI_FIRSTEP_LEVEL: level " 853 "ATH9K_ANI_FIRSTEP_LEVEL: level out of range (%u > %zu)\n",
843 "out of range (%u > %u)\n", 854 level, ARRAY_SIZE(firstep_table));
844 level,
845 (unsigned) ARRAY_SIZE(firstep_table));
846 return false; 855 return false;
847 } 856 }
848 857
@@ -877,24 +886,22 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
877 AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2); 886 AR_PHY_FIND_SIG_LOW_FIRSTEP_LOW, value2);
878 887
879 if (level != aniState->firstepLevel) { 888 if (level != aniState->firstepLevel) {
880 ath_print(common, ATH_DBG_ANI, 889 ath_dbg(common, ATH_DBG_ANI,
881 "** ch %d: level %d=>%d[def:%d] " 890 "** ch %d: level %d=>%d[def:%d] firstep[level]=%d ini=%d\n",
882 "firstep[level]=%d ini=%d\n", 891 chan->channel,
883 chan->channel, 892 aniState->firstepLevel,
884 aniState->firstepLevel, 893 level,
885 level, 894 ATH9K_ANI_FIRSTEP_LVL_NEW,
886 ATH9K_ANI_FIRSTEP_LVL_NEW, 895 value,
887 value, 896 aniState->iniDef.firstep);
888 aniState->iniDef.firstep); 897 ath_dbg(common, ATH_DBG_ANI,
889 ath_print(common, ATH_DBG_ANI, 898 "** ch %d: level %d=>%d[def:%d] firstep_low[level]=%d ini=%d\n",
890 "** ch %d: level %d=>%d[def:%d] " 899 chan->channel,
891 "firstep_low[level]=%d ini=%d\n", 900 aniState->firstepLevel,
892 chan->channel, 901 level,
893 aniState->firstepLevel, 902 ATH9K_ANI_FIRSTEP_LVL_NEW,
894 level, 903 value2,
895 ATH9K_ANI_FIRSTEP_LVL_NEW, 904 aniState->iniDef.firstepLow);
896 value2,
897 aniState->iniDef.firstepLow);
898 if (level > aniState->firstepLevel) 905 if (level > aniState->firstepLevel)
899 ah->stats.ast_ani_stepup++; 906 ah->stats.ast_ani_stepup++;
900 else if (level < aniState->firstepLevel) 907 else if (level < aniState->firstepLevel)
@@ -907,11 +914,9 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
907 u32 level = param; 914 u32 level = param;
908 915
909 if (level >= ARRAY_SIZE(cycpwrThr1_table)) { 916 if (level >= ARRAY_SIZE(cycpwrThr1_table)) {
910 ath_print(common, ATH_DBG_ANI, 917 ath_dbg(common, ATH_DBG_ANI,
911 "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level " 918 "ATH9K_ANI_SPUR_IMMUNITY_LEVEL: level out of range (%u > %zu)\n",
912 "out of range (%u > %u)\n", 919 level, ARRAY_SIZE(cycpwrThr1_table));
913 level,
914 (unsigned) ARRAY_SIZE(cycpwrThr1_table));
915 return false; 920 return false;
916 } 921 }
917 /* 922 /*
@@ -945,24 +950,22 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
945 AR_PHY_EXT_CYCPWR_THR1, value2); 950 AR_PHY_EXT_CYCPWR_THR1, value2);
946 951
947 if (level != aniState->spurImmunityLevel) { 952 if (level != aniState->spurImmunityLevel) {
948 ath_print(common, ATH_DBG_ANI, 953 ath_dbg(common, ATH_DBG_ANI,
949 "** ch %d: level %d=>%d[def:%d] " 954 "** ch %d: level %d=>%d[def:%d] cycpwrThr1[level]=%d ini=%d\n",
950 "cycpwrThr1[level]=%d ini=%d\n", 955 chan->channel,
951 chan->channel, 956 aniState->spurImmunityLevel,
952 aniState->spurImmunityLevel, 957 level,
953 level, 958 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
954 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 959 value,
955 value, 960 aniState->iniDef.cycpwrThr1);
956 aniState->iniDef.cycpwrThr1); 961 ath_dbg(common, ATH_DBG_ANI,
957 ath_print(common, ATH_DBG_ANI, 962 "** ch %d: level %d=>%d[def:%d] cycpwrThr1Ext[level]=%d ini=%d\n",
958 "** ch %d: level %d=>%d[def:%d] " 963 chan->channel,
959 "cycpwrThr1Ext[level]=%d ini=%d\n", 964 aniState->spurImmunityLevel,
960 chan->channel, 965 level,
961 aniState->spurImmunityLevel, 966 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW,
962 level, 967 value2,
963 ATH9K_ANI_SPUR_IMMUNE_LVL_NEW, 968 aniState->iniDef.cycpwrThr1Ext);
964 value2,
965 aniState->iniDef.cycpwrThr1Ext);
966 if (level > aniState->spurImmunityLevel) 969 if (level > aniState->spurImmunityLevel)
967 ah->stats.ast_ani_spurup++; 970 ah->stats.ast_ani_spurup++;
968 else if (level < aniState->spurImmunityLevel) 971 else if (level < aniState->spurImmunityLevel)
@@ -982,11 +985,11 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
982 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, 985 REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL,
983 AR_PHY_MRC_CCK_MUX_REG, is_on); 986 AR_PHY_MRC_CCK_MUX_REG, is_on);
984 if (!is_on != aniState->mrcCCKOff) { 987 if (!is_on != aniState->mrcCCKOff) {
985 ath_print(common, ATH_DBG_ANI, 988 ath_dbg(common, ATH_DBG_ANI,
986 "** ch %d: MRC CCK: %s=>%s\n", 989 "** ch %d: MRC CCK: %s=>%s\n",
987 chan->channel, 990 chan->channel,
988 !aniState->mrcCCKOff ? "on" : "off", 991 !aniState->mrcCCKOff ? "on" : "off",
989 is_on ? "on" : "off"); 992 is_on ? "on" : "off");
990 if (is_on) 993 if (is_on)
991 ah->stats.ast_ani_ccklow++; 994 ah->stats.ast_ani_ccklow++;
992 else 995 else
@@ -998,22 +1001,19 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah,
998 case ATH9K_ANI_PRESENT: 1001 case ATH9K_ANI_PRESENT:
999 break; 1002 break;
1000 default: 1003 default:
1001 ath_print(common, ATH_DBG_ANI, 1004 ath_dbg(common, ATH_DBG_ANI, "invalid cmd %u\n", cmd);
1002 "invalid cmd %u\n", cmd);
1003 return false; 1005 return false;
1004 } 1006 }
1005 1007
1006 ath_print(common, ATH_DBG_ANI, 1008 ath_dbg(common, ATH_DBG_ANI,
1007 "ANI parameters: SI=%d, ofdmWS=%s FS=%d " 1009 "ANI parameters: SI=%d, ofdmWS=%s FS=%d MRCcck=%s listenTime=%d ofdmErrs=%d cckErrs=%d\n",
1008 "MRCcck=%s listenTime=%d " 1010 aniState->spurImmunityLevel,
1009 "ofdmErrs=%d cckErrs=%d\n", 1011 !aniState->ofdmWeakSigDetectOff ? "on" : "off",
1010 aniState->spurImmunityLevel, 1012 aniState->firstepLevel,
1011 !aniState->ofdmWeakSigDetectOff ? "on" : "off", 1013 !aniState->mrcCCKOff ? "on" : "off",
1012 aniState->firstepLevel, 1014 aniState->listenTime,
1013 !aniState->mrcCCKOff ? "on" : "off", 1015 aniState->ofdmPhyErrCount,
1014 aniState->listenTime, 1016 aniState->cckPhyErrCount);
1015 aniState->ofdmPhyErrCount,
1016 aniState->cckPhyErrCount);
1017 return true; 1017 return true;
1018} 1018}
1019 1019
@@ -1023,25 +1023,25 @@ static void ar9003_hw_do_getnf(struct ath_hw *ah,
1023 int16_t nf; 1023 int16_t nf;
1024 1024
1025 nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR); 1025 nf = MS(REG_READ(ah, AR_PHY_CCA_0), AR_PHY_MINCCA_PWR);
1026 nfarray[0] = sign_extend(nf, 9); 1026 nfarray[0] = sign_extend32(nf, 8);
1027 1027
1028 nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR); 1028 nf = MS(REG_READ(ah, AR_PHY_CCA_1), AR_PHY_CH1_MINCCA_PWR);
1029 nfarray[1] = sign_extend(nf, 9); 1029 nfarray[1] = sign_extend32(nf, 8);
1030 1030
1031 nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR); 1031 nf = MS(REG_READ(ah, AR_PHY_CCA_2), AR_PHY_CH2_MINCCA_PWR);
1032 nfarray[2] = sign_extend(nf, 9); 1032 nfarray[2] = sign_extend32(nf, 8);
1033 1033
1034 if (!IS_CHAN_HT40(ah->curchan)) 1034 if (!IS_CHAN_HT40(ah->curchan))
1035 return; 1035 return;
1036 1036
1037 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR); 1037 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA), AR_PHY_EXT_MINCCA_PWR);
1038 nfarray[3] = sign_extend(nf, 9); 1038 nfarray[3] = sign_extend32(nf, 8);
1039 1039
1040 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR); 1040 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_1), AR_PHY_CH1_EXT_MINCCA_PWR);
1041 nfarray[4] = sign_extend(nf, 9); 1041 nfarray[4] = sign_extend32(nf, 8);
1042 1042
1043 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR); 1043 nf = MS(REG_READ(ah, AR_PHY_EXT_CCA_2), AR_PHY_CH2_EXT_MINCCA_PWR);
1044 nfarray[5] = sign_extend(nf, 9); 1044 nfarray[5] = sign_extend32(nf, 8);
1045} 1045}
1046 1046
1047static void ar9003_hw_set_nf_limits(struct ath_hw *ah) 1047static void ar9003_hw_set_nf_limits(struct ath_hw *ah)
@@ -1070,13 +1070,13 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1070 aniState = &ah->curchan->ani; 1070 aniState = &ah->curchan->ani;
1071 iniDef = &aniState->iniDef; 1071 iniDef = &aniState->iniDef;
1072 1072
1073 ath_print(common, ATH_DBG_ANI, 1073 ath_dbg(common, ATH_DBG_ANI,
1074 "ver %d.%d opmode %u chan %d Mhz/0x%x\n", 1074 "ver %d.%d opmode %u chan %d Mhz/0x%x\n",
1075 ah->hw_version.macVersion, 1075 ah->hw_version.macVersion,
1076 ah->hw_version.macRev, 1076 ah->hw_version.macRev,
1077 ah->opmode, 1077 ah->opmode,
1078 chan->channel, 1078 chan->channel,
1079 chan->channelFlags); 1079 chan->channelFlags);
1080 1080
1081 val = REG_READ(ah, AR_PHY_SFCORR); 1081 val = REG_READ(ah, AR_PHY_SFCORR);
1082 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH); 1082 iniDef->m1Thresh = MS(val, AR_PHY_SFCORR_M1_THRESH);
@@ -1113,10 +1113,55 @@ static void ar9003_hw_ani_cache_ini_regs(struct ath_hw *ah)
1113 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK; 1113 aniState->mrcCCKOff = !ATH9K_ANI_ENABLE_MRC_CCK;
1114} 1114}
1115 1115
1116static void ar9003_hw_set_radar_params(struct ath_hw *ah,
1117 struct ath_hw_radar_conf *conf)
1118{
1119 u32 radar_0 = 0, radar_1 = 0;
1120
1121 if (!conf) {
1122 REG_CLR_BIT(ah, AR_PHY_RADAR_0, AR_PHY_RADAR_0_ENA);
1123 return;
1124 }
1125
1126 radar_0 |= AR_PHY_RADAR_0_ENA | AR_PHY_RADAR_0_FFT_ENA;
1127 radar_0 |= SM(conf->fir_power, AR_PHY_RADAR_0_FIRPWR);
1128 radar_0 |= SM(conf->radar_rssi, AR_PHY_RADAR_0_RRSSI);
1129 radar_0 |= SM(conf->pulse_height, AR_PHY_RADAR_0_HEIGHT);
1130 radar_0 |= SM(conf->pulse_rssi, AR_PHY_RADAR_0_PRSSI);
1131 radar_0 |= SM(conf->pulse_inband, AR_PHY_RADAR_0_INBAND);
1132
1133 radar_1 |= AR_PHY_RADAR_1_MAX_RRSSI;
1134 radar_1 |= AR_PHY_RADAR_1_BLOCK_CHECK;
1135 radar_1 |= SM(conf->pulse_maxlen, AR_PHY_RADAR_1_MAXLEN);
1136 radar_1 |= SM(conf->pulse_inband_step, AR_PHY_RADAR_1_RELSTEP_THRESH);
1137 radar_1 |= SM(conf->radar_inband, AR_PHY_RADAR_1_RELPWR_THRESH);
1138
1139 REG_WRITE(ah, AR_PHY_RADAR_0, radar_0);
1140 REG_WRITE(ah, AR_PHY_RADAR_1, radar_1);
1141 if (conf->ext_channel)
1142 REG_SET_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
1143 else
1144 REG_CLR_BIT(ah, AR_PHY_RADAR_EXT, AR_PHY_RADAR_EXT_ENA);
1145}
1146
1147static void ar9003_hw_set_radar_conf(struct ath_hw *ah)
1148{
1149 struct ath_hw_radar_conf *conf = &ah->radar_conf;
1150
1151 conf->fir_power = -28;
1152 conf->radar_rssi = 0;
1153 conf->pulse_height = 10;
1154 conf->pulse_rssi = 24;
1155 conf->pulse_inband = 8;
1156 conf->pulse_maxlen = 255;
1157 conf->pulse_inband_step = 12;
1158 conf->radar_inband = 8;
1159}
1160
1116void ar9003_hw_attach_phy_ops(struct ath_hw *ah) 1161void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1117{ 1162{
1118 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah); 1163 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
1119 const u32 ar9300_cca_regs[6] = { 1164 static const u32 ar9300_cca_regs[6] = {
1120 AR_PHY_CCA_0, 1165 AR_PHY_CCA_0,
1121 AR_PHY_CCA_1, 1166 AR_PHY_CCA_1,
1122 AR_PHY_CCA_2, 1167 AR_PHY_CCA_2,
@@ -1136,13 +1181,14 @@ void ar9003_hw_attach_phy_ops(struct ath_hw *ah)
1136 priv_ops->set_delta_slope = ar9003_hw_set_delta_slope; 1181 priv_ops->set_delta_slope = ar9003_hw_set_delta_slope;
1137 priv_ops->rfbus_req = ar9003_hw_rfbus_req; 1182 priv_ops->rfbus_req = ar9003_hw_rfbus_req;
1138 priv_ops->rfbus_done = ar9003_hw_rfbus_done; 1183 priv_ops->rfbus_done = ar9003_hw_rfbus_done;
1139 priv_ops->enable_rfkill = ar9003_hw_enable_rfkill;
1140 priv_ops->set_diversity = ar9003_hw_set_diversity; 1184 priv_ops->set_diversity = ar9003_hw_set_diversity;
1141 priv_ops->ani_control = ar9003_hw_ani_control; 1185 priv_ops->ani_control = ar9003_hw_ani_control;
1142 priv_ops->do_getnf = ar9003_hw_do_getnf; 1186 priv_ops->do_getnf = ar9003_hw_do_getnf;
1143 priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs; 1187 priv_ops->ani_cache_ini_regs = ar9003_hw_ani_cache_ini_regs;
1188 priv_ops->set_radar_params = ar9003_hw_set_radar_params;
1144 1189
1145 ar9003_hw_set_nf_limits(ah); 1190 ar9003_hw_set_nf_limits(ah);
1191 ar9003_hw_set_radar_conf(ah);
1146 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs)); 1192 memcpy(ah->nf_regs, ar9300_cca_regs, sizeof(ah->nf_regs));
1147} 1193}
1148 1194
@@ -1165,7 +1211,7 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
1165 ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE | 1211 ~(AR_PHY_WATCHDOG_NON_IDLE_ENABLE |
1166 AR_PHY_WATCHDOG_IDLE_ENABLE)); 1212 AR_PHY_WATCHDOG_IDLE_ENABLE));
1167 1213
1168 ath_print(common, ATH_DBG_RESET, "Disabled BB Watchdog\n"); 1214 ath_dbg(common, ATH_DBG_RESET, "Disabled BB Watchdog\n");
1169 return; 1215 return;
1170 } 1216 }
1171 1217
@@ -1201,9 +1247,9 @@ void ar9003_hw_bb_watchdog_config(struct ath_hw *ah)
1201 AR_PHY_WATCHDOG_IDLE_MASK | 1247 AR_PHY_WATCHDOG_IDLE_MASK |
1202 (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2))); 1248 (AR_PHY_WATCHDOG_NON_IDLE_MASK & (idle_count << 2)));
1203 1249
1204 ath_print(common, ATH_DBG_RESET, 1250 ath_dbg(common, ATH_DBG_RESET,
1205 "Enabled BB Watchdog timeout (%u ms)\n", 1251 "Enabled BB Watchdog timeout (%u ms)\n",
1206 idle_tmo_ms); 1252 idle_tmo_ms);
1207} 1253}
1208 1254
1209void ar9003_hw_bb_watchdog_read(struct ath_hw *ah) 1255void ar9003_hw_bb_watchdog_read(struct ath_hw *ah)
@@ -1231,37 +1277,35 @@ void ar9003_hw_bb_watchdog_dbg_info(struct ath_hw *ah)
1231 return; 1277 return;
1232 1278
1233 status = ah->bb_watchdog_last_status; 1279 status = ah->bb_watchdog_last_status;
1234 ath_print(common, ATH_DBG_RESET, 1280 ath_dbg(common, ATH_DBG_RESET,
1235 "\n==== BB update: BB status=0x%08x ====\n", status); 1281 "\n==== BB update: BB status=0x%08x ====\n", status);
1236 ath_print(common, ATH_DBG_RESET, 1282 ath_dbg(common, ATH_DBG_RESET,
1237 "** BB state: wd=%u det=%u rdar=%u rOFDM=%d " 1283 "** BB state: wd=%u det=%u rdar=%u rOFDM=%d rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n",
1238 "rCCK=%u tOFDM=%u tCCK=%u agc=%u src=%u **\n", 1284 MS(status, AR_PHY_WATCHDOG_INFO),
1239 MS(status, AR_PHY_WATCHDOG_INFO), 1285 MS(status, AR_PHY_WATCHDOG_DET_HANG),
1240 MS(status, AR_PHY_WATCHDOG_DET_HANG), 1286 MS(status, AR_PHY_WATCHDOG_RADAR_SM),
1241 MS(status, AR_PHY_WATCHDOG_RADAR_SM), 1287 MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM),
1242 MS(status, AR_PHY_WATCHDOG_RX_OFDM_SM), 1288 MS(status, AR_PHY_WATCHDOG_RX_CCK_SM),
1243 MS(status, AR_PHY_WATCHDOG_RX_CCK_SM), 1289 MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM),
1244 MS(status, AR_PHY_WATCHDOG_TX_OFDM_SM), 1290 MS(status, AR_PHY_WATCHDOG_TX_CCK_SM),
1245 MS(status, AR_PHY_WATCHDOG_TX_CCK_SM), 1291 MS(status, AR_PHY_WATCHDOG_AGC_SM),
1246 MS(status, AR_PHY_WATCHDOG_AGC_SM), 1292 MS(status, AR_PHY_WATCHDOG_SRCH_SM));
1247 MS(status,AR_PHY_WATCHDOG_SRCH_SM)); 1293
1248 1294 ath_dbg(common, ATH_DBG_RESET,
1249 ath_print(common, ATH_DBG_RESET, 1295 "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n",
1250 "** BB WD cntl: cntl1=0x%08x cntl2=0x%08x **\n", 1296 REG_READ(ah, AR_PHY_WATCHDOG_CTL_1),
1251 REG_READ(ah, AR_PHY_WATCHDOG_CTL_1), 1297 REG_READ(ah, AR_PHY_WATCHDOG_CTL_2));
1252 REG_READ(ah, AR_PHY_WATCHDOG_CTL_2)); 1298 ath_dbg(common, ATH_DBG_RESET,
1253 ath_print(common, ATH_DBG_RESET, 1299 "** BB mode: BB_gen_controls=0x%08x **\n",
1254 "** BB mode: BB_gen_controls=0x%08x **\n", 1300 REG_READ(ah, AR_PHY_GEN_CTRL));
1255 REG_READ(ah, AR_PHY_GEN_CTRL));
1256 1301
1257#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles) 1302#define PCT(_field) (common->cc_survey._field * 100 / common->cc_survey.cycles)
1258 if (common->cc_survey.cycles) 1303 if (common->cc_survey.cycles)
1259 ath_print(common, ATH_DBG_RESET, 1304 ath_dbg(common, ATH_DBG_RESET,
1260 "** BB busy times: rx_clear=%d%%, " 1305 "** BB busy times: rx_clear=%d%%, rx_frame=%d%%, tx_frame=%d%% **\n",
1261 "rx_frame=%d%%, tx_frame=%d%% **\n", 1306 PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
1262 PCT(rx_busy), PCT(rx_frame), PCT(tx_frame));
1263 1307
1264 ath_print(common, ATH_DBG_RESET, 1308 ath_dbg(common, ATH_DBG_RESET,
1265 "==== BB update: done ====\n\n"); 1309 "==== BB update: done ====\n\n");
1266} 1310}
1267EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info); 1311EXPORT_SYMBOL(ar9003_hw_bb_watchdog_dbg_info);
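
The noise-floor hunk above swaps the driver-local sign_extend(nf, 9) calls for the kernel's sign_extend32(nf, 8): the raw AR_PHY_*_MINCCA_PWR fields are 9-bit two's-complement values, and sign_extend32() takes the 0-based index of the sign bit (8 for a 9-bit field). A minimal standalone sketch of that operation follows; it is not part of the patch and the helper name is hypothetical.

/* Minimal sketch (not from the patch): sign-extend a 9-bit two's-complement
 * field the way sign_extend32(value, 8) does for the MINCCA_PWR fields above.
 * The helper name is hypothetical. */
#include <stdint.h>
#include <stdio.h>

static int32_t demo_sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;	/* 'index' is the 0-based sign-bit position */

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* 0x1F5 is a 9-bit raw reading; as two's complement it is 501 - 512 = -11 */
	printf("%d\n", demo_sign_extend32(0x1F5, 8));	/* prints -11 */
	return 0;
}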
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.h b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
index 3394dfe52b42..59bab6bd8a74 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.h
@@ -260,7 +260,13 @@
260#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c) 260#define AR_PHY_CCA_0 (AR_AGC_BASE + 0x1c)
261#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20) 261#define AR_PHY_EXT_CCA0 (AR_AGC_BASE + 0x20)
262#define AR_PHY_RESTART (AR_AGC_BASE + 0x24) 262#define AR_PHY_RESTART (AR_AGC_BASE + 0x24)
263
263#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28) 264#define AR_PHY_MC_GAIN_CTRL (AR_AGC_BASE + 0x28)
265#define AR_ANT_DIV_CTRL_ALL 0x7e000000
266#define AR_ANT_DIV_CTRL_ALL_S 25
267#define AR_ANT_DIV_ENABLE 0x1000000
268#define AR_ANT_DIV_ENABLE_S 24
269
264#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c) 270#define AR_PHY_EXTCHN_PWRTHR1 (AR_AGC_BASE + 0x2c)
265#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30) 271#define AR_PHY_EXT_CHN_WIN (AR_AGC_BASE + 0x30)
266#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34) 272#define AR_PHY_20_40_DET_THR (AR_AGC_BASE + 0x34)
@@ -271,7 +277,11 @@
271#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48) 277#define AR_PHY_RX_GAIN_BOUNDS_2 (AR_AGC_BASE + 0x48)
272#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180) 278#define AR_PHY_RSSI_0 (AR_AGC_BASE + 0x180)
273#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184) 279#define AR_PHY_SPUR_CCK_REP0 (AR_AGC_BASE + 0x184)
280
274#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0) 281#define AR_PHY_CCK_DETECT (AR_AGC_BASE + 0x1c0)
282#define AR_FAST_DIV_ENABLE 0x2000
283#define AR_FAST_DIV_ENABLE_S 13
284
275#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4) 285#define AR_PHY_DAG_CTRLCCK (AR_AGC_BASE + 0x1c4)
276#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8) 286#define AR_PHY_IQCORR_CTRL_CCK (AR_AGC_BASE + 0x1c8)
277 287
@@ -536,10 +546,18 @@
536 546
537#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300) 547#define AR_PHY_TXGAIN_TABLE (AR_SM_BASE + 0x300)
538 548
549#define AR_PHY_TX_IQCAL_START_9485 (AR_SM_BASE + 0x3c4)
550#define AR_PHY_TX_IQCAL_START_DO_CAL_9485 0x80000000
551#define AR_PHY_TX_IQCAL_START_DO_CAL_9485_S 31
552#define AR_PHY_TX_IQCAL_CONTROL_1_9485 (AR_SM_BASE + 0x3c8)
553#define AR_PHY_TX_IQCAL_STATUS_B0_9485 (AR_SM_BASE + 0x3f0)
554
539#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448) 555#define AR_PHY_TX_IQCAL_CONTROL_1 (AR_SM_BASE + 0x448)
540#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440) 556#define AR_PHY_TX_IQCAL_START (AR_SM_BASE + 0x440)
541#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c) 557#define AR_PHY_TX_IQCAL_STATUS_B0 (AR_SM_BASE + 0x48c)
542#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B0 (AR_SM_BASE + 0x450) 558#define AR_PHY_TX_IQCAL_CORR_COEFF_B0(_i) (AR_SM_BASE + \
559 (AR_SREV_9485(ah) ? \
560 0x3d0 : 0x450) + ((_i) << 2))
543 561
544#define AR_PHY_WATCHDOG_STATUS (AR_SM_BASE + 0x5c0) 562#define AR_PHY_WATCHDOG_STATUS (AR_SM_BASE + 0x5c0)
545#define AR_PHY_WATCHDOG_CTL_1 (AR_SM_BASE + 0x5c4) 563#define AR_PHY_WATCHDOG_CTL_1 (AR_SM_BASE + 0x5c4)
@@ -568,7 +586,7 @@
568#define AR_PHY_65NM_CH0_BIAS2 0x160c4 586#define AR_PHY_65NM_CH0_BIAS2 0x160c4
569#define AR_PHY_65NM_CH0_BIAS4 0x160cc 587#define AR_PHY_65NM_CH0_BIAS4 0x160cc
570#define AR_PHY_65NM_CH0_RXTX4 0x1610c 588#define AR_PHY_65NM_CH0_RXTX4 0x1610c
571#define AR_PHY_65NM_CH0_THERM 0x16290 589#define AR_PHY_65NM_CH0_THERM (AR_SREV_9485(ah) ? 0x1628c : 0x16290)
572 590
573#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000 591#define AR_PHY_65NM_CH0_THERM_LOCAL 0x80000000
574#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31 592#define AR_PHY_65NM_CH0_THERM_LOCAL_S 31
@@ -584,6 +602,24 @@
584#define AR_PHY_65NM_CH2_RXTX1 0x16900 602#define AR_PHY_65NM_CH2_RXTX1 0x16900
585#define AR_PHY_65NM_CH2_RXTX2 0x16904 603#define AR_PHY_65NM_CH2_RXTX2 0x16904
586 604
605#define AR_CH0_TOP2 (AR_SREV_9485(ah) ? 0x00016284 : 0x0001628c)
606#define AR_CH0_TOP2_XPABIASLVL 0xf000
607#define AR_CH0_TOP2_XPABIASLVL_S 12
608
609#define AR_CH0_XTAL (AR_SREV_9485(ah) ? 0x16290 : 0x16294)
610#define AR_CH0_XTAL_CAPINDAC 0x7f000000
611#define AR_CH0_XTAL_CAPINDAC_S 24
612#define AR_CH0_XTAL_CAPOUTDAC 0x00fe0000
613#define AR_CH0_XTAL_CAPOUTDAC_S 17
614
615#define AR_PHY_PMU1 0x16c40
616#define AR_PHY_PMU1_PWD 0x1
617#define AR_PHY_PMU1_PWD_S 0
618
619#define AR_PHY_PMU2 0x16c44
620#define AR_PHY_PMU2_PGM 0x00200000
621#define AR_PHY_PMU2_PGM_S 21
622
587#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000 623#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT 0x00380000
588#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19 624#define AR_PHY_RX1DB_BIQUAD_LONG_SHIFT_S 19
589#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000 625#define AR_PHY_RX6DB_BIQUAD_LONG_SHIFT 0x00c00000
@@ -683,6 +719,7 @@
683#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1 719#define AR_PHY_TPCGR1_FORCED_DAC_GAIN_S 1
684#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001 720#define AR_PHY_TPCGR1_FORCE_DAC_GAIN 0x00000001
685#define AR_PHY_TXGAIN_FORCE 0x00000001 721#define AR_PHY_TXGAIN_FORCE 0x00000001
722#define AR_PHY_TXGAIN_FORCE_S 0
686#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00 723#define AR_PHY_TXGAIN_FORCED_PADVGNRA 0x00003c00
687#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10 724#define AR_PHY_TXGAIN_FORCED_PADVGNRA_S 10
688#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000 725#define AR_PHY_TXGAIN_FORCED_PADVGNRB 0x0003c000
@@ -725,8 +762,13 @@
725#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0 762#define AR_PHY_TX_IQCAL_START_DO_CAL_S 0
726 763
727#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001 764#define AR_PHY_TX_IQCAL_STATUS_FAILED 0x00000001
728#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x00003fff 765#define AR_PHY_CALIBRATED_GAINS_0 0x3e
729#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 0 766#define AR_PHY_CALIBRATED_GAINS_0_S 1
767
768#define AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE 0x00003fff
769#define AR_PHY_TX_IQCAL_CORR_COEFF_00_COEFF_TABLE_S 0
770#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE 0x0fffc000
771#define AR_PHY_TX_IQCAL_CORR_COEFF_01_COEFF_TABLE_S 14
730 772
731#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000 773#define AR_PHY_65NM_CH0_RXTX4_THERM_ON 0x10000000
732#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28 774#define AR_PHY_65NM_CH0_RXTX4_THERM_ON_S 28
@@ -785,7 +827,7 @@
785#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220) 827#define AR_PHY_TPC_11_B1 (AR_SM1_BASE + 0x220)
786#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240) 828#define AR_PHY_PDADC_TAB_1 (AR_SM1_BASE + 0x240)
787#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c) 829#define AR_PHY_TX_IQCAL_STATUS_B1 (AR_SM1_BASE + 0x48c)
788#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B1 (AR_SM1_BASE + 0x450) 830#define AR_PHY_TX_IQCAL_CORR_COEFF_B1(_i) (AR_SM_BASE + 0x450 + ((_i) << 2))
789 831
790/* 832/*
791 * Channel 2 Register Map 833 * Channel 2 Register Map
@@ -838,7 +880,7 @@
838#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220) 880#define AR_PHY_TPC_11_B2 (AR_SM2_BASE + 0x220)
839#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240) 881#define AR_PHY_PDADC_TAB_2 (AR_SM2_BASE + 0x240)
840#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c) 882#define AR_PHY_TX_IQCAL_STATUS_B2 (AR_SM2_BASE + 0x48c)
841#define AR_PHY_TX_IQCAL_CORR_COEFF_01_B2 (AR_SM2_BASE + 0x450) 883#define AR_PHY_TX_IQCAL_CORR_COEFF_B2(_i) (AR_SM2_BASE + 0x450 + ((_i) << 2))
842 884
843#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001 885#define AR_PHY_TX_IQCAL_STATUS_B2_FAILED 0x00000001
844 886
@@ -945,7 +987,9 @@
945#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000 987#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT 0x0ffe0000
946#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17 988#define AR_PHY_PAPRD_CTRL1_PAPRD_MAG_SCALE_FACT_S 17
947 989
948#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + 0x490) 990#define AR_PHY_PAPRD_TRAINER_CNTL1 (AR_SM_BASE + \
991 (AR_SREV_9485(ah) ? \
992 0x580 : 0x490))
949#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001 993#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE 0x00000001
950#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0 994#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_CF_PAPRD_TRAIN_ENABLE_S 0
951#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e 995#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_AGC2_SETTLING 0x0000007e
@@ -961,11 +1005,15 @@
961#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000 1005#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP 0x0003f000
962#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12 1006#define AR_PHY_PAPRD_TRAINER_CNTL1_CF_PAPRD_LB_SKIP_S 12
963 1007
964#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + 0x494) 1008#define AR_PHY_PAPRD_TRAINER_CNTL2 (AR_SM_BASE + \
1009 (AR_SREV_9485(ah) ? \
1010 0x584 : 0x494))
965#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF 1011#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN 0xFFFFFFFF
966#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0 1012#define AR_PHY_PAPRD_TRAINER_CNTL2_CF_PAPRD_INIT_RX_BB_GAIN_S 0
967 1013
968#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + 0x498) 1014#define AR_PHY_PAPRD_TRAINER_CNTL3 (AR_SM_BASE + \
1015 (AR_SREV_9485(ah) ? \
1016 0x588 : 0x498))
969#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f 1017#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE 0x0000003f
970#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0 1018#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_ADC_DESIRED_SIZE_S 0
971#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0 1019#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_QUICK_DROP 0x00000fc0
@@ -981,7 +1029,9 @@
981#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000 1029#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE 0x20000000
982#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29 1030#define AR_PHY_PAPRD_TRAINER_CNTL3_CF_PAPRD_BBTXMIX_DISABLE_S 29
983 1031
984#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + 0x49c) 1032#define AR_PHY_PAPRD_TRAINER_CNTL4 (AR_SM_BASE + \
1033 (AR_SREV_9485(ah) ? \
1034 0x58c : 0x49c))
985#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000 1035#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES 0x03ff0000
986#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16 1036#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_NUM_TRAIN_SAMPLES_S 16
987#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000 1037#define AR_PHY_PAPRD_TRAINER_CNTL4_CF_PAPRD_SAFETY_DELTA 0x0000f000
@@ -1040,6 +1090,14 @@
1040#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0 0x3F 1090#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0 0x3F
1041#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S 0 1091#define AR_PHY_POWERTX_RATE5_POWERTXHT20_0_S 0
1042 1092
1093#define AR_PHY_POWERTX_RATE6 (AR_SM_BASE + 0x1d4)
1094#define AR_PHY_POWERTX_RATE6_POWERTXHT20_5 0x3F00
1095#define AR_PHY_POWERTX_RATE6_POWERTXHT20_5_S 8
1096
1097#define AR_PHY_POWERTX_RATE8 (AR_SM_BASE + 0x1dc)
1098#define AR_PHY_POWERTX_RATE8_POWERTXHT40_5 0x3F00
1099#define AR_PHY_POWERTX_RATE8_POWERTXHT40_5_S 8
1100
1043void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx); 1101void ar9003_hw_set_chain_masks(struct ath_hw *ah, u8 rx, u8 tx);
1044 1102
1045#endif /* AR9003_PHY_H */ 1103#endif /* AR9003_PHY_H */
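
The new AR9485-aware register definitions above (AR_PHY_PAPRD_TRAINER_CNTL*, AR_CH0_TOP2, AR_CH0_XTAL, AR_PHY_65NM_CH0_THERM) expand to an AR_SREV_9485() ternary, so they implicitly require a variable named ah to be in scope wherever they are used. A simplified, self-contained sketch of that convention is shown below; all names in it are made up, not taken from the header.

#include <stdbool.h>
#include <stdio.h>

struct demo_hw { bool is_9485; };

#define DEMO_SREV_9485(ah)	((ah)->is_9485)
/* Like the header's conditional addresses, this macro silently uses 'ah'. */
#define DEMO_CH0_TOP2		(DEMO_SREV_9485(ah) ? 0x00016284u : 0x0001628cu)

int main(void)
{
	struct demo_hw hw = { .is_9485 = true };
	struct demo_hw *ah = &hw;	/* must be named 'ah' for the macro to expand */

	printf("0x%08x\n", DEMO_CH0_TOP2);	/* prints 0x00016284 */
	return 0;
}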
diff --git a/drivers/net/wireless/ath/ath9k/ar9485_initvals.h b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
new file mode 100644
index 000000000000..70de3d89a7b5
--- /dev/null
+++ b/drivers/net/wireless/ath/ath9k/ar9485_initvals.h
@@ -0,0 +1,943 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef INITVALS_9485_H
18#define INITVALS_9485_H
19
20static const u32 ar9485Common_1_0[][2] = {
21 /* Addr allmodes */
22 {0x00007010, 0x00000022},
23 {0x00007020, 0x00000000},
24 {0x00007034, 0x00000002},
25 {0x00007038, 0x000004c2},
26};
27
28static const u32 ar9485_1_0_mac_postamble[][5] = {
29 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
30 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
31 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
32 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
33 {0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00},
34 {0x0000801c, 0x128d8027, 0x128d804f, 0x12e00057, 0x12e0002b},
35 {0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810},
36 {0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a},
37 {0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440},
38};
39
40static const u32 ar9485_1_0_pcie_phy_pll_on_clkreq_disable_L1[][2] = {
41 /* Addr allmodes */
42 {0x00018c00, 0x10212e5e},
43 {0x00018c04, 0x000801d8},
44 {0x00018c08, 0x0000580c},
45};
46
47static const u32 ar9485Common_wo_xlna_rx_gain_1_0[][2] = {
48 /* Addr allmodes */
49 {0x0000a000, 0x00010000},
50 {0x0000a004, 0x00030002},
51 {0x0000a008, 0x00050004},
52 {0x0000a00c, 0x00810080},
53 {0x0000a010, 0x01800082},
54 {0x0000a014, 0x01820181},
55 {0x0000a018, 0x01840183},
56 {0x0000a01c, 0x01880185},
57 {0x0000a020, 0x018a0189},
58 {0x0000a024, 0x02850284},
59 {0x0000a028, 0x02890288},
60 {0x0000a02c, 0x03850384},
61 {0x0000a030, 0x03890388},
62 {0x0000a034, 0x038b038a},
63 {0x0000a038, 0x038d038c},
64 {0x0000a03c, 0x03910390},
65 {0x0000a040, 0x03930392},
66 {0x0000a044, 0x03950394},
67 {0x0000a048, 0x00000396},
68 {0x0000a04c, 0x00000000},
69 {0x0000a050, 0x00000000},
70 {0x0000a054, 0x00000000},
71 {0x0000a058, 0x00000000},
72 {0x0000a05c, 0x00000000},
73 {0x0000a060, 0x00000000},
74 {0x0000a064, 0x00000000},
75 {0x0000a068, 0x00000000},
76 {0x0000a06c, 0x00000000},
77 {0x0000a070, 0x00000000},
78 {0x0000a074, 0x00000000},
79 {0x0000a078, 0x00000000},
80 {0x0000a07c, 0x00000000},
81 {0x0000a080, 0x28282828},
82 {0x0000a084, 0x28282828},
83 {0x0000a088, 0x28282828},
84 {0x0000a08c, 0x28282828},
85 {0x0000a090, 0x28282828},
86 {0x0000a094, 0x21212128},
87 {0x0000a098, 0x171c1c1c},
88 {0x0000a09c, 0x02020212},
89 {0x0000a0a0, 0x00000202},
90 {0x0000a0a4, 0x00000000},
91 {0x0000a0a8, 0x00000000},
92 {0x0000a0ac, 0x00000000},
93 {0x0000a0b0, 0x00000000},
94 {0x0000a0b4, 0x00000000},
95 {0x0000a0b8, 0x00000000},
96 {0x0000a0bc, 0x00000000},
97 {0x0000a0c0, 0x001f0000},
98 {0x0000a0c4, 0x111f1100},
99 {0x0000a0c8, 0x111d111e},
100 {0x0000a0cc, 0x111b111c},
101 {0x0000a0d0, 0x22032204},
102 {0x0000a0d4, 0x22012202},
103 {0x0000a0d8, 0x221f2200},
104 {0x0000a0dc, 0x221d221e},
105 {0x0000a0e0, 0x33013302},
106 {0x0000a0e4, 0x331f3300},
107 {0x0000a0e8, 0x4402331e},
108 {0x0000a0ec, 0x44004401},
109 {0x0000a0f0, 0x441e441f},
110 {0x0000a0f4, 0x55015502},
111 {0x0000a0f8, 0x551f5500},
112 {0x0000a0fc, 0x6602551e},
113 {0x0000a100, 0x66006601},
114 {0x0000a104, 0x661e661f},
115 {0x0000a108, 0x7703661d},
116 {0x0000a10c, 0x77017702},
117 {0x0000a110, 0x00007700},
118 {0x0000a114, 0x00000000},
119 {0x0000a118, 0x00000000},
120 {0x0000a11c, 0x00000000},
121 {0x0000a120, 0x00000000},
122 {0x0000a124, 0x00000000},
123 {0x0000a128, 0x00000000},
124 {0x0000a12c, 0x00000000},
125 {0x0000a130, 0x00000000},
126 {0x0000a134, 0x00000000},
127 {0x0000a138, 0x00000000},
128 {0x0000a13c, 0x00000000},
129 {0x0000a140, 0x001f0000},
130 {0x0000a144, 0x111f1100},
131 {0x0000a148, 0x111d111e},
132 {0x0000a14c, 0x111b111c},
133 {0x0000a150, 0x22032204},
134 {0x0000a154, 0x22012202},
135 {0x0000a158, 0x221f2200},
136 {0x0000a15c, 0x221d221e},
137 {0x0000a160, 0x33013302},
138 {0x0000a164, 0x331f3300},
139 {0x0000a168, 0x4402331e},
140 {0x0000a16c, 0x44004401},
141 {0x0000a170, 0x441e441f},
142 {0x0000a174, 0x55015502},
143 {0x0000a178, 0x551f5500},
144 {0x0000a17c, 0x6602551e},
145 {0x0000a180, 0x66006601},
146 {0x0000a184, 0x661e661f},
147 {0x0000a188, 0x7703661d},
148 {0x0000a18c, 0x77017702},
149 {0x0000a190, 0x00007700},
150 {0x0000a194, 0x00000000},
151 {0x0000a198, 0x00000000},
152 {0x0000a19c, 0x00000000},
153 {0x0000a1a0, 0x00000000},
154 {0x0000a1a4, 0x00000000},
155 {0x0000a1a8, 0x00000000},
156 {0x0000a1ac, 0x00000000},
157 {0x0000a1b0, 0x00000000},
158 {0x0000a1b4, 0x00000000},
159 {0x0000a1b8, 0x00000000},
160 {0x0000a1bc, 0x00000000},
161 {0x0000a1c0, 0x00000000},
162 {0x0000a1c4, 0x00000000},
163 {0x0000a1c8, 0x00000000},
164 {0x0000a1cc, 0x00000000},
165 {0x0000a1d0, 0x00000000},
166 {0x0000a1d4, 0x00000000},
167 {0x0000a1d8, 0x00000000},
168 {0x0000a1dc, 0x00000000},
169 {0x0000a1e0, 0x00000000},
170 {0x0000a1e4, 0x00000000},
171 {0x0000a1e8, 0x00000000},
172 {0x0000a1ec, 0x00000000},
173 {0x0000a1f0, 0x00000396},
174 {0x0000a1f4, 0x00000396},
175 {0x0000a1f8, 0x00000396},
176 {0x0000a1fc, 0x00000296},
177};
178
179static const u32 ar9485Modes_high_power_tx_gain_1_0[][5] = {
180 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
181 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
182 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
183 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
184 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
185 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
186 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
187 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
188 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
189 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
190 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
191 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
192 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
193 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
194 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
195 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
196 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
197 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
198 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
199 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
200 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
201 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
202 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
203 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
204 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
205 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
206 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
207 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
208 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
209 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
210 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
211 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
212 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
213 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
214 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
215};
216
217static const u32 ar9485_1_0[][2] = {
218 /* Addr allmodes */
219 {0x0000a580, 0x00000000},
220 {0x0000a584, 0x00000000},
221 {0x0000a588, 0x00000000},
222 {0x0000a58c, 0x00000000},
223 {0x0000a590, 0x00000000},
224 {0x0000a594, 0x00000000},
225 {0x0000a598, 0x00000000},
226 {0x0000a59c, 0x00000000},
227 {0x0000a5a0, 0x00000000},
228 {0x0000a5a4, 0x00000000},
229 {0x0000a5a8, 0x00000000},
230 {0x0000a5ac, 0x00000000},
231 {0x0000a5b0, 0x00000000},
232 {0x0000a5b4, 0x00000000},
233 {0x0000a5b8, 0x00000000},
234 {0x0000a5bc, 0x00000000},
235};
236
237static const u32 ar9485_1_0_radio_core[][2] = {
238 /* Addr allmodes */
239 {0x00016000, 0x36db6db6},
240 {0x00016004, 0x6db6db40},
241 {0x00016008, 0x73800000},
242 {0x0001600c, 0x00000000},
243 {0x00016040, 0x7f80fff8},
244 {0x00016048, 0x6c92426e},
245 {0x0001604c, 0x000f0278},
246 {0x00016050, 0x6db6db6c},
247 {0x00016054, 0x6db60000},
248 {0x00016080, 0x00080000},
249 {0x00016084, 0x0e48048c},
250 {0x00016088, 0x14214514},
251 {0x0001608c, 0x119f081e},
252 {0x00016090, 0x24926490},
253 {0x00016098, 0xd28b3330},
254 {0x000160a0, 0xc2108ffe},
255 {0x000160a4, 0x812fc370},
256 {0x000160a8, 0x423c8000},
257 {0x000160b4, 0x92480040},
258 {0x000160c0, 0x006db6db},
259 {0x000160c4, 0x0186db60},
260 {0x000160c8, 0x6db6db6c},
261 {0x000160cc, 0x6de6fbe0},
262 {0x000160d0, 0xf7dfcf3c},
263 {0x00016100, 0x04cb0001},
264 {0x00016104, 0xfff80015},
265 {0x00016108, 0x00080010},
266 {0x00016144, 0x01884080},
267 {0x00016148, 0x00008040},
268 {0x00016180, 0x08453333},
269 {0x00016184, 0x18e82f01},
270 {0x00016188, 0x00000000},
271 {0x0001618c, 0x00000000},
272 {0x00016240, 0x08400000},
273 {0x00016244, 0x1bf90f00},
274 {0x00016248, 0x00000000},
275 {0x0001624c, 0x00000000},
276 {0x00016280, 0x01000015},
277 {0x00016284, 0x00d30000},
278 {0x00016288, 0x00318000},
279 {0x0001628c, 0x50000000},
280 {0x00016290, 0x4b96210f},
281 {0x00016380, 0x00000000},
282 {0x00016384, 0x00000000},
283 {0x00016388, 0x00800700},
284 {0x0001638c, 0x00800700},
285 {0x00016390, 0x00800700},
286 {0x00016394, 0x00000000},
287 {0x00016398, 0x00000000},
288 {0x0001639c, 0x00000000},
289 {0x000163a0, 0x00000001},
290 {0x000163a4, 0x00000001},
291 {0x000163a8, 0x00000000},
292 {0x000163ac, 0x00000000},
293 {0x000163b0, 0x00000000},
294 {0x000163b4, 0x00000000},
295 {0x000163b8, 0x00000000},
296 {0x000163bc, 0x00000000},
297 {0x000163c0, 0x000000a0},
298 {0x000163c4, 0x000c0000},
299 {0x000163c8, 0x14021402},
300 {0x000163cc, 0x00001402},
301 {0x000163d0, 0x00000000},
302 {0x000163d4, 0x00000000},
303 {0x00016c40, 0x1319c178},
304 {0x00016c44, 0x10000000},
305};
306
307static const u32 ar9485Modes_lowest_ob_db_tx_gain_1_0[][5] = {
308 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
309 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
310 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
311 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
312 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
313 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
314 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
315 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
316 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
317 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
318 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
319 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
320 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
321 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
322 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
323 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
324 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
325 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
326 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
327 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
328 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
329 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
330 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
331 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
332 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
333 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
334 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
335 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
336 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
337 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
338 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
339 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
340 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
341 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
342 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
343};
344
345static const u32 ar9485_1_0_baseband_core[][2] = {
346 /* Addr allmodes */
347 {0x00009800, 0xafe68e30},
348 {0x00009804, 0xfd14e000},
349 {0x00009808, 0x9c0a8f6b},
350 {0x0000980c, 0x04800000},
351 {0x00009814, 0x9280c00a},
352 {0x00009818, 0x00000000},
353 {0x0000981c, 0x00020028},
354 {0x00009834, 0x5f3ca3de},
355 {0x00009838, 0x0108ecff},
356 {0x0000983c, 0x14750600},
357 {0x00009880, 0x201fff00},
358 {0x00009884, 0x00001042},
359 {0x000098a4, 0x00200400},
360 {0x000098b0, 0x52440bbe},
361 {0x000098bc, 0x00000002},
362 {0x000098d0, 0x004b6a8e},
363 {0x000098d4, 0x00000820},
364 {0x000098dc, 0x00000000},
365 {0x000098f0, 0x00000000},
366 {0x000098f4, 0x00000000},
367 {0x00009c04, 0x00000000},
368 {0x00009c08, 0x03200000},
369 {0x00009c0c, 0x00000000},
370 {0x00009c10, 0x00000000},
371 {0x00009c14, 0x00046384},
372 {0x00009c18, 0x05b6b440},
373 {0x00009c1c, 0x00b6b440},
374 {0x00009d00, 0xc080a333},
375 {0x00009d04, 0x40206c10},
376 {0x00009d08, 0x009c4060},
377 {0x00009d0c, 0x1883800a},
378 {0x00009d10, 0x01834061},
379 {0x00009d14, 0x00c00400},
380 {0x00009d18, 0x00000000},
381 {0x00009d1c, 0x00000000},
382 {0x00009e08, 0x0038233c},
383 {0x00009e24, 0x990bb515},
384 {0x00009e28, 0x0a6f0000},
385 {0x00009e30, 0x06336f77},
386 {0x00009e34, 0x6af6532f},
387 {0x00009e38, 0x0cc80c00},
388 {0x00009e40, 0x0d261820},
389 {0x00009e4c, 0x00001004},
390 {0x00009e50, 0x00ff03f1},
391 {0x00009fc0, 0x80be4788},
392 {0x00009fc4, 0x0001efb5},
393 {0x00009fcc, 0x40000014},
394 {0x0000a20c, 0x00000000},
395 {0x0000a210, 0x00000000},
396 {0x0000a220, 0x00000000},
397 {0x0000a224, 0x00000000},
398 {0x0000a228, 0x10002310},
399 {0x0000a23c, 0x00000000},
400 {0x0000a244, 0x0c000000},
401 {0x0000a2a0, 0x00000001},
402 {0x0000a2c0, 0x00000001},
403 {0x0000a2c8, 0x00000000},
404 {0x0000a2cc, 0x18c43433},
405 {0x0000a2d4, 0x00000000},
406 {0x0000a2dc, 0x00000000},
407 {0x0000a2e0, 0x00000000},
408 {0x0000a2e4, 0x00000000},
409 {0x0000a2e8, 0x00000000},
410 {0x0000a2ec, 0x00000000},
411 {0x0000a2f0, 0x00000000},
412 {0x0000a2f4, 0x00000000},
413 {0x0000a2f8, 0x00000000},
414 {0x0000a344, 0x00000000},
415 {0x0000a34c, 0x00000000},
416 {0x0000a350, 0x0000a000},
417 {0x0000a364, 0x00000000},
418 {0x0000a370, 0x00000000},
419 {0x0000a390, 0x00000001},
420 {0x0000a394, 0x00000444},
421 {0x0000a398, 0x001f0e0f},
422 {0x0000a39c, 0x0075393f},
423 {0x0000a3a0, 0xb79f6427},
424 {0x0000a3a4, 0x00000000},
425 {0x0000a3a8, 0xaaaaaaaa},
426 {0x0000a3ac, 0x3c466478},
427 {0x0000a3c0, 0x20202020},
428 {0x0000a3c4, 0x22222220},
429 {0x0000a3c8, 0x20200020},
430 {0x0000a3cc, 0x20202020},
431 {0x0000a3d0, 0x20202020},
432 {0x0000a3d4, 0x20202020},
433 {0x0000a3d8, 0x20202020},
434 {0x0000a3dc, 0x20202020},
435 {0x0000a3e0, 0x20202020},
436 {0x0000a3e4, 0x20202020},
437 {0x0000a3e8, 0x20202020},
438 {0x0000a3ec, 0x20202020},
439 {0x0000a3f0, 0x00000000},
440 {0x0000a3f4, 0x00000006},
441 {0x0000a3f8, 0x0cdbd380},
442 {0x0000a3fc, 0x000f0f01},
443 {0x0000a400, 0x8fa91f01},
444 {0x0000a404, 0x00000000},
445 {0x0000a408, 0x0e79e5c6},
446 {0x0000a40c, 0x00820820},
447 {0x0000a414, 0x1ce739ce},
448 {0x0000a418, 0x2d0011ce},
449 {0x0000a41c, 0x1ce739ce},
450 {0x0000a420, 0x000001ce},
451 {0x0000a424, 0x1ce739ce},
452 {0x0000a428, 0x000001ce},
453 {0x0000a42c, 0x1ce739ce},
454 {0x0000a430, 0x1ce739ce},
455 {0x0000a434, 0x00000000},
456 {0x0000a438, 0x00001801},
457 {0x0000a43c, 0x00000000},
458 {0x0000a440, 0x00000000},
459 {0x0000a444, 0x00000000},
460 {0x0000a448, 0x04000000},
461 {0x0000a44c, 0x00000001},
462 {0x0000a450, 0x00010000},
463 {0x0000a458, 0x00000000},
464 {0x0000a5c4, 0x3fad9d74},
465 {0x0000a5c8, 0x0048060a},
466 {0x0000a5cc, 0x00000637},
467 {0x0000a760, 0x03020100},
468 {0x0000a764, 0x09080504},
469 {0x0000a768, 0x0d0c0b0a},
470 {0x0000a76c, 0x13121110},
471 {0x0000a770, 0x31301514},
472 {0x0000a774, 0x35343332},
473 {0x0000a778, 0x00000036},
474 {0x0000a780, 0x00000838},
475 {0x0000a7c0, 0x00000000},
476 {0x0000a7c4, 0xfffffffc},
477 {0x0000a7c8, 0x00000000},
478 {0x0000a7cc, 0x00000000},
479 {0x0000a7d0, 0x00000000},
480 {0x0000a7d4, 0x00000004},
481 {0x0000a7dc, 0x00000001},
482};
483
484static const u32 ar9485Modes_high_ob_db_tx_gain_1_0[][5] = {
485 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
486 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
487 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
488 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
489 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
490 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
491 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
492 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
493 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
494 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
495 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
496 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
497 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
498 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
499 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
500 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
501 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
502 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
503 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
504 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
505 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
506 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
507 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
508 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
509 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
510 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
511 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
512 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
513 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
514 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
515 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
516 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
517 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
518 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
519 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
520};
521
522static const u32 ar9485Common_rx_gain_1_0[][2] = {
523 /* Addr allmodes */
524 {0x0000a000, 0x00010000},
525 {0x0000a004, 0x00030002},
526 {0x0000a008, 0x00050004},
527 {0x0000a00c, 0x00810080},
528 {0x0000a010, 0x01800082},
529 {0x0000a014, 0x01820181},
530 {0x0000a018, 0x01840183},
531 {0x0000a01c, 0x01880185},
532 {0x0000a020, 0x018a0189},
533 {0x0000a024, 0x02850284},
534 {0x0000a028, 0x02890288},
535 {0x0000a02c, 0x03850384},
536 {0x0000a030, 0x03890388},
537 {0x0000a034, 0x038b038a},
538 {0x0000a038, 0x038d038c},
539 {0x0000a03c, 0x03910390},
540 {0x0000a040, 0x03930392},
541 {0x0000a044, 0x03950394},
542 {0x0000a048, 0x00000396},
543 {0x0000a04c, 0x00000000},
544 {0x0000a050, 0x00000000},
545 {0x0000a054, 0x00000000},
546 {0x0000a058, 0x00000000},
547 {0x0000a05c, 0x00000000},
548 {0x0000a060, 0x00000000},
549 {0x0000a064, 0x00000000},
550 {0x0000a068, 0x00000000},
551 {0x0000a06c, 0x00000000},
552 {0x0000a070, 0x00000000},
553 {0x0000a074, 0x00000000},
554 {0x0000a078, 0x00000000},
555 {0x0000a07c, 0x00000000},
556 {0x0000a080, 0x28282828},
557 {0x0000a084, 0x28282828},
558 {0x0000a088, 0x28282828},
559 {0x0000a08c, 0x28282828},
560 {0x0000a090, 0x28282828},
561 {0x0000a094, 0x21212128},
562 {0x0000a098, 0x171c1c1c},
563 {0x0000a09c, 0x02020212},
564 {0x0000a0a0, 0x00000202},
565 {0x0000a0a4, 0x00000000},
566 {0x0000a0a8, 0x00000000},
567 {0x0000a0ac, 0x00000000},
568 {0x0000a0b0, 0x00000000},
569 {0x0000a0b4, 0x00000000},
570 {0x0000a0b8, 0x00000000},
571 {0x0000a0bc, 0x00000000},
572 {0x0000a0c0, 0x001f0000},
573 {0x0000a0c4, 0x111f1100},
574 {0x0000a0c8, 0x111d111e},
575 {0x0000a0cc, 0x111b111c},
576 {0x0000a0d0, 0x22032204},
577 {0x0000a0d4, 0x22012202},
578 {0x0000a0d8, 0x221f2200},
579 {0x0000a0dc, 0x221d221e},
580 {0x0000a0e0, 0x33013302},
581 {0x0000a0e4, 0x331f3300},
582 {0x0000a0e8, 0x4402331e},
583 {0x0000a0ec, 0x44004401},
584 {0x0000a0f0, 0x441e441f},
585 {0x0000a0f4, 0x55015502},
586 {0x0000a0f8, 0x551f5500},
587 {0x0000a0fc, 0x6602551e},
588 {0x0000a100, 0x66006601},
589 {0x0000a104, 0x661e661f},
590 {0x0000a108, 0x7703661d},
591 {0x0000a10c, 0x77017702},
592 {0x0000a110, 0x00007700},
593 {0x0000a114, 0x00000000},
594 {0x0000a118, 0x00000000},
595 {0x0000a11c, 0x00000000},
596 {0x0000a120, 0x00000000},
597 {0x0000a124, 0x00000000},
598 {0x0000a128, 0x00000000},
599 {0x0000a12c, 0x00000000},
600 {0x0000a130, 0x00000000},
601 {0x0000a134, 0x00000000},
602 {0x0000a138, 0x00000000},
603 {0x0000a13c, 0x00000000},
604 {0x0000a140, 0x001f0000},
605 {0x0000a144, 0x111f1100},
606 {0x0000a148, 0x111d111e},
607 {0x0000a14c, 0x111b111c},
608 {0x0000a150, 0x22032204},
609 {0x0000a154, 0x22012202},
610 {0x0000a158, 0x221f2200},
611 {0x0000a15c, 0x221d221e},
612 {0x0000a160, 0x33013302},
613 {0x0000a164, 0x331f3300},
614 {0x0000a168, 0x4402331e},
615 {0x0000a16c, 0x44004401},
616 {0x0000a170, 0x441e441f},
617 {0x0000a174, 0x55015502},
618 {0x0000a178, 0x551f5500},
619 {0x0000a17c, 0x6602551e},
620 {0x0000a180, 0x66006601},
621 {0x0000a184, 0x661e661f},
622 {0x0000a188, 0x7703661d},
623 {0x0000a18c, 0x77017702},
624 {0x0000a190, 0x00007700},
625 {0x0000a194, 0x00000000},
626 {0x0000a198, 0x00000000},
627 {0x0000a19c, 0x00000000},
628 {0x0000a1a0, 0x00000000},
629 {0x0000a1a4, 0x00000000},
630 {0x0000a1a8, 0x00000000},
631 {0x0000a1ac, 0x00000000},
632 {0x0000a1b0, 0x00000000},
633 {0x0000a1b4, 0x00000000},
634 {0x0000a1b8, 0x00000000},
635 {0x0000a1bc, 0x00000000},
636 {0x0000a1c0, 0x00000000},
637 {0x0000a1c4, 0x00000000},
638 {0x0000a1c8, 0x00000000},
639 {0x0000a1cc, 0x00000000},
640 {0x0000a1d0, 0x00000000},
641 {0x0000a1d4, 0x00000000},
642 {0x0000a1d8, 0x00000000},
643 {0x0000a1dc, 0x00000000},
644 {0x0000a1e0, 0x00000000},
645 {0x0000a1e4, 0x00000000},
646 {0x0000a1e8, 0x00000000},
647 {0x0000a1ec, 0x00000000},
648 {0x0000a1f0, 0x00000396},
649 {0x0000a1f4, 0x00000396},
650 {0x0000a1f8, 0x00000396},
651 {0x0000a1fc, 0x00000296},
652};
653
654static const u32 ar9485_1_0_pcie_phy_pll_on_clkreq_enable_L1[][2] = {
655 /* Addr allmodes */
656 {0x00018c00, 0x10252e5e},
657 {0x00018c04, 0x000801d8},
658 {0x00018c08, 0x0000580c},
659};
660
661static const u32 ar9485_1_0_pcie_phy_clkreq_enable_L1[][2] = {
662 /* Addr allmodes */
663 {0x00018c00, 0x10253e5e},
664 {0x00018c04, 0x000801d8},
665 {0x00018c08, 0x0000580c},
666};
667
668static const u32 ar9485_1_0_soc_preamble[][2] = {
669 /* Addr allmodes */
670 {0x000040a4, 0x00a0c9c9},
671 {0x00007048, 0x00000004},
672};
673
674static const u32 ar9485_fast_clock_1_0_baseband_postamble[][3] = {
675 /* Addr 5G_HT20 5G_HT40 */
676 {0x00009e00, 0x03721821, 0x03721821},
677 {0x0000a230, 0x0000400b, 0x00004016},
678 {0x0000a254, 0x00000898, 0x00001130},
679};
680
681static const u32 ar9485_1_0_baseband_postamble[][5] = {
682 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
683 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
684 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
685 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
686 {0x00009828, 0x06903081, 0x06903081, 0x06903881, 0x06903881},
687 {0x0000982c, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4},
688 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
689 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
690 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0},
691 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020},
692 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
693 {0x00009e10, 0x7ec88d2e, 0x7ec88d2e, 0x7ec80d2e, 0x7ec80d2e},
694 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e},
695 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
696 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
697 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
698 {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
699 {0x00009e3c, 0xcf946220, 0xcf946220, 0xcf946222, 0xcf946222},
700 {0x00009e44, 0x02321e27, 0x02321e27, 0x02282324, 0x02282324},
701 {0x00009e48, 0x5030201a, 0x5030201a, 0x50302010, 0x50302010},
702 {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
703 {0x0000a204, 0x01303fc0, 0x01303fc4, 0x01303fc4, 0x01303fc0},
704 {0x0000a208, 0x00000104, 0x00000104, 0x00000004, 0x00000004},
705 {0x0000a230, 0x0000400a, 0x00004014, 0x00004016, 0x0000400b},
706 {0x0000a234, 0x10000fff, 0x10000fff, 0x10000fff, 0x10000fff},
707 {0x0000a238, 0xffb81018, 0xffb81018, 0xffb81018, 0xffb81018},
708 {0x0000a250, 0x00000000, 0x00000000, 0x00000210, 0x00000108},
709 {0x0000a254, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898},
710 {0x0000a258, 0x02020002, 0x02020002, 0x02020002, 0x02020002},
711 {0x0000a25c, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e},
712 {0x0000a260, 0x3a021501, 0x3a021501, 0x3a021501, 0x3a021501},
713 {0x0000a264, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e},
714 {0x0000a280, 0x00000007, 0x00000007, 0x0000000b, 0x0000000b},
715 {0x0000a284, 0x00000000, 0x00000000, 0x000002a0, 0x000002a0},
716 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
717 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
718 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
719 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071982},
720 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
721 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
722 {0x0000be04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
723 {0x0000be18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
724};
725
726static const u32 ar9485Modes_low_ob_db_tx_gain_1_0[][5] = {
727 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
728 {0x0000a410, 0x000050d9, 0x000050d9, 0x000050d8, 0x000050d8},
729 {0x0000a500, 0x00022200, 0x00022200, 0x00000000, 0x00000000},
730 {0x0000a504, 0x05062002, 0x05062002, 0x04000002, 0x04000002},
731 {0x0000a508, 0x0c002e00, 0x0c002e00, 0x08000004, 0x08000004},
732 {0x0000a50c, 0x11062202, 0x11062202, 0x0d000200, 0x0d000200},
733 {0x0000a510, 0x17022e00, 0x17022e00, 0x11000202, 0x11000202},
734 {0x0000a514, 0x1d000ec2, 0x1d000ec2, 0x15000400, 0x15000400},
735 {0x0000a518, 0x25020ec0, 0x25020ec0, 0x19000402, 0x19000402},
736 {0x0000a51c, 0x2b020ec3, 0x2b020ec3, 0x1d000404, 0x1d000404},
737 {0x0000a520, 0x2f001f04, 0x2f001f04, 0x21000603, 0x21000603},
738 {0x0000a524, 0x35001fc4, 0x35001fc4, 0x25000605, 0x25000605},
739 {0x0000a528, 0x3c022f04, 0x3c022f04, 0x2a000a03, 0x2a000a03},
740 {0x0000a52c, 0x41023e85, 0x41023e85, 0x2c000a04, 0x2c000a04},
741 {0x0000a530, 0x48023ec6, 0x48023ec6, 0x2e000a20, 0x2e000a20},
742 {0x0000a534, 0x4d023f01, 0x4d023f01, 0x34000e20, 0x34000e20},
743 {0x0000a538, 0x53023f4b, 0x53023f4b, 0x38000e22, 0x38000e22},
744 {0x0000a53c, 0x5a027f09, 0x5a027f09, 0x3c000e24, 0x3c000e24},
745 {0x0000a540, 0x5f027fc9, 0x5f027fc9, 0x40000e26, 0x40000e26},
746 {0x0000a544, 0x6502feca, 0x6502feca, 0x43001640, 0x43001640},
747 {0x0000a548, 0x6b02ff4a, 0x6b02ff4a, 0x46001660, 0x46001660},
748 {0x0000a54c, 0x7203feca, 0x7203feca, 0x49001861, 0x49001861},
749 {0x0000a550, 0x7703ff0b, 0x7703ff0b, 0x4c001a81, 0x4c001a81},
750 {0x0000a554, 0x7d06ffcb, 0x7d06ffcb, 0x4f001a83, 0x4f001a83},
751 {0x0000a558, 0x8407ff0b, 0x8407ff0b, 0x54001c85, 0x54001c85},
752 {0x0000a55c, 0x8907ffcb, 0x8907ffcb, 0x58001ce5, 0x58001ce5},
753 {0x0000a560, 0x900fff0b, 0x900fff0b, 0x5b001ce9, 0x5b001ce9},
754 {0x0000a564, 0x960fffcb, 0x960fffcb, 0x60001eeb, 0x60001eeb},
755 {0x0000a568, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
756 {0x0000a56c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
757 {0x0000a570, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
758 {0x0000a574, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
759 {0x0000a578, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
760 {0x0000a57c, 0x9c1fff0b, 0x9c1fff0b, 0x60001eeb, 0x60001eeb},
761 {0x00016044, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db, 0x05b6b2db},
762};
763
764static const u32 ar9485_1_0_pcie_phy_clkreq_disable_L1[][2] = {
765 /* Addr allmodes */
766 {0x00018c00, 0x10213e5e},
767 {0x00018c04, 0x000801d8},
768 {0x00018c08, 0x0000580c},
769};
770
771static const u32 ar9485_1_0_radio_postamble[][2] = {
772 /* Addr allmodes */
773 {0x0001609c, 0x0b283f31},
774 {0x000160ac, 0x24611800},
775 {0x000160b0, 0x03284f3e},
776 {0x0001610c, 0x00170000},
777 {0x00016140, 0x10804008},
778};
779
780static const u32 ar9485_1_0_mac_core[][2] = {
781 /* Addr allmodes */
782 {0x00000008, 0x00000000},
783 {0x00000030, 0x00020085},
784 {0x00000034, 0x00000005},
785 {0x00000040, 0x00000000},
786 {0x00000044, 0x00000000},
787 {0x00000048, 0x00000008},
788 {0x0000004c, 0x00000010},
789 {0x00000050, 0x00000000},
790 {0x00001040, 0x002ffc0f},
791 {0x00001044, 0x002ffc0f},
792 {0x00001048, 0x002ffc0f},
793 {0x0000104c, 0x002ffc0f},
794 {0x00001050, 0x002ffc0f},
795 {0x00001054, 0x002ffc0f},
796 {0x00001058, 0x002ffc0f},
797 {0x0000105c, 0x002ffc0f},
798 {0x00001060, 0x002ffc0f},
799 {0x00001064, 0x002ffc0f},
800 {0x000010f0, 0x00000100},
801 {0x00001270, 0x00000000},
802 {0x000012b0, 0x00000000},
803 {0x000012f0, 0x00000000},
804 {0x0000143c, 0x00000000},
805 {0x0000147c, 0x00000000},
806 {0x00008000, 0x00000000},
807 {0x00008004, 0x00000000},
808 {0x00008008, 0x00000000},
809 {0x0000800c, 0x00000000},
810 {0x00008018, 0x00000000},
811 {0x00008020, 0x00000000},
812 {0x00008038, 0x00000000},
813 {0x0000803c, 0x00000000},
814 {0x00008040, 0x00000000},
815 {0x00008044, 0x00000000},
816 {0x00008048, 0x00000000},
817 {0x0000804c, 0xffffffff},
818 {0x00008054, 0x00000000},
819 {0x00008058, 0x00000000},
820 {0x0000805c, 0x000fc78f},
821 {0x00008060, 0x0000000f},
822 {0x00008064, 0x00000000},
823 {0x00008070, 0x00000310},
824 {0x00008074, 0x00000020},
825 {0x00008078, 0x00000000},
826 {0x0000809c, 0x0000000f},
827 {0x000080a0, 0x00000000},
828 {0x000080a4, 0x02ff0000},
829 {0x000080a8, 0x0e070605},
830 {0x000080ac, 0x0000000d},
831 {0x000080b0, 0x00000000},
832 {0x000080b4, 0x00000000},
833 {0x000080b8, 0x00000000},
834 {0x000080bc, 0x00000000},
835 {0x000080c0, 0x2a800000},
836 {0x000080c4, 0x06900168},
837 {0x000080c8, 0x13881c20},
838 {0x000080cc, 0x01f40000},
839 {0x000080d0, 0x00252500},
840 {0x000080d4, 0x00a00000},
841 {0x000080d8, 0x00400000},
842 {0x000080dc, 0x00000000},
843 {0x000080e0, 0xffffffff},
844 {0x000080e4, 0x0000ffff},
845 {0x000080e8, 0x3f3f3f3f},
846 {0x000080ec, 0x00000000},
847 {0x000080f0, 0x00000000},
848 {0x000080f4, 0x00000000},
849 {0x000080fc, 0x00020000},
850 {0x00008100, 0x00000000},
851 {0x00008108, 0x00000052},
852 {0x0000810c, 0x00000000},
853 {0x00008110, 0x00000000},
854 {0x00008114, 0x000007ff},
855 {0x00008118, 0x000000aa},
856 {0x0000811c, 0x00003210},
857 {0x00008124, 0x00000000},
858 {0x00008128, 0x00000000},
859 {0x0000812c, 0x00000000},
860 {0x00008130, 0x00000000},
861 {0x00008134, 0x00000000},
862 {0x00008138, 0x00000000},
863 {0x0000813c, 0x0000ffff},
864 {0x00008144, 0xffffffff},
865 {0x00008168, 0x00000000},
866 {0x0000816c, 0x00000000},
867 {0x00008170, 0x18486200},
868 {0x00008174, 0x33332210},
869 {0x00008178, 0x00000000},
870 {0x0000817c, 0x00020000},
871 {0x000081c0, 0x00000000},
872 {0x000081c4, 0x33332210},
873 {0x000081c8, 0x00000000},
874 {0x000081cc, 0x00000000},
875 {0x000081d4, 0x00000000},
876 {0x000081ec, 0x00000000},
877 {0x000081f0, 0x00000000},
878 {0x000081f4, 0x00000000},
879 {0x000081f8, 0x00000000},
880 {0x000081fc, 0x00000000},
881 {0x00008240, 0x00100000},
882 {0x00008244, 0x0010f400},
883 {0x00008248, 0x00000800},
884 {0x0000824c, 0x0001e800},
885 {0x00008250, 0x00000000},
886 {0x00008254, 0x00000000},
887 {0x00008258, 0x00000000},
888 {0x0000825c, 0x40000000},
889 {0x00008260, 0x00080922},
890 {0x00008264, 0x9ca00010},
891 {0x00008268, 0xffffffff},
892 {0x0000826c, 0x0000ffff},
893 {0x00008270, 0x00000000},
894 {0x00008274, 0x40000000},
895 {0x00008278, 0x003e4180},
896 {0x0000827c, 0x00000004},
897 {0x00008284, 0x0000002c},
898 {0x00008288, 0x0000002c},
899 {0x0000828c, 0x000000ff},
900 {0x00008294, 0x00000000},
901 {0x00008298, 0x00000000},
902 {0x0000829c, 0x00000000},
903 {0x00008300, 0x00000140},
904 {0x00008314, 0x00000000},
905 {0x0000831c, 0x0000010d},
906 {0x00008328, 0x00000000},
907 {0x0000832c, 0x00000007},
908 {0x00008330, 0x00000302},
909 {0x00008334, 0x00000700},
910 {0x00008338, 0x00ff0000},
911 {0x0000833c, 0x02400000},
912 {0x00008340, 0x000107ff},
913 {0x00008344, 0xa248105b},
914 {0x00008348, 0x008f0000},
915 {0x0000835c, 0x00000000},
916 {0x00008360, 0xffffffff},
917 {0x00008364, 0xffffffff},
918 {0x00008368, 0x00000000},
919 {0x00008370, 0x00000000},
920 {0x00008374, 0x000000ff},
921 {0x00008378, 0x00000000},
922 {0x0000837c, 0x00000000},
923 {0x00008380, 0xffffffff},
924 {0x00008384, 0xffffffff},
925 {0x00008390, 0xffffffff},
926 {0x00008394, 0xffffffff},
927 {0x00008398, 0x00000000},
928 {0x0000839c, 0x00000000},
929 {0x000083a0, 0x00000000},
930 {0x000083a4, 0x0000fa14},
931 {0x000083a8, 0x000f0c00},
932 {0x000083ac, 0x33332210},
933 {0x000083b0, 0x33332210},
934 {0x000083b4, 0x33332210},
935 {0x000083b8, 0x33332210},
936 {0x000083bc, 0x00000000},
937 {0x000083c0, 0x00000000},
938 {0x000083c4, 0x00000000},
939 {0x000083c8, 0x00000000},
940 {0x000083cc, 0x00000200},
941 {0x000083d0, 0x000301ff},
942};
943#endif
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 0963071e8f90..3681caf54282 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -57,6 +57,8 @@ struct ath_node;
57 57
58#define A_MAX(a, b) ((a) > (b) ? (a) : (b)) 58#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
59 59
60#define ATH9K_PM_QOS_DEFAULT_VALUE 55
61
60#define TSF_TO_TU(_h,_l) \ 62#define TSF_TO_TU(_h,_l) \
61 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) 63 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
62 64
@@ -87,33 +89,19 @@ struct ath_config {
87/** 89/**
88 * enum buffer_type - Buffer type flags 90 * enum buffer_type - Buffer type flags
89 * 91 *
90 * @BUF_HT: Send this buffer using HT capabilities
91 * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX) 92 * @BUF_AMPDU: This buffer is an ampdu, as part of an aggregate (during TX)
92 * @BUF_AGGR: Indicates whether the buffer can be aggregated 93 * @BUF_AGGR: Indicates whether the buffer can be aggregated
93 * (used in aggregation scheduling) 94 * (used in aggregation scheduling)
94 * @BUF_RETRY: Indicates whether the buffer is retried
95 * @BUF_XRETRY: To denote excessive retries of the buffer 95 * @BUF_XRETRY: To denote excessive retries of the buffer
96 */ 96 */
97enum buffer_type { 97enum buffer_type {
98 BUF_HT = BIT(1),
99 BUF_AMPDU = BIT(2), 98 BUF_AMPDU = BIT(2),
100 BUF_AGGR = BIT(3), 99 BUF_AGGR = BIT(3),
101 BUF_RETRY = BIT(4),
102 BUF_XRETRY = BIT(5), 100 BUF_XRETRY = BIT(5),
103}; 101};
104 102
105#define bf_nframes bf_state.bfs_nframes
106#define bf_al bf_state.bfs_al
107#define bf_frmlen bf_state.bfs_frmlen
108#define bf_retries bf_state.bfs_retries
109#define bf_seqno bf_state.bfs_seqno
110#define bf_tidno bf_state.bfs_tidno
111#define bf_keyix bf_state.bfs_keyix
112#define bf_keytype bf_state.bfs_keytype
113#define bf_isht(bf) (bf->bf_state.bf_type & BUF_HT)
114#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU) 103#define bf_isampdu(bf) (bf->bf_state.bf_type & BUF_AMPDU)
115#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR) 104#define bf_isaggr(bf) (bf->bf_state.bf_type & BUF_AGGR)
116#define bf_isretried(bf) (bf->bf_state.bf_type & BUF_RETRY)
117#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY) 105#define bf_isxretried(bf) (bf->bf_state.bf_type & BUF_XRETRY)
118 106
119#define ATH_TXSTATUS_RING_SIZE 64 107#define ATH_TXSTATUS_RING_SIZE 64
@@ -178,8 +166,8 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
178 166
179/* returns delimiter padding required given the packet length */ 167/* returns delimiter padding required given the packet length */
180#define ATH_AGGR_GET_NDELIM(_len) \ 168#define ATH_AGGR_GET_NDELIM(_len) \
181 (((((_len) + ATH_AGGR_DELIM_SZ) < ATH_AGGR_MINPLEN) ? \ 169 (((_len) >= ATH_AGGR_MINPLEN) ? 0 : \
182 (ATH_AGGR_MINPLEN - (_len) - ATH_AGGR_DELIM_SZ) : 0) >> 2) 170 DIV_ROUND_UP(ATH_AGGR_MINPLEN - (_len), ATH_AGGR_DELIM_SZ))
183 171
184#define BAW_WITHIN(_start, _bawsz, _seqno) \ 172#define BAW_WITHIN(_start, _bawsz, _seqno) \
185 ((((_seqno) - (_start)) & 4095) < (_bawsz)) 173 ((((_seqno) - (_start)) & 4095) < (_bawsz))
@@ -196,12 +184,12 @@ enum ATH_AGGR_STATUS {
196 184
197#define ATH_TXFIFO_DEPTH 8 185#define ATH_TXFIFO_DEPTH 8
198struct ath_txq { 186struct ath_txq {
199 int axq_class;
200 u32 axq_qnum; 187 u32 axq_qnum;
201 u32 *axq_link; 188 u32 *axq_link;
202 struct list_head axq_q; 189 struct list_head axq_q;
203 spinlock_t axq_lock; 190 spinlock_t axq_lock;
204 u32 axq_depth; 191 u32 axq_depth;
192 u32 axq_ampdu_depth;
205 bool stopped; 193 bool stopped;
206 bool axq_tx_inprogress; 194 bool axq_tx_inprogress;
207 struct list_head axq_acq; 195 struct list_head axq_acq;
@@ -209,27 +197,28 @@ struct ath_txq {
209 struct list_head txq_fifo_pending; 197 struct list_head txq_fifo_pending;
210 u8 txq_headidx; 198 u8 txq_headidx;
211 u8 txq_tailidx; 199 u8 txq_tailidx;
200 int pending_frames;
212}; 201};
213 202
214struct ath_atx_ac { 203struct ath_atx_ac {
204 struct ath_txq *txq;
215 int sched; 205 int sched;
216 int qnum;
217 struct list_head list; 206 struct list_head list;
218 struct list_head tid_q; 207 struct list_head tid_q;
219}; 208};
220 209
210struct ath_frame_info {
211 int framelen;
212 u32 keyix;
213 enum ath9k_key_type keytype;
214 u8 retries;
215 u16 seqno;
216};
217
221struct ath_buf_state { 218struct ath_buf_state {
222 int bfs_nframes;
223 u16 bfs_al;
224 u16 bfs_frmlen;
225 int bfs_seqno;
226 int bfs_tidno;
227 int bfs_retries;
228 u8 bf_type; 219 u8 bf_type;
229 u8 bfs_paprd; 220 u8 bfs_paprd;
230 unsigned long bfs_paprd_timestamp; 221 enum ath9k_internal_frame_type bfs_ftype;
231 u32 bfs_keyix;
232 enum ath9k_key_type bfs_keytype;
233}; 222};
234 223
235struct ath_buf { 224struct ath_buf {
@@ -242,7 +231,6 @@ struct ath_buf {
242 dma_addr_t bf_daddr; /* physical addr of desc */ 231 dma_addr_t bf_daddr; /* physical addr of desc */
243 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */ 232 dma_addr_t bf_buf_addr; /* physical addr of data buffer, for DMA */
244 bool bf_stale; 233 bool bf_stale;
245 bool bf_tx_aborted;
246 u16 bf_flags; 234 u16 bf_flags;
247 struct ath_buf_state bf_state; 235 struct ath_buf_state bf_state;
248 struct ath_wiphy *aphy; 236 struct ath_wiphy *aphy;
@@ -271,7 +259,6 @@ struct ath_node {
271 struct ath_atx_ac ac[WME_NUM_AC]; 259 struct ath_atx_ac ac[WME_NUM_AC];
272 u16 maxampdu; 260 u16 maxampdu;
273 u8 mpdudensity; 261 u8 mpdudensity;
274 int last_rssi;
275}; 262};
276 263
277#define AGGR_CLEANUP BIT(1) 264#define AGGR_CLEANUP BIT(1)
@@ -280,6 +267,7 @@ struct ath_node {
280 267
281struct ath_tx_control { 268struct ath_tx_control {
282 struct ath_txq *txq; 269 struct ath_txq *txq;
270 struct ath_node *an;
283 int if_id; 271 int if_id;
284 enum ath9k_internal_frame_type frame_type; 272 enum ath9k_internal_frame_type frame_type;
285 u8 paprd; 273 u8 paprd;
@@ -292,12 +280,11 @@ struct ath_tx_control {
292struct ath_tx { 280struct ath_tx {
293 u16 seq_no; 281 u16 seq_no;
294 u32 txqsetup; 282 u32 txqsetup;
295 int hwq_map[WME_NUM_AC];
296 spinlock_t txbuflock; 283 spinlock_t txbuflock;
297 struct list_head txbuf; 284 struct list_head txbuf;
298 struct ath_txq txq[ATH9K_NUM_TX_QUEUES]; 285 struct ath_txq txq[ATH9K_NUM_TX_QUEUES];
299 struct ath_descdma txdma; 286 struct ath_descdma txdma;
300 int pending_frames[WME_NUM_AC]; 287 struct ath_txq *txq_map[WME_NUM_AC];
301}; 288};
302 289
303struct ath_rx_edma { 290struct ath_rx_edma {
@@ -311,7 +298,6 @@ struct ath_rx {
311 u8 rxotherant; 298 u8 rxotherant;
312 u32 *rxlink; 299 u32 *rxlink;
313 unsigned int rxfilter; 300 unsigned int rxfilter;
314 spinlock_t pcu_lock;
315 spinlock_t rxbuflock; 301 spinlock_t rxbuflock;
316 struct list_head rxbuf; 302 struct list_head rxbuf;
317 struct ath_descdma rxdma; 303 struct ath_descdma rxdma;
@@ -328,7 +314,6 @@ void ath_rx_cleanup(struct ath_softc *sc);
328int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp); 314int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp);
329struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype); 315struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype);
330void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq); 316void ath_tx_cleanupq(struct ath_softc *sc, struct ath_txq *txq);
331int ath_tx_setup(struct ath_softc *sc, int haltype);
332bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx); 317bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx);
333void ath_draintxq(struct ath_softc *sc, 318void ath_draintxq(struct ath_softc *sc,
334 struct ath_txq *txq, bool retry_tx); 319 struct ath_txq *txq, bool retry_tx);
@@ -343,7 +328,6 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
343 struct ath_tx_control *txctl); 328 struct ath_tx_control *txctl);
344void ath_tx_tasklet(struct ath_softc *sc); 329void ath_tx_tasklet(struct ath_softc *sc);
345void ath_tx_edma_tasklet(struct ath_softc *sc); 330void ath_tx_edma_tasklet(struct ath_softc *sc);
346void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb);
347int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta, 331int ath_tx_aggr_start(struct ath_softc *sc, struct ieee80211_sta *sta,
348 u16 tid, u16 *ssn); 332 u16 tid, u16 *ssn);
349void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid); 333void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid);
@@ -564,6 +548,7 @@ struct ath_ant_comb {
564#define SC_OP_BT_PRIORITY_DETECTED BIT(12) 548#define SC_OP_BT_PRIORITY_DETECTED BIT(12)
565#define SC_OP_BT_SCAN BIT(13) 549#define SC_OP_BT_SCAN BIT(13)
566#define SC_OP_ANI_RUN BIT(14) 550#define SC_OP_ANI_RUN BIT(14)
551#define SC_OP_ENABLE_APM BIT(15)
567 552
568/* Powersave flags */ 553/* Powersave flags */
569#define PS_WAIT_FOR_BEACON BIT(0) 554#define PS_WAIT_FOR_BEACON BIT(0)
@@ -601,13 +586,14 @@ struct ath_softc {
601 struct ath_hw *sc_ah; 586 struct ath_hw *sc_ah;
602 void __iomem *mem; 587 void __iomem *mem;
603 int irq; 588 int irq;
604 spinlock_t sc_resetlock;
605 spinlock_t sc_serial_rw; 589 spinlock_t sc_serial_rw;
606 spinlock_t sc_pm_lock; 590 spinlock_t sc_pm_lock;
591 spinlock_t sc_pcu_lock;
607 struct mutex mutex; 592 struct mutex mutex;
608 struct work_struct paprd_work; 593 struct work_struct paprd_work;
609 struct work_struct hw_check_work; 594 struct work_struct hw_check_work;
610 struct completion paprd_complete; 595 struct completion paprd_complete;
596 bool paprd_pending;
611 597
612 u32 intrstatus; 598 u32 intrstatus;
613 u32 sc_flags; /* SC_OP_* */ 599 u32 sc_flags; /* SC_OP_* */
@@ -665,11 +651,11 @@ struct ath_wiphy {
665 bool idle; 651 bool idle;
666 int chan_idx; 652 int chan_idx;
667 int chan_is_ht; 653 int chan_is_ht;
654 int last_rssi;
668}; 655};
669 656
670void ath9k_tasklet(unsigned long data); 657void ath9k_tasklet(unsigned long data);
671int ath_reset(struct ath_softc *sc, bool retry_tx); 658int ath_reset(struct ath_softc *sc, bool retry_tx);
672int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
673int ath_cabq_update(struct ath_softc *); 659int ath_cabq_update(struct ath_softc *);
674 660
675static inline void ath_read_cachesize(struct ath_common *common, int *csz) 661static inline void ath_read_cachesize(struct ath_common *common, int *csz)
@@ -678,17 +664,19 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
678} 664}
679 665
680extern struct ieee80211_ops ath9k_ops; 666extern struct ieee80211_ops ath9k_ops;
681extern int modparam_nohwcrypt; 667extern int ath9k_modparam_nohwcrypt;
682extern int led_blink; 668extern int led_blink;
669extern int ath9k_pm_qos_value;
670extern bool is_ath9k_unloaded;
683 671
684irqreturn_t ath_isr(int irq, void *dev); 672irqreturn_t ath_isr(int irq, void *dev);
673void ath9k_init_crypto(struct ath_softc *sc);
685int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid, 674int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
686 const struct ath_bus_ops *bus_ops); 675 const struct ath_bus_ops *bus_ops);
687void ath9k_deinit_device(struct ath_softc *sc); 676void ath9k_deinit_device(struct ath_softc *sc);
688void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw); 677void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw);
689void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, 678void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
690 struct ath9k_channel *ichan); 679 struct ath9k_channel *ichan);
691void ath_update_chainmask(struct ath_softc *sc, int is_ht);
692int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw, 680int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
693 struct ath9k_channel *hchan); 681 struct ath9k_channel *hchan);
694 682
@@ -715,10 +703,12 @@ static inline void ath_ahb_exit(void) {};
715void ath9k_ps_wakeup(struct ath_softc *sc); 703void ath9k_ps_wakeup(struct ath_softc *sc);
716void ath9k_ps_restore(struct ath_softc *sc); 704void ath9k_ps_restore(struct ath_softc *sc);
717 705
706u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate);
707
718void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif); 708void ath9k_set_bssid_mask(struct ieee80211_hw *hw, struct ieee80211_vif *vif);
719int ath9k_wiphy_add(struct ath_softc *sc); 709int ath9k_wiphy_add(struct ath_softc *sc);
720int ath9k_wiphy_del(struct ath_wiphy *aphy); 710int ath9k_wiphy_del(struct ath_wiphy *aphy);
721void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb); 711void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype);
722int ath9k_wiphy_pause(struct ath_wiphy *aphy); 712int ath9k_wiphy_pause(struct ath_wiphy *aphy);
723int ath9k_wiphy_unpause(struct ath_wiphy *aphy); 713int ath9k_wiphy_unpause(struct ath_wiphy *aphy);
724int ath9k_wiphy_select(struct ath_wiphy *aphy); 714int ath9k_wiphy_select(struct ath_wiphy *aphy);
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 19891e7d49ae..385ba03134ba 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -28,7 +28,7 @@ int ath_beaconq_config(struct ath_softc *sc)
28 struct ath_hw *ah = sc->sc_ah; 28 struct ath_hw *ah = sc->sc_ah;
29 struct ath_common *common = ath9k_hw_common(ah); 29 struct ath_common *common = ath9k_hw_common(ah);
30 struct ath9k_tx_queue_info qi, qi_be; 30 struct ath9k_tx_queue_info qi, qi_be;
31 int qnum; 31 struct ath_txq *txq;
32 32
33 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); 33 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
34 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) { 34 if (sc->sc_ah->opmode == NL80211_IFTYPE_AP) {
@@ -38,16 +38,16 @@ int ath_beaconq_config(struct ath_softc *sc)
38 qi.tqi_cwmax = 0; 38 qi.tqi_cwmax = 0;
39 } else { 39 } else {
40 /* Adhoc mode; important thing is to use 2x cwmin. */ 40 /* Adhoc mode; important thing is to use 2x cwmin. */
41 qnum = sc->tx.hwq_map[WME_AC_BE]; 41 txq = sc->tx.txq_map[WME_AC_BE];
42 ath9k_hw_get_txq_props(ah, qnum, &qi_be); 42 ath9k_hw_get_txq_props(ah, txq->axq_qnum, &qi_be);
43 qi.tqi_aifs = qi_be.tqi_aifs; 43 qi.tqi_aifs = qi_be.tqi_aifs;
44 qi.tqi_cwmin = 4*qi_be.tqi_cwmin; 44 qi.tqi_cwmin = 4*qi_be.tqi_cwmin;
45 qi.tqi_cwmax = qi_be.tqi_cwmax; 45 qi.tqi_cwmax = qi_be.tqi_cwmax;
46 } 46 }
47 47
48 if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) { 48 if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
49 ath_print(common, ATH_DBG_FATAL, 49 ath_err(common,
50 "Unable to update h/w beacon queue parameters\n"); 50 "Unable to update h/w beacon queue parameters\n");
51 return 0; 51 return 0;
52 } else { 52 } else {
53 ath9k_hw_resettxqueue(ah, sc->beacon.beaconq); 53 ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
@@ -103,12 +103,32 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
103 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); 103 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
104 series[0].Tries = 1; 104 series[0].Tries = 1;
105 series[0].Rate = rate; 105 series[0].Rate = rate;
106 series[0].ChSel = common->tx_chainmask; 106 series[0].ChSel = ath_txchainmask_reduction(sc,
107 common->tx_chainmask, series[0].Rate);
107 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0; 108 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
108 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration, 109 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration,
109 series, 4, 0); 110 series, 4, 0);
110} 111}
111 112
113static void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
114{
115 struct ath_wiphy *aphy = hw->priv;
116 struct ath_softc *sc = aphy->sc;
117 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
118 struct ath_tx_control txctl;
119
120 memset(&txctl, 0, sizeof(struct ath_tx_control));
121 txctl.txq = sc->beacon.cabq;
122
123 ath_dbg(common, ATH_DBG_XMIT,
124 "transmitting CABQ packet, skb: %p\n", skb);
125
126 if (ath_tx_start(hw, skb, &txctl) != 0) {
127 ath_dbg(common, ATH_DBG_XMIT, "CABQ TX failed\n");
128 dev_kfree_skb_any(skb);
129 }
130}
131
112static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw, 132static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
113 struct ieee80211_vif *vif) 133 struct ieee80211_vif *vif)
114{ 134{
@@ -169,8 +189,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
169 dev_kfree_skb_any(skb); 189 dev_kfree_skb_any(skb);
170 bf->bf_mpdu = NULL; 190 bf->bf_mpdu = NULL;
171 bf->bf_buf_addr = 0; 191 bf->bf_buf_addr = 0;
172 ath_print(common, ATH_DBG_FATAL, 192 ath_err(common, "dma_mapping_error on beaconing\n");
173 "dma_mapping_error on beaconing\n");
174 return NULL; 193 return NULL;
175 } 194 }
176 195
@@ -190,8 +209,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
190 209
191 if (skb && cabq_depth) { 210 if (skb && cabq_depth) {
192 if (sc->nvifs > 1) { 211 if (sc->nvifs > 1) {
193 ath_print(common, ATH_DBG_BEACON, 212 ath_dbg(common, ATH_DBG_BEACON,
194 "Flushing previous cabq traffic\n"); 213 "Flushing previous cabq traffic\n");
195 ath_draintxq(sc, cabq, false); 214 ath_draintxq(sc, cabq, false);
196 } 215 }
197 } 216 }
@@ -263,7 +282,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
263 /* NB: the beacon data buffer must be 32-bit aligned. */ 282 /* NB: the beacon data buffer must be 32-bit aligned. */
264 skb = ieee80211_beacon_get(sc->hw, vif); 283 skb = ieee80211_beacon_get(sc->hw, vif);
265 if (skb == NULL) { 284 if (skb == NULL) {
266 ath_print(common, ATH_DBG_BEACON, "cannot get skb\n"); 285 ath_dbg(common, ATH_DBG_BEACON, "cannot get skb\n");
267 return -ENOMEM; 286 return -ENOMEM;
268 } 287 }
269 288
@@ -287,10 +306,9 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
287 tsfadjust = intval * avp->av_bslot / ATH_BCBUF; 306 tsfadjust = intval * avp->av_bslot / ATH_BCBUF;
288 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); 307 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
289 308
290 ath_print(common, ATH_DBG_BEACON, 309 ath_dbg(common, ATH_DBG_BEACON,
291 "stagger beacons, bslot %d intval " 310 "stagger beacons, bslot %d intval %u tsfadjust %llu\n",
292 "%u tsfadjust %llu\n", 311 avp->av_bslot, intval, (unsigned long long)tsfadjust);
293 avp->av_bslot, intval, (unsigned long long)tsfadjust);
294 312
295 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp = 313 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
296 avp->tsf_adjust; 314 avp->tsf_adjust;
@@ -304,8 +322,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
304 dev_kfree_skb_any(skb); 322 dev_kfree_skb_any(skb);
305 bf->bf_mpdu = NULL; 323 bf->bf_mpdu = NULL;
306 bf->bf_buf_addr = 0; 324 bf->bf_buf_addr = 0;
307 ath_print(common, ATH_DBG_FATAL, 325 ath_err(common, "dma_mapping_error on beacon alloc\n");
308 "dma_mapping_error on beacon alloc\n");
309 return -ENOMEM; 326 return -ENOMEM;
310 } 327 }
311 328
@@ -362,13 +379,13 @@ void ath_beacon_tasklet(unsigned long data)
362 sc->beacon.bmisscnt++; 379 sc->beacon.bmisscnt++;
363 380
364 if (sc->beacon.bmisscnt < BSTUCK_THRESH) { 381 if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
365 ath_print(common, ATH_DBG_BSTUCK, 382 ath_dbg(common, ATH_DBG_BSTUCK,
366 "missed %u consecutive beacons\n", 383 "missed %u consecutive beacons\n",
367 sc->beacon.bmisscnt); 384 sc->beacon.bmisscnt);
368 ath9k_hw_bstuck_nfcal(ah); 385 ath9k_hw_bstuck_nfcal(ah);
369 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 386 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
370 ath_print(common, ATH_DBG_BSTUCK, 387 ath_dbg(common, ATH_DBG_BSTUCK,
371 "beacon is officially stuck\n"); 388 "beacon is officially stuck\n");
372 sc->sc_flags |= SC_OP_TSF_RESET; 389 sc->sc_flags |= SC_OP_TSF_RESET;
373 ath_reset(sc, true); 390 ath_reset(sc, true);
374 } 391 }
@@ -377,9 +394,9 @@ void ath_beacon_tasklet(unsigned long data)
377 } 394 }
378 395
379 if (sc->beacon.bmisscnt != 0) { 396 if (sc->beacon.bmisscnt != 0) {
380 ath_print(common, ATH_DBG_BSTUCK, 397 ath_dbg(common, ATH_DBG_BSTUCK,
381 "resume beacon xmit after %u misses\n", 398 "resume beacon xmit after %u misses\n",
382 sc->beacon.bmisscnt); 399 sc->beacon.bmisscnt);
383 sc->beacon.bmisscnt = 0; 400 sc->beacon.bmisscnt = 0;
384 } 401 }
385 402
@@ -405,9 +422,9 @@ void ath_beacon_tasklet(unsigned long data)
405 vif = sc->beacon.bslot[slot]; 422 vif = sc->beacon.bslot[slot];
406 aphy = sc->beacon.bslot_aphy[slot]; 423 aphy = sc->beacon.bslot_aphy[slot];
407 424
408 ath_print(common, ATH_DBG_BEACON, 425 ath_dbg(common, ATH_DBG_BEACON,
409 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 426 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
410 slot, tsf, tsftu, intval, vif); 427 slot, tsf, tsftu, intval, vif);
411 428
412 bfaddr = 0; 429 bfaddr = 0;
413 if (vif) { 430 if (vif) {
@@ -449,8 +466,8 @@ void ath_beacon_tasklet(unsigned long data)
449 * are still pending on the queue. 466 * are still pending on the queue.
450 */ 467 */
451 if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) { 468 if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
452 ath_print(common, ATH_DBG_FATAL, 469 ath_err(common, "beacon queue %u did not stop?\n",
453 "beacon queue %u did not stop?\n", sc->beacon.beaconq); 470 sc->beacon.beaconq);
454 } 471 }
455 472
456 /* NB: cabq traffic should already be queued and primed */ 473 /* NB: cabq traffic should already be queued and primed */
@@ -503,7 +520,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
503 520
504 /* Set the computed AP beacon timers */ 521 /* Set the computed AP beacon timers */
505 522
506 ath9k_hw_set_interrupts(ah, 0); 523 ath9k_hw_disable_interrupts(ah);
507 ath9k_beacon_init(sc, nexttbtt, intval); 524 ath9k_beacon_init(sc, nexttbtt, intval);
508 sc->beacon.bmisscnt = 0; 525 sc->beacon.bmisscnt = 0;
509 ath9k_hw_set_interrupts(ah, ah->imask); 526 ath9k_hw_set_interrupts(ah, ah->imask);
@@ -536,8 +553,8 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
536 553
537 /* No need to configure beacon if we are not associated */ 554 /* No need to configure beacon if we are not associated */
538 if (!common->curaid) { 555 if (!common->curaid) {
539 ath_print(common, ATH_DBG_BEACON, 556 ath_dbg(common, ATH_DBG_BEACON,
540 "STA is not yet associated..skipping beacon config\n"); 557 "STA is not yet associated..skipping beacon config\n");
541 return; 558 return;
542 } 559 }
543 560
@@ -549,8 +566,6 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
549 * last beacon we received (which may be none). 566 * last beacon we received (which may be none).
550 */ 567 */
551 dtimperiod = conf->dtim_period; 568 dtimperiod = conf->dtim_period;
552 if (dtimperiod <= 0) /* NB: 0 if not known */
553 dtimperiod = 1;
554 dtimcount = conf->dtim_count; 569 dtimcount = conf->dtim_count;
555 if (dtimcount >= dtimperiod) /* NB: sanity check */ 570 if (dtimcount >= dtimperiod) /* NB: sanity check */
556 dtimcount = 0; 571 dtimcount = 0;
@@ -558,8 +573,6 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
558 cfpcount = 0; 573 cfpcount = 0;
559 574
560 sleepduration = conf->listen_interval * intval; 575 sleepduration = conf->listen_interval * intval;
561 if (sleepduration <= 0)
562 sleepduration = intval;
563 576
564 /* 577 /*
565 * Pull nexttbtt forward to reflect the current 578 * Pull nexttbtt forward to reflect the current
@@ -630,23 +643,22 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
630 /* TSF out of range threshold fixed at 1 second */ 643 /* TSF out of range threshold fixed at 1 second */
631 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; 644 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
632 645
633 ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); 646 ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
634 ath_print(common, ATH_DBG_BEACON, 647 ath_dbg(common, ATH_DBG_BEACON,
635 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", 648 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
636 bs.bs_bmissthreshold, bs.bs_sleepduration, 649 bs.bs_bmissthreshold, bs.bs_sleepduration,
637 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); 650 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
638 651
639 /* Set the computed STA beacon timers */ 652 /* Set the computed STA beacon timers */
640 653
641 ath9k_hw_set_interrupts(ah, 0); 654 ath9k_hw_disable_interrupts(ah);
642 ath9k_hw_set_sta_beacon_timers(ah, &bs); 655 ath9k_hw_set_sta_beacon_timers(ah, &bs);
643 ah->imask |= ATH9K_INT_BMISS; 656 ah->imask |= ATH9K_INT_BMISS;
644 ath9k_hw_set_interrupts(ah, ah->imask); 657 ath9k_hw_set_interrupts(ah, ah->imask);
645} 658}
646 659
647static void ath_beacon_config_adhoc(struct ath_softc *sc, 660static void ath_beacon_config_adhoc(struct ath_softc *sc,
648 struct ath_beacon_config *conf, 661 struct ath_beacon_config *conf)
649 struct ieee80211_vif *vif)
650{ 662{
651 struct ath_hw *ah = sc->sc_ah; 663 struct ath_hw *ah = sc->sc_ah;
652 struct ath_common *common = ath9k_hw_common(ah); 664 struct ath_common *common = ath9k_hw_common(ah);
@@ -670,9 +682,9 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
670 nexttbtt += intval; 682 nexttbtt += intval;
671 } while (nexttbtt < tsftu); 683 } while (nexttbtt < tsftu);
672 684
673 ath_print(common, ATH_DBG_BEACON, 685 ath_dbg(common, ATH_DBG_BEACON,
674 "IBSS nexttbtt %u intval %u (%u)\n", 686 "IBSS nexttbtt %u intval %u (%u)\n",
675 nexttbtt, intval, conf->beacon_interval); 687 nexttbtt, intval, conf->beacon_interval);
676 688
677 /* 689 /*
678 * In IBSS mode enable the beacon timers but only enable SWBA interrupts 690 * In IBSS mode enable the beacon timers but only enable SWBA interrupts
@@ -686,7 +698,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
686 698
687 /* Set the computed ADHOC beacon timers */ 699 /* Set the computed ADHOC beacon timers */
688 700
689 ath9k_hw_set_interrupts(ah, 0); 701 ath9k_hw_disable_interrupts(ah);
690 ath9k_beacon_init(sc, nexttbtt, intval); 702 ath9k_beacon_init(sc, nexttbtt, intval);
691 sc->beacon.bmisscnt = 0; 703 sc->beacon.bmisscnt = 0;
692 ath9k_hw_set_interrupts(ah, ah->imask); 704 ath9k_hw_set_interrupts(ah, ah->imask);
@@ -701,18 +713,17 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
701 /* Setup the beacon configuration parameters */ 713 /* Setup the beacon configuration parameters */
702 if (vif) { 714 if (vif) {
703 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf; 715 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
704
705 iftype = vif->type; 716 iftype = vif->type;
706
707 cur_conf->beacon_interval = bss_conf->beacon_int; 717 cur_conf->beacon_interval = bss_conf->beacon_int;
708 cur_conf->dtim_period = bss_conf->dtim_period; 718 cur_conf->dtim_period = bss_conf->dtim_period;
719 } else {
720 iftype = sc->sc_ah->opmode;
721 }
722
709 cur_conf->listen_interval = 1; 723 cur_conf->listen_interval = 1;
710 cur_conf->dtim_count = 1; 724 cur_conf->dtim_count = 1;
711 cur_conf->bmiss_timeout = 725 cur_conf->bmiss_timeout =
712 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval; 726 ATH_DEFAULT_BMISS_LIMIT * cur_conf->beacon_interval;
713 } else {
714 iftype = sc->sc_ah->opmode;
715 }
716 727
717 /* 728 /*
718 * It looks like mac80211 may end up using beacon interval of zero in 729 * It looks like mac80211 may end up using beacon interval of zero in
@@ -723,20 +734,27 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
723 if (cur_conf->beacon_interval == 0) 734 if (cur_conf->beacon_interval == 0)
724 cur_conf->beacon_interval = 100; 735 cur_conf->beacon_interval = 100;
725 736
737 /*
738 * Some times we dont parse dtim period from mac80211, in that case
739 * use a default value
740 */
741 if (cur_conf->dtim_period == 0)
742 cur_conf->dtim_period = 1;
743
726 switch (iftype) { 744 switch (iftype) {
727 case NL80211_IFTYPE_AP: 745 case NL80211_IFTYPE_AP:
728 ath_beacon_config_ap(sc, cur_conf); 746 ath_beacon_config_ap(sc, cur_conf);
729 break; 747 break;
730 case NL80211_IFTYPE_ADHOC: 748 case NL80211_IFTYPE_ADHOC:
731 case NL80211_IFTYPE_MESH_POINT: 749 case NL80211_IFTYPE_MESH_POINT:
732 ath_beacon_config_adhoc(sc, cur_conf, vif); 750 ath_beacon_config_adhoc(sc, cur_conf);
733 break; 751 break;
734 case NL80211_IFTYPE_STATION: 752 case NL80211_IFTYPE_STATION:
735 ath_beacon_config_sta(sc, cur_conf); 753 ath_beacon_config_sta(sc, cur_conf);
736 break; 754 break;
737 default: 755 default:
738 ath_print(common, ATH_DBG_CONFIG, 756 ath_dbg(common, ATH_DBG_CONFIG,
739 "Unsupported beaconing mode\n"); 757 "Unsupported beaconing mode\n");
740 return; 758 return;
741 } 759 }
742 760
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 6a92e57fddf0..d33bf204c995 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -35,29 +35,6 @@ struct ath_btcoex_config {
35 bool bt_hold_rx_clear; 35 bool bt_hold_rx_clear;
36}; 36};
37 37
38static const u16 ath_subsysid_tbl[] = {
39 AR9280_COEX2WIRE_SUBSYSID,
40 AT9285_COEX3WIRE_SA_SUBSYSID,
41 AT9285_COEX3WIRE_DA_SUBSYSID
42};
43
44/*
45 * Checks the subsystem id of the device to see if it
46 * supports btcoex
47 */
48bool ath9k_hw_btcoex_supported(struct ath_hw *ah)
49{
50 int i;
51
52 if (!ah->hw_version.subsysid)
53 return false;
54
55 for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++)
56 if (ah->hw_version.subsysid == ath_subsysid_tbl[i])
57 return true;
58
59 return false;
60}
61 38
62void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum) 39void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
63{ 40{
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 1ee5a15ccbb1..588dfd464dd1 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -49,7 +49,6 @@ struct ath_btcoex_hw {
49 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ 49 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
50}; 50};
51 51
52bool ath9k_hw_btcoex_supported(struct ath_hw *ah);
53void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah); 52void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
54void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah); 53void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
55void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum); 54void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 6d509484b5f6..b68a1acbddd0 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -97,12 +97,12 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath_hw *ah,
97 if (h[i].privNF > limit->max) { 97 if (h[i].privNF > limit->max) {
98 high_nf_mid = true; 98 high_nf_mid = true;
99 99
100 ath_print(common, ATH_DBG_CALIBRATE, 100 ath_dbg(common, ATH_DBG_CALIBRATE,
101 "NFmid[%d] (%d) > MAX (%d), %s\n", 101 "NFmid[%d] (%d) > MAX (%d), %s\n",
102 i, h[i].privNF, limit->max, 102 i, h[i].privNF, limit->max,
103 (cal->nfcal_interference ? 103 (cal->nfcal_interference ?
104 "not corrected (due to interference)" : 104 "not corrected (due to interference)" :
105 "correcting to MAX")); 105 "correcting to MAX"));
106 106
107 /* 107 /*
108 * Normally we limit the average noise floor by the 108 * Normally we limit the average noise floor by the
@@ -180,18 +180,18 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
180 return true; 180 return true;
181 181
182 if (currCal->calState != CAL_DONE) { 182 if (currCal->calState != CAL_DONE) {
183 ath_print(common, ATH_DBG_CALIBRATE, 183 ath_dbg(common, ATH_DBG_CALIBRATE,
184 "Calibration state incorrect, %d\n", 184 "Calibration state incorrect, %d\n",
185 currCal->calState); 185 currCal->calState);
186 return true; 186 return true;
187 } 187 }
188 188
189 if (!(ah->supp_cals & currCal->calData->calType)) 189 if (!(ah->supp_cals & currCal->calData->calType))
190 return true; 190 return true;
191 191
192 ath_print(common, ATH_DBG_CALIBRATE, 192 ath_dbg(common, ATH_DBG_CALIBRATE,
193 "Resetting Cal %d state for channel %u\n", 193 "Resetting Cal %d state for channel %u\n",
194 currCal->calData->calType, conf->channel->center_freq); 194 currCal->calData->calType, conf->channel->center_freq);
195 195
196 ah->caldata->CalValid &= ~currCal->calData->calType; 196 ah->caldata->CalValid &= ~currCal->calData->calType;
197 currCal->calState = CAL_WAITING; 197 currCal->calState = CAL_WAITING;
@@ -279,9 +279,9 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
279 * noisefloor until the next calibration timer. 279 * noisefloor until the next calibration timer.
280 */ 280 */
281 if (j == 1000) { 281 if (j == 1000) {
282 ath_print(common, ATH_DBG_ANY, "Timeout while waiting for nf " 282 ath_dbg(common, ATH_DBG_ANY,
283 "to load: AR_PHY_AGC_CONTROL=0x%x\n", 283 "Timeout while waiting for nf to load: AR_PHY_AGC_CONTROL=0x%x\n",
284 REG_READ(ah, AR_PHY_AGC_CONTROL)); 284 REG_READ(ah, AR_PHY_AGC_CONTROL));
285 return; 285 return;
286 } 286 }
287 287
@@ -318,19 +318,19 @@ static void ath9k_hw_nf_sanitize(struct ath_hw *ah, s16 *nf)
318 if (!nf[i]) 318 if (!nf[i])
319 continue; 319 continue;
320 320
321 ath_print(common, ATH_DBG_CALIBRATE, 321 ath_dbg(common, ATH_DBG_CALIBRATE,
322 "NF calibrated [%s] [chain %d] is %d\n", 322 "NF calibrated [%s] [chain %d] is %d\n",
323 (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]); 323 (i >= 3 ? "ext" : "ctl"), i % 3, nf[i]);
324 324
325 if (nf[i] > ATH9K_NF_TOO_HIGH) { 325 if (nf[i] > ATH9K_NF_TOO_HIGH) {
326 ath_print(common, ATH_DBG_CALIBRATE, 326 ath_dbg(common, ATH_DBG_CALIBRATE,
327 "NF[%d] (%d) > MAX (%d), correcting to MAX", 327 "NF[%d] (%d) > MAX (%d), correcting to MAX\n",
328 i, nf[i], ATH9K_NF_TOO_HIGH); 328 i, nf[i], ATH9K_NF_TOO_HIGH);
329 nf[i] = limit->max; 329 nf[i] = limit->max;
330 } else if (nf[i] < limit->min) { 330 } else if (nf[i] < limit->min) {
331 ath_print(common, ATH_DBG_CALIBRATE, 331 ath_dbg(common, ATH_DBG_CALIBRATE,
332 "NF[%d] (%d) < MIN (%d), correcting to NOM", 332 "NF[%d] (%d) < MIN (%d), correcting to NOM\n",
333 i, nf[i], limit->min); 333 i, nf[i], limit->min);
334 nf[i] = limit->nominal; 334 nf[i] = limit->nominal;
335 } 335 }
336 } 336 }
@@ -347,8 +347,8 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
347 347
348 chan->channelFlags &= (~CHANNEL_CW_INT); 348 chan->channelFlags &= (~CHANNEL_CW_INT);
349 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 349 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
350 ath_print(common, ATH_DBG_CALIBRATE, 350 ath_dbg(common, ATH_DBG_CALIBRATE,
351 "NF did not complete in calibration window\n"); 351 "NF did not complete in calibration window\n");
352 return false; 352 return false;
353 } 353 }
354 354
@@ -357,10 +357,9 @@ bool ath9k_hw_getnf(struct ath_hw *ah, struct ath9k_channel *chan)
357 nf = nfarray[0]; 357 nf = nfarray[0];
358 if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh) 358 if (ath9k_hw_get_nf_thresh(ah, c->band, &nfThresh)
359 && nf > nfThresh) { 359 && nf > nfThresh) {
360 ath_print(common, ATH_DBG_CALIBRATE, 360 ath_dbg(common, ATH_DBG_CALIBRATE,
361 "noise floor failed detected; " 361 "noise floor failed detected; detected %d, threshold %d\n",
362 "detected %d, threshold %d\n", 362 nf, nfThresh);
363 nf, nfThresh);
364 chan->channelFlags |= CHANNEL_CW_INT; 363 chan->channelFlags |= CHANNEL_CW_INT;
365 } 364 }
366 365
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index f43a2d98421c..df1998d48253 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -107,12 +107,10 @@ static u32 ath9k_get_extchanmode(struct ieee80211_channel *chan,
107/* 107/*
108 * Update internal channel flags. 108 * Update internal channel flags.
109 */ 109 */
110void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw, 110void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
111 struct ath9k_channel *ichan) 111 struct ieee80211_channel *chan,
112 enum nl80211_channel_type channel_type)
112{ 113{
113 struct ieee80211_channel *chan = hw->conf.channel;
114 struct ieee80211_conf *conf = &hw->conf;
115
116 ichan->channel = chan->center_freq; 114 ichan->channel = chan->center_freq;
117 ichan->chan = chan; 115 ichan->chan = chan;
118 116
@@ -124,9 +122,8 @@ void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw,
124 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; 122 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
125 } 123 }
126 124
127 if (conf_is_ht(conf)) 125 if (channel_type != NL80211_CHAN_NO_HT)
128 ichan->chanmode = ath9k_get_extchanmode(chan, 126 ichan->chanmode = ath9k_get_extchanmode(chan, channel_type);
129 conf->channel_type);
130} 127}
131EXPORT_SYMBOL(ath9k_cmn_update_ichannel); 128EXPORT_SYMBOL(ath9k_cmn_update_ichannel);
132 129
@@ -142,7 +139,7 @@ struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
142 139
143 chan_idx = curchan->hw_value; 140 chan_idx = curchan->hw_value;
144 channel = &ah->channels[chan_idx]; 141 channel = &ah->channels[chan_idx];
145 ath9k_cmn_update_ichannel(hw, channel); 142 ath9k_cmn_update_ichannel(channel, curchan, hw->conf.channel_type);
146 143
147 return channel; 144 return channel;
148} 145}
@@ -183,8 +180,8 @@ void ath9k_cmn_btcoex_bt_stomp(struct ath_common *common,
183 AR_STOMP_NONE_WLAN_WGHT); 180 AR_STOMP_NONE_WLAN_WGHT);
184 break; 181 break;
185 default: 182 default:
186 ath_print(common, ATH_DBG_BTCOEX, 183 ath_dbg(common, ATH_DBG_BTCOEX,
187 "Invalid Stomptype\n"); 184 "Invalid Stomptype\n");
188 break; 185 break;
189 } 186 }
190 187
diff --git a/drivers/net/wireless/ath/ath9k/common.h b/drivers/net/wireless/ath/ath9k/common.h
index fea3b3315391..a126bddebb0a 100644
--- a/drivers/net/wireless/ath/ath9k/common.h
+++ b/drivers/net/wireless/ath/ath9k/common.h
@@ -17,7 +17,6 @@
17#include <net/mac80211.h> 17#include <net/mac80211.h>
18 18
19#include "../ath.h" 19#include "../ath.h"
20#include "../debug.h"
21 20
22#include "hw.h" 21#include "hw.h"
23#include "hw-ops.h" 22#include "hw-ops.h"
@@ -31,10 +30,11 @@
31#define WME_MAX_BA WME_BA_BMP_SIZE 30#define WME_MAX_BA WME_BA_BMP_SIZE
32#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA) 31#define ATH_TID_MAX_BUFS (2 * WME_MAX_BA)
33 32
34#define WME_AC_BE 0 33/* These must match mac80211 skb queue mapping numbers */
35#define WME_AC_BK 1 34#define WME_AC_VO 0
36#define WME_AC_VI 2 35#define WME_AC_VI 1
37#define WME_AC_VO 3 36#define WME_AC_BE 2
37#define WME_AC_BK 3
38#define WME_NUM_AC 4 38#define WME_NUM_AC 4
39 39
40#define ATH_RSSI_DUMMY_MARKER 0x127 40#define ATH_RSSI_DUMMY_MARKER 0x127
@@ -62,8 +62,9 @@ enum ath_stomp_type {
62 62
63int ath9k_cmn_padpos(__le16 frame_control); 63int ath9k_cmn_padpos(__le16 frame_control);
64int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb); 64int ath9k_cmn_get_hw_crypto_keytype(struct sk_buff *skb);
65void ath9k_cmn_update_ichannel(struct ieee80211_hw *hw, 65void ath9k_cmn_update_ichannel(struct ath9k_channel *ichan,
66 struct ath9k_channel *ichan); 66 struct ieee80211_channel *chan,
67 enum nl80211_channel_type channel_type);
67struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw, 68struct ath9k_channel *ath9k_cmn_get_curchannel(struct ieee80211_hw *hw,
68 struct ath_hw *ah); 69 struct ath_hw *ah);
69int ath9k_cmn_count_streams(unsigned int chainmask, int max); 70int ath9k_cmn_count_streams(unsigned int chainmask, int max);
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 43e71a944cb1..3586c43077a7 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -24,8 +24,6 @@
24#define REG_READ_D(_ah, _reg) \ 24#define REG_READ_D(_ah, _reg) \
25 ath9k_hw_common(_ah)->ops->read((_ah), (_reg)) 25 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
26 26
27static struct dentry *ath9k_debugfs_root;
28
29static int ath9k_debugfs_open(struct inode *inode, struct file *file) 27static int ath9k_debugfs_open(struct inode *inode, struct file *file)
30{ 28{
31 file->private_data = inode->i_private; 29 file->private_data = inode->i_private;
@@ -461,16 +459,16 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
461 459
462 /* Put variable-length stuff down here, and check for overflows. */ 460 /* Put variable-length stuff down here, and check for overflows. */
463 for (i = 0; i < sc->num_sec_wiphy; i++) { 461 for (i = 0; i < sc->num_sec_wiphy; i++) {
464 struct ath_wiphy *aphy = sc->sec_wiphy[i]; 462 struct ath_wiphy *aphy_tmp = sc->sec_wiphy[i];
465 if (aphy == NULL) 463 if (aphy_tmp == NULL)
466 continue; 464 continue;
467 chan = aphy->hw->conf.channel; 465 chan = aphy_tmp->hw->conf.channel;
468 len += snprintf(buf + len, sizeof(buf) - len, 466 len += snprintf(buf + len, sizeof(buf) - len,
469 "secondary: %s (%s chan=%d ht=%d)\n", 467 "secondary: %s (%s chan=%d ht=%d)\n",
470 wiphy_name(aphy->hw->wiphy), 468 wiphy_name(aphy_tmp->hw->wiphy),
471 ath_wiphy_state_str(aphy->state), 469 ath_wiphy_state_str(aphy_tmp->state),
472 ieee80211_frequency_to_channel(chan->center_freq), 470 ieee80211_frequency_to_channel(chan->center_freq),
473 aphy->chan_is_ht); 471 aphy_tmp->chan_is_ht);
474 } 472 }
475 if (len > sizeof(buf)) 473 if (len > sizeof(buf))
476 len = sizeof(buf); 474 len = sizeof(buf);
@@ -585,10 +583,10 @@ static const struct file_operations fops_wiphy = {
585 do { \ 583 do { \
586 len += snprintf(buf + len, size - len, \ 584 len += snprintf(buf + len, size - len, \
587 "%s%13u%11u%10u%10u\n", str, \ 585 "%s%13u%11u%10u%10u\n", str, \
588 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BE]].elem, \ 586 sc->debug.stats.txstats[WME_AC_BE].elem, \
589 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_BK]].elem, \ 587 sc->debug.stats.txstats[WME_AC_BK].elem, \
590 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VI]].elem, \ 588 sc->debug.stats.txstats[WME_AC_VI].elem, \
591 sc->debug.stats.txstats[sc->tx.hwq_map[WME_AC_VO]].elem); \ 589 sc->debug.stats.txstats[WME_AC_VO].elem); \
592} while(0) 590} while(0)
593 591
594static ssize_t read_file_xmit(struct file *file, char __user *user_buf, 592static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
@@ -630,33 +628,35 @@ static ssize_t read_file_xmit(struct file *file, char __user *user_buf,
630 return retval; 628 return retval;
631} 629}
632 630
633void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 631void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
634 struct ath_buf *bf, struct ath_tx_status *ts) 632 struct ath_tx_status *ts)
635{ 633{
636 TX_STAT_INC(txq->axq_qnum, tx_pkts_all); 634 int qnum = skb_get_queue_mapping(bf->bf_mpdu);
637 sc->debug.stats.txstats[txq->axq_qnum].tx_bytes_all += bf->bf_mpdu->len; 635
636 TX_STAT_INC(qnum, tx_pkts_all);
637 sc->debug.stats.txstats[qnum].tx_bytes_all += bf->bf_mpdu->len;
638 638
639 if (bf_isampdu(bf)) { 639 if (bf_isampdu(bf)) {
640 if (bf_isxretried(bf)) 640 if (bf_isxretried(bf))
641 TX_STAT_INC(txq->axq_qnum, a_xretries); 641 TX_STAT_INC(qnum, a_xretries);
642 else 642 else
643 TX_STAT_INC(txq->axq_qnum, a_completed); 643 TX_STAT_INC(qnum, a_completed);
644 } else { 644 } else {
645 TX_STAT_INC(txq->axq_qnum, completed); 645 TX_STAT_INC(qnum, completed);
646 } 646 }
647 647
648 if (ts->ts_status & ATH9K_TXERR_FIFO) 648 if (ts->ts_status & ATH9K_TXERR_FIFO)
649 TX_STAT_INC(txq->axq_qnum, fifo_underrun); 649 TX_STAT_INC(qnum, fifo_underrun);
650 if (ts->ts_status & ATH9K_TXERR_XTXOP) 650 if (ts->ts_status & ATH9K_TXERR_XTXOP)
651 TX_STAT_INC(txq->axq_qnum, xtxop); 651 TX_STAT_INC(qnum, xtxop);
652 if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED) 652 if (ts->ts_status & ATH9K_TXERR_TIMER_EXPIRED)
653 TX_STAT_INC(txq->axq_qnum, timer_exp); 653 TX_STAT_INC(qnum, timer_exp);
654 if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR) 654 if (ts->ts_flags & ATH9K_TX_DESC_CFG_ERR)
655 TX_STAT_INC(txq->axq_qnum, desc_cfg_err); 655 TX_STAT_INC(qnum, desc_cfg_err);
656 if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN) 656 if (ts->ts_flags & ATH9K_TX_DATA_UNDERRUN)
657 TX_STAT_INC(txq->axq_qnum, data_underrun); 657 TX_STAT_INC(qnum, data_underrun);
658 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN) 658 if (ts->ts_flags & ATH9K_TX_DELIM_UNDERRUN)
659 TX_STAT_INC(txq->axq_qnum, delim_underrun); 659 TX_STAT_INC(qnum, delim_underrun);
660} 660}
661 661
662static const struct file_operations fops_xmit = { 662static const struct file_operations fops_xmit = {
@@ -876,11 +876,8 @@ int ath9k_init_debug(struct ath_hw *ah)
876 struct ath_common *common = ath9k_hw_common(ah); 876 struct ath_common *common = ath9k_hw_common(ah);
877 struct ath_softc *sc = (struct ath_softc *) common->priv; 877 struct ath_softc *sc = (struct ath_softc *) common->priv;
878 878
879 if (!ath9k_debugfs_root) 879 sc->debug.debugfs_phy = debugfs_create_dir("ath9k",
880 return -ENOENT; 880 sc->hw->wiphy->debugfsdir);
881
882 sc->debug.debugfs_phy = debugfs_create_dir(wiphy_name(sc->hw->wiphy),
883 ath9k_debugfs_root);
884 if (!sc->debug.debugfs_phy) 881 if (!sc->debug.debugfs_phy)
885 return -ENOMEM; 882 return -ENOMEM;
886 883
@@ -933,29 +930,7 @@ int ath9k_init_debug(struct ath_hw *ah)
933 sc->debug.regidx = 0; 930 sc->debug.regidx = 0;
934 return 0; 931 return 0;
935err: 932err:
936 ath9k_exit_debug(ah);
937 return -ENOMEM;
938}
939
940void ath9k_exit_debug(struct ath_hw *ah)
941{
942 struct ath_common *common = ath9k_hw_common(ah);
943 struct ath_softc *sc = (struct ath_softc *) common->priv;
944
945 debugfs_remove_recursive(sc->debug.debugfs_phy); 933 debugfs_remove_recursive(sc->debug.debugfs_phy);
946} 934 sc->debug.debugfs_phy = NULL;
947 935 return -ENOMEM;
948int ath9k_debug_create_root(void)
949{
950 ath9k_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
951 if (!ath9k_debugfs_root)
952 return -ENOENT;
953
954 return 0;
955}
956
957void ath9k_debug_remove_root(void)
958{
959 debugfs_remove(ath9k_debugfs_root);
960 ath9k_debugfs_root = NULL;
961} 936}
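
Note on the debug.c hunks above: they carry two independent changes. Per-queue TX statistics are now indexed by the skb's queue mapping (skb_get_queue_mapping(bf->bf_mpdu)) instead of the hardware queue number, and the module-wide ath9k_debugfs_root plus its create/remove helpers are dropped in favour of an "ath9k" directory created per device under the wiphy's debugfs directory, which is removed recursively (and the pointer cleared) on the error path. A minimal sketch of that per-instance debugfs lifetime, written as a hypothetical module rather than the driver code itself:

	#include <linux/module.h>
	#include <linux/debugfs.h>

	static struct dentry *demo_dir;	/* per-instance directory, no module-wide root */

	static int __init demo_init(void)
	{
		/* the driver parents this on sc->hw->wiphy->debugfsdir; NULL means the debugfs root */
		demo_dir = debugfs_create_dir("demo", NULL);
		if (!demo_dir)
			return -ENOMEM;
		return 0;
	}

	static void __exit demo_exit(void)
	{
		/* removes the directory and everything created below it */
		debugfs_remove_recursive(demo_dir);
		demo_dir = NULL;
	}

	module_init(demo_init);
	module_exit(demo_exit);
	MODULE_LICENSE("GPL");
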
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index bb0823242ba0..1e5078bd0344 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -164,13 +164,10 @@ struct ath9k_debug {
164}; 164};
165 165
166int ath9k_init_debug(struct ath_hw *ah); 166int ath9k_init_debug(struct ath_hw *ah);
167void ath9k_exit_debug(struct ath_hw *ah);
168 167
169int ath9k_debug_create_root(void);
170void ath9k_debug_remove_root(void);
171void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 168void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
172void ath_debug_stat_tx(struct ath_softc *sc, struct ath_txq *txq, 169void ath_debug_stat_tx(struct ath_softc *sc, struct ath_buf *bf,
173 struct ath_buf *bf, struct ath_tx_status *ts); 170 struct ath_tx_status *ts);
174void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs); 171void ath_debug_stat_rx(struct ath_softc *sc, struct ath_rx_status *rs);
175 172
176#else 173#else
@@ -180,26 +177,12 @@ static inline int ath9k_init_debug(struct ath_hw *ah)
180 return 0; 177 return 0;
181} 178}
182 179
183static inline void ath9k_exit_debug(struct ath_hw *ah)
184{
185}
186
187static inline int ath9k_debug_create_root(void)
188{
189 return 0;
190}
191
192static inline void ath9k_debug_remove_root(void)
193{
194}
195
196static inline void ath_debug_stat_interrupt(struct ath_softc *sc, 180static inline void ath_debug_stat_interrupt(struct ath_softc *sc,
197 enum ath9k_int status) 181 enum ath9k_int status)
198{ 182{
199} 183}
200 184
201static inline void ath_debug_stat_tx(struct ath_softc *sc, 185static inline void ath_debug_stat_tx(struct ath_softc *sc,
202 struct ath_txq *txq,
203 struct ath_buf *bf, 186 struct ath_buf *bf,
204 struct ath_tx_status *ts) 187 struct ath_tx_status *ts)
205{ 188{
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index 2bbf94d0191e..d05163159572 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -234,7 +234,7 @@ void ath9k_hw_get_target_powers(struct ath_hw *ah,
234u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower, 234u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
235 bool is2GHz, int num_band_edges) 235 bool is2GHz, int num_band_edges)
236{ 236{
237 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 237 u16 twiceMaxEdgePower = MAX_RATE_POWER;
238 int i; 238 int i;
239 239
240 for (i = 0; (i < num_band_edges) && 240 for (i = 0; (i < num_band_edges) &&
@@ -273,12 +273,225 @@ void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah)
273 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; 273 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
274 break; 274 break;
275 default: 275 default:
276 ath_print(common, ATH_DBG_EEPROM, 276 ath_dbg(common, ATH_DBG_EEPROM,
277 "Invalid chainmask configuration\n"); 277 "Invalid chainmask configuration\n");
278 break; 278 break;
279 } 279 }
280} 280}
281 281
282void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
283 struct ath9k_channel *chan,
284 void *pRawDataSet,
285 u8 *bChans, u16 availPiers,
286 u16 tPdGainOverlap,
287 u16 *pPdGainBoundaries, u8 *pPDADCValues,
288 u16 numXpdGains)
289{
290 int i, j, k;
291 int16_t ss;
292 u16 idxL = 0, idxR = 0, numPiers;
293 static u8 vpdTableL[AR5416_NUM_PD_GAINS]
294 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
295 static u8 vpdTableR[AR5416_NUM_PD_GAINS]
296 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
297 static u8 vpdTableI[AR5416_NUM_PD_GAINS]
298 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
299
300 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
301 u8 minPwrT4[AR5416_NUM_PD_GAINS];
302 u8 maxPwrT4[AR5416_NUM_PD_GAINS];
303 int16_t vpdStep;
304 int16_t tmpVal;
305 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
306 bool match;
307 int16_t minDelta = 0;
308 struct chan_centers centers;
309 int pdgain_boundary_default;
310 struct cal_data_per_freq *data_def = pRawDataSet;
311 struct cal_data_per_freq_4k *data_4k = pRawDataSet;
312 struct cal_data_per_freq_ar9287 *data_9287 = pRawDataSet;
313 bool eeprom_4k = AR_SREV_9285(ah) || AR_SREV_9271(ah);
314 int intercepts;
315
316 if (AR_SREV_9287(ah))
317 intercepts = AR9287_PD_GAIN_ICEPTS;
318 else
319 intercepts = AR5416_PD_GAIN_ICEPTS;
320
321 memset(&minPwrT4, 0, AR5416_NUM_PD_GAINS);
322 ath9k_hw_get_channel_centers(ah, chan, &centers);
323
324 for (numPiers = 0; numPiers < availPiers; numPiers++) {
325 if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
326 break;
327 }
328
329 match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
330 IS_CHAN_2GHZ(chan)),
331 bChans, numPiers, &idxL, &idxR);
332
333 if (match) {
334 if (AR_SREV_9287(ah)) {
335 /* FIXME: array overrun? */
336 for (i = 0; i < numXpdGains; i++) {
337 minPwrT4[i] = data_9287[idxL].pwrPdg[i][0];
338 maxPwrT4[i] = data_9287[idxL].pwrPdg[i][4];
339 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
340 data_9287[idxL].pwrPdg[i],
341 data_9287[idxL].vpdPdg[i],
342 intercepts,
343 vpdTableI[i]);
344 }
345 } else if (eeprom_4k) {
346 for (i = 0; i < numXpdGains; i++) {
347 minPwrT4[i] = data_4k[idxL].pwrPdg[i][0];
348 maxPwrT4[i] = data_4k[idxL].pwrPdg[i][4];
349 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
350 data_4k[idxL].pwrPdg[i],
351 data_4k[idxL].vpdPdg[i],
352 intercepts,
353 vpdTableI[i]);
354 }
355 } else {
356 for (i = 0; i < numXpdGains; i++) {
357 minPwrT4[i] = data_def[idxL].pwrPdg[i][0];
358 maxPwrT4[i] = data_def[idxL].pwrPdg[i][4];
359 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
360 data_def[idxL].pwrPdg[i],
361 data_def[idxL].vpdPdg[i],
362 intercepts,
363 vpdTableI[i]);
364 }
365 }
366 } else {
367 for (i = 0; i < numXpdGains; i++) {
368 if (AR_SREV_9287(ah)) {
369 pVpdL = data_9287[idxL].vpdPdg[i];
370 pPwrL = data_9287[idxL].pwrPdg[i];
371 pVpdR = data_9287[idxR].vpdPdg[i];
372 pPwrR = data_9287[idxR].pwrPdg[i];
373 } else if (eeprom_4k) {
374 pVpdL = data_4k[idxL].vpdPdg[i];
375 pPwrL = data_4k[idxL].pwrPdg[i];
376 pVpdR = data_4k[idxR].vpdPdg[i];
377 pPwrR = data_4k[idxR].pwrPdg[i];
378 } else {
379 pVpdL = data_def[idxL].vpdPdg[i];
380 pPwrL = data_def[idxL].pwrPdg[i];
381 pVpdR = data_def[idxR].vpdPdg[i];
382 pPwrR = data_def[idxR].pwrPdg[i];
383 }
384
385 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
386
387 maxPwrT4[i] =
388 min(pPwrL[intercepts - 1],
389 pPwrR[intercepts - 1]);
390
391
392 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
393 pPwrL, pVpdL,
394 intercepts,
395 vpdTableL[i]);
396 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
397 pPwrR, pVpdR,
398 intercepts,
399 vpdTableR[i]);
400
401 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
402 vpdTableI[i][j] =
403 (u8)(ath9k_hw_interpolate((u16)
404 FREQ2FBIN(centers.
405 synth_center,
406 IS_CHAN_2GHZ
407 (chan)),
408 bChans[idxL], bChans[idxR],
409 vpdTableL[i][j], vpdTableR[i][j]));
410 }
411 }
412 }
413
414 k = 0;
415
416 for (i = 0; i < numXpdGains; i++) {
417 if (i == (numXpdGains - 1))
418 pPdGainBoundaries[i] =
419 (u16)(maxPwrT4[i] / 2);
420 else
421 pPdGainBoundaries[i] =
422 (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
423
424 pPdGainBoundaries[i] =
425 min((u16)MAX_RATE_POWER, pPdGainBoundaries[i]);
426
427 if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
428 minDelta = pPdGainBoundaries[0] - 23;
429 pPdGainBoundaries[0] = 23;
430 } else {
431 minDelta = 0;
432 }
433
434 if (i == 0) {
435 if (AR_SREV_9280_20_OR_LATER(ah))
436 ss = (int16_t)(0 - (minPwrT4[i] / 2));
437 else
438 ss = 0;
439 } else {
440 ss = (int16_t)((pPdGainBoundaries[i - 1] -
441 (minPwrT4[i] / 2)) -
442 tPdGainOverlap + 1 + minDelta);
443 }
444 vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
445 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
446
447 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
448 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
449 pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
450 ss++;
451 }
452
453 sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
454 tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
455 (minPwrT4[i] / 2));
456 maxIndex = (tgtIndex < sizeCurrVpdTable) ?
457 tgtIndex : sizeCurrVpdTable;
458
459 while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
460 pPDADCValues[k++] = vpdTableI[i][ss++];
461 }
462
463 vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
464 vpdTableI[i][sizeCurrVpdTable - 2]);
465 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
466
467 if (tgtIndex >= maxIndex) {
468 while ((ss <= tgtIndex) &&
469 (k < (AR5416_NUM_PDADC_VALUES - 1))) {
470 tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
471 (ss - maxIndex + 1) * vpdStep));
472 pPDADCValues[k++] = (u8)((tmpVal > 255) ?
473 255 : tmpVal);
474 ss++;
475 }
476 }
477 }
478
479 if (eeprom_4k)
480 pdgain_boundary_default = 58;
481 else
482 pdgain_boundary_default = pPdGainBoundaries[i - 1];
483
484 while (i < AR5416_PD_GAINS_IN_MASK) {
485 pPdGainBoundaries[i] = pdgain_boundary_default;
486 i++;
487 }
488
489 while (k < AR5416_NUM_PDADC_VALUES) {
490 pPDADCValues[k] = pPDADCValues[k - 1];
491 k++;
492 }
493}
494
282int ath9k_hw_eeprom_init(struct ath_hw *ah) 495int ath9k_hw_eeprom_init(struct ath_hw *ah)
283{ 496{
284 int status; 497 int status;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index dd59f09441a3..58e2ddc927a9 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -17,12 +17,12 @@
17#ifndef EEPROM_H 17#ifndef EEPROM_H
18#define EEPROM_H 18#define EEPROM_H
19 19
20#define AR_EEPROM_MODAL_SPURS 5
21
20#include "../ath.h" 22#include "../ath.h"
21#include <net/cfg80211.h> 23#include <net/cfg80211.h>
22#include "ar9003_eeprom.h" 24#include "ar9003_eeprom.h"
23 25
24#define AH_USE_EEPROM 0x1
25
26#ifdef __BIG_ENDIAN 26#ifdef __BIG_ENDIAN
27#define AR5416_EEPROM_MAGIC 0x5aa5 27#define AR5416_EEPROM_MAGIC 0x5aa5
28#else 28#else
@@ -149,8 +149,6 @@
149#define AR5416_NUM_PD_GAINS 4 149#define AR5416_NUM_PD_GAINS 4
150#define AR5416_PD_GAINS_IN_MASK 4 150#define AR5416_PD_GAINS_IN_MASK 4
151#define AR5416_PD_GAIN_ICEPTS 5 151#define AR5416_PD_GAIN_ICEPTS 5
152#define AR5416_EEPROM_MODAL_SPURS 5
153#define AR5416_MAX_RATE_POWER 63
154#define AR5416_NUM_PDADC_VALUES 128 152#define AR5416_NUM_PDADC_VALUES 128
155#define AR5416_BCHAN_UNUSED 0xFF 153#define AR5416_BCHAN_UNUSED 0xFF
156#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 154#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
@@ -175,8 +173,6 @@
175#define AR5416_EEP4K_NUM_CTLS 12 173#define AR5416_EEP4K_NUM_CTLS 12
176#define AR5416_EEP4K_NUM_BAND_EDGES 4 174#define AR5416_EEP4K_NUM_BAND_EDGES 4
177#define AR5416_EEP4K_NUM_PD_GAINS 2 175#define AR5416_EEP4K_NUM_PD_GAINS 2
178#define AR5416_EEP4K_PD_GAINS_IN_MASK 4
179#define AR5416_EEP4K_PD_GAIN_ICEPTS 5
180#define AR5416_EEP4K_MAX_CHAINS 1 176#define AR5416_EEP4K_MAX_CHAINS 1
181 177
182#define AR9280_TX_GAIN_TABLE_SIZE 22 178#define AR9280_TX_GAIN_TABLE_SIZE 22
@@ -198,35 +194,12 @@
198#define AR9287_NUM_2G_40_TARGET_POWERS 3 194#define AR9287_NUM_2G_40_TARGET_POWERS 3
199#define AR9287_NUM_CTLS 12 195#define AR9287_NUM_CTLS 12
200#define AR9287_NUM_BAND_EDGES 4 196#define AR9287_NUM_BAND_EDGES 4
201#define AR9287_NUM_PD_GAINS 4
202#define AR9287_PD_GAINS_IN_MASK 4
203#define AR9287_PD_GAIN_ICEPTS 1 197#define AR9287_PD_GAIN_ICEPTS 1
204#define AR9287_EEPROM_MODAL_SPURS 5
205#define AR9287_MAX_RATE_POWER 63
206#define AR9287_NUM_PDADC_VALUES 128
207#define AR9287_NUM_RATES 16
208#define AR9287_BCHAN_UNUSED 0xFF
209#define AR9287_MAX_PWR_RANGE_IN_HALF_DB 64
210#define AR9287_OPFLAGS_11A 0x01
211#define AR9287_OPFLAGS_11G 0x02
212#define AR9287_OPFLAGS_2G_HT40 0x08
213#define AR9287_OPFLAGS_2G_HT20 0x20
214#define AR9287_OPFLAGS_5G_HT40 0x04
215#define AR9287_OPFLAGS_5G_HT20 0x10
216#define AR9287_EEPMISC_BIG_ENDIAN 0x01 198#define AR9287_EEPMISC_BIG_ENDIAN 0x01
217#define AR9287_EEPMISC_WOW 0x02 199#define AR9287_EEPMISC_WOW 0x02
218#define AR9287_MAX_CHAINS 2 200#define AR9287_MAX_CHAINS 2
219#define AR9287_ANT_16S 32 201#define AR9287_ANT_16S 32
220#define AR9287_custdatasize 20 202
221
222#define AR9287_NUM_ANT_CHAIN_FIELDS 6
223#define AR9287_NUM_ANT_COMMON_FIELDS 4
224#define AR9287_SIZE_ANT_CHAIN_FIELD 2
225#define AR9287_SIZE_ANT_COMMON_FIELD 4
226#define AR9287_ANT_CHAIN_MASK 0x3
227#define AR9287_ANT_COMMON_MASK 0xf
228#define AR9287_CHAIN_0_IDX 0
229#define AR9287_CHAIN_1_IDX 1
230#define AR9287_DATA_SZ 32 203#define AR9287_DATA_SZ 32
231 204
232#define AR9287_PWR_TABLE_OFFSET_DB -5 205#define AR9287_PWR_TABLE_OFFSET_DB -5
@@ -280,6 +253,7 @@ enum eeprom_param {
280 EEP_PAPRD, 253 EEP_PAPRD,
281 EEP_MODAL_VER, 254 EEP_MODAL_VER,
282 EEP_ANT_DIV_CTL1, 255 EEP_ANT_DIV_CTL1,
256 EEP_CHAIN_MASK_REDUCE
283}; 257};
284 258
285enum ar5416_rates { 259enum ar5416_rates {
@@ -395,7 +369,7 @@ struct modal_eep_header {
395 u16 xpaBiasLvlFreq[3]; 369 u16 xpaBiasLvlFreq[3];
396 u8 futureModal[6]; 370 u8 futureModal[6];
397 371
398 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS]; 372 struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
399} __packed; 373} __packed;
400 374
401struct calDataPerFreqOpLoop { 375struct calDataPerFreqOpLoop {
@@ -463,7 +437,7 @@ struct modal_eep_4k_header {
463 u8 db2_4:4, reserved:4; 437 u8 db2_4:4, reserved:4;
464#endif 438#endif
465 u8 futureModal[4]; 439 u8 futureModal[4];
466 struct spur_chan spurChans[AR5416_EEPROM_MODAL_SPURS]; 440 struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
467} __packed; 441} __packed;
468 442
469struct base_eep_ar9287_header { 443struct base_eep_ar9287_header {
@@ -521,7 +495,7 @@ struct modal_eep_ar9287_header {
521 u8 ob_qam; 495 u8 ob_qam;
522 u8 ob_pal_off; 496 u8 ob_pal_off;
523 u8 futureModal[30]; 497 u8 futureModal[30];
524 struct spur_chan spurChans[AR9287_EEPROM_MODAL_SPURS]; 498 struct spur_chan spurChans[AR_EEPROM_MODAL_SPURS];
525} __packed; 499} __packed;
526 500
527struct cal_data_per_freq { 501struct cal_data_per_freq {
@@ -530,8 +504,8 @@ struct cal_data_per_freq {
530} __packed; 504} __packed;
531 505
532struct cal_data_per_freq_4k { 506struct cal_data_per_freq_4k {
533 u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS]; 507 u8 pwrPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
534 u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_EEP4K_PD_GAIN_ICEPTS]; 508 u8 vpdPdg[AR5416_EEP4K_NUM_PD_GAINS][AR5416_PD_GAIN_ICEPTS];
535} __packed; 509} __packed;
536 510
537struct cal_target_power_leg { 511struct cal_target_power_leg {
@@ -557,8 +531,8 @@ struct cal_data_op_loop_ar9287 {
557} __packed; 531} __packed;
558 532
559struct cal_data_per_freq_ar9287 { 533struct cal_data_per_freq_ar9287 {
560 u8 pwrPdg[AR9287_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS]; 534 u8 pwrPdg[AR5416_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS];
561 u8 vpdPdg[AR9287_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS]; 535 u8 vpdPdg[AR5416_NUM_PD_GAINS][AR9287_PD_GAIN_ICEPTS];
562} __packed; 536} __packed;
563 537
564union cal_data_per_freq_ar9287_u { 538union cal_data_per_freq_ar9287_u {
@@ -673,15 +647,12 @@ struct eeprom_ops {
673 bool (*fill_eeprom)(struct ath_hw *hw); 647 bool (*fill_eeprom)(struct ath_hw *hw);
674 int (*get_eeprom_ver)(struct ath_hw *hw); 648 int (*get_eeprom_ver)(struct ath_hw *hw);
675 int (*get_eeprom_rev)(struct ath_hw *hw); 649 int (*get_eeprom_rev)(struct ath_hw *hw);
676 u8 (*get_num_ant_config)(struct ath_hw *hw,
677 enum ath9k_hal_freq_band band);
678 u32 (*get_eeprom_antenna_cfg)(struct ath_hw *hw,
679 struct ath9k_channel *chan);
680 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan); 650 void (*set_board_values)(struct ath_hw *hw, struct ath9k_channel *chan);
681 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan); 651 void (*set_addac)(struct ath_hw *hw, struct ath9k_channel *chan);
682 void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan, 652 void (*set_txpower)(struct ath_hw *hw, struct ath9k_channel *chan,
683 u16 cfgCtl, u8 twiceAntennaReduction, 653 u16 cfgCtl, u8 twiceAntennaReduction,
684 u8 twiceMaxRegulatoryPower, u8 powerLimit); 654 u8 twiceMaxRegulatoryPower, u8 powerLimit,
655 bool test);
685 u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz); 656 u16 (*get_spur_channel)(struct ath_hw *ah, u16 i, bool is2GHz);
686}; 657};
687 658
@@ -714,6 +685,14 @@ u16 ath9k_hw_get_max_edge_power(u16 freq, struct cal_ctl_edges *pRdEdgesPower,
714void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah); 685void ath9k_hw_update_regulatory_maxpower(struct ath_hw *ah);
715int ath9k_hw_eeprom_init(struct ath_hw *ah); 686int ath9k_hw_eeprom_init(struct ath_hw *ah);
716 687
688void ath9k_hw_get_gain_boundaries_pdadcs(struct ath_hw *ah,
689 struct ath9k_channel *chan,
690 void *pRawDataSet,
691 u8 *bChans, u16 availPiers,
692 u16 tPdGainOverlap,
693 u16 *pPdGainBoundaries, u8 *pPDADCValues,
694 u16 numXpdGains);
695
717#define ar5416_get_ntxchains(_txchainmask) \ 696#define ar5416_get_ntxchains(_txchainmask) \
718 (((_txchainmask >> 2) & 1) + \ 697 (((_txchainmask >> 2) & 1) + \
719 ((_txchainmask >> 1) & 1) + (_txchainmask & 1)) 698 ((_txchainmask >> 1) & 1) + (_txchainmask & 1))
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index 4fa4d8e28c64..fbdff7e47952 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -37,14 +37,14 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
37 eep_start_loc = 64; 37 eep_start_loc = 64;
38 38
39 if (!ath9k_hw_use_flash(ah)) { 39 if (!ath9k_hw_use_flash(ah)) {
40 ath_print(common, ATH_DBG_EEPROM, 40 ath_dbg(common, ATH_DBG_EEPROM,
41 "Reading from EEPROM, not flash\n"); 41 "Reading from EEPROM, not flash\n");
42 } 42 }
43 43
44 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 44 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
45 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { 45 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
46 ath_print(common, ATH_DBG_EEPROM, 46 ath_dbg(common, ATH_DBG_EEPROM,
47 "Unable to read eeprom region\n"); 47 "Unable to read eeprom region\n");
48 return false; 48 return false;
49 } 49 }
50 eep_data++; 50 eep_data++;
@@ -69,13 +69,12 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
69 if (!ath9k_hw_use_flash(ah)) { 69 if (!ath9k_hw_use_flash(ah)) {
70 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, 70 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
71 &magic)) { 71 &magic)) {
72 ath_print(common, ATH_DBG_FATAL, 72 ath_err(common, "Reading Magic # failed\n");
73 "Reading Magic # failed\n");
74 return false; 73 return false;
75 } 74 }
76 75
77 ath_print(common, ATH_DBG_EEPROM, 76 ath_dbg(common, ATH_DBG_EEPROM,
78 "Read Magic = 0x%04X\n", magic); 77 "Read Magic = 0x%04X\n", magic);
79 78
80 if (magic != AR5416_EEPROM_MAGIC) { 79 if (magic != AR5416_EEPROM_MAGIC) {
81 magic2 = swab16(magic); 80 magic2 = swab16(magic);
@@ -90,16 +89,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
90 eepdata++; 89 eepdata++;
91 } 90 }
92 } else { 91 } else {
93 ath_print(common, ATH_DBG_FATAL, 92 ath_err(common,
94 "Invalid EEPROM Magic. " 93 "Invalid EEPROM Magic. Endianness mismatch.\n");
95 "endianness mismatch.\n");
96 return -EINVAL; 94 return -EINVAL;
97 } 95 }
98 } 96 }
99 } 97 }
100 98
101 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", 99 ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
102 need_swap ? "True" : "False"); 100 need_swap ? "True" : "False");
103 101
104 if (need_swap) 102 if (need_swap)
105 el = swab16(ah->eeprom.map4k.baseEepHeader.length); 103 el = swab16(ah->eeprom.map4k.baseEepHeader.length);
@@ -120,8 +118,8 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
120 u32 integer; 118 u32 integer;
121 u16 word; 119 u16 word;
122 120
123 ath_print(common, ATH_DBG_EEPROM, 121 ath_dbg(common, ATH_DBG_EEPROM,
124 "EEPROM Endianness is not native.. Changing\n"); 122 "EEPROM Endianness is not native.. Changing\n");
125 123
126 word = swab16(eep->baseEepHeader.length); 124 word = swab16(eep->baseEepHeader.length);
127 eep->baseEepHeader.length = word; 125 eep->baseEepHeader.length = word;
@@ -155,7 +153,7 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
155 eep->modalHeader.antCtrlChain[i] = integer; 153 eep->modalHeader.antCtrlChain[i] = integer;
156 } 154 }
157 155
158 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) { 156 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
159 word = swab16(eep->modalHeader.spurChans[i].spurChan); 157 word = swab16(eep->modalHeader.spurChans[i].spurChan);
160 eep->modalHeader.spurChans[i].spurChan = word; 158 eep->modalHeader.spurChans[i].spurChan = word;
161 } 159 }
@@ -163,9 +161,8 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
163 161
164 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 162 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
165 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 163 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
166 ath_print(common, ATH_DBG_FATAL, 164 ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
167 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 165 sum, ah->eep_ops->get_eeprom_ver(ah));
168 sum, ah->eep_ops->get_eeprom_ver(ah));
169 return -EINVAL; 166 return -EINVAL;
170 } 167 }
171 168
@@ -230,173 +227,6 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
230 } 227 }
231} 228}
232 229
233static void ath9k_hw_get_4k_gain_boundaries_pdadcs(struct ath_hw *ah,
234 struct ath9k_channel *chan,
235 struct cal_data_per_freq_4k *pRawDataSet,
236 u8 *bChans, u16 availPiers,
237 u16 tPdGainOverlap,
238 u16 *pPdGainBoundaries, u8 *pPDADCValues,
239 u16 numXpdGains)
240{
241#define TMP_VAL_VPD_TABLE \
242 ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep));
243 int i, j, k;
244 int16_t ss;
245 u16 idxL = 0, idxR = 0, numPiers;
246 static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS]
247 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
248 static u8 vpdTableR[AR5416_EEP4K_NUM_PD_GAINS]
249 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
250 static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS]
251 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
252
253 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
254 u8 minPwrT4[AR5416_EEP4K_NUM_PD_GAINS];
255 u8 maxPwrT4[AR5416_EEP4K_NUM_PD_GAINS];
256 int16_t vpdStep;
257 int16_t tmpVal;
258 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
259 bool match;
260 int16_t minDelta = 0;
261 struct chan_centers centers;
262#define PD_GAIN_BOUNDARY_DEFAULT 58;
263
264 memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
265 ath9k_hw_get_channel_centers(ah, chan, &centers);
266
267 for (numPiers = 0; numPiers < availPiers; numPiers++) {
268 if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
269 break;
270 }
271
272 match = ath9k_hw_get_lower_upper_index(
273 (u8)FREQ2FBIN(centers.synth_center,
274 IS_CHAN_2GHZ(chan)), bChans, numPiers,
275 &idxL, &idxR);
276
277 if (match) {
278 for (i = 0; i < numXpdGains; i++) {
279 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
280 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
281 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
282 pRawDataSet[idxL].pwrPdg[i],
283 pRawDataSet[idxL].vpdPdg[i],
284 AR5416_EEP4K_PD_GAIN_ICEPTS,
285 vpdTableI[i]);
286 }
287 } else {
288 for (i = 0; i < numXpdGains; i++) {
289 pVpdL = pRawDataSet[idxL].vpdPdg[i];
290 pPwrL = pRawDataSet[idxL].pwrPdg[i];
291 pVpdR = pRawDataSet[idxR].vpdPdg[i];
292 pPwrR = pRawDataSet[idxR].pwrPdg[i];
293
294 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
295
296 maxPwrT4[i] =
297 min(pPwrL[AR5416_EEP4K_PD_GAIN_ICEPTS - 1],
298 pPwrR[AR5416_EEP4K_PD_GAIN_ICEPTS - 1]);
299
300
301 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
302 pPwrL, pVpdL,
303 AR5416_EEP4K_PD_GAIN_ICEPTS,
304 vpdTableL[i]);
305 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
306 pPwrR, pVpdR,
307 AR5416_EEP4K_PD_GAIN_ICEPTS,
308 vpdTableR[i]);
309
310 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
311 vpdTableI[i][j] =
312 (u8)(ath9k_hw_interpolate((u16)
313 FREQ2FBIN(centers.
314 synth_center,
315 IS_CHAN_2GHZ
316 (chan)),
317 bChans[idxL], bChans[idxR],
318 vpdTableL[i][j], vpdTableR[i][j]));
319 }
320 }
321 }
322
323 k = 0;
324
325 for (i = 0; i < numXpdGains; i++) {
326 if (i == (numXpdGains - 1))
327 pPdGainBoundaries[i] =
328 (u16)(maxPwrT4[i] / 2);
329 else
330 pPdGainBoundaries[i] =
331 (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
332
333 pPdGainBoundaries[i] =
334 min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]);
335
336 if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
337 minDelta = pPdGainBoundaries[0] - 23;
338 pPdGainBoundaries[0] = 23;
339 } else {
340 minDelta = 0;
341 }
342
343 if (i == 0) {
344 if (AR_SREV_9280_20_OR_LATER(ah))
345 ss = (int16_t)(0 - (minPwrT4[i] / 2));
346 else
347 ss = 0;
348 } else {
349 ss = (int16_t)((pPdGainBoundaries[i - 1] -
350 (minPwrT4[i] / 2)) -
351 tPdGainOverlap + 1 + minDelta);
352 }
353 vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
354 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
355
356 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
357 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
358 pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
359 ss++;
360 }
361
362 sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
363 tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
364 (minPwrT4[i] / 2));
365 maxIndex = (tgtIndex < sizeCurrVpdTable) ?
366 tgtIndex : sizeCurrVpdTable;
367
368 while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1)))
369 pPDADCValues[k++] = vpdTableI[i][ss++];
370
371 vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
372 vpdTableI[i][sizeCurrVpdTable - 2]);
373 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
374
375 if (tgtIndex >= maxIndex) {
376 while ((ss <= tgtIndex) &&
377 (k < (AR5416_NUM_PDADC_VALUES - 1))) {
378 tmpVal = (int16_t) TMP_VAL_VPD_TABLE;
379 pPDADCValues[k++] = (u8)((tmpVal > 255) ?
380 255 : tmpVal);
381 ss++;
382 }
383 }
384 }
385
386 while (i < AR5416_EEP4K_PD_GAINS_IN_MASK) {
387 pPdGainBoundaries[i] = PD_GAIN_BOUNDARY_DEFAULT;
388 i++;
389 }
390
391 while (k < AR5416_NUM_PDADC_VALUES) {
392 pPDADCValues[k] = pPDADCValues[k - 1];
393 k++;
394 }
395
396 return;
397#undef TMP_VAL_VPD_TABLE
398}
399
400static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, 230static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
401 struct ath9k_channel *chan, 231 struct ath9k_channel *chan,
402 int16_t *pTxPowerIndexOffset) 232 int16_t *pTxPowerIndexOffset)
@@ -407,7 +237,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
407 u8 *pCalBChans = NULL; 237 u8 *pCalBChans = NULL;
408 u16 pdGainOverlap_t2; 238 u16 pdGainOverlap_t2;
409 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; 239 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
410 u16 gainBoundaries[AR5416_EEP4K_PD_GAINS_IN_MASK]; 240 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
411 u16 numPiers, i, j; 241 u16 numPiers, i, j;
412 u16 numXpdGain, xpdMask; 242 u16 numXpdGain, xpdMask;
413 u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 }; 243 u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 };
@@ -429,12 +259,12 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
429 259
430 numXpdGain = 0; 260 numXpdGain = 0;
431 261
432 for (i = 1; i <= AR5416_EEP4K_PD_GAINS_IN_MASK; i++) { 262 for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
433 if ((xpdMask >> (AR5416_EEP4K_PD_GAINS_IN_MASK - i)) & 1) { 263 if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
434 if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS) 264 if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS)
435 break; 265 break;
436 xpdGainValues[numXpdGain] = 266 xpdGainValues[numXpdGain] =
437 (u16)(AR5416_EEP4K_PD_GAINS_IN_MASK - i); 267 (u16)(AR5416_PD_GAINS_IN_MASK - i);
438 numXpdGain++; 268 numXpdGain++;
439 } 269 }
440 } 270 }
@@ -458,7 +288,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
458 if (pEepData->baseEepHeader.txMask & (1 << i)) { 288 if (pEepData->baseEepHeader.txMask & (1 << i)) {
459 pRawDataset = pEepData->calPierData2G[i]; 289 pRawDataset = pEepData->calPierData2G[i];
460 290
461 ath9k_hw_get_4k_gain_boundaries_pdadcs(ah, chan, 291 ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
462 pRawDataset, pCalBChans, 292 pRawDataset, pCalBChans,
463 numPiers, pdGainOverlap_t2, 293 numPiers, pdGainOverlap_t2,
464 gainBoundaries, 294 gainBoundaries,
@@ -488,21 +318,20 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
488 ((pdadcValues[4 * j + 3] & 0xFF) << 24); 318 ((pdadcValues[4 * j + 3] & 0xFF) << 24);
489 REG_WRITE(ah, regOffset, reg32); 319 REG_WRITE(ah, regOffset, reg32);
490 320
491 ath_print(common, ATH_DBG_EEPROM, 321 ath_dbg(common, ATH_DBG_EEPROM,
492 "PDADC (%d,%4x): %4.4x %8.8x\n", 322 "PDADC (%d,%4x): %4.4x %8.8x\n",
493 i, regChainOffset, regOffset, 323 i, regChainOffset, regOffset,
494 reg32); 324 reg32);
495 ath_print(common, ATH_DBG_EEPROM, 325 ath_dbg(common, ATH_DBG_EEPROM,
496 "PDADC: Chain %d | " 326 "PDADC: Chain %d | "
497 "PDADC %3d Value %3d | " 327 "PDADC %3d Value %3d | "
498 "PDADC %3d Value %3d | " 328 "PDADC %3d Value %3d | "
499 "PDADC %3d Value %3d | " 329 "PDADC %3d Value %3d | "
500 "PDADC %3d Value %3d |\n", 330 "PDADC %3d Value %3d |\n",
501 i, 4 * j, pdadcValues[4 * j], 331 i, 4 * j, pdadcValues[4 * j],
502 4 * j + 1, pdadcValues[4 * j + 1], 332 4 * j + 1, pdadcValues[4 * j + 1],
503 4 * j + 2, pdadcValues[4 * j + 2], 333 4 * j + 2, pdadcValues[4 * j + 2],
504 4 * j + 3, 334 4 * j + 3, pdadcValues[4 * j + 3]);
505 pdadcValues[4 * j + 3]);
506 335
507 regOffset += 4; 336 regOffset += 4;
508 } 337 }
@@ -532,14 +361,16 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
532 int i; 361 int i;
533 int16_t twiceLargestAntenna; 362 int16_t twiceLargestAntenna;
534 u16 twiceMinEdgePower; 363 u16 twiceMinEdgePower;
535 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 364 u16 twiceMaxEdgePower = MAX_RATE_POWER;
536 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 365 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
537 u16 numCtlModes, *pCtlMode, ctlMode, freq; 366 u16 numCtlModes;
367 const u16 *pCtlMode;
368 u16 ctlMode, freq;
538 struct chan_centers centers; 369 struct chan_centers centers;
539 struct cal_ctl_data_4k *rep; 370 struct cal_ctl_data_4k *rep;
540 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; 371 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
541 static const u16 tpScaleReductionTable[5] = 372 static const u16 tpScaleReductionTable[5] =
542 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; 373 { 0, 3, 6, 9, MAX_RATE_POWER };
543 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = { 374 struct cal_target_power_leg targetPowerOfdm, targetPowerCck = {
544 0, { 0, 0, 0, 0} 375 0, { 0, 0, 0, 0}
545 }; 376 };
@@ -550,10 +381,10 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
550 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { 381 struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = {
551 0, {0, 0, 0, 0} 382 0, {0, 0, 0, 0}
552 }; 383 };
553 u16 ctlModesFor11g[] = 384 static const u16 ctlModesFor11g[] = {
554 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, 385 CTL_11B, CTL_11G, CTL_2GHT20,
555 CTL_2GHT40 386 CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
556 }; 387 };
557 388
558 ath9k_hw_get_channel_centers(ah, chan, &centers); 389 ath9k_hw_get_channel_centers(ah, chan, &centers);
559 390
@@ -615,7 +446,7 @@ static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah,
615 446
616 if (ah->eep_ops->get_eeprom_ver(ah) == 14 && 447 if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
617 ah->eep_ops->get_eeprom_rev(ah) <= 2) 448 ah->eep_ops->get_eeprom_rev(ah) <= 2)
618 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 449 twiceMaxEdgePower = MAX_RATE_POWER;
619 450
620 for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && 451 for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) &&
621 pEepData->ctlIndex[i]; i++) { 452 pEepData->ctlIndex[i]; i++) {
@@ -726,7 +557,7 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
726 u16 cfgCtl, 557 u16 cfgCtl,
727 u8 twiceAntennaReduction, 558 u8 twiceAntennaReduction,
728 u8 twiceMaxRegulatoryPower, 559 u8 twiceMaxRegulatoryPower,
729 u8 powerLimit) 560 u8 powerLimit, bool test)
730{ 561{
731 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 562 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
732 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; 563 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
@@ -751,15 +582,20 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
751 582
752 ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset); 583 ath9k_hw_set_4k_power_cal_table(ah, chan, &txPowerIndexOffset);
753 584
585 regulatory->max_power_level = 0;
754 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 586 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
755 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); 587 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
756 if (ratesArray[i] > AR5416_MAX_RATE_POWER) 588 if (ratesArray[i] > MAX_RATE_POWER)
757 ratesArray[i] = AR5416_MAX_RATE_POWER; 589 ratesArray[i] = MAX_RATE_POWER;
590
591 if (ratesArray[i] > regulatory->max_power_level)
592 regulatory->max_power_level = ratesArray[i];
758 } 593 }
759 594
595 if (test)
596 return;
760 597
761 /* Update regulatory */ 598 /* Update regulatory */
762
763 i = rate6mb; 599 i = rate6mb;
764 if (IS_CHAN_HT40(chan)) 600 if (IS_CHAN_HT40(chan))
765 i = rateHt40_0; 601 i = rateHt40_0;
@@ -934,8 +770,7 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
934 pModal = &eep->modalHeader; 770 pModal = &eep->modalHeader;
935 txRxAttenLocal = 23; 771 txRxAttenLocal = 23;
936 772
937 REG_WRITE(ah, AR_PHY_SWITCH_COM, 773 REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
938 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
939 774
940 /* Single chain for 4K EEPROM*/ 775 /* Single chain for 4K EEPROM*/
941 ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal); 776 ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal);
@@ -1151,21 +986,6 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1151 } 986 }
1152} 987}
1153 988
1154static u32 ath9k_hw_4k_get_eeprom_antenna_cfg(struct ath_hw *ah,
1155 struct ath9k_channel *chan)
1156{
1157 struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
1158 struct modal_eep_4k_header *pModal = &eep->modalHeader;
1159
1160 return pModal->antCtrlCommon;
1161}
1162
1163static u8 ath9k_hw_4k_get_num_ant_config(struct ath_hw *ah,
1164 enum ath9k_hal_freq_band freq_band)
1165{
1166 return 1;
1167}
1168
1169static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) 989static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
1170{ 990{
1171#define EEP_MAP4K_SPURCHAN \ 991#define EEP_MAP4K_SPURCHAN \
@@ -1174,17 +994,17 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
1174 994
1175 u16 spur_val = AR_NO_SPUR; 995 u16 spur_val = AR_NO_SPUR;
1176 996
1177 ath_print(common, ATH_DBG_ANI, 997 ath_dbg(common, ATH_DBG_ANI,
1178 "Getting spur idx %d is2Ghz. %d val %x\n", 998 "Getting spur idx:%d is2Ghz:%d val:%x\n",
1179 i, is2GHz, ah->config.spurchans[i][is2GHz]); 999 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1180 1000
1181 switch (ah->config.spurmode) { 1001 switch (ah->config.spurmode) {
1182 case SPUR_DISABLE: 1002 case SPUR_DISABLE:
1183 break; 1003 break;
1184 case SPUR_ENABLE_IOCTL: 1004 case SPUR_ENABLE_IOCTL:
1185 spur_val = ah->config.spurchans[i][is2GHz]; 1005 spur_val = ah->config.spurchans[i][is2GHz];
1186 ath_print(common, ATH_DBG_ANI, 1006 ath_dbg(common, ATH_DBG_ANI,
1187 "Getting spur val from new loc. %d\n", spur_val); 1007 "Getting spur val from new loc. %d\n", spur_val);
1188 break; 1008 break;
1189 case SPUR_ENABLE_EEPROM: 1009 case SPUR_ENABLE_EEPROM:
1190 spur_val = EEP_MAP4K_SPURCHAN; 1010 spur_val = EEP_MAP4K_SPURCHAN;
@@ -1202,8 +1022,6 @@ const struct eeprom_ops eep_4k_ops = {
1202 .fill_eeprom = ath9k_hw_4k_fill_eeprom, 1022 .fill_eeprom = ath9k_hw_4k_fill_eeprom,
1203 .get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver, 1023 .get_eeprom_ver = ath9k_hw_4k_get_eeprom_ver,
1204 .get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev, 1024 .get_eeprom_rev = ath9k_hw_4k_get_eeprom_rev,
1205 .get_num_ant_config = ath9k_hw_4k_get_num_ant_config,
1206 .get_eeprom_antenna_cfg = ath9k_hw_4k_get_eeprom_antenna_cfg,
1207 .set_board_values = ath9k_hw_4k_set_board_values, 1025 .set_board_values = ath9k_hw_4k_set_board_values,
1208 .set_addac = ath9k_hw_4k_set_addac, 1026 .set_addac = ath9k_hw_4k_set_addac,
1209 .set_txpower = ath9k_hw_4k_set_txpower, 1027 .set_txpower = ath9k_hw_4k_set_txpower,
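
Note on the eeprom_4k.c hunks above: ath9k_hw_4k_set_txpower() now clamps every ratesArray entry to the shared MAX_RATE_POWER, tracks regulatory->max_power_level inside the same loop, and returns early when the new `test` flag is set, before the per-rate power registers are programmed. A rough userspace illustration of the clamp-and-track step, using made-up values:

	#include <stdio.h>

	#define MAX_RATE_POWER 63	/* shared limit replacing AR5416_MAX_RATE_POWER and friends */

	int main(void)
	{
		int rates[] = { 40, 52, 70, 66 };	/* hypothetical per-rate powers, half dB */
		int tx_power_index_offset = 2;
		int max_power_level = 0;

		for (unsigned int i = 0; i < sizeof(rates) / sizeof(rates[0]); i++) {
			rates[i] += tx_power_index_offset;
			if (rates[i] > MAX_RATE_POWER)
				rates[i] = MAX_RATE_POWER;
			if (rates[i] > max_power_level)
				max_power_level = rates[i];
		}

		printf("max_power_level = %d\n", max_power_level);	/* prints 63 after clamping */
		return 0;
	}
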
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index 195406db3bd8..9b6bc8a953bc 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -37,21 +37,21 @@ static bool ath9k_hw_ar9287_fill_eeprom(struct ath_hw *ah)
37 int addr, eep_start_loc; 37 int addr, eep_start_loc;
38 eep_data = (u16 *)eep; 38 eep_data = (u16 *)eep;
39 39
40 if (AR9287_HTC_DEVID(ah)) 40 if (common->bus_ops->ath_bus_type == ATH_USB)
41 eep_start_loc = AR9287_HTC_EEP_START_LOC; 41 eep_start_loc = AR9287_HTC_EEP_START_LOC;
42 else 42 else
43 eep_start_loc = AR9287_EEP_START_LOC; 43 eep_start_loc = AR9287_EEP_START_LOC;
44 44
45 if (!ath9k_hw_use_flash(ah)) { 45 if (!ath9k_hw_use_flash(ah)) {
46 ath_print(common, ATH_DBG_EEPROM, 46 ath_dbg(common, ATH_DBG_EEPROM,
47 "Reading from EEPROM, not flash\n"); 47 "Reading from EEPROM, not flash\n");
48 } 48 }
49 49
50 for (addr = 0; addr < NUM_EEP_WORDS; addr++) { 50 for (addr = 0; addr < NUM_EEP_WORDS; addr++) {
51 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, 51 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc,
52 eep_data)) { 52 eep_data)) {
53 ath_print(common, ATH_DBG_EEPROM, 53 ath_dbg(common, ATH_DBG_EEPROM,
54 "Unable to read eeprom region\n"); 54 "Unable to read eeprom region\n");
55 return false; 55 return false;
56 } 56 }
57 eep_data++; 57 eep_data++;
@@ -72,13 +72,12 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
72 if (!ath9k_hw_use_flash(ah)) { 72 if (!ath9k_hw_use_flash(ah)) {
73 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, 73 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
74 &magic)) { 74 &magic)) {
75 ath_print(common, ATH_DBG_FATAL, 75 ath_err(common, "Reading Magic # failed\n");
76 "Reading Magic # failed\n");
77 return false; 76 return false;
78 } 77 }
79 78
80 ath_print(common, ATH_DBG_EEPROM, 79 ath_dbg(common, ATH_DBG_EEPROM,
81 "Read Magic = 0x%04X\n", magic); 80 "Read Magic = 0x%04X\n", magic);
82 81
83 if (magic != AR5416_EEPROM_MAGIC) { 82 if (magic != AR5416_EEPROM_MAGIC) {
84 magic2 = swab16(magic); 83 magic2 = swab16(magic);
@@ -93,16 +92,15 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
93 eepdata++; 92 eepdata++;
94 } 93 }
95 } else { 94 } else {
96 ath_print(common, ATH_DBG_FATAL, 95 ath_err(common,
97 "Invalid EEPROM Magic. " 96 "Invalid EEPROM Magic. Endianness mismatch.\n");
98 "Endianness mismatch.\n");
99 return -EINVAL; 97 return -EINVAL;
100 } 98 }
101 } 99 }
102 } 100 }
103 101
104 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", 102 ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
105 need_swap ? "True" : "False"); 103 need_swap ? "True" : "False");
106 104
107 if (need_swap) 105 if (need_swap)
108 el = swab16(ah->eeprom.map9287.baseEepHeader.length); 106 el = swab16(ah->eeprom.map9287.baseEepHeader.length);
@@ -152,7 +150,7 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
152 eep->modalHeader.antCtrlChain[i] = integer; 150 eep->modalHeader.antCtrlChain[i] = integer;
153 } 151 }
154 152
155 for (i = 0; i < AR9287_EEPROM_MODAL_SPURS; i++) { 153 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
156 word = swab16(eep->modalHeader.spurChans[i].spurChan); 154 word = swab16(eep->modalHeader.spurChans[i].spurChan);
157 eep->modalHeader.spurChans[i].spurChan = word; 155 eep->modalHeader.spurChans[i].spurChan = word;
158 } 156 }
@@ -160,9 +158,8 @@ static int ath9k_hw_ar9287_check_eeprom(struct ath_hw *ah)
160 158
161 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER 159 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER
162 || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 160 || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
163 ath_print(common, ATH_DBG_FATAL, 161 ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
164 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 162 sum, ah->eep_ops->get_eeprom_ver(ah));
165 sum, ah->eep_ops->get_eeprom_ver(ah));
166 return -EINVAL; 163 return -EINVAL;
167 } 164 }
168 165
@@ -223,163 +220,6 @@ static u32 ath9k_hw_ar9287_get_eeprom(struct ath_hw *ah,
223 } 220 }
224} 221}
225 222
226static void ath9k_hw_get_ar9287_gain_boundaries_pdadcs(struct ath_hw *ah,
227 struct ath9k_channel *chan,
228 struct cal_data_per_freq_ar9287 *pRawDataSet,
229 u8 *bChans, u16 availPiers,
230 u16 tPdGainOverlap,
231 u16 *pPdGainBoundaries,
232 u8 *pPDADCValues,
233 u16 numXpdGains)
234{
235#define TMP_VAL_VPD_TABLE \
236 ((vpdTableI[i][sizeCurrVpdTable - 1] + (ss - maxIndex + 1) * vpdStep));
237
238 int i, j, k;
239 int16_t ss;
240 u16 idxL = 0, idxR = 0, numPiers;
241 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
242 u8 minPwrT4[AR9287_NUM_PD_GAINS];
243 u8 maxPwrT4[AR9287_NUM_PD_GAINS];
244 int16_t vpdStep;
245 int16_t tmpVal;
246 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
247 bool match;
248 int16_t minDelta = 0;
249 struct chan_centers centers;
250 static u8 vpdTableL[AR5416_EEP4K_NUM_PD_GAINS]
251 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
252 static u8 vpdTableR[AR5416_EEP4K_NUM_PD_GAINS]
253 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
254 static u8 vpdTableI[AR5416_EEP4K_NUM_PD_GAINS]
255 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
256
257 memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
258 ath9k_hw_get_channel_centers(ah, chan, &centers);
259
260 for (numPiers = 0; numPiers < availPiers; numPiers++) {
261 if (bChans[numPiers] == AR9287_BCHAN_UNUSED)
262 break;
263 }
264
265 match = ath9k_hw_get_lower_upper_index(
266 (u8)FREQ2FBIN(centers.synth_center, IS_CHAN_2GHZ(chan)),
267 bChans, numPiers, &idxL, &idxR);
268
269 if (match) {
270 for (i = 0; i < numXpdGains; i++) {
271 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
272 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
273 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
274 pRawDataSet[idxL].pwrPdg[i],
275 pRawDataSet[idxL].vpdPdg[i],
276 AR9287_PD_GAIN_ICEPTS,
277 vpdTableI[i]);
278 }
279 } else {
280 for (i = 0; i < numXpdGains; i++) {
281 pVpdL = pRawDataSet[idxL].vpdPdg[i];
282 pPwrL = pRawDataSet[idxL].pwrPdg[i];
283 pVpdR = pRawDataSet[idxR].vpdPdg[i];
284 pPwrR = pRawDataSet[idxR].pwrPdg[i];
285
286 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
287
288 maxPwrT4[i] = min(pPwrL[AR9287_PD_GAIN_ICEPTS - 1],
289 pPwrR[AR9287_PD_GAIN_ICEPTS - 1]);
290
291 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
292 pPwrL, pVpdL,
293 AR9287_PD_GAIN_ICEPTS,
294 vpdTableL[i]);
295 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
296 pPwrR, pVpdR,
297 AR9287_PD_GAIN_ICEPTS,
298 vpdTableR[i]);
299
300 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
301 vpdTableI[i][j] = (u8)(ath9k_hw_interpolate(
302 (u16)FREQ2FBIN(centers. synth_center,
303 IS_CHAN_2GHZ(chan)),
304 bChans[idxL], bChans[idxR],
305 vpdTableL[i][j], vpdTableR[i][j]));
306 }
307 }
308 }
309
310 k = 0;
311
312 for (i = 0; i < numXpdGains; i++) {
313 if (i == (numXpdGains - 1))
314 pPdGainBoundaries[i] =
315 (u16)(maxPwrT4[i] / 2);
316 else
317 pPdGainBoundaries[i] =
318 (u16)((maxPwrT4[i] + minPwrT4[i+1]) / 4);
319
320 pPdGainBoundaries[i] = min((u16)AR5416_MAX_RATE_POWER,
321 pPdGainBoundaries[i]);
322
323
324 minDelta = 0;
325
326 if (i == 0) {
327 if (AR_SREV_9280_20_OR_LATER(ah))
328 ss = (int16_t)(0 - (minPwrT4[i] / 2));
329 else
330 ss = 0;
331 } else {
332 ss = (int16_t)((pPdGainBoundaries[i-1] -
333 (minPwrT4[i] / 2)) -
334 tPdGainOverlap + 1 + minDelta);
335 }
336
337 vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
338 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
339
340 while ((ss < 0) && (k < (AR9287_NUM_PDADC_VALUES - 1))) {
341 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
342 pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
343 ss++;
344 }
345
346 sizeCurrVpdTable = (u8)((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
347 tgtIndex = (u8)(pPdGainBoundaries[i] +
348 tPdGainOverlap - (minPwrT4[i] / 2));
349 maxIndex = (tgtIndex < sizeCurrVpdTable) ?
350 tgtIndex : sizeCurrVpdTable;
351
352 while ((ss < maxIndex) && (k < (AR9287_NUM_PDADC_VALUES - 1)))
353 pPDADCValues[k++] = vpdTableI[i][ss++];
354
355 vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
356 vpdTableI[i][sizeCurrVpdTable - 2]);
357 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
358
359 if (tgtIndex > maxIndex) {
360 while ((ss <= tgtIndex) &&
361 (k < (AR9287_NUM_PDADC_VALUES - 1))) {
362 tmpVal = (int16_t) TMP_VAL_VPD_TABLE;
363 pPDADCValues[k++] =
364 (u8)((tmpVal > 255) ? 255 : tmpVal);
365 ss++;
366 }
367 }
368 }
369
370 while (i < AR9287_PD_GAINS_IN_MASK) {
371 pPdGainBoundaries[i] = pPdGainBoundaries[i-1];
372 i++;
373 }
374
375 while (k < AR9287_NUM_PDADC_VALUES) {
376 pPDADCValues[k] = pPDADCValues[k-1];
377 k++;
378 }
379
380#undef TMP_VAL_VPD_TABLE
381}
382
383static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah, 223static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
384 struct ath9k_channel *chan, 224 struct ath9k_channel *chan,
385 struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop, 225 struct cal_data_op_loop_ar9287 *pRawDatasetOpLoop,
@@ -392,7 +232,7 @@ static void ar9287_eeprom_get_tx_gain_index(struct ath_hw *ah,
392 ath9k_hw_get_channel_centers(ah, chan, &centers); 232 ath9k_hw_get_channel_centers(ah, chan, &centers);
393 233
394 for (numPiers = 0; numPiers < availPiers; numPiers++) { 234 for (numPiers = 0; numPiers < availPiers; numPiers++) {
395 if (pCalChans[numPiers] == AR9287_BCHAN_UNUSED) 235 if (pCalChans[numPiers] == AR5416_BCHAN_UNUSED)
396 break; 236 break;
397 } 237 }
398 238
@@ -458,11 +298,11 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
458 struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop; 298 struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
459 u8 *pCalBChans = NULL; 299 u8 *pCalBChans = NULL;
460 u16 pdGainOverlap_t2; 300 u16 pdGainOverlap_t2;
461 u8 pdadcValues[AR9287_NUM_PDADC_VALUES]; 301 u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
462 u16 gainBoundaries[AR9287_PD_GAINS_IN_MASK]; 302 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
463 u16 numPiers = 0, i, j; 303 u16 numPiers = 0, i, j;
464 u16 numXpdGain, xpdMask; 304 u16 numXpdGain, xpdMask;
465 u16 xpdGainValues[AR9287_NUM_PD_GAINS] = {0, 0, 0, 0}; 305 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = {0, 0, 0, 0};
466 u32 reg32, regOffset, regChainOffset, regval; 306 u32 reg32, regOffset, regChainOffset, regval;
467 int16_t modalIdx, diff = 0; 307 int16_t modalIdx, diff = 0;
468 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; 308 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
@@ -490,12 +330,12 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
490 numXpdGain = 0; 330 numXpdGain = 0;
491 331
492 /* Calculate the value of xpdgains from the xpdGain Mask */ 332 /* Calculate the value of xpdgains from the xpdGain Mask */
493 for (i = 1; i <= AR9287_PD_GAINS_IN_MASK; i++) { 333 for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) {
494 if ((xpdMask >> (AR9287_PD_GAINS_IN_MASK - i)) & 1) { 334 if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) {
495 if (numXpdGain >= AR9287_NUM_PD_GAINS) 335 if (numXpdGain >= AR5416_NUM_PD_GAINS)
496 break; 336 break;
497 xpdGainValues[numXpdGain] = 337 xpdGainValues[numXpdGain] =
498 (u16)(AR9287_PD_GAINS_IN_MASK-i); 338 (u16)(AR5416_PD_GAINS_IN_MASK-i);
499 numXpdGain++; 339 numXpdGain++;
500 } 340 }
501 } 341 }
@@ -528,7 +368,7 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
528 (struct cal_data_per_freq_ar9287 *) 368 (struct cal_data_per_freq_ar9287 *)
529 pEepData->calPierData2G[i]; 369 pEepData->calPierData2G[i];
530 370
531 ath9k_hw_get_ar9287_gain_boundaries_pdadcs(ah, chan, 371 ath9k_hw_get_gain_boundaries_pdadcs(ah, chan,
532 pRawDataset, 372 pRawDataset,
533 pCalBChans, numPiers, 373 pCalBChans, numPiers,
534 pdGainOverlap_t2, 374 pdGainOverlap_t2,
@@ -564,13 +404,13 @@ static void ath9k_hw_set_ar9287_power_cal_table(struct ath_hw *ah,
564 (int32_t)AR9287_PWR_TABLE_OFFSET_DB); 404 (int32_t)AR9287_PWR_TABLE_OFFSET_DB);
565 diff *= 2; 405 diff *= 2;
566 406
567 for (j = 0; j < ((u16)AR9287_NUM_PDADC_VALUES-diff); j++) 407 for (j = 0; j < ((u16)AR5416_NUM_PDADC_VALUES-diff); j++)
568 pdadcValues[j] = pdadcValues[j+diff]; 408 pdadcValues[j] = pdadcValues[j+diff];
569 409
570 for (j = (u16)(AR9287_NUM_PDADC_VALUES-diff); 410 for (j = (u16)(AR5416_NUM_PDADC_VALUES-diff);
571 j < AR9287_NUM_PDADC_VALUES; j++) 411 j < AR5416_NUM_PDADC_VALUES; j++)
572 pdadcValues[j] = 412 pdadcValues[j] =
573 pdadcValues[AR9287_NUM_PDADC_VALUES-diff]; 413 pdadcValues[AR5416_NUM_PDADC_VALUES-diff];
574 } 414 }
575 415
576 if (!ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) { 416 if (!ath9k_hw_ar9287_get_eeprom(ah, EEP_OL_PWRCTRL)) {
@@ -613,9 +453,9 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
613#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10 453#define REDUCE_SCALED_POWER_BY_THREE_CHAIN 10
614 454
615 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 455 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
616 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 456 u16 twiceMaxEdgePower = MAX_RATE_POWER;
617 static const u16 tpScaleReductionTable[5] = 457 static const u16 tpScaleReductionTable[5] =
618 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; 458 { 0, 3, 6, 9, MAX_RATE_POWER };
619 int i; 459 int i;
620 int16_t twiceLargestAntenna; 460 int16_t twiceLargestAntenna;
621 struct cal_ctl_data_ar9287 *rep; 461 struct cal_ctl_data_ar9287 *rep;
@@ -626,13 +466,13 @@ static void ath9k_hw_set_ar9287_power_per_rate_table(struct ath_hw *ah,
626 struct cal_target_power_ht targetPowerHt20, 466 struct cal_target_power_ht targetPowerHt20,
627 targetPowerHt40 = {0, {0, 0, 0, 0} }; 467 targetPowerHt40 = {0, {0, 0, 0, 0} };
628 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 468 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
629 u16 ctlModesFor11g[] = {CTL_11B, 469 static const u16 ctlModesFor11g[] = {
630 CTL_11G, 470 CTL_11B, CTL_11G, CTL_2GHT20,
631 CTL_2GHT20, 471 CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
632 CTL_11B_EXT, 472 };
633 CTL_11G_EXT, 473 u16 numCtlModes = 0;
634 CTL_2GHT40}; 474 const u16 *pCtlMode = NULL;
635 u16 numCtlModes = 0, *pCtlMode = NULL, ctlMode, freq; 475 u16 ctlMode, freq;
636 struct chan_centers centers; 476 struct chan_centers centers;
637 int tx_chainmask; 477 int tx_chainmask;
638 u16 twiceMinEdgePower; 478 u16 twiceMinEdgePower;
@@ -853,7 +693,7 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
853 struct ath9k_channel *chan, u16 cfgCtl, 693 struct ath9k_channel *chan, u16 cfgCtl,
854 u8 twiceAntennaReduction, 694 u8 twiceAntennaReduction,
855 u8 twiceMaxRegulatoryPower, 695 u8 twiceMaxRegulatoryPower,
856 u8 powerLimit) 696 u8 powerLimit, bool test)
857{ 697{
858 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 698 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
859 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; 699 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
@@ -877,12 +717,26 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
877 717
878 ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset); 718 ath9k_hw_set_ar9287_power_cal_table(ah, chan, &txPowerIndexOffset);
879 719
720 regulatory->max_power_level = 0;
880 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 721 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
881 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); 722 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
882 if (ratesArray[i] > AR9287_MAX_RATE_POWER) 723 if (ratesArray[i] > MAX_RATE_POWER)
883 ratesArray[i] = AR9287_MAX_RATE_POWER; 724 ratesArray[i] = MAX_RATE_POWER;
725
726 if (ratesArray[i] > regulatory->max_power_level)
727 regulatory->max_power_level = ratesArray[i];
884 } 728 }
885 729
730 if (test)
731 return;
732
733 if (IS_CHAN_2GHZ(chan))
734 i = rate1l;
735 else
736 i = rate6mb;
737
738 regulatory->max_power_level = ratesArray[i];
739
886 if (AR_SREV_9280_20_OR_LATER(ah)) { 740 if (AR_SREV_9280_20_OR_LATER(ah)) {
887 for (i = 0; i < Ar5416RateSize; i++) 741 for (i = 0; i < Ar5416RateSize; i++)
888 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2; 742 ratesArray[i] -= AR9287_PWR_TABLE_OFFSET_DB * 2;
@@ -971,17 +825,6 @@ static void ath9k_hw_ar9287_set_txpower(struct ath_hw *ah,
971 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) 825 | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8)
972 | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); 826 | ATH9K_POW_SM(ratesArray[rateDupCck], 0));
973 } 827 }
974
975 if (IS_CHAN_2GHZ(chan))
976 i = rate1l;
977 else
978 i = rate6mb;
979
980 if (AR_SREV_9280_20_OR_LATER(ah))
981 regulatory->max_power_level =
982 ratesArray[i] + AR9287_PWR_TABLE_OFFSET_DB * 2;
983 else
984 regulatory->max_power_level = ratesArray[i];
985} 828}
986 829
987static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah, 830static void ath9k_hw_ar9287_set_addac(struct ath_hw *ah,
@@ -1023,8 +866,7 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
1023 antWrites[j++] = (u16)(pModal->antCtrlChain[i] & 0x3); 866 antWrites[j++] = (u16)(pModal->antCtrlChain[i] & 0x3);
1024 } 867 }
1025 868
1026 REG_WRITE(ah, AR_PHY_SWITCH_COM, 869 REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);
1027 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
1028 870
1029 for (i = 0; i < AR9287_MAX_CHAINS; i++) { 871 for (i = 0; i < AR9287_MAX_CHAINS; i++) {
1030 regChainOffset = i * 0x1000; 872 regChainOffset = i * 0x1000;
@@ -1125,21 +967,6 @@ static void ath9k_hw_ar9287_set_board_values(struct ath_hw *ah,
1125 pModal->xpaBiasLvl); 967 pModal->xpaBiasLvl);
1126} 968}
1127 969
1128static u8 ath9k_hw_ar9287_get_num_ant_config(struct ath_hw *ah,
1129 enum ath9k_hal_freq_band freq_band)
1130{
1131 return 1;
1132}
1133
1134static u32 ath9k_hw_ar9287_get_eeprom_antenna_cfg(struct ath_hw *ah,
1135 struct ath9k_channel *chan)
1136{
1137 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
1138 struct modal_eep_ar9287_header *pModal = &eep->modalHeader;
1139
1140 return pModal->antCtrlCommon;
1141}
1142
1143static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah, 970static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
1144 u16 i, bool is2GHz) 971 u16 i, bool is2GHz)
1145{ 972{
@@ -1149,17 +976,17 @@ static u16 ath9k_hw_ar9287_get_spur_channel(struct ath_hw *ah,
1149 struct ath_common *common = ath9k_hw_common(ah); 976 struct ath_common *common = ath9k_hw_common(ah);
1150 u16 spur_val = AR_NO_SPUR; 977 u16 spur_val = AR_NO_SPUR;
1151 978
1152 ath_print(common, ATH_DBG_ANI, 979 ath_dbg(common, ATH_DBG_ANI,
1153 "Getting spur idx %d is2Ghz. %d val %x\n", 980 "Getting spur idx:%d is2Ghz:%d val:%x\n",
1154 i, is2GHz, ah->config.spurchans[i][is2GHz]); 981 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1155 982
1156 switch (ah->config.spurmode) { 983 switch (ah->config.spurmode) {
1157 case SPUR_DISABLE: 984 case SPUR_DISABLE:
1158 break; 985 break;
1159 case SPUR_ENABLE_IOCTL: 986 case SPUR_ENABLE_IOCTL:
1160 spur_val = ah->config.spurchans[i][is2GHz]; 987 spur_val = ah->config.spurchans[i][is2GHz];
1161 ath_print(common, ATH_DBG_ANI, 988 ath_dbg(common, ATH_DBG_ANI,
1162 "Getting spur val from new loc. %d\n", spur_val); 989 "Getting spur val from new loc. %d\n", spur_val);
1163 break; 990 break;
1164 case SPUR_ENABLE_EEPROM: 991 case SPUR_ENABLE_EEPROM:
1165 spur_val = EEP_MAP9287_SPURCHAN; 992 spur_val = EEP_MAP9287_SPURCHAN;
@@ -1177,8 +1004,6 @@ const struct eeprom_ops eep_ar9287_ops = {
1177 .fill_eeprom = ath9k_hw_ar9287_fill_eeprom, 1004 .fill_eeprom = ath9k_hw_ar9287_fill_eeprom,
1178 .get_eeprom_ver = ath9k_hw_ar9287_get_eeprom_ver, 1005 .get_eeprom_ver = ath9k_hw_ar9287_get_eeprom_ver,
1179 .get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev, 1006 .get_eeprom_rev = ath9k_hw_ar9287_get_eeprom_rev,
1180 .get_num_ant_config = ath9k_hw_ar9287_get_num_ant_config,
1181 .get_eeprom_antenna_cfg = ath9k_hw_ar9287_get_eeprom_antenna_cfg,
1182 .set_board_values = ath9k_hw_ar9287_set_board_values, 1007 .set_board_values = ath9k_hw_ar9287_set_board_values,
1183 .set_addac = ath9k_hw_ar9287_set_addac, 1008 .set_addac = ath9k_hw_ar9287_set_addac,
1184 .set_txpower = ath9k_hw_ar9287_set_txpower, 1009 .set_txpower = ath9k_hw_ar9287_set_txpower,
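The set_txpower hunks above rework how the regulatory maximum is reported: max_power_level now starts at zero, is tracked as the highest clamped entry in ratesArray, and the new 'test' argument lets callers query that limit without programming the hardware. A minimal standalone sketch of that pattern follows; the clamp value and the helper name are assumptions, not the driver's code.

#include <stdbool.h>
#include <stdint.h>

#define EX_MAX_RATE_POWER 63	/* stand-in for the driver's MAX_RATE_POWER */

/* Mirror of the clamp-and-track loop plus the early return on 'test'. */
static void example_set_txpower(int16_t *rates, int nrates,
				int16_t power_index_offset, bool test,
				int16_t *max_power_level)
{
	int i;

	*max_power_level = 0;
	for (i = 0; i < nrates; i++) {
		rates[i] = (int16_t)(power_index_offset + rates[i]);
		if (rates[i] > EX_MAX_RATE_POWER)
			rates[i] = EX_MAX_RATE_POWER;
		if (rates[i] > *max_power_level)
			*max_power_level = rates[i];
	}

	if (test)	/* caller only wanted the regulatory limit */
		return;

	/* ...the real function goes on to pick a representative rate and
	 * program the per-rate power registers... */
}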
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index a3ccb1b9638d..088f141f2006 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -96,8 +96,8 @@ static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
96 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { 96 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
97 if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc, 97 if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc,
98 eep_data)) { 98 eep_data)) {
99 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 99 ath_err(ath9k_hw_common(ah),
100 "Unable to read eeprom region\n"); 100 "Unable to read eeprom region\n");
101 return false; 101 return false;
102 } 102 }
103 eep_data++; 103 eep_data++;
@@ -117,13 +117,13 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
117 int i, addr, size; 117 int i, addr, size;
118 118
119 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { 119 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
120 ath_print(common, ATH_DBG_FATAL, "Reading Magic # failed\n"); 120 ath_err(common, "Reading Magic # failed\n");
121 return false; 121 return false;
122 } 122 }
123 123
124 if (!ath9k_hw_use_flash(ah)) { 124 if (!ath9k_hw_use_flash(ah)) {
125 ath_print(common, ATH_DBG_EEPROM, 125 ath_dbg(common, ATH_DBG_EEPROM,
126 "Read Magic = 0x%04X\n", magic); 126 "Read Magic = 0x%04X\n", magic);
127 127
128 if (magic != AR5416_EEPROM_MAGIC) { 128 if (magic != AR5416_EEPROM_MAGIC) {
129 magic2 = swab16(magic); 129 magic2 = swab16(magic);
@@ -139,16 +139,15 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
139 eepdata++; 139 eepdata++;
140 } 140 }
141 } else { 141 } else {
142 ath_print(common, ATH_DBG_FATAL, 142 ath_err(common,
143 "Invalid EEPROM Magic. " 143 "Invalid EEPROM Magic. Endianness mismatch.\n");
144 "Endianness mismatch.\n");
145 return -EINVAL; 144 return -EINVAL;
146 } 145 }
147 } 146 }
148 } 147 }
149 148
150 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", 149 ath_dbg(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
151 need_swap ? "True" : "False"); 150 need_swap ? "True" : "False");
152 151
153 if (need_swap) 152 if (need_swap)
154 el = swab16(ah->eeprom.def.baseEepHeader.length); 153 el = swab16(ah->eeprom.def.baseEepHeader.length);
@@ -169,8 +168,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
169 u32 integer, j; 168 u32 integer, j;
170 u16 word; 169 u16 word;
171 170
172 ath_print(common, ATH_DBG_EEPROM, 171 ath_dbg(common, ATH_DBG_EEPROM,
173 "EEPROM Endianness is not native.. Changing.\n"); 172 "EEPROM Endianness is not native.. Changing.\n");
174 173
175 word = swab16(eep->baseEepHeader.length); 174 word = swab16(eep->baseEepHeader.length);
176 eep->baseEepHeader.length = word; 175 eep->baseEepHeader.length = word;
@@ -207,7 +206,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
207 pModal->antCtrlChain[i] = integer; 206 pModal->antCtrlChain[i] = integer;
208 } 207 }
209 208
210 for (i = 0; i < AR5416_EEPROM_MODAL_SPURS; i++) { 209 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
211 word = swab16(pModal->spurChans[i].spurChan); 210 word = swab16(pModal->spurChans[i].spurChan);
212 pModal->spurChans[i].spurChan = word; 211 pModal->spurChans[i].spurChan = word;
213 } 212 }
@@ -216,8 +215,7 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
216 215
217 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 216 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
218 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 217 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
219 ath_print(common, ATH_DBG_FATAL, 218 ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
220 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
221 sum, ah->eep_ops->get_eeprom_ver(ah)); 219 sum, ah->eep_ops->get_eeprom_ver(ah));
222 return -EINVAL; 220 return -EINVAL;
223 } 221 }
@@ -376,8 +374,7 @@ static void ath9k_hw_def_set_board_values(struct ath_hw *ah,
376 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]); 374 pModal = &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
377 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44; 375 txRxAttenLocal = IS_CHAN_2GHZ(chan) ? 23 : 44;
378 376
379 REG_WRITE(ah, AR_PHY_SWITCH_COM, 377 REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon & 0xffff);
380 ah->eep_ops->get_eeprom_antenna_cfg(ah, chan));
381 378
382 for (i = 0; i < AR5416_MAX_CHAINS; i++) { 379 for (i = 0; i < AR5416_MAX_CHAINS; i++) {
383 if (AR_SREV_9280(ah)) { 380 if (AR_SREV_9280(ah)) {
@@ -590,168 +587,6 @@ static void ath9k_hw_def_set_addac(struct ath_hw *ah,
590#undef XPA_LVL_FREQ 587#undef XPA_LVL_FREQ
591} 588}
592 589
593static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
594 struct ath9k_channel *chan,
595 struct cal_data_per_freq *pRawDataSet,
596 u8 *bChans, u16 availPiers,
597 u16 tPdGainOverlap,
598 u16 *pPdGainBoundaries, u8 *pPDADCValues,
599 u16 numXpdGains)
600{
601 int i, j, k;
602 int16_t ss;
603 u16 idxL = 0, idxR = 0, numPiers;
604 static u8 vpdTableL[AR5416_NUM_PD_GAINS]
605 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
606 static u8 vpdTableR[AR5416_NUM_PD_GAINS]
607 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
608 static u8 vpdTableI[AR5416_NUM_PD_GAINS]
609 [AR5416_MAX_PWR_RANGE_IN_HALF_DB];
610
611 u8 *pVpdL, *pVpdR, *pPwrL, *pPwrR;
612 u8 minPwrT4[AR5416_NUM_PD_GAINS];
613 u8 maxPwrT4[AR5416_NUM_PD_GAINS];
614 int16_t vpdStep;
615 int16_t tmpVal;
616 u16 sizeCurrVpdTable, maxIndex, tgtIndex;
617 bool match;
618 int16_t minDelta = 0;
619 struct chan_centers centers;
620
621 memset(&minPwrT4, 0, AR9287_NUM_PD_GAINS);
622 ath9k_hw_get_channel_centers(ah, chan, &centers);
623
624 for (numPiers = 0; numPiers < availPiers; numPiers++) {
625 if (bChans[numPiers] == AR5416_BCHAN_UNUSED)
626 break;
627 }
628
629 match = ath9k_hw_get_lower_upper_index((u8)FREQ2FBIN(centers.synth_center,
630 IS_CHAN_2GHZ(chan)),
631 bChans, numPiers, &idxL, &idxR);
632
633 if (match) {
634 for (i = 0; i < numXpdGains; i++) {
635 minPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][0];
636 maxPwrT4[i] = pRawDataSet[idxL].pwrPdg[i][4];
637 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
638 pRawDataSet[idxL].pwrPdg[i],
639 pRawDataSet[idxL].vpdPdg[i],
640 AR5416_PD_GAIN_ICEPTS,
641 vpdTableI[i]);
642 }
643 } else {
644 for (i = 0; i < numXpdGains; i++) {
645 pVpdL = pRawDataSet[idxL].vpdPdg[i];
646 pPwrL = pRawDataSet[idxL].pwrPdg[i];
647 pVpdR = pRawDataSet[idxR].vpdPdg[i];
648 pPwrR = pRawDataSet[idxR].pwrPdg[i];
649
650 minPwrT4[i] = max(pPwrL[0], pPwrR[0]);
651
652 maxPwrT4[i] =
653 min(pPwrL[AR5416_PD_GAIN_ICEPTS - 1],
654 pPwrR[AR5416_PD_GAIN_ICEPTS - 1]);
655
656
657 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
658 pPwrL, pVpdL,
659 AR5416_PD_GAIN_ICEPTS,
660 vpdTableL[i]);
661 ath9k_hw_fill_vpd_table(minPwrT4[i], maxPwrT4[i],
662 pPwrR, pVpdR,
663 AR5416_PD_GAIN_ICEPTS,
664 vpdTableR[i]);
665
666 for (j = 0; j <= (maxPwrT4[i] - minPwrT4[i]) / 2; j++) {
667 vpdTableI[i][j] =
668 (u8)(ath9k_hw_interpolate((u16)
669 FREQ2FBIN(centers.
670 synth_center,
671 IS_CHAN_2GHZ
672 (chan)),
673 bChans[idxL], bChans[idxR],
674 vpdTableL[i][j], vpdTableR[i][j]));
675 }
676 }
677 }
678
679 k = 0;
680
681 for (i = 0; i < numXpdGains; i++) {
682 if (i == (numXpdGains - 1))
683 pPdGainBoundaries[i] =
684 (u16)(maxPwrT4[i] / 2);
685 else
686 pPdGainBoundaries[i] =
687 (u16)((maxPwrT4[i] + minPwrT4[i + 1]) / 4);
688
689 pPdGainBoundaries[i] =
690 min((u16)AR5416_MAX_RATE_POWER, pPdGainBoundaries[i]);
691
692 if ((i == 0) && !AR_SREV_5416_20_OR_LATER(ah)) {
693 minDelta = pPdGainBoundaries[0] - 23;
694 pPdGainBoundaries[0] = 23;
695 } else {
696 minDelta = 0;
697 }
698
699 if (i == 0) {
700 if (AR_SREV_9280_20_OR_LATER(ah))
701 ss = (int16_t)(0 - (minPwrT4[i] / 2));
702 else
703 ss = 0;
704 } else {
705 ss = (int16_t)((pPdGainBoundaries[i - 1] -
706 (minPwrT4[i] / 2)) -
707 tPdGainOverlap + 1 + minDelta);
708 }
709 vpdStep = (int16_t)(vpdTableI[i][1] - vpdTableI[i][0]);
710 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
711
712 while ((ss < 0) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
713 tmpVal = (int16_t)(vpdTableI[i][0] + ss * vpdStep);
714 pPDADCValues[k++] = (u8)((tmpVal < 0) ? 0 : tmpVal);
715 ss++;
716 }
717
718 sizeCurrVpdTable = (u8) ((maxPwrT4[i] - minPwrT4[i]) / 2 + 1);
719 tgtIndex = (u8)(pPdGainBoundaries[i] + tPdGainOverlap -
720 (minPwrT4[i] / 2));
721 maxIndex = (tgtIndex < sizeCurrVpdTable) ?
722 tgtIndex : sizeCurrVpdTable;
723
724 while ((ss < maxIndex) && (k < (AR5416_NUM_PDADC_VALUES - 1))) {
725 pPDADCValues[k++] = vpdTableI[i][ss++];
726 }
727
728 vpdStep = (int16_t)(vpdTableI[i][sizeCurrVpdTable - 1] -
729 vpdTableI[i][sizeCurrVpdTable - 2]);
730 vpdStep = (int16_t)((vpdStep < 1) ? 1 : vpdStep);
731
732 if (tgtIndex >= maxIndex) {
733 while ((ss <= tgtIndex) &&
734 (k < (AR5416_NUM_PDADC_VALUES - 1))) {
735 tmpVal = (int16_t)((vpdTableI[i][sizeCurrVpdTable - 1] +
736 (ss - maxIndex + 1) * vpdStep));
737 pPDADCValues[k++] = (u8)((tmpVal > 255) ?
738 255 : tmpVal);
739 ss++;
740 }
741 }
742 }
743
744 while (i < AR5416_PD_GAINS_IN_MASK) {
745 pPdGainBoundaries[i] = pPdGainBoundaries[i - 1];
746 i++;
747 }
748
749 while (k < AR5416_NUM_PDADC_VALUES) {
750 pPDADCValues[k] = pPDADCValues[k - 1];
751 k++;
752 }
753}
754
755static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah, 590static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
756 u16 *gb, 591 u16 *gb,
757 u16 numXpdGain, 592 u16 numXpdGain,
@@ -784,7 +619,7 @@ static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
784 /* Because of a hardware limitation, ensure the gain boundary 619 /* Because of a hardware limitation, ensure the gain boundary
785 * is not larger than (63 - overlap) 620 * is not larger than (63 - overlap)
786 */ 621 */
787 gb_limit = (u16)(AR5416_MAX_RATE_POWER - pdGainOverlap_t2); 622 gb_limit = (u16)(MAX_RATE_POWER - pdGainOverlap_t2);
788 623
789 for (k = 0; k < numXpdGain; k++) 624 for (k = 0; k < numXpdGain; k++)
790 gb[k] = (u16)min(gb_limit, gb[k]); 625 gb[k] = (u16)min(gb_limit, gb[k]);
@@ -918,7 +753,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
918 ath9k_olc_get_pdadcs(ah, pcdacIdx, 753 ath9k_olc_get_pdadcs(ah, pcdacIdx,
919 txPower/2, pdadcValues); 754 txPower/2, pdadcValues);
920 } else { 755 } else {
921 ath9k_hw_get_def_gain_boundaries_pdadcs(ah, 756 ath9k_hw_get_gain_boundaries_pdadcs(ah,
922 chan, pRawDataset, 757 chan, pRawDataset,
923 pCalBChans, numPiers, 758 pCalBChans, numPiers,
924 pdGainOverlap_t2, 759 pdGainOverlap_t2,
@@ -966,20 +801,19 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
966 ((pdadcValues[4 * j + 3] & 0xFF) << 24); 801 ((pdadcValues[4 * j + 3] & 0xFF) << 24);
967 REG_WRITE(ah, regOffset, reg32); 802 REG_WRITE(ah, regOffset, reg32);
968 803
969 ath_print(common, ATH_DBG_EEPROM, 804 ath_dbg(common, ATH_DBG_EEPROM,
970 "PDADC (%d,%4x): %4.4x %8.8x\n", 805 "PDADC (%d,%4x): %4.4x %8.8x\n",
971 i, regChainOffset, regOffset, 806 i, regChainOffset, regOffset,
972 reg32); 807 reg32);
973 ath_print(common, ATH_DBG_EEPROM, 808 ath_dbg(common, ATH_DBG_EEPROM,
974 "PDADC: Chain %d | PDADC %3d " 809 "PDADC: Chain %d | PDADC %3d "
975 "Value %3d | PDADC %3d Value %3d | " 810 "Value %3d | PDADC %3d Value %3d | "
976 "PDADC %3d Value %3d | PDADC %3d " 811 "PDADC %3d Value %3d | PDADC %3d "
977 "Value %3d |\n", 812 "Value %3d |\n",
978 i, 4 * j, pdadcValues[4 * j], 813 i, 4 * j, pdadcValues[4 * j],
979 4 * j + 1, pdadcValues[4 * j + 1], 814 4 * j + 1, pdadcValues[4 * j + 1],
980 4 * j + 2, pdadcValues[4 * j + 2], 815 4 * j + 2, pdadcValues[4 * j + 2],
981 4 * j + 3, 816 4 * j + 3, pdadcValues[4 * j + 3]);
982 pdadcValues[4 * j + 3]);
983 817
984 regOffset += 4; 818 regOffset += 4;
985 } 819 }
@@ -1004,9 +838,9 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1004 838
1005 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 839 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1006 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; 840 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
1007 u16 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 841 u16 twiceMaxEdgePower = MAX_RATE_POWER;
1008 static const u16 tpScaleReductionTable[5] = 842 static const u16 tpScaleReductionTable[5] =
1009 { 0, 3, 6, 9, AR5416_MAX_RATE_POWER }; 843 { 0, 3, 6, 9, MAX_RATE_POWER };
1010 844
1011 int i; 845 int i;
1012 int16_t twiceLargestAntenna; 846 int16_t twiceLargestAntenna;
@@ -1022,13 +856,16 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1022 0, {0, 0, 0, 0} 856 0, {0, 0, 0, 0}
1023 }; 857 };
1024 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower; 858 u16 scaledPower = 0, minCtlPower, maxRegAllowedPower;
1025 u16 ctlModesFor11a[] = 859 static const u16 ctlModesFor11a[] = {
1026 { CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40 }; 860 CTL_11A, CTL_5GHT20, CTL_11A_EXT, CTL_5GHT40
1027 u16 ctlModesFor11g[] = 861 };
1028 { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, 862 static const u16 ctlModesFor11g[] = {
1029 CTL_2GHT40 863 CTL_11B, CTL_11G, CTL_2GHT20,
1030 }; 864 CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40
1031 u16 numCtlModes, *pCtlMode, ctlMode, freq; 865 };
866 u16 numCtlModes;
867 const u16 *pCtlMode;
868 u16 ctlMode, freq;
1032 struct chan_centers centers; 869 struct chan_centers centers;
1033 int tx_chainmask; 870 int tx_chainmask;
1034 u16 twiceMinEdgePower; 871 u16 twiceMinEdgePower;
@@ -1148,7 +985,7 @@ static void ath9k_hw_set_def_power_per_rate_table(struct ath_hw *ah,
1148 985
1149 if (ah->eep_ops->get_eeprom_ver(ah) == 14 && 986 if (ah->eep_ops->get_eeprom_ver(ah) == 14 &&
1150 ah->eep_ops->get_eeprom_rev(ah) <= 2) 987 ah->eep_ops->get_eeprom_rev(ah) <= 2)
1151 twiceMaxEdgePower = AR5416_MAX_RATE_POWER; 988 twiceMaxEdgePower = MAX_RATE_POWER;
1152 989
1153 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { 990 for (i = 0; (i < AR5416_NUM_CTLS) && pEepData->ctlIndex[i]; i++) {
1154 if ((((cfgCtl & ~CTL_MODE_M) | 991 if ((((cfgCtl & ~CTL_MODE_M) |
@@ -1263,7 +1100,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1263 u16 cfgCtl, 1100 u16 cfgCtl,
1264 u8 twiceAntennaReduction, 1101 u8 twiceAntennaReduction,
1265 u8 twiceMaxRegulatoryPower, 1102 u8 twiceMaxRegulatoryPower,
1266 u8 powerLimit) 1103 u8 powerLimit, bool test)
1267{ 1104{
1268#define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta) 1105#define RT_AR_DELTA(x) (ratesArray[x] - cck_ofdm_delta)
1269 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1106 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
@@ -1290,12 +1127,44 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1290 1127
1291 ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset); 1128 ath9k_hw_set_def_power_cal_table(ah, chan, &txPowerIndexOffset);
1292 1129
1130 regulatory->max_power_level = 0;
1293 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { 1131 for (i = 0; i < ARRAY_SIZE(ratesArray); i++) {
1294 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]); 1132 ratesArray[i] = (int16_t)(txPowerIndexOffset + ratesArray[i]);
1295 if (ratesArray[i] > AR5416_MAX_RATE_POWER) 1133 if (ratesArray[i] > MAX_RATE_POWER)
1296 ratesArray[i] = AR5416_MAX_RATE_POWER; 1134 ratesArray[i] = MAX_RATE_POWER;
1135 if (ratesArray[i] > regulatory->max_power_level)
1136 regulatory->max_power_level = ratesArray[i];
1137 }
1138
1139 if (!test) {
1140 i = rate6mb;
1141
1142 if (IS_CHAN_HT40(chan))
1143 i = rateHt40_0;
1144 else if (IS_CHAN_HT20(chan))
1145 i = rateHt20_0;
1146
1147 regulatory->max_power_level = ratesArray[i];
1297 } 1148 }
1298 1149
1150 switch(ar5416_get_ntxchains(ah->txchainmask)) {
1151 case 1:
1152 break;
1153 case 2:
1154 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
1155 break;
1156 case 3:
1157 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
1158 break;
1159 default:
1160 ath_dbg(ath9k_hw_common(ah), ATH_DBG_EEPROM,
1161 "Invalid chainmask configuration\n");
1162 break;
1163 }
1164
1165 if (test)
1166 return;
1167
1299 if (AR_SREV_9280_20_OR_LATER(ah)) { 1168 if (AR_SREV_9280_20_OR_LATER(ah)) {
1300 for (i = 0; i < Ar5416RateSize; i++) { 1169 for (i = 0; i < Ar5416RateSize; i++) {
1301 int8_t pwr_table_offset; 1170 int8_t pwr_table_offset;
@@ -1392,62 +1261,6 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1392 REG_WRITE(ah, AR_PHY_POWER_TX_SUB, 1261 REG_WRITE(ah, AR_PHY_POWER_TX_SUB,
1393 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6) 1262 ATH9K_POW_SM(pModal->pwrDecreaseFor3Chain, 6)
1394 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0)); 1263 | ATH9K_POW_SM(pModal->pwrDecreaseFor2Chain, 0));
1395
1396 i = rate6mb;
1397
1398 if (IS_CHAN_HT40(chan))
1399 i = rateHt40_0;
1400 else if (IS_CHAN_HT20(chan))
1401 i = rateHt20_0;
1402
1403 if (AR_SREV_9280_20_OR_LATER(ah))
1404 regulatory->max_power_level =
1405 ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
1406 else
1407 regulatory->max_power_level = ratesArray[i];
1408
1409 switch(ar5416_get_ntxchains(ah->txchainmask)) {
1410 case 1:
1411 break;
1412 case 2:
1413 regulatory->max_power_level += INCREASE_MAXPOW_BY_TWO_CHAIN;
1414 break;
1415 case 3:
1416 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
1417 break;
1418 default:
1419 ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
1420 "Invalid chainmask configuration\n");
1421 break;
1422 }
1423}
1424
1425static u8 ath9k_hw_def_get_num_ant_config(struct ath_hw *ah,
1426 enum ath9k_hal_freq_band freq_band)
1427{
1428 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1429 struct modal_eep_header *pModal =
1430 &(eep->modalHeader[freq_band]);
1431 struct base_eep_header *pBase = &eep->baseEepHeader;
1432 u8 num_ant_config;
1433
1434 num_ant_config = 1;
1435
1436 if (pBase->version >= 0x0E0D &&
1437 (pModal->lna_ctl & LNA_CTL_USE_ANT1))
1438 num_ant_config += 1;
1439
1440 return num_ant_config;
1441}
1442
1443static u32 ath9k_hw_def_get_eeprom_antenna_cfg(struct ath_hw *ah,
1444 struct ath9k_channel *chan)
1445{
1446 struct ar5416_eeprom_def *eep = &ah->eeprom.def;
1447 struct modal_eep_header *pModal =
1448 &(eep->modalHeader[IS_CHAN_2GHZ(chan)]);
1449
1450 return pModal->antCtrlCommon;
1451} 1264}
1452 1265
1453static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz) 1266static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
@@ -1458,17 +1271,17 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
1458 1271
1459 u16 spur_val = AR_NO_SPUR; 1272 u16 spur_val = AR_NO_SPUR;
1460 1273
1461 ath_print(common, ATH_DBG_ANI, 1274 ath_dbg(common, ATH_DBG_ANI,
1462 "Getting spur idx %d is2Ghz. %d val %x\n", 1275 "Getting spur idx:%d is2Ghz:%d val:%x\n",
1463 i, is2GHz, ah->config.spurchans[i][is2GHz]); 1276 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1464 1277
1465 switch (ah->config.spurmode) { 1278 switch (ah->config.spurmode) {
1466 case SPUR_DISABLE: 1279 case SPUR_DISABLE:
1467 break; 1280 break;
1468 case SPUR_ENABLE_IOCTL: 1281 case SPUR_ENABLE_IOCTL:
1469 spur_val = ah->config.spurchans[i][is2GHz]; 1282 spur_val = ah->config.spurchans[i][is2GHz];
1470 ath_print(common, ATH_DBG_ANI, 1283 ath_dbg(common, ATH_DBG_ANI,
1471 "Getting spur val from new loc. %d\n", spur_val); 1284 "Getting spur val from new loc. %d\n", spur_val);
1472 break; 1285 break;
1473 case SPUR_ENABLE_EEPROM: 1286 case SPUR_ENABLE_EEPROM:
1474 spur_val = EEP_DEF_SPURCHAN; 1287 spur_val = EEP_DEF_SPURCHAN;
@@ -1486,8 +1299,6 @@ const struct eeprom_ops eep_def_ops = {
1486 .fill_eeprom = ath9k_hw_def_fill_eeprom, 1299 .fill_eeprom = ath9k_hw_def_fill_eeprom,
1487 .get_eeprom_ver = ath9k_hw_def_get_eeprom_ver, 1300 .get_eeprom_ver = ath9k_hw_def_get_eeprom_ver,
1488 .get_eeprom_rev = ath9k_hw_def_get_eeprom_rev, 1301 .get_eeprom_rev = ath9k_hw_def_get_eeprom_rev,
1489 .get_num_ant_config = ath9k_hw_def_get_num_ant_config,
1490 .get_eeprom_antenna_cfg = ath9k_hw_def_get_eeprom_antenna_cfg,
1491 .set_board_values = ath9k_hw_def_set_board_values, 1302 .set_board_values = ath9k_hw_def_set_board_values,
1492 .set_addac = ath9k_hw_def_set_addac, 1303 .set_addac = ath9k_hw_def_set_addac,
1493 .set_txpower = ath9k_hw_def_set_txpower, 1304 .set_txpower = ath9k_hw_def_set_txpower,
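Related to the hunks above: the per-chain power bump that used to sit at the end of ath9k_hw_def_set_txpower is now applied to the tracked maximum before the test-mode early return. The sketch below shows the chain-count logic that drives it; the numeric bumps are placeholders for INCREASE_MAXPOW_BY_TWO_CHAIN/INCREASE_MAXPOW_BY_THREE_CHAIN.

#include <stdint.h>

/* Count active chains in a 3-bit tx chainmask (same shape as
 * ar5416_get_ntxchains in the driver). */
static int example_ntxchains(uint8_t chainmask)
{
	return ((chainmask >> 2) & 1) + ((chainmask >> 1) & 1) + (chainmask & 1);
}

static void example_adjust_max_power(uint8_t chainmask, int16_t *max_power_level)
{
	switch (example_ntxchains(chainmask)) {
	case 1:
		break;
	case 2:
		*max_power_level += 6;	/* placeholder for the two-chain bump */
		break;
	case 3:
		*max_power_level += 11;	/* placeholder for the three-chain bump */
		break;
	default:
		break;			/* invalid chainmask: leave the limit alone */
	}
}

For example, a chainmask of 0x5 (chains 0 and 2 active) counts as two chains and receives the two-chain bump.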
diff --git a/drivers/net/wireless/ath/ath9k/gpio.c b/drivers/net/wireless/ath/ath9k/gpio.c
index 4a9a68bba324..133764069246 100644
--- a/drivers/net/wireless/ath/ath9k/gpio.c
+++ b/drivers/net/wireless/ath/ath9k/gpio.c
@@ -103,8 +103,8 @@ static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
103 103
104 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev); 104 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
105 if (ret) 105 if (ret)
106 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 106 ath_err(ath9k_hw_common(sc->sc_ah),
107 "Failed to register led:%s", led->name); 107 "Failed to register led:%s", led->name);
108 else 108 else
109 led->registered = 1; 109 led->registered = 1;
110 return ret; 110 return ret;
@@ -236,13 +236,13 @@ static void ath_detect_bt_priority(struct ath_softc *sc)
236 sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN); 236 sc->sc_flags &= ~(SC_OP_BT_PRIORITY_DETECTED | SC_OP_BT_SCAN);
237 /* Detect if colocated bt started scanning */ 237 /* Detect if colocated bt started scanning */
238 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) { 238 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
239 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX, 239 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
240 "BT scan detected"); 240 "BT scan detected\n");
241 sc->sc_flags |= (SC_OP_BT_SCAN | 241 sc->sc_flags |= (SC_OP_BT_SCAN |
242 SC_OP_BT_PRIORITY_DETECTED); 242 SC_OP_BT_PRIORITY_DETECTED);
243 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) { 243 } else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
244 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX, 244 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
245 "BT priority traffic detected"); 245 "BT priority traffic detected\n");
246 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED; 246 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
247 } 247 }
248 248
@@ -259,7 +259,7 @@ static void ath9k_gen_timer_start(struct ath_hw *ah,
259 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period); 259 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
260 260
261 if ((ah->imask & ATH9K_INT_GENTIMER) == 0) { 261 if ((ah->imask & ATH9K_INT_GENTIMER) == 0) {
262 ath9k_hw_set_interrupts(ah, 0); 262 ath9k_hw_disable_interrupts(ah);
263 ah->imask |= ATH9K_INT_GENTIMER; 263 ah->imask |= ATH9K_INT_GENTIMER;
264 ath9k_hw_set_interrupts(ah, ah->imask); 264 ath9k_hw_set_interrupts(ah, ah->imask);
265 } 265 }
@@ -273,7 +273,7 @@ static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
273 273
274 /* if no timer is enabled, turn off interrupt mask */ 274 /* if no timer is enabled, turn off interrupt mask */
275 if (timer_table->timer_mask.val == 0) { 275 if (timer_table->timer_mask.val == 0) {
276 ath9k_hw_set_interrupts(ah, 0); 276 ath9k_hw_disable_interrupts(ah);
277 ah->imask &= ~ATH9K_INT_GENTIMER; 277 ah->imask &= ~ATH9K_INT_GENTIMER;
278 ath9k_hw_set_interrupts(ah, ah->imask); 278 ath9k_hw_set_interrupts(ah, ah->imask);
279 } 279 }
@@ -310,10 +310,8 @@ static void ath_btcoex_period_timer(unsigned long data)
310 310
311 timer_period = is_btscan ? btcoex->btscan_no_stomp : 311 timer_period = is_btscan ? btcoex->btscan_no_stomp :
312 btcoex->btcoex_no_stomp; 312 btcoex->btcoex_no_stomp;
313 ath9k_gen_timer_start(ah, 313 ath9k_gen_timer_start(ah, btcoex->no_stomp_timer, 0,
314 btcoex->no_stomp_timer, 314 timer_period * 10);
315 (ath9k_hw_gettsf32(ah) +
316 timer_period), timer_period * 10);
317 btcoex->hw_timer_enabled = true; 315 btcoex->hw_timer_enabled = true;
318 } 316 }
319 317
@@ -333,8 +331,8 @@ static void ath_btcoex_no_stomp_timer(void *arg)
333 struct ath_common *common = ath9k_hw_common(ah); 331 struct ath_common *common = ath9k_hw_common(ah);
334 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN; 332 bool is_btscan = sc->sc_flags & SC_OP_BT_SCAN;
335 333
336 ath_print(common, ATH_DBG_BTCOEX, 334 ath_dbg(common, ATH_DBG_BTCOEX,
337 "no stomp timer running\n"); 335 "no stomp timer running\n");
338 336
339 spin_lock_bh(&btcoex->btcoex_lock); 337 spin_lock_bh(&btcoex->btcoex_lock);
340 338
@@ -380,8 +378,8 @@ void ath9k_btcoex_timer_resume(struct ath_softc *sc)
380 struct ath_btcoex *btcoex = &sc->btcoex; 378 struct ath_btcoex *btcoex = &sc->btcoex;
381 struct ath_hw *ah = sc->sc_ah; 379 struct ath_hw *ah = sc->sc_ah;
382 380
383 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX, 381 ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
384 "Starting btcoex timers"); 382 "Starting btcoex timers\n");
385 383
386 /* make sure duty cycle timer is also stopped when resuming */ 384 /* make sure duty cycle timer is also stopped when resuming */
387 if (btcoex->hw_timer_enabled) 385 if (btcoex->hw_timer_enabled)
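The two gen-timer hunks above switch from ath9k_hw_set_interrupts(ah, 0) to the dedicated ath9k_hw_disable_interrupts() helper while keeping the same disable/update-mask/re-enable sequence. A reduced sketch of that sequence, with stand-in helpers instead of the real ath9k HAL calls:

/* Stand-ins for the hardware interrupt helpers used above. */
static unsigned int example_hw_mask;

static void example_hw_disable_interrupts(void)
{
	example_hw_mask = 0;
}

static void example_hw_set_interrupts(unsigned int mask)
{
	example_hw_mask = mask;
}

#define EX_INT_GENTIMER (1U << 4)	/* placeholder bit position */

static void example_gen_timer_start(unsigned int *imask)
{
	if (!(*imask & EX_INT_GENTIMER)) {
		example_hw_disable_interrupts();
		*imask |= EX_INT_GENTIMER;
		example_hw_set_interrupts(*imask);
	}
}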
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.c b/drivers/net/wireless/ath/ath9k/hif_usb.c
index 0de3c3d3c245..5ab3084eb9cb 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.c
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.c
@@ -28,10 +28,7 @@ MODULE_FIRMWARE(FIRMWARE_AR9271);
28static struct usb_device_id ath9k_hif_usb_ids[] = { 28static struct usb_device_id ath9k_hif_usb_ids[] = {
29 { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */ 29 { USB_DEVICE(0x0cf3, 0x9271) }, /* Atheros */
30 { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */ 30 { USB_DEVICE(0x0cf3, 0x1006) }, /* Atheros */
31 { USB_DEVICE(0x0cf3, 0x7010) }, /* Atheros */
32 { USB_DEVICE(0x0cf3, 0x7015) }, /* Atheros */
33 { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */ 31 { USB_DEVICE(0x0846, 0x9030) }, /* Netgear N150 */
34 { USB_DEVICE(0x0846, 0x9018) }, /* Netgear WNDA3200 */
35 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */ 32 { USB_DEVICE(0x07D1, 0x3A10) }, /* Dlink Wireless 150 */
36 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */ 33 { USB_DEVICE(0x13D3, 0x3327) }, /* Azurewave */
37 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */ 34 { USB_DEVICE(0x13D3, 0x3328) }, /* Azurewave */
@@ -40,9 +37,21 @@ static struct usb_device_id ath9k_hif_usb_ids[] = {
40 { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */ 37 { USB_DEVICE(0x13D3, 0x3349) }, /* Azurewave */
41 { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */ 38 { USB_DEVICE(0x13D3, 0x3350) }, /* Azurewave */
42 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */ 39 { USB_DEVICE(0x04CA, 0x4605) }, /* Liteon */
43 { USB_DEVICE(0x083A, 0xA704) }, /* SMC Networks */
44 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */ 40 { USB_DEVICE(0x040D, 0x3801) }, /* VIA */
45 { USB_DEVICE(0x1668, 0x1200) }, /* Verizon */ 41 { USB_DEVICE(0x0cf3, 0xb003) }, /* Ubiquiti WifiStation Ext */
42
43 { USB_DEVICE(0x0cf3, 0x7015),
44 .driver_info = AR9287_USB }, /* Atheros */
45 { USB_DEVICE(0x1668, 0x1200),
46 .driver_info = AR9287_USB }, /* Verizon */
47
48 { USB_DEVICE(0x0cf3, 0x7010),
49 .driver_info = AR9280_USB }, /* Atheros */
50 { USB_DEVICE(0x0846, 0x9018),
51 .driver_info = AR9280_USB }, /* Netgear WNDA3200 */
52 { USB_DEVICE(0x083A, 0xA704),
53 .driver_info = AR9280_USB }, /* SMC Networks */
54
46 { }, 55 { },
47}; 56};
48 57
@@ -144,16 +153,36 @@ static void hif_usb_tx_cb(struct urb *urb)
144 case -ENODEV: 153 case -ENODEV:
145 case -ESHUTDOWN: 154 case -ESHUTDOWN:
146 /* 155 /*
147 * The URB has been killed, free the SKBs 156 * The URB has been killed, free the SKBs.
148 * and return.
149 */ 157 */
150 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue); 158 ath9k_skb_queue_purge(hif_dev, &tx_buf->skb_queue);
151 return; 159
160 /*
161 * If the URBs are being flushed, no need to add this
162 * URB to the free list.
163 */
164 spin_lock(&hif_dev->tx.tx_lock);
165 if (hif_dev->tx.flags & HIF_USB_TX_FLUSH) {
166 spin_unlock(&hif_dev->tx.tx_lock);
167 return;
168 }
169 spin_unlock(&hif_dev->tx.tx_lock);
170
171 /*
172 * In the stop() case, this URB has to be added to
173 * the free list.
174 */
175 goto add_free;
152 default: 176 default:
153 break; 177 break;
154 } 178 }
155 179
156 /* Check if TX has been stopped */ 180 /*
181 * Check if TX has been stopped, this is needed because
182 * this CB could have been invoked just after the TX lock
183 * was released in hif_stop() and kill_urb() hasn't been
184 * called yet.
185 */
157 spin_lock(&hif_dev->tx.tx_lock); 186 spin_lock(&hif_dev->tx.tx_lock);
158 if (hif_dev->tx.flags & HIF_USB_TX_STOP) { 187 if (hif_dev->tx.flags & HIF_USB_TX_STOP) {
159 spin_unlock(&hif_dev->tx.tx_lock); 188 spin_unlock(&hif_dev->tx.tx_lock);
@@ -305,6 +334,7 @@ static void hif_usb_start(void *hif_handle, u8 pipe_id)
305static void hif_usb_stop(void *hif_handle, u8 pipe_id) 334static void hif_usb_stop(void *hif_handle, u8 pipe_id)
306{ 335{
307 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle; 336 struct hif_device_usb *hif_dev = (struct hif_device_usb *)hif_handle;
337 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
308 unsigned long flags; 338 unsigned long flags;
309 339
310 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags); 340 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
@@ -312,6 +342,12 @@ static void hif_usb_stop(void *hif_handle, u8 pipe_id)
312 hif_dev->tx.tx_skb_cnt = 0; 342 hif_dev->tx.tx_skb_cnt = 0;
313 hif_dev->tx.flags |= HIF_USB_TX_STOP; 343 hif_dev->tx.flags |= HIF_USB_TX_STOP;
314 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags); 344 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
345
346 /* The pending URBs have to be canceled. */
347 list_for_each_entry_safe(tx_buf, tx_buf_tmp,
348 &hif_dev->tx.tx_pending, list) {
349 usb_kill_urb(tx_buf->urb);
350 }
315} 351}
316 352
317static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb, 353static int hif_usb_send(void *hif_handle, u8 pipe_id, struct sk_buff *skb,
@@ -353,9 +389,9 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
353 struct sk_buff *skb) 389 struct sk_buff *skb)
354{ 390{
355 struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER]; 391 struct sk_buff *nskb, *skb_pool[MAX_PKT_NUM_IN_TRANSFER];
356 int index = 0, i = 0, chk_idx, len = skb->len; 392 int index = 0, i = 0, len = skb->len;
357 int rx_remain_len = 0, rx_pkt_len = 0; 393 int rx_remain_len, rx_pkt_len;
358 u16 pkt_len, pkt_tag, pool_index = 0; 394 u16 pool_index = 0;
359 u8 *ptr; 395 u8 *ptr;
360 396
361 spin_lock(&hif_dev->rx_lock); 397 spin_lock(&hif_dev->rx_lock);
@@ -389,64 +425,64 @@ static void ath9k_hif_usb_rx_stream(struct hif_device_usb *hif_dev,
389 spin_unlock(&hif_dev->rx_lock); 425 spin_unlock(&hif_dev->rx_lock);
390 426
391 while (index < len) { 427 while (index < len) {
428 u16 pkt_len;
429 u16 pkt_tag;
430 u16 pad_len;
431 int chk_idx;
432
392 ptr = (u8 *) skb->data; 433 ptr = (u8 *) skb->data;
393 434
394 pkt_len = ptr[index] + (ptr[index+1] << 8); 435 pkt_len = ptr[index] + (ptr[index+1] << 8);
395 pkt_tag = ptr[index+2] + (ptr[index+3] << 8); 436 pkt_tag = ptr[index+2] + (ptr[index+3] << 8);
396 437
397 if (pkt_tag == ATH_USB_RX_STREAM_MODE_TAG) { 438 if (pkt_tag != ATH_USB_RX_STREAM_MODE_TAG) {
398 u16 pad_len; 439 RX_STAT_INC(skb_dropped);
399 440 return;
400 pad_len = 4 - (pkt_len & 0x3); 441 }
401 if (pad_len == 4) 442
402 pad_len = 0; 443 pad_len = 4 - (pkt_len & 0x3);
403 444 if (pad_len == 4)
404 chk_idx = index; 445 pad_len = 0;
405 index = index + 4 + pkt_len + pad_len; 446
406 447 chk_idx = index;
407 if (index > MAX_RX_BUF_SIZE) { 448 index = index + 4 + pkt_len + pad_len;
408 spin_lock(&hif_dev->rx_lock); 449
409 hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE; 450 if (index > MAX_RX_BUF_SIZE) {
410 hif_dev->rx_transfer_len = 451 spin_lock(&hif_dev->rx_lock);
411 MAX_RX_BUF_SIZE - chk_idx - 4; 452 hif_dev->rx_remain_len = index - MAX_RX_BUF_SIZE;
412 hif_dev->rx_pad_len = pad_len; 453 hif_dev->rx_transfer_len =
413 454 MAX_RX_BUF_SIZE - chk_idx - 4;
414 nskb = __dev_alloc_skb(pkt_len + 32, 455 hif_dev->rx_pad_len = pad_len;
415 GFP_ATOMIC); 456
416 if (!nskb) { 457 nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
417 dev_err(&hif_dev->udev->dev, 458 if (!nskb) {
418 "ath9k_htc: RX memory allocation" 459 dev_err(&hif_dev->udev->dev,
419 " error\n"); 460 "ath9k_htc: RX memory allocation error\n");
420 spin_unlock(&hif_dev->rx_lock);
421 goto err;
422 }
423 skb_reserve(nskb, 32);
424 RX_STAT_INC(skb_allocated);
425
426 memcpy(nskb->data, &(skb->data[chk_idx+4]),
427 hif_dev->rx_transfer_len);
428
429 /* Record the buffer pointer */
430 hif_dev->remain_skb = nskb;
431 spin_unlock(&hif_dev->rx_lock); 461 spin_unlock(&hif_dev->rx_lock);
432 } else { 462 goto err;
433 nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
434 if (!nskb) {
435 dev_err(&hif_dev->udev->dev,
436 "ath9k_htc: RX memory allocation"
437 " error\n");
438 goto err;
439 }
440 skb_reserve(nskb, 32);
441 RX_STAT_INC(skb_allocated);
442
443 memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
444 skb_put(nskb, pkt_len);
445 skb_pool[pool_index++] = nskb;
446 } 463 }
464 skb_reserve(nskb, 32);
465 RX_STAT_INC(skb_allocated);
466
467 memcpy(nskb->data, &(skb->data[chk_idx+4]),
468 hif_dev->rx_transfer_len);
469
470 /* Record the buffer pointer */
471 hif_dev->remain_skb = nskb;
472 spin_unlock(&hif_dev->rx_lock);
447 } else { 473 } else {
448 RX_STAT_INC(skb_dropped); 474 nskb = __dev_alloc_skb(pkt_len + 32, GFP_ATOMIC);
449 return; 475 if (!nskb) {
476 dev_err(&hif_dev->udev->dev,
477 "ath9k_htc: RX memory allocation error\n");
478 goto err;
479 }
480 skb_reserve(nskb, 32);
481 RX_STAT_INC(skb_allocated);
482
483 memcpy(nskb->data, &(skb->data[chk_idx+4]), pkt_len);
484 skb_put(nskb, pkt_len);
485 skb_pool[pool_index++] = nskb;
450 } 486 }
451 } 487 }
452 488
@@ -461,7 +497,7 @@ err:
461static void ath9k_hif_usb_rx_cb(struct urb *urb) 497static void ath9k_hif_usb_rx_cb(struct urb *urb)
462{ 498{
463 struct sk_buff *skb = (struct sk_buff *) urb->context; 499 struct sk_buff *skb = (struct sk_buff *) urb->context;
464 struct hif_device_usb *hif_dev = (struct hif_device_usb *) 500 struct hif_device_usb *hif_dev =
465 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 501 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
466 int ret; 502 int ret;
467 503
@@ -508,7 +544,7 @@ static void ath9k_hif_usb_reg_in_cb(struct urb *urb)
508{ 544{
509 struct sk_buff *skb = (struct sk_buff *) urb->context; 545 struct sk_buff *skb = (struct sk_buff *) urb->context;
510 struct sk_buff *nskb; 546 struct sk_buff *nskb;
511 struct hif_device_usb *hif_dev = (struct hif_device_usb *) 547 struct hif_device_usb *hif_dev =
512 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 548 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
513 int ret; 549 int ret;
514 550
@@ -578,6 +614,7 @@ free:
578static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev) 614static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
579{ 615{
580 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL; 616 struct tx_buf *tx_buf = NULL, *tx_buf_tmp = NULL;
617 unsigned long flags;
581 618
582 list_for_each_entry_safe(tx_buf, tx_buf_tmp, 619 list_for_each_entry_safe(tx_buf, tx_buf_tmp,
583 &hif_dev->tx.tx_buf, list) { 620 &hif_dev->tx.tx_buf, list) {
@@ -588,6 +625,10 @@ static void ath9k_hif_usb_dealloc_tx_urbs(struct hif_device_usb *hif_dev)
588 kfree(tx_buf); 625 kfree(tx_buf);
589 } 626 }
590 627
628 spin_lock_irqsave(&hif_dev->tx.tx_lock, flags);
629 hif_dev->tx.flags |= HIF_USB_TX_FLUSH;
630 spin_unlock_irqrestore(&hif_dev->tx.tx_lock, flags);
631
591 list_for_each_entry_safe(tx_buf, tx_buf_tmp, 632 list_for_each_entry_safe(tx_buf, tx_buf_tmp,
592 &hif_dev->tx.tx_pending, list) { 633 &hif_dev->tx.tx_pending, list) {
593 usb_kill_urb(tx_buf->urb); 634 usb_kill_urb(tx_buf->urb);
@@ -776,7 +817,8 @@ static void ath9k_hif_usb_dealloc_urbs(struct hif_device_usb *hif_dev)
776 ath9k_hif_usb_dealloc_rx_urbs(hif_dev); 817 ath9k_hif_usb_dealloc_rx_urbs(hif_dev);
777} 818}
778 819
779static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev) 820static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev,
821 u32 drv_info)
780{ 822{
781 int transfer, err; 823 int transfer, err;
782 const void *data = hif_dev->firmware->data; 824 const void *data = hif_dev->firmware->data;
@@ -807,18 +849,10 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
807 } 849 }
808 kfree(buf); 850 kfree(buf);
809 851
810 switch (hif_dev->device_id) { 852 if (IS_AR7010_DEVICE(drv_info))
811 case 0x7010:
812 case 0x7015:
813 case 0x9018:
814 case 0xA704:
815 case 0x1200:
816 firm_offset = AR7010_FIRMWARE_TEXT; 853 firm_offset = AR7010_FIRMWARE_TEXT;
817 break; 854 else
818 default:
819 firm_offset = AR9271_FIRMWARE_TEXT; 855 firm_offset = AR9271_FIRMWARE_TEXT;
820 break;
821 }
822 856
823 /* 857 /*
824 * Issue FW download complete command to firmware. 858 * Issue FW download complete command to firmware.
@@ -836,7 +870,7 @@ static int ath9k_hif_usb_download_fw(struct hif_device_usb *hif_dev)
836 return 0; 870 return 0;
837} 871}
838 872
839static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev) 873static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev, u32 drv_info)
840{ 874{
841 int ret, idx; 875 int ret, idx;
842 struct usb_host_interface *alt = &hif_dev->interface->altsetting[0]; 876 struct usb_host_interface *alt = &hif_dev->interface->altsetting[0];
@@ -852,7 +886,7 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
852 } 886 }
853 887
854 /* Download firmware */ 888 /* Download firmware */
855 ret = ath9k_hif_usb_download_fw(hif_dev); 889 ret = ath9k_hif_usb_download_fw(hif_dev, drv_info);
856 if (ret) { 890 if (ret) {
857 dev_err(&hif_dev->udev->dev, 891 dev_err(&hif_dev->udev->dev,
858 "ath9k_htc: Firmware - %s download failed\n", 892 "ath9k_htc: Firmware - %s download failed\n",
@@ -884,9 +918,9 @@ static int ath9k_hif_usb_dev_init(struct hif_device_usb *hif_dev)
884 918
885 return 0; 919 return 0;
886 920
887err_fw_download:
888 ath9k_hif_usb_dealloc_urbs(hif_dev);
889err_urb: 921err_urb:
922 ath9k_hif_usb_dealloc_urbs(hif_dev);
923err_fw_download:
890 release_firmware(hif_dev->firmware); 924 release_firmware(hif_dev->firmware);
891err_fw_req: 925err_fw_req:
892 hif_dev->firmware = NULL; 926 hif_dev->firmware = NULL;
@@ -931,23 +965,15 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
931 965
932 /* Find out which firmware to load */ 966 /* Find out which firmware to load */
933 967
934 switch(hif_dev->device_id) { 968 if (IS_AR7010_DEVICE(id->driver_info))
935 case 0x7010:
936 case 0x7015:
937 case 0x9018:
938 case 0xA704:
939 case 0x1200:
940 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202) 969 if (le16_to_cpu(udev->descriptor.bcdDevice) == 0x0202)
941 hif_dev->fw_name = FIRMWARE_AR7010_1_1; 970 hif_dev->fw_name = FIRMWARE_AR7010_1_1;
942 else 971 else
943 hif_dev->fw_name = FIRMWARE_AR7010; 972 hif_dev->fw_name = FIRMWARE_AR7010;
944 break; 973 else
945 default:
946 hif_dev->fw_name = FIRMWARE_AR9271; 974 hif_dev->fw_name = FIRMWARE_AR9271;
947 break;
948 }
949 975
950 ret = ath9k_hif_usb_dev_init(hif_dev); 976 ret = ath9k_hif_usb_dev_init(hif_dev, id->driver_info);
951 if (ret) { 977 if (ret) {
952 ret = -EINVAL; 978 ret = -EINVAL;
953 goto err_hif_init_usb; 979 goto err_hif_init_usb;
@@ -955,7 +981,7 @@ static int ath9k_hif_usb_probe(struct usb_interface *interface,
955 981
956 ret = ath9k_htc_hw_init(hif_dev->htc_handle, 982 ret = ath9k_htc_hw_init(hif_dev->htc_handle,
957 &hif_dev->udev->dev, hif_dev->device_id, 983 &hif_dev->udev->dev, hif_dev->device_id,
958 hif_dev->udev->product); 984 hif_dev->udev->product, id->driver_info);
959 if (ret) { 985 if (ret) {
960 ret = -EINVAL; 986 ret = -EINVAL;
961 goto err_htc_hw_init; 987 goto err_htc_hw_init;
@@ -998,18 +1024,17 @@ static void ath9k_hif_usb_reboot(struct usb_device *udev)
998static void ath9k_hif_usb_disconnect(struct usb_interface *interface) 1024static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
999{ 1025{
1000 struct usb_device *udev = interface_to_usbdev(interface); 1026 struct usb_device *udev = interface_to_usbdev(interface);
1001 struct hif_device_usb *hif_dev = 1027 struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
1002 (struct hif_device_usb *) usb_get_intfdata(interface); 1028 bool unplugged = (udev->state == USB_STATE_NOTATTACHED) ? true : false;
1003 1029
1004 if (hif_dev) { 1030 if (hif_dev) {
1005 ath9k_htc_hw_deinit(hif_dev->htc_handle, 1031 ath9k_htc_hw_deinit(hif_dev->htc_handle, unplugged);
1006 (udev->state == USB_STATE_NOTATTACHED) ? true : false);
1007 ath9k_htc_hw_free(hif_dev->htc_handle); 1032 ath9k_htc_hw_free(hif_dev->htc_handle);
1008 ath9k_hif_usb_dev_deinit(hif_dev); 1033 ath9k_hif_usb_dev_deinit(hif_dev);
1009 usb_set_intfdata(interface, NULL); 1034 usb_set_intfdata(interface, NULL);
1010 } 1035 }
1011 1036
1012 if (hif_dev->flags & HIF_USB_START) 1037 if (!unplugged && (hif_dev->flags & HIF_USB_START))
1013 ath9k_hif_usb_reboot(udev); 1038 ath9k_hif_usb_reboot(udev);
1014 1039
1015 kfree(hif_dev); 1040 kfree(hif_dev);
@@ -1021,8 +1046,7 @@ static void ath9k_hif_usb_disconnect(struct usb_interface *interface)
1021static int ath9k_hif_usb_suspend(struct usb_interface *interface, 1046static int ath9k_hif_usb_suspend(struct usb_interface *interface,
1022 pm_message_t message) 1047 pm_message_t message)
1023{ 1048{
1024 struct hif_device_usb *hif_dev = 1049 struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
1025 (struct hif_device_usb *) usb_get_intfdata(interface);
1026 1050
1027 /* 1051 /*
1028 * The device has to be set to FULLSLEEP mode in case no 1052 * The device has to be set to FULLSLEEP mode in case no
@@ -1038,8 +1062,8 @@ static int ath9k_hif_usb_suspend(struct usb_interface *interface,
1038 1062
1039static int ath9k_hif_usb_resume(struct usb_interface *interface) 1063static int ath9k_hif_usb_resume(struct usb_interface *interface)
1040{ 1064{
1041 struct hif_device_usb *hif_dev = 1065 struct hif_device_usb *hif_dev = usb_get_intfdata(interface);
1042 (struct hif_device_usb *) usb_get_intfdata(interface); 1066 struct htc_target *htc_handle = hif_dev->htc_handle;
1043 int ret; 1067 int ret;
1044 1068
1045 ret = ath9k_hif_usb_alloc_urbs(hif_dev); 1069 ret = ath9k_hif_usb_alloc_urbs(hif_dev);
@@ -1047,7 +1071,8 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
1047 return ret; 1071 return ret;
1048 1072
1049 if (hif_dev->firmware) { 1073 if (hif_dev->firmware) {
1050 ret = ath9k_hif_usb_download_fw(hif_dev); 1074 ret = ath9k_hif_usb_download_fw(hif_dev,
1075 htc_handle->drv_priv->ah->hw_version.usbdev);
1051 if (ret) 1076 if (ret)
1052 goto fail_resume; 1077 goto fail_resume;
1053 } else { 1078 } else {
@@ -1057,7 +1082,7 @@ static int ath9k_hif_usb_resume(struct usb_interface *interface)
1057 1082
1058 mdelay(100); 1083 mdelay(100);
1059 1084
1060 ret = ath9k_htc_resume(hif_dev->htc_handle); 1085 ret = ath9k_htc_resume(htc_handle);
1061 1086
1062 if (ret) 1087 if (ret)
1063 goto fail_resume; 1088 goto fail_resume;
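The probe/device-table changes above stop keying firmware selection off raw USB product ids; the id table now tags AR7010-class (AR9280/AR9287 USB) devices via driver_info and IS_AR7010_DEVICE(). A simplified sketch of that selection, with placeholder enum values and firmware names standing in for AR9280_USB/AR9287_USB and the FIRMWARE_* macros:

#include <stdbool.h>
#include <stdint.h>

enum example_usb_family {	/* placeholder values, not the driver's */
	EX_AR9271_USB,
	EX_AR9280_USB,
	EX_AR9287_USB,
};

static bool example_is_ar7010(uint32_t drv_info)
{
	return drv_info == EX_AR9280_USB || drv_info == EX_AR9287_USB;
}

static const char *example_pick_firmware(uint32_t drv_info, uint16_t bcd_device)
{
	if (example_is_ar7010(drv_info))
		return (bcd_device == 0x0202) ? "ar7010_1_1-placeholder.fw"
					      : "ar7010-placeholder.fw";
	return "ar9271-placeholder.fw";
}

The resume path reuses the stored value (htc_handle->drv_priv->ah->hw_version.usbdev) for the same decision, as the last hif_usb.c hunk shows.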
diff --git a/drivers/net/wireless/ath/ath9k/hif_usb.h b/drivers/net/wireless/ath/ath9k/hif_usb.h
index 2daf97b11c08..7b9d863d4035 100644
--- a/drivers/net/wireless/ath/ath9k/hif_usb.h
+++ b/drivers/net/wireless/ath/ath9k/hif_usb.h
@@ -17,6 +17,8 @@
17#ifndef HTC_USB_H 17#ifndef HTC_USB_H
18#define HTC_USB_H 18#define HTC_USB_H
19 19
20#define IS_AR7010_DEVICE(_v) (((_v) == AR9280_USB) || ((_v) == AR9287_USB))
21
20#define AR9271_FIRMWARE 0x501000 22#define AR9271_FIRMWARE 0x501000
21#define AR9271_FIRMWARE_TEXT 0x903000 23#define AR9271_FIRMWARE_TEXT 0x903000
22#define AR7010_FIRMWARE_TEXT 0x906000 24#define AR7010_FIRMWARE_TEXT 0x906000
@@ -62,6 +64,7 @@ struct tx_buf {
62}; 64};
63 65
64#define HIF_USB_TX_STOP BIT(0) 66#define HIF_USB_TX_STOP BIT(0)
67#define HIF_USB_TX_FLUSH BIT(1)
65 68
66struct hif_usb_tx { 69struct hif_usb_tx {
67 u8 flags; 70 u8 flags;
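The new HIF_USB_TX_FLUSH bit defined above pairs with the dealloc_tx_urbs() and hif_usb_tx_cb() hunks earlier: the flush flag is raised (under the tx lock) before the pending URBs are killed, so the completion callback can tell a teardown-time kill apart from a plain stop and avoid putting the URB back on the free list. A reduced sketch of that decision, with locking and the real structures elided:

#include <stdbool.h>

#define EX_TX_STOP  (1U << 0)
#define EX_TX_FLUSH (1U << 1)

struct example_tx {
	unsigned int flags;
};

static void example_flush_tx(struct example_tx *tx)
{
	tx->flags |= EX_TX_FLUSH;
	/* ...usb_kill_urb() on every pending URB follows in the driver... */
}

/* Called from the URB completion path after a kill: keep the URB on the
 * free list only when we are not tearing everything down. */
static bool example_keep_killed_urb(const struct example_tx *tx)
{
	return !(tx->flags & EX_TX_FLUSH);
}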
diff --git a/drivers/net/wireless/ath/ath9k/htc.h b/drivers/net/wireless/ath/ath9k/htc.h
index c3b561daa6c1..a099b3e87ed3 100644
--- a/drivers/net/wireless/ath/ath9k/htc.h
+++ b/drivers/net/wireless/ath/ath9k/htc.h
@@ -331,17 +331,15 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv);
331 331
332#define OP_INVALID BIT(0) 332#define OP_INVALID BIT(0)
333#define OP_SCANNING BIT(1) 333#define OP_SCANNING BIT(1)
334#define OP_FULL_RESET BIT(2) 334#define OP_LED_ASSOCIATED BIT(2)
335#define OP_LED_ASSOCIATED BIT(3) 335#define OP_LED_ON BIT(3)
336#define OP_LED_ON BIT(4) 336#define OP_PREAMBLE_SHORT BIT(4)
337#define OP_PREAMBLE_SHORT BIT(5) 337#define OP_PROTECT_ENABLE BIT(5)
338#define OP_PROTECT_ENABLE BIT(6) 338#define OP_ASSOCIATED BIT(6)
339#define OP_ASSOCIATED BIT(7) 339#define OP_ENABLE_BEACON BIT(7)
340#define OP_ENABLE_BEACON BIT(8) 340#define OP_LED_DEINIT BIT(8)
341#define OP_LED_DEINIT BIT(9) 341#define OP_BT_PRIORITY_DETECTED BIT(9)
342#define OP_UNPLUGGED BIT(10) 342#define OP_BT_SCAN BIT(10)
343#define OP_BT_PRIORITY_DETECTED BIT(11)
344#define OP_BT_SCAN BIT(12)
345 343
346struct ath9k_htc_priv { 344struct ath9k_htc_priv {
347 struct device *dev; 345 struct device *dev;
@@ -368,7 +366,7 @@ struct ath9k_htc_priv {
368 u16 seq_no; 366 u16 seq_no;
369 u32 bmiss_cnt; 367 u32 bmiss_cnt;
370 368
371 struct ath9k_hw_cal_data caldata[38]; 369 struct ath9k_hw_cal_data caldata[ATH9K_NUM_CHANNELS];
372 370
373 spinlock_t beacon_lock; 371 spinlock_t beacon_lock;
374 372
@@ -378,7 +376,7 @@ struct ath9k_htc_priv {
378 struct ieee80211_vif *vif; 376 struct ieee80211_vif *vif;
379 struct htc_beacon_config cur_beacon_conf; 377 struct htc_beacon_config cur_beacon_conf;
380 unsigned int rxfilter; 378 unsigned int rxfilter;
381 struct tasklet_struct wmi_tasklet; 379 struct tasklet_struct swba_tasklet;
382 struct tasklet_struct rx_tasklet; 380 struct tasklet_struct rx_tasklet;
383 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS]; 381 struct ieee80211_supported_band sbands[IEEE80211_NUM_BANDS];
384 struct ath9k_htc_rx rx; 382 struct ath9k_htc_rx rx;
@@ -386,6 +384,7 @@ struct ath9k_htc_priv {
386 struct sk_buff_head tx_queue; 384 struct sk_buff_head tx_queue;
387 struct delayed_work ath9k_ani_work; 385 struct delayed_work ath9k_ani_work;
388 struct work_struct ps_work; 386 struct work_struct ps_work;
387 struct work_struct fatal_work;
389 388
390 struct mutex htc_pm_lock; 389 struct mutex htc_pm_lock;
391 unsigned long ps_usecount; 390 unsigned long ps_usecount;
@@ -420,6 +419,8 @@ static inline void ath_read_cachesize(struct ath_common *common, int *csz)
420 common->bus_ops->read_cachesize(common, csz); 419 common->bus_ops->read_cachesize(common, csz);
421} 420}
422 421
422void ath9k_htc_reset(struct ath9k_htc_priv *priv);
423
423void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv); 424void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv);
424void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv, 425void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
425 struct ieee80211_vif *vif); 426 struct ieee80211_vif *vif);
@@ -435,6 +436,7 @@ void ath9k_htc_beaconep(void *drv_priv, struct sk_buff *skb,
435void ath9k_htc_station_work(struct work_struct *work); 436void ath9k_htc_station_work(struct work_struct *work);
436void ath9k_htc_aggr_work(struct work_struct *work); 437void ath9k_htc_aggr_work(struct work_struct *work);
437void ath9k_ani_work(struct work_struct *work);; 438void ath9k_ani_work(struct work_struct *work);;
439void ath_start_ani(struct ath9k_htc_priv *priv);
438 440
439int ath9k_tx_init(struct ath9k_htc_priv *priv); 441int ath9k_tx_init(struct ath9k_htc_priv *priv);
440void ath9k_tx_tasklet(unsigned long data); 442void ath9k_tx_tasklet(unsigned long data);
@@ -457,13 +459,18 @@ void ath9k_htc_ps_restore(struct ath9k_htc_priv *priv);
457void ath9k_ps_work(struct work_struct *work); 459void ath9k_ps_work(struct work_struct *work);
458bool ath9k_htc_setpower(struct ath9k_htc_priv *priv, 460bool ath9k_htc_setpower(struct ath9k_htc_priv *priv,
459 enum ath9k_power_mode mode); 461 enum ath9k_power_mode mode);
462void ath_update_txpow(struct ath9k_htc_priv *priv);
460 463
461void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv); 464void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv);
465void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw);
466void ath9k_htc_radio_enable(struct ieee80211_hw *hw);
467void ath9k_htc_radio_disable(struct ieee80211_hw *hw);
468void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv);
462void ath9k_init_leds(struct ath9k_htc_priv *priv); 469void ath9k_init_leds(struct ath9k_htc_priv *priv);
463void ath9k_deinit_leds(struct ath9k_htc_priv *priv); 470void ath9k_deinit_leds(struct ath9k_htc_priv *priv);
464 471
465int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev, 472int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
466 u16 devid, char *product); 473 u16 devid, char *product, u32 drv_info);
467void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug); 474void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug);
468#ifdef CONFIG_PM 475#ifdef CONFIG_PM
469void ath9k_htc_suspend(struct htc_target *htc_handle); 476void ath9k_htc_suspend(struct htc_target *htc_handle);
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
index 1b72aa482ac7..87cc65a78a3f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_beacon.c
@@ -123,11 +123,11 @@ static void ath9k_htc_beacon_config_sta(struct ath9k_htc_priv *priv,
123 /* TSF out of range threshold fixed at 1 second */ 123 /* TSF out of range threshold fixed at 1 second */
124 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; 124 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
125 125
126 ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); 126 ath_dbg(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
127 ath_print(common, ATH_DBG_BEACON, 127 ath_dbg(common, ATH_DBG_BEACON,
128 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", 128 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
129 bs.bs_bmissthreshold, bs.bs_sleepduration, 129 bs.bs_bmissthreshold, bs.bs_sleepduration,
130 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); 130 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
131 131
132 /* Set the computed STA beacon timers */ 132 /* Set the computed STA beacon timers */
133 133
@@ -154,9 +154,9 @@ static void ath9k_htc_beacon_config_adhoc(struct ath9k_htc_priv *priv,
154 if (priv->op_flags & OP_ENABLE_BEACON) 154 if (priv->op_flags & OP_ENABLE_BEACON)
155 imask |= ATH9K_INT_SWBA; 155 imask |= ATH9K_INT_SWBA;
156 156
157 ath_print(common, ATH_DBG_BEACON, 157 ath_dbg(common, ATH_DBG_BEACON,
158 "IBSS Beacon config, intval: %d, imask: 0x%x\n", 158 "IBSS Beacon config, intval: %d, imask: 0x%x\n",
159 bss_conf->beacon_interval, imask); 159 bss_conf->beacon_interval, imask);
160 160
161 WMI_CMD(WMI_DISABLE_INTR_CMDID); 161 WMI_CMD(WMI_DISABLE_INTR_CMDID);
162 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval); 162 ath9k_hw_beaconinit(priv->ah, nexttbtt, intval);
@@ -246,8 +246,8 @@ void ath9k_htc_beaconq_config(struct ath9k_htc_priv *priv)
246 qi.tqi_cwmax = qi_be.tqi_cwmax; 246 qi.tqi_cwmax = qi_be.tqi_cwmax;
247 247
248 if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) { 248 if (!ath9k_hw_set_txq_props(ah, priv->beaconq, &qi)) {
249 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 249 ath_err(ath9k_hw_common(ah),
250 "Unable to update beacon queue %u!\n", qnum); 250 "Unable to update beacon queue %u!\n", qnum);
251 } else { 251 } else {
252 ath9k_hw_resettxqueue(ah, priv->beaconq); 252 ath9k_hw_resettxqueue(ah, priv->beaconq);
253 } 253 }
@@ -278,8 +278,8 @@ void ath9k_htc_beacon_config(struct ath9k_htc_priv *priv,
278 ath9k_htc_beacon_config_adhoc(priv, cur_conf); 278 ath9k_htc_beacon_config_adhoc(priv, cur_conf);
279 break; 279 break;
280 default: 280 default:
281 ath_print(common, ATH_DBG_CONFIG, 281 ath_dbg(common, ATH_DBG_CONFIG,
282 "Unsupported beaconing mode\n"); 282 "Unsupported beaconing mode\n");
283 return; 283 return;
284 } 284 }
285} 285}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
index 50eec9a3b88c..fe70f67aa088 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_gpio.c
@@ -1,3 +1,19 @@
1/*
2 * Copyright (c) 2010 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
1#include "htc.h" 17#include "htc.h"
2 18
3/******************/ 19/******************/
@@ -20,13 +36,13 @@ static void ath_detect_bt_priority(struct ath9k_htc_priv *priv)
 	priv->op_flags &= ~(OP_BT_PRIORITY_DETECTED | OP_BT_SCAN);
 	/* Detect if colocated bt started scanning */
 	if (btcoex->bt_priority_cnt >= ATH_BT_CNT_SCAN_THRESHOLD) {
-		ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
-			"BT scan detected");
+		ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+			"BT scan detected\n");
 		priv->op_flags |= (OP_BT_SCAN |
 				OP_BT_PRIORITY_DETECTED);
 	} else if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
-		ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
-			"BT priority traffic detected");
+		ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
+			"BT priority traffic detected\n");
 		priv->op_flags |= OP_BT_PRIORITY_DETECTED;
 	}
 
@@ -83,8 +99,8 @@ static void ath_btcoex_duty_cycle_work(struct work_struct *work)
 	struct ath_common *common = ath9k_hw_common(ah);
 	bool is_btscan = priv->op_flags & OP_BT_SCAN;
 
-	ath_print(common, ATH_DBG_BTCOEX,
+	ath_dbg(common, ATH_DBG_BTCOEX,
 		"time slice work for bt and wlan\n");
 
 	if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW || is_btscan)
 		ath9k_cmn_btcoex_bt_stomp(common, ATH_BTCOEX_STOMP_NONE);
@@ -114,8 +130,7 @@ void ath_htc_resume_btcoex_work(struct ath9k_htc_priv *priv)
 	struct ath_btcoex *btcoex = &priv->btcoex;
 	struct ath_hw *ah = priv->ah;
 
-	ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
-		"Starting btcoex work");
+	ath_dbg(ath9k_hw_common(ah), ATH_DBG_BTCOEX, "Starting btcoex work\n");
 
 	btcoex->bt_priority_cnt = 0;
 	btcoex->bt_priority_time = jiffies;
@@ -132,3 +147,314 @@ void ath_htc_cancel_btcoex_work(struct ath9k_htc_priv *priv)
132 cancel_delayed_work_sync(&priv->coex_period_work); 147 cancel_delayed_work_sync(&priv->coex_period_work);
133 cancel_delayed_work_sync(&priv->duty_cycle_work); 148 cancel_delayed_work_sync(&priv->duty_cycle_work);
134} 149}
150
151/*******/
152/* LED */
153/*******/
154
155static void ath9k_led_blink_work(struct work_struct *work)
156{
157 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
158 ath9k_led_blink_work.work);
159
160 if (!(priv->op_flags & OP_LED_ASSOCIATED))
161 return;
162
163 if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
164 (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
165 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
166 else
167 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
168 (priv->op_flags & OP_LED_ON) ? 1 : 0);
169
170 ieee80211_queue_delayed_work(priv->hw,
171 &priv->ath9k_led_blink_work,
172 (priv->op_flags & OP_LED_ON) ?
173 msecs_to_jiffies(priv->led_off_duration) :
174 msecs_to_jiffies(priv->led_on_duration));
175
176 priv->led_on_duration = priv->led_on_cnt ?
177 max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
178 ATH_LED_ON_DURATION_IDLE;
179 priv->led_off_duration = priv->led_off_cnt ?
180 max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
181 ATH_LED_OFF_DURATION_IDLE;
182 priv->led_on_cnt = priv->led_off_cnt = 0;
183
184 if (priv->op_flags & OP_LED_ON)
185 priv->op_flags &= ~OP_LED_ON;
186 else
187 priv->op_flags |= OP_LED_ON;
188}
189
190static void ath9k_led_brightness_work(struct work_struct *work)
191{
192 struct ath_led *led = container_of(work, struct ath_led,
193 brightness_work.work);
194 struct ath9k_htc_priv *priv = led->priv;
195
196 switch (led->brightness) {
197 case LED_OFF:
198 if (led->led_type == ATH_LED_ASSOC ||
199 led->led_type == ATH_LED_RADIO) {
200 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
201 (led->led_type == ATH_LED_RADIO));
202 priv->op_flags &= ~OP_LED_ASSOCIATED;
203 if (led->led_type == ATH_LED_RADIO)
204 priv->op_flags &= ~OP_LED_ON;
205 } else {
206 priv->led_off_cnt++;
207 }
208 break;
209 case LED_FULL:
210 if (led->led_type == ATH_LED_ASSOC) {
211 priv->op_flags |= OP_LED_ASSOCIATED;
212 ieee80211_queue_delayed_work(priv->hw,
213 &priv->ath9k_led_blink_work, 0);
214 } else if (led->led_type == ATH_LED_RADIO) {
215 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
216 priv->op_flags |= OP_LED_ON;
217 } else {
218 priv->led_on_cnt++;
219 }
220 break;
221 default:
222 break;
223 }
224}
225
226static void ath9k_led_brightness(struct led_classdev *led_cdev,
227 enum led_brightness brightness)
228{
229 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
230 struct ath9k_htc_priv *priv = led->priv;
231
232 led->brightness = brightness;
233 if (!(priv->op_flags & OP_LED_DEINIT))
234 ieee80211_queue_delayed_work(priv->hw,
235 &led->brightness_work, 0);
236}
237
238void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
239{
240 cancel_delayed_work_sync(&priv->radio_led.brightness_work);
241 cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
242 cancel_delayed_work_sync(&priv->tx_led.brightness_work);
243 cancel_delayed_work_sync(&priv->rx_led.brightness_work);
244}
245
246static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
247 char *trigger)
248{
249 int ret;
250
251 led->priv = priv;
252 led->led_cdev.name = led->name;
253 led->led_cdev.default_trigger = trigger;
254 led->led_cdev.brightness_set = ath9k_led_brightness;
255
256 ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
257 if (ret)
258 ath_err(ath9k_hw_common(priv->ah),
259 "Failed to register led:%s", led->name);
260 else
261 led->registered = 1;
262
263 INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
264
265 return ret;
266}
267
268static void ath9k_unregister_led(struct ath_led *led)
269{
270 if (led->registered) {
271 led_classdev_unregister(&led->led_cdev);
272 led->registered = 0;
273 }
274}
275
276void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
277{
278 priv->op_flags |= OP_LED_DEINIT;
279 ath9k_unregister_led(&priv->assoc_led);
280 priv->op_flags &= ~OP_LED_ASSOCIATED;
281 ath9k_unregister_led(&priv->tx_led);
282 ath9k_unregister_led(&priv->rx_led);
283 ath9k_unregister_led(&priv->radio_led);
284}
285
286void ath9k_init_leds(struct ath9k_htc_priv *priv)
287{
288 char *trigger;
289 int ret;
290
291 if (AR_SREV_9287(priv->ah))
292 priv->ah->led_pin = ATH_LED_PIN_9287;
293 else if (AR_SREV_9271(priv->ah))
294 priv->ah->led_pin = ATH_LED_PIN_9271;
295 else if (AR_DEVID_7010(priv->ah))
296 priv->ah->led_pin = ATH_LED_PIN_7010;
297 else
298 priv->ah->led_pin = ATH_LED_PIN_DEF;
299
300 /* Configure gpio 1 for output */
301 ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
302 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
303 /* LED off, active low */
304 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
305
306 INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
307
308 trigger = ieee80211_get_radio_led_name(priv->hw);
309 snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
310 "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
311 ret = ath9k_register_led(priv, &priv->radio_led, trigger);
312 priv->radio_led.led_type = ATH_LED_RADIO;
313 if (ret)
314 goto fail;
315
316 trigger = ieee80211_get_assoc_led_name(priv->hw);
317 snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
318 "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
319 ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
320 priv->assoc_led.led_type = ATH_LED_ASSOC;
321 if (ret)
322 goto fail;
323
324 trigger = ieee80211_get_tx_led_name(priv->hw);
325 snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
326 "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
327 ret = ath9k_register_led(priv, &priv->tx_led, trigger);
328 priv->tx_led.led_type = ATH_LED_TX;
329 if (ret)
330 goto fail;
331
332 trigger = ieee80211_get_rx_led_name(priv->hw);
333 snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
334 "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
335 ret = ath9k_register_led(priv, &priv->rx_led, trigger);
336 priv->rx_led.led_type = ATH_LED_RX;
337 if (ret)
338 goto fail;
339
340 priv->op_flags &= ~OP_LED_DEINIT;
341
342 return;
343
344fail:
345 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
346 ath9k_deinit_leds(priv);
347}
348
349/*******************/
350/* Rfkill */
351/*******************/
352
353static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
354{
355 return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
356 priv->ah->rfkill_polarity;
357}
358
359void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
360{
361 struct ath9k_htc_priv *priv = hw->priv;
362 bool blocked = !!ath_is_rfkill_set(priv);
363
364 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
365}
366
367void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
368{
369 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
370 wiphy_rfkill_start_polling(priv->hw->wiphy);
371}
372
373void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
374{
375 struct ath9k_htc_priv *priv = hw->priv;
376 struct ath_hw *ah = priv->ah;
377 struct ath_common *common = ath9k_hw_common(ah);
378 int ret;
379 u8 cmd_rsp;
380
381 if (!ah->curchan)
382 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
383
384 /* Reset the HW */
385 ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
386 if (ret) {
387 ath_err(common,
388 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
389 ret, ah->curchan->channel);
390 }
391
392 ath_update_txpow(priv);
393
394 /* Start RX */
395 WMI_CMD(WMI_START_RECV_CMDID);
396 ath9k_host_rx_init(priv);
397
398 /* Start TX */
399 htc_start(priv->htc);
400 spin_lock_bh(&priv->tx_lock);
401 priv->tx_queues_stop = false;
402 spin_unlock_bh(&priv->tx_lock);
403 ieee80211_wake_queues(hw);
404
405 WMI_CMD(WMI_ENABLE_INTR_CMDID);
406
407 /* Enable LED */
408 ath9k_hw_cfg_output(ah, ah->led_pin,
409 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
410 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
411}
412
413void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
414{
415 struct ath9k_htc_priv *priv = hw->priv;
416 struct ath_hw *ah = priv->ah;
417 struct ath_common *common = ath9k_hw_common(ah);
418 int ret;
419 u8 cmd_rsp;
420
421 ath9k_htc_ps_wakeup(priv);
422
423 /* Disable LED */
424 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
425 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
426
427 WMI_CMD(WMI_DISABLE_INTR_CMDID);
428
429 /* Stop TX */
430 ieee80211_stop_queues(hw);
431 htc_stop(priv->htc);
432 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
433 skb_queue_purge(&priv->tx_queue);
434
435 /* Stop RX */
436 WMI_CMD(WMI_STOP_RECV_CMDID);
437
438 /*
439 * The MIB counters have to be disabled here,
440 * since the target doesn't do it.
441 */
442 ath9k_hw_disable_mib_counters(ah);
443
444 if (!ah->curchan)
445 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
446
447 /* Reset the HW */
448 ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
449 if (ret) {
450 ath_err(common,
451 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
452 ret, ah->curchan->channel);
453 }
454
455 /* Disable the PHY */
456 ath9k_hw_phy_disable(ah);
457
458 ath9k_htc_ps_restore(priv);
459 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
460}
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
index 8776f49ffd41..38433f9bfe59 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c
@@ -142,7 +142,7 @@ static void ath9k_deinit_priv(struct ath9k_htc_priv *priv)
 {
 	ath9k_htc_exit_debug(priv->ah);
 	ath9k_hw_deinit(priv->ah);
-	tasklet_kill(&priv->wmi_tasklet);
+	tasklet_kill(&priv->swba_tasklet);
 	tasklet_kill(&priv->rx_tasklet);
 	tasklet_kill(&priv->tx_tasklet);
 	kfree(priv->ah);
@@ -181,7 +181,8 @@ static inline int ath9k_htc_connect_svc(struct ath9k_htc_priv *priv,
 	return htc_connect_service(priv->htc, &req, ep_id);
 }
 
-static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
+static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid,
+				   u32 drv_info)
 {
 	int ret;
 
@@ -245,17 +246,10 @@ static int ath9k_init_htc_services(struct ath9k_htc_priv *priv, u16 devid)
 	 * the HIF layer, shouldn't matter much.
 	 */
 
-	switch(devid) {
-	case 0x7010:
-	case 0x7015:
-	case 0x9018:
-	case 0xA704:
-	case 0x1200:
+	if (IS_AR7010_DEVICE(drv_info))
 		priv->htc->credits = 45;
-		break;
-	default:
+	else
 		priv->htc->credits = 33;
-	}
 
 	ret = htc_init(priv->htc);
 	if (ret)
@@ -294,9 +288,9 @@ static unsigned int ath9k_regread(void *hw_priv, u32 reg_offset)
 			  (u8 *) &val, sizeof(val),
 			  100);
 	if (unlikely(r)) {
-		ath_print(common, ATH_DBG_WMI,
+		ath_dbg(common, ATH_DBG_WMI,
 			"REGISTER READ FAILED: (0x%04x, %d)\n",
 			reg_offset, r);
 		return -EIO;
 	}
 
@@ -308,7 +302,7 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
 	struct ath_hw *ah = (struct ath_hw *) hw_priv;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *) common->priv;
-	__be32 buf[2] = {
+	const __be32 buf[2] = {
 		cpu_to_be32(reg_offset),
 		cpu_to_be32(val),
 	};
@@ -319,9 +313,9 @@ static void ath9k_regwrite_single(void *hw_priv, u32 val, u32 reg_offset)
 			  (u8 *) &val, sizeof(val),
 			  100);
 	if (unlikely(r)) {
-		ath_print(common, ATH_DBG_WMI,
+		ath_dbg(common, ATH_DBG_WMI,
 			"REGISTER WRITE FAILED:(0x%04x, %d)\n",
 			reg_offset, r);
 	}
 }
 
@@ -351,9 +345,9 @@ static void ath9k_regwrite_buffer(void *hw_priv, u32 val, u32 reg_offset)
 				  (u8 *) &rsp_status, sizeof(rsp_status),
 				  100);
 		if (unlikely(r)) {
-			ath_print(common, ATH_DBG_WMI,
+			ath_dbg(common, ATH_DBG_WMI,
 				"REGISTER WRITE FAILED, multi len: %d\n",
 				priv->wmi->multi_write_idx);
 		}
 		priv->wmi->multi_write_idx = 0;
 	}
@@ -401,9 +395,9 @@ static void ath9k_regwrite_flush(void *hw_priv)
 				  (u8 *) &rsp_status, sizeof(rsp_status),
 				  100);
 		if (unlikely(r)) {
-			ath_print(common, ATH_DBG_WMI,
+			ath_dbg(common, ATH_DBG_WMI,
 				"REGISTER WRITE FAILED, multi len: %d\n",
 				priv->wmi->multi_write_idx);
 		}
 		priv->wmi->multi_write_idx = 0;
 	}
@@ -475,9 +469,9 @@ static void setup_ht_cap(struct ath9k_htc_priv *priv,
 	tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, 2);
 	rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, 2);
 
-	ath_print(common, ATH_DBG_CONFIG,
+	ath_dbg(common, ATH_DBG_CONFIG,
 		"TX streams %d, RX streams: %d\n",
 		tx_streams, rx_streams);
 
 	if (tx_streams != rx_streams) {
 		ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
@@ -501,37 +495,31 @@ static int ath9k_init_queues(struct ath9k_htc_priv *priv)
 
 	priv->beaconq = ath9k_hw_beaconq_setup(priv->ah);
 	if (priv->beaconq == -1) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to setup BEACON xmit queue\n");
+		ath_err(common, "Unable to setup BEACON xmit queue\n");
 		goto err;
 	}
 
 	priv->cabq = ath9k_htc_cabq_setup(priv);
 	if (priv->cabq == -1) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to setup CAB xmit queue\n");
+		ath_err(common, "Unable to setup CAB xmit queue\n");
 		goto err;
 	}
 
 	if (!ath9k_htc_txq_setup(priv, WME_AC_BE)) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to setup xmit queue for BE traffic\n");
+		ath_err(common, "Unable to setup xmit queue for BE traffic\n");
 		goto err;
 	}
 
 	if (!ath9k_htc_txq_setup(priv, WME_AC_BK)) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to setup xmit queue for BK traffic\n");
+		ath_err(common, "Unable to setup xmit queue for BK traffic\n");
 		goto err;
 	}
 	if (!ath9k_htc_txq_setup(priv, WME_AC_VI)) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to setup xmit queue for VI traffic\n");
+		ath_err(common, "Unable to setup xmit queue for VI traffic\n");
 		goto err;
 	}
 	if (!ath9k_htc_txq_setup(priv, WME_AC_VO)) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to setup xmit queue for VO traffic\n");
+		ath_err(common, "Unable to setup xmit queue for VO traffic\n");
 		goto err;
 	}
 
@@ -549,9 +537,9 @@ static void ath9k_init_crypto(struct ath9k_htc_priv *priv)
 	/* Get the hardware key cache size. */
 	common->keymax = priv->ah->caps.keycache_size;
 	if (common->keymax > ATH_KEYMAX) {
-		ath_print(common, ATH_DBG_ANY,
+		ath_dbg(common, ATH_DBG_ANY,
 			"Warning, using only %u entries in %u key cache\n",
 			ATH_KEYMAX, common->keymax);
 		common->keymax = ATH_KEYMAX;
 	}
 
@@ -627,7 +615,8 @@ static void ath9k_init_btcoex(struct ath9k_htc_priv *priv)
 }
 
 static int ath9k_init_priv(struct ath9k_htc_priv *priv,
-			   u16 devid, char *product)
+			   u16 devid, char *product,
+			   u32 drv_info)
 {
 	struct ath_hw *ah = NULL;
 	struct ath_common *common;
@@ -641,6 +630,8 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
 
 	ah->hw_version.devid = devid;
 	ah->hw_version.subsysid = 0; /* FIXME */
+	ah->hw_version.usbdev = drv_info;
+	ah->ah_flags |= AH_USE_EEPROM;
 	priv->ah = ah;
 
 	common = ath9k_hw_common(ah);
@@ -656,13 +647,15 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
 	spin_lock_init(&priv->tx_lock);
 	mutex_init(&priv->mutex);
 	mutex_init(&priv->htc_pm_lock);
-	tasklet_init(&priv->wmi_tasklet, ath9k_wmi_tasklet,
+	tasklet_init(&priv->swba_tasklet, ath9k_swba_tasklet,
 		     (unsigned long)priv);
 	tasklet_init(&priv->rx_tasklet, ath9k_rx_tasklet,
 		     (unsigned long)priv);
-	tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet, (unsigned long)priv);
+	tasklet_init(&priv->tx_tasklet, ath9k_tx_tasklet,
+		     (unsigned long)priv);
 	INIT_DELAYED_WORK(&priv->ath9k_ani_work, ath9k_ani_work);
 	INIT_WORK(&priv->ps_work, ath9k_ps_work);
+	INIT_WORK(&priv->fatal_work, ath9k_fatal_work);
 
 	/*
 	 * Cache line size is used to size and align various
@@ -673,16 +666,15 @@ static int ath9k_init_priv(struct ath9k_htc_priv *priv,
 
 	ret = ath9k_hw_init(ah);
 	if (ret) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to initialize hardware; "
-			"initialization status: %d\n", ret);
+		ath_err(common,
+			"Unable to initialize hardware; initialization status: %d\n",
+			ret);
 		goto err_hw;
 	}
 
 	ret = ath9k_htc_init_debug(ah);
 	if (ret) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to create debugfs files\n");
+		ath_err(common, "Unable to create debugfs files\n");
 		goto err_debug;
 	}
 
@@ -762,7 +754,7 @@ static void ath9k_set_hw_capab(struct ath9k_htc_priv *priv,
 }
 
 static int ath9k_init_device(struct ath9k_htc_priv *priv,
-			     u16 devid, char *product)
+			     u16 devid, char *product, u32 drv_info)
 {
 	struct ieee80211_hw *hw = priv->hw;
 	struct ath_common *common;
@@ -771,7 +763,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv,
 	struct ath_regulatory *reg;
 
 	/* Bring up device */
-	error = ath9k_init_priv(priv, devid, product);
+	error = ath9k_init_priv(priv, devid, product, drv_info);
 	if (error != 0)
 		goto err_init;
 
@@ -829,7 +821,7 @@ err_init:
 }
 
 int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
-			   u16 devid, char *product)
+			   u16 devid, char *product, u32 drv_info)
 {
 	struct ieee80211_hw *hw;
 	struct ath9k_htc_priv *priv;
@@ -856,14 +848,11 @@ int ath9k_htc_probe_device(struct htc_target *htc_handle, struct device *dev,
 		goto err_free;
 	}
 
-	ret = ath9k_init_htc_services(priv, devid);
+	ret = ath9k_init_htc_services(priv, devid, drv_info);
 	if (ret)
 		goto err_init;
 
-	/* The device may have been unplugged earlier. */
-	priv->op_flags &= ~OP_UNPLUGGED;
-
-	ret = ath9k_init_device(priv, devid, product);
+	ret = ath9k_init_device(priv, devid, product, drv_info);
 	if (ret)
 		goto err_init;
 
@@ -882,7 +871,7 @@ void ath9k_htc_disconnect_device(struct htc_target *htc_handle, bool hotunplug)
 
 	/* Check if the device has been yanked out. */
 	if (hotunplug)
-		htc_handle->drv_priv->op_flags |= OP_UNPLUGGED;
+		htc_handle->drv_priv->ah->ah_flags |= AH_UNPLUGGED;
 
 	ath9k_deinit_device(htc_handle->drv_priv);
 	ath9k_deinit_wmi(htc_handle->drv_priv);
@@ -899,14 +888,15 @@ void ath9k_htc_suspend(struct htc_target *htc_handle)
 
 int ath9k_htc_resume(struct htc_target *htc_handle)
 {
+	struct ath9k_htc_priv *priv = htc_handle->drv_priv;
 	int ret;
 
-	ret = ath9k_htc_wait_for_target(htc_handle->drv_priv);
+	ret = ath9k_htc_wait_for_target(priv);
 	if (ret)
 		return ret;
 
-	ret = ath9k_init_htc_services(htc_handle->drv_priv,
-				      htc_handle->drv_priv->ah->hw_version.devid);
+	ret = ath9k_init_htc_services(priv, priv->ah->hw_version.devid,
+				      priv->ah->hw_version.usbdev);
 	return ret;
 }
 #endif
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
index 51977caca47f..845b4c938d16 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c
@@ -24,12 +24,12 @@ static struct dentry *ath9k_debugfs_root;
 /* Utilities */
 /*************/
 
-static void ath_update_txpow(struct ath9k_htc_priv *priv)
+void ath_update_txpow(struct ath9k_htc_priv *priv)
 {
 	struct ath_hw *ah = priv->ah;
 
 	if (priv->curtxpow != priv->txpowlimit) {
-		ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit);
+		ath9k_hw_set_txpowerlimit(ah, priv->txpowlimit, false);
 		/* read back in case value is clamped */
 		priv->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
 	}
@@ -116,6 +116,60 @@ void ath9k_ps_work(struct work_struct *work)
116 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP); 116 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
117} 117}
118 118
119void ath9k_htc_reset(struct ath9k_htc_priv *priv)
120{
121 struct ath_hw *ah = priv->ah;
122 struct ath_common *common = ath9k_hw_common(ah);
123 struct ieee80211_channel *channel = priv->hw->conf.channel;
124 struct ath9k_hw_cal_data *caldata;
125 enum htc_phymode mode;
126 __be16 htc_mode;
127 u8 cmd_rsp;
128 int ret;
129
130 mutex_lock(&priv->mutex);
131 ath9k_htc_ps_wakeup(priv);
132
133 if (priv->op_flags & OP_ASSOCIATED)
134 cancel_delayed_work_sync(&priv->ath9k_ani_work);
135
136 ieee80211_stop_queues(priv->hw);
137 htc_stop(priv->htc);
138 WMI_CMD(WMI_DISABLE_INTR_CMDID);
139 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
140 WMI_CMD(WMI_STOP_RECV_CMDID);
141
142 caldata = &priv->caldata[channel->hw_value];
143 ret = ath9k_hw_reset(ah, ah->curchan, caldata, false);
144 if (ret) {
145 ath_err(common,
146 "Unable to reset device (%u Mhz) reset status %d\n",
147 channel->center_freq, ret);
148 }
149
150 ath_update_txpow(priv);
151
152 WMI_CMD(WMI_START_RECV_CMDID);
153 ath9k_host_rx_init(priv);
154
155 mode = ath9k_htc_get_curmode(priv, ah->curchan);
156 htc_mode = cpu_to_be16(mode);
157 WMI_CMD_BUF(WMI_SET_MODE_CMDID, &htc_mode);
158
159 WMI_CMD(WMI_ENABLE_INTR_CMDID);
160 htc_start(priv->htc);
161
162 if (priv->op_flags & OP_ASSOCIATED) {
163 ath9k_htc_beacon_config(priv, priv->vif);
164 ath_start_ani(priv);
165 }
166
167 ieee80211_wake_queues(priv->hw);
168
169 ath9k_htc_ps_restore(priv);
170 mutex_unlock(&priv->mutex);
171}
172
119static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv, 173static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
120 struct ieee80211_hw *hw, 174 struct ieee80211_hw *hw,
121 struct ath9k_channel *hchan) 175 struct ath9k_channel *hchan)
@@ -123,7 +177,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 	struct ath_hw *ah = priv->ah;
 	struct ath_common *common = ath9k_hw_common(ah);
 	struct ieee80211_conf *conf = &common->hw->conf;
-	bool fastcc = true;
+	bool fastcc;
 	struct ieee80211_channel *channel = hw->conf.channel;
 	struct ath9k_hw_cal_data *caldata;
 	enum htc_phymode mode;
@@ -134,8 +188,7 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 	if (priv->op_flags & OP_INVALID)
 		return -EIO;
 
-	if (priv->op_flags & OP_FULL_RESET)
-		fastcc = false;
+	fastcc = !!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL);
 
 	ath9k_htc_ps_wakeup(priv);
 	htc_stop(priv->htc);
@@ -143,18 +196,18 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
 	WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
 	WMI_CMD(WMI_STOP_RECV_CMDID);
 
-	ath_print(common, ATH_DBG_CONFIG,
+	ath_dbg(common, ATH_DBG_CONFIG,
 		"(%u MHz) -> (%u MHz), HT: %d, HT40: %d fastcc: %d\n",
 		priv->ah->curchan->channel,
 		channel->center_freq, conf_is_ht(conf), conf_is_ht40(conf),
 		fastcc);
 
 	caldata = &priv->caldata[channel->hw_value];
 	ret = ath9k_hw_reset(ah, hchan, caldata, fastcc);
 	if (ret) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to reset channel (%u Mhz) "
-			"reset status %d\n", channel->center_freq, ret);
+		ath_err(common,
+			"Unable to reset channel (%u Mhz) reset status %d\n",
+			channel->center_freq, ret);
 		goto err;
 	}
 
@@ -177,23 +230,43 @@ static int ath9k_htc_set_channel(struct ath9k_htc_priv *priv,
177 goto err; 230 goto err;
178 231
179 htc_start(priv->htc); 232 htc_start(priv->htc);
180
181 priv->op_flags &= ~OP_FULL_RESET;
182err: 233err:
183 ath9k_htc_ps_restore(priv); 234 ath9k_htc_ps_restore(priv);
184 return ret; 235 return ret;
185} 236}
186 237
238static void __ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
239{
240 struct ath_common *common = ath9k_hw_common(priv->ah);
241 struct ath9k_htc_target_vif hvif;
242 int ret = 0;
243 u8 cmd_rsp;
244
245 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
246 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
247 hvif.index = 0; /* Should do for now */
248 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
249 priv->nvifs--;
250}
251
187static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv) 252static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
188{ 253{
189 struct ath_common *common = ath9k_hw_common(priv->ah); 254 struct ath_common *common = ath9k_hw_common(priv->ah);
190 struct ath9k_htc_target_vif hvif; 255 struct ath9k_htc_target_vif hvif;
256 struct ath9k_htc_target_sta tsta;
191 int ret = 0; 257 int ret = 0;
192 u8 cmd_rsp; 258 u8 cmd_rsp;
193 259
194 if (priv->nvifs > 0) 260 if (priv->nvifs > 0)
195 return -ENOBUFS; 261 return -ENOBUFS;
196 262
263 if (priv->nstations >= ATH9K_HTC_MAX_STA)
264 return -ENOBUFS;
265
266 /*
267 * Add an interface.
268 */
269
197 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 270 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif));
198 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN); 271 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
199 272
@@ -206,23 +279,57 @@ static int ath9k_htc_add_monitor_interface(struct ath9k_htc_priv *priv)
206 return ret; 279 return ret;
207 280
208 priv->nvifs++; 281 priv->nvifs++;
282
283 /*
284 * Associate a station with the interface for packet injection.
285 */
286
287 memset(&tsta, 0, sizeof(struct ath9k_htc_target_sta));
288
289 memcpy(&tsta.macaddr, common->macaddr, ETH_ALEN);
290
291 tsta.is_vif_sta = 1;
292 tsta.sta_index = priv->nstations;
293 tsta.vif_index = hvif.index;
294 tsta.maxampdu = 0xffff;
295
296 WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
297 if (ret) {
298 ath_err(common, "Unable to add station entry for monitor mode\n");
299 goto err_vif;
300 }
301
302 priv->nstations++;
303
209 return 0; 304 return 0;
305
306err_vif:
307 /*
308 * Remove the interface from the target.
309 */
310 __ath9k_htc_remove_monitor_interface(priv);
311 return ret;
210} 312}
211 313
212static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv) 314static int ath9k_htc_remove_monitor_interface(struct ath9k_htc_priv *priv)
213{ 315{
214 struct ath_common *common = ath9k_hw_common(priv->ah); 316 struct ath_common *common = ath9k_hw_common(priv->ah);
215 struct ath9k_htc_target_vif hvif;
216 int ret = 0; 317 int ret = 0;
217 u8 cmd_rsp; 318 u8 cmd_rsp, sta_idx;
218 319
219 memset(&hvif, 0, sizeof(struct ath9k_htc_target_vif)); 320 __ath9k_htc_remove_monitor_interface(priv);
220 memcpy(&hvif.myaddr, common->macaddr, ETH_ALEN);
221 hvif.index = 0; /* Should do for now */
222 WMI_CMD_BUF(WMI_VAP_REMOVE_CMDID, &hvif);
223 priv->nvifs--;
224 321
225 return ret; 322 sta_idx = 0; /* Only single interface, for now */
323
324 WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
325 if (ret) {
326 ath_err(common, "Unable to remove station entry for monitor mode\n");
327 return ret;
328 }
329
330 priv->nstations--;
331
332 return 0;
226} 333}
227 334
228static int ath9k_htc_add_station(struct ath9k_htc_priv *priv, 335static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
@@ -263,15 +370,16 @@ static int ath9k_htc_add_station(struct ath9k_htc_priv *priv,
 	WMI_CMD_BUF(WMI_NODE_CREATE_CMDID, &tsta);
 	if (ret) {
 		if (sta)
-			ath_print(common, ATH_DBG_FATAL,
-				"Unable to add station entry for: %pM\n", sta->addr);
+			ath_err(common,
+				"Unable to add station entry for: %pM\n",
+				sta->addr);
 		return ret;
 	}
 
 	if (sta)
-		ath_print(common, ATH_DBG_CONFIG,
+		ath_dbg(common, ATH_DBG_CONFIG,
 			"Added a station entry for: %pM (idx: %d)\n",
 			sta->addr, tsta.sta_index);
 
 	priv->nstations++;
 	return 0;
@@ -296,16 +404,16 @@ static int ath9k_htc_remove_station(struct ath9k_htc_priv *priv,
 	WMI_CMD_BUF(WMI_NODE_REMOVE_CMDID, &sta_idx);
 	if (ret) {
 		if (sta)
-			ath_print(common, ATH_DBG_FATAL,
+			ath_err(common,
 				"Unable to remove station entry for: %pM\n",
 				sta->addr);
 		return ret;
 	}
 
 	if (sta)
-		ath_print(common, ATH_DBG_CONFIG,
+		ath_dbg(common, ATH_DBG_CONFIG,
 			"Removed a station entry for: %pM (idx: %d)\n",
 			sta->addr, sta_idx);
 
 	priv->nstations--;
 	return 0;
@@ -390,8 +498,8 @@ static int ath9k_htc_send_rate_cmd(struct ath9k_htc_priv *priv,
 
 	WMI_CMD_BUF(WMI_RC_RATE_UPDATE_CMDID, trate);
 	if (ret) {
-		ath_print(common, ATH_DBG_FATAL,
+		ath_err(common,
 			"Unable to initialize Rate information on target\n");
 	}
 
 	return ret;
@@ -408,9 +516,9 @@ static void ath9k_htc_init_rate(struct ath9k_htc_priv *priv,
 	ath9k_htc_setup_rate(priv, sta, &trate);
 	ret = ath9k_htc_send_rate_cmd(priv, &trate);
 	if (!ret)
-		ath_print(common, ATH_DBG_CONFIG,
+		ath_dbg(common, ATH_DBG_CONFIG,
 			"Updated target sta: %pM, rate caps: 0x%X\n",
 			sta->addr, be32_to_cpu(trate.capflags));
 }
 
 static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
@@ -435,9 +543,9 @@ static void ath9k_htc_update_rate(struct ath9k_htc_priv *priv,
 
 	ret = ath9k_htc_send_rate_cmd(priv, &trate);
 	if (!ret)
-		ath_print(common, ATH_DBG_CONFIG,
+		ath_dbg(common, ATH_DBG_CONFIG,
 			"Updated target sta: %pM, rate caps: 0x%X\n",
 			bss_conf->bssid, be32_to_cpu(trate.capflags));
 }
 
 static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
@@ -464,14 +572,14 @@ static int ath9k_htc_tx_aggr_oper(struct ath9k_htc_priv *priv,
 
 	WMI_CMD_BUF(WMI_TX_AGGR_ENABLE_CMDID, &aggr);
 	if (ret)
-		ath_print(common, ATH_DBG_CONFIG,
+		ath_dbg(common, ATH_DBG_CONFIG,
 			"Unable to %s TX aggregation for (%pM, %d)\n",
 			(aggr.aggr_enable) ? "start" : "stop", sta->addr, tid);
 	else
-		ath_print(common, ATH_DBG_CONFIG,
+		ath_dbg(common, ATH_DBG_CONFIG,
 			"%s TX aggregation for (%pM, %d)\n",
 			(aggr.aggr_enable) ? "Starting" : "Stopping",
 			sta->addr, tid);
 
 	spin_lock_bh(&priv->tx_lock);
 	ista->tid_state[tid] = (aggr.aggr_enable && !ret) ? AGGR_START : AGGR_STOP;
@@ -689,7 +797,7 @@ void ath9k_htc_debug_remove_root(void)
 /* ANI */
 /*******/
 
-static void ath_start_ani(struct ath9k_htc_priv *priv)
+void ath_start_ani(struct ath9k_htc_priv *priv)
 {
 	struct ath_common *common = ath9k_hw_common(priv->ah);
 	unsigned long timestamp = jiffies_to_msecs(jiffies);
@@ -724,7 +832,7 @@ void ath9k_ani_work(struct work_struct *work)
 	/* Long calibration runs independently of short calibration. */
 	if ((timestamp - common->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
 		longcal = true;
-		ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
+		ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
 		common->ani.longcal_timer = timestamp;
 	}
 
@@ -733,8 +841,8 @@ void ath9k_ani_work(struct work_struct *work)
 	if ((timestamp - common->ani.shortcal_timer) >=
 	    short_cal_interval) {
 		shortcal = true;
-		ath_print(common, ATH_DBG_ANI,
+		ath_dbg(common, ATH_DBG_ANI,
 			"shortcal @%lu\n", jiffies);
 		common->ani.shortcal_timer = timestamp;
 		common->ani.resetcal_timer = timestamp;
 	}
@@ -788,317 +896,6 @@ set_timer:
788 msecs_to_jiffies(cal_interval)); 896 msecs_to_jiffies(cal_interval));
789} 897}
790 898
791/*******/
792/* LED */
793/*******/
794
795static void ath9k_led_blink_work(struct work_struct *work)
796{
797 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
798 ath9k_led_blink_work.work);
799
800 if (!(priv->op_flags & OP_LED_ASSOCIATED))
801 return;
802
803 if ((priv->led_on_duration == ATH_LED_ON_DURATION_IDLE) ||
804 (priv->led_off_duration == ATH_LED_OFF_DURATION_IDLE))
805 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
806 else
807 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
808 (priv->op_flags & OP_LED_ON) ? 1 : 0);
809
810 ieee80211_queue_delayed_work(priv->hw,
811 &priv->ath9k_led_blink_work,
812 (priv->op_flags & OP_LED_ON) ?
813 msecs_to_jiffies(priv->led_off_duration) :
814 msecs_to_jiffies(priv->led_on_duration));
815
816 priv->led_on_duration = priv->led_on_cnt ?
817 max((ATH_LED_ON_DURATION_IDLE - priv->led_on_cnt), 25) :
818 ATH_LED_ON_DURATION_IDLE;
819 priv->led_off_duration = priv->led_off_cnt ?
820 max((ATH_LED_OFF_DURATION_IDLE - priv->led_off_cnt), 10) :
821 ATH_LED_OFF_DURATION_IDLE;
822 priv->led_on_cnt = priv->led_off_cnt = 0;
823
824 if (priv->op_flags & OP_LED_ON)
825 priv->op_flags &= ~OP_LED_ON;
826 else
827 priv->op_flags |= OP_LED_ON;
828}
829
830static void ath9k_led_brightness_work(struct work_struct *work)
831{
832 struct ath_led *led = container_of(work, struct ath_led,
833 brightness_work.work);
834 struct ath9k_htc_priv *priv = led->priv;
835
836 switch (led->brightness) {
837 case LED_OFF:
838 if (led->led_type == ATH_LED_ASSOC ||
839 led->led_type == ATH_LED_RADIO) {
840 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin,
841 (led->led_type == ATH_LED_RADIO));
842 priv->op_flags &= ~OP_LED_ASSOCIATED;
843 if (led->led_type == ATH_LED_RADIO)
844 priv->op_flags &= ~OP_LED_ON;
845 } else {
846 priv->led_off_cnt++;
847 }
848 break;
849 case LED_FULL:
850 if (led->led_type == ATH_LED_ASSOC) {
851 priv->op_flags |= OP_LED_ASSOCIATED;
852 ieee80211_queue_delayed_work(priv->hw,
853 &priv->ath9k_led_blink_work, 0);
854 } else if (led->led_type == ATH_LED_RADIO) {
855 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 0);
856 priv->op_flags |= OP_LED_ON;
857 } else {
858 priv->led_on_cnt++;
859 }
860 break;
861 default:
862 break;
863 }
864}
865
866static void ath9k_led_brightness(struct led_classdev *led_cdev,
867 enum led_brightness brightness)
868{
869 struct ath_led *led = container_of(led_cdev, struct ath_led, led_cdev);
870 struct ath9k_htc_priv *priv = led->priv;
871
872 led->brightness = brightness;
873 if (!(priv->op_flags & OP_LED_DEINIT))
874 ieee80211_queue_delayed_work(priv->hw,
875 &led->brightness_work, 0);
876}
877
878static void ath9k_led_stop_brightness(struct ath9k_htc_priv *priv)
879{
880 cancel_delayed_work_sync(&priv->radio_led.brightness_work);
881 cancel_delayed_work_sync(&priv->assoc_led.brightness_work);
882 cancel_delayed_work_sync(&priv->tx_led.brightness_work);
883 cancel_delayed_work_sync(&priv->rx_led.brightness_work);
884}
885
886static int ath9k_register_led(struct ath9k_htc_priv *priv, struct ath_led *led,
887 char *trigger)
888{
889 int ret;
890
891 led->priv = priv;
892 led->led_cdev.name = led->name;
893 led->led_cdev.default_trigger = trigger;
894 led->led_cdev.brightness_set = ath9k_led_brightness;
895
896 ret = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_cdev);
897 if (ret)
898 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL,
899 "Failed to register led:%s", led->name);
900 else
901 led->registered = 1;
902
903 INIT_DELAYED_WORK(&led->brightness_work, ath9k_led_brightness_work);
904
905 return ret;
906}
907
908static void ath9k_unregister_led(struct ath_led *led)
909{
910 if (led->registered) {
911 led_classdev_unregister(&led->led_cdev);
912 led->registered = 0;
913 }
914}
915
916void ath9k_deinit_leds(struct ath9k_htc_priv *priv)
917{
918 priv->op_flags |= OP_LED_DEINIT;
919 ath9k_unregister_led(&priv->assoc_led);
920 priv->op_flags &= ~OP_LED_ASSOCIATED;
921 ath9k_unregister_led(&priv->tx_led);
922 ath9k_unregister_led(&priv->rx_led);
923 ath9k_unregister_led(&priv->radio_led);
924}
925
926void ath9k_init_leds(struct ath9k_htc_priv *priv)
927{
928 char *trigger;
929 int ret;
930
931 if (AR_SREV_9287(priv->ah))
932 priv->ah->led_pin = ATH_LED_PIN_9287;
933 else if (AR_SREV_9271(priv->ah))
934 priv->ah->led_pin = ATH_LED_PIN_9271;
935 else if (AR_DEVID_7010(priv->ah))
936 priv->ah->led_pin = ATH_LED_PIN_7010;
937 else
938 priv->ah->led_pin = ATH_LED_PIN_DEF;
939
940 /* Configure gpio 1 for output */
941 ath9k_hw_cfg_output(priv->ah, priv->ah->led_pin,
942 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
943 /* LED off, active low */
944 ath9k_hw_set_gpio(priv->ah, priv->ah->led_pin, 1);
945
946 INIT_DELAYED_WORK(&priv->ath9k_led_blink_work, ath9k_led_blink_work);
947
948 trigger = ieee80211_get_radio_led_name(priv->hw);
949 snprintf(priv->radio_led.name, sizeof(priv->radio_led.name),
950 "ath9k-%s::radio", wiphy_name(priv->hw->wiphy));
951 ret = ath9k_register_led(priv, &priv->radio_led, trigger);
952 priv->radio_led.led_type = ATH_LED_RADIO;
953 if (ret)
954 goto fail;
955
956 trigger = ieee80211_get_assoc_led_name(priv->hw);
957 snprintf(priv->assoc_led.name, sizeof(priv->assoc_led.name),
958 "ath9k-%s::assoc", wiphy_name(priv->hw->wiphy));
959 ret = ath9k_register_led(priv, &priv->assoc_led, trigger);
960 priv->assoc_led.led_type = ATH_LED_ASSOC;
961 if (ret)
962 goto fail;
963
964 trigger = ieee80211_get_tx_led_name(priv->hw);
965 snprintf(priv->tx_led.name, sizeof(priv->tx_led.name),
966 "ath9k-%s::tx", wiphy_name(priv->hw->wiphy));
967 ret = ath9k_register_led(priv, &priv->tx_led, trigger);
968 priv->tx_led.led_type = ATH_LED_TX;
969 if (ret)
970 goto fail;
971
972 trigger = ieee80211_get_rx_led_name(priv->hw);
973 snprintf(priv->rx_led.name, sizeof(priv->rx_led.name),
974 "ath9k-%s::rx", wiphy_name(priv->hw->wiphy));
975 ret = ath9k_register_led(priv, &priv->rx_led, trigger);
976 priv->rx_led.led_type = ATH_LED_RX;
977 if (ret)
978 goto fail;
979
980 priv->op_flags &= ~OP_LED_DEINIT;
981
982 return;
983
984fail:
985 cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
986 ath9k_deinit_leds(priv);
987}
988
989/*******************/
990/* Rfkill */
991/*******************/
992
993static bool ath_is_rfkill_set(struct ath9k_htc_priv *priv)
994{
995 return ath9k_hw_gpio_get(priv->ah, priv->ah->rfkill_gpio) ==
996 priv->ah->rfkill_polarity;
997}
998
999static void ath9k_htc_rfkill_poll_state(struct ieee80211_hw *hw)
1000{
1001 struct ath9k_htc_priv *priv = hw->priv;
1002 bool blocked = !!ath_is_rfkill_set(priv);
1003
1004 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1005}
1006
1007void ath9k_start_rfkill_poll(struct ath9k_htc_priv *priv)
1008{
1009 if (priv->ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1010 wiphy_rfkill_start_polling(priv->hw->wiphy);
1011}
1012
1013static void ath9k_htc_radio_enable(struct ieee80211_hw *hw)
1014{
1015 struct ath9k_htc_priv *priv = hw->priv;
1016 struct ath_hw *ah = priv->ah;
1017 struct ath_common *common = ath9k_hw_common(ah);
1018 int ret;
1019 u8 cmd_rsp;
1020
1021 if (!ah->curchan)
1022 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
1023
1024 /* Reset the HW */
1025 ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
1026 if (ret) {
1027 ath_print(common, ATH_DBG_FATAL,
1028 "Unable to reset hardware; reset status %d "
1029 "(freq %u MHz)\n", ret, ah->curchan->channel);
1030 }
1031
1032 ath_update_txpow(priv);
1033
1034 /* Start RX */
1035 WMI_CMD(WMI_START_RECV_CMDID);
1036 ath9k_host_rx_init(priv);
1037
1038 /* Start TX */
1039 htc_start(priv->htc);
1040 spin_lock_bh(&priv->tx_lock);
1041 priv->tx_queues_stop = false;
1042 spin_unlock_bh(&priv->tx_lock);
1043 ieee80211_wake_queues(hw);
1044
1045 WMI_CMD(WMI_ENABLE_INTR_CMDID);
1046
1047 /* Enable LED */
1048 ath9k_hw_cfg_output(ah, ah->led_pin,
1049 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
1050 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
1051}
1052
1053static void ath9k_htc_radio_disable(struct ieee80211_hw *hw)
1054{
1055 struct ath9k_htc_priv *priv = hw->priv;
1056 struct ath_hw *ah = priv->ah;
1057 struct ath_common *common = ath9k_hw_common(ah);
1058 int ret;
1059 u8 cmd_rsp;
1060
1061 ath9k_htc_ps_wakeup(priv);
1062
1063 /* Disable LED */
1064 ath9k_hw_set_gpio(ah, ah->led_pin, 1);
1065 ath9k_hw_cfg_gpio_input(ah, ah->led_pin);
1066
1067 WMI_CMD(WMI_DISABLE_INTR_CMDID);
1068
1069 /* Stop TX */
1070 ieee80211_stop_queues(hw);
1071 htc_stop(priv->htc);
1072 WMI_CMD(WMI_DRAIN_TXQ_ALL_CMDID);
1073 skb_queue_purge(&priv->tx_queue);
1074
1075 /* Stop RX */
1076 WMI_CMD(WMI_STOP_RECV_CMDID);
1077
1078 /*
1079 * The MIB counters have to be disabled here,
1080 * since the target doesn't do it.
1081 */
1082 ath9k_hw_disable_mib_counters(ah);
1083
1084 if (!ah->curchan)
1085 ah->curchan = ath9k_cmn_get_curchannel(hw, ah);
1086
1087 /* Reset the HW */
1088 ret = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
1089 if (ret) {
1090 ath_print(common, ATH_DBG_FATAL,
1091 "Unable to reset hardware; reset status %d "
1092 "(freq %u MHz)\n", ret, ah->curchan->channel);
1093 }
1094
1095 /* Disable the PHY */
1096 ath9k_hw_phy_disable(ah);
1097
1098 ath9k_htc_ps_restore(priv);
1099 ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
1100}
1101
1102/**********************/ 899/**********************/
1103/* mac80211 Callbacks */ 900/* mac80211 Callbacks */
1104/**********************/ 901/**********************/
@@ -1124,15 +921,15 @@ static int ath9k_htc_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
 	ret = ath9k_htc_tx_start(priv, skb);
 	if (ret != 0) {
 		if (ret == -ENOMEM) {
-			ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+			ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
 				"Stopping TX queues\n");
 			ieee80211_stop_queues(hw);
 			spin_lock_bh(&priv->tx_lock);
 			priv->tx_queues_stop = true;
 			spin_unlock_bh(&priv->tx_lock);
 		} else {
-			ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
-				"Tx failed");
+			ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
+				"Tx failed\n");
 		}
 		goto fail_tx;
 	}
@@ -1158,9 +955,9 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
 
 	mutex_lock(&priv->mutex);
 
-	ath_print(common, ATH_DBG_CONFIG,
+	ath_dbg(common, ATH_DBG_CONFIG,
 		"Starting driver with initial channel: %d MHz\n",
 		curchan->center_freq);
 
 	/* Ensure that HW is awake before flushing RX */
 	ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
@@ -1169,15 +966,12 @@ static int ath9k_htc_start(struct ieee80211_hw *hw)
 	/* setup initial channel */
 	init_channel = ath9k_cmn_get_curchannel(hw, ah);
 
-	/* Reset SERDES registers */
-	ath9k_hw_configpcipowersave(ah, 0, 0);
-
 	ath9k_hw_htc_resetinit(ah);
 	ret = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
 	if (ret) {
-		ath_print(common, ATH_DBG_FATAL,
-			"Unable to reset hardware; reset status %d "
-			"(freq %u MHz)\n", ret, curchan->center_freq);
+		ath_err(common,
+			"Unable to reset hardware; reset status %d (freq %u MHz)\n",
+			ret, curchan->center_freq);
 		mutex_unlock(&priv->mutex);
 		return ret;
 	}
@@ -1220,19 +1014,20 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
 	int ret = 0;
 	u8 cmd_rsp;
 
+	/* Cancel all the running timers/work .. */
+	cancel_work_sync(&priv->fatal_work);
+	cancel_work_sync(&priv->ps_work);
+	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
+	ath9k_led_stop_brightness(priv);
+
 	mutex_lock(&priv->mutex);
 
 	if (priv->op_flags & OP_INVALID) {
-		ath_print(common, ATH_DBG_ANY, "Device not present\n");
+		ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
 		mutex_unlock(&priv->mutex);
 		return;
 	}
 
-	/* Cancel all the running timers/work .. */
-	cancel_work_sync(&priv->ps_work);
-	cancel_delayed_work_sync(&priv->ath9k_led_blink_work);
-	ath9k_led_stop_brightness(priv);
-
 	ath9k_htc_ps_wakeup(priv);
 	htc_stop(priv->htc);
 	WMI_CMD(WMI_DISABLE_INTR_CMDID);
@@ -1243,11 +1038,10 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
 	/* Remove monitor interface here */
 	if (ah->opmode == NL80211_IFTYPE_MONITOR) {
 		if (ath9k_htc_remove_monitor_interface(priv))
-			ath_print(common, ATH_DBG_FATAL,
-				"Unable to remove monitor interface\n");
+			ath_err(common, "Unable to remove monitor interface\n");
 		else
-			ath_print(common, ATH_DBG_CONFIG,
+			ath_dbg(common, ATH_DBG_CONFIG,
 				"Monitor interface removed\n");
 	}
 
 	if (ah->btcoex_hw.enabled) {
@@ -1258,13 +1052,12 @@ static void ath9k_htc_stop(struct ieee80211_hw *hw)
 
 	ath9k_hw_phy_disable(ah);
 	ath9k_hw_disable(ah);
-	ath9k_hw_configpcipowersave(ah, 1, 1);
 	ath9k_htc_ps_restore(priv);
 	ath9k_htc_setpower(priv, ATH9K_PM_FULL_SLEEP);
 
 	priv->op_flags |= OP_INVALID;
 
-	ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
+	ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
 	mutex_unlock(&priv->mutex);
 }
 
@@ -1298,14 +1091,14 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
 		hvif.opmode = cpu_to_be32(HTC_M_IBSS);
 		break;
 	default:
-		ath_print(common, ATH_DBG_FATAL,
+		ath_err(common,
 			"Interface type %d not yet supported\n", vif->type);
 		ret = -EOPNOTSUPP;
 		goto out;
 	}
 
-	ath_print(common, ATH_DBG_CONFIG,
+	ath_dbg(common, ATH_DBG_CONFIG,
 		"Attach a VIF of type: %d\n", vif->type);
 
 	priv->ah->opmode = vif->type;
 
@@ -1328,8 +1121,8 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw,
 
 	ret = ath9k_htc_update_cap_target(priv);
 	if (ret)
-		ath_print(common, ATH_DBG_CONFIG, "Failed to update"
-			" capability in target \n");
+		ath_dbg(common, ATH_DBG_CONFIG,
+			"Failed to update capability in target\n");
 
 	priv->vif = vif;
 out:
@@ -1349,7 +1142,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw,
 	int ret = 0;
 	u8 cmd_rsp;
 
-	ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
+	ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
 
 	mutex_lock(&priv->mutex);
 	ath9k_htc_ps_wakeup(priv);
@@ -1386,8 +1179,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
 	mutex_unlock(&priv->htc_pm_lock);
 
 	if (enable_radio) {
-		ath_print(common, ATH_DBG_CONFIG,
+		ath_dbg(common, ATH_DBG_CONFIG,
 			"not-idle: enabling radio\n");
 		ath9k_htc_setpower(priv, ATH9K_PM_AWAKE);
 		ath9k_htc_radio_enable(hw);
 	}
@@ -1397,19 +1190,21 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1397 struct ieee80211_channel *curchan = hw->conf.channel; 1190 struct ieee80211_channel *curchan = hw->conf.channel;
1398 int pos = curchan->hw_value; 1191 int pos = curchan->hw_value;
1399 1192
1400 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", 1193 ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
1401 curchan->center_freq); 1194 curchan->center_freq);
1402 1195
1403 ath9k_cmn_update_ichannel(hw, &priv->ah->channels[pos]); 1196 ath9k_cmn_update_ichannel(&priv->ah->channels[pos],
1197 hw->conf.channel,
1198 hw->conf.channel_type);
1404 1199
1405 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) { 1200 if (ath9k_htc_set_channel(priv, hw, &priv->ah->channels[pos]) < 0) {
1406 ath_print(common, ATH_DBG_FATAL, 1201 ath_err(common, "Unable to set channel\n");
1407 "Unable to set channel\n");
1408 mutex_unlock(&priv->mutex); 1202 mutex_unlock(&priv->mutex);
1409 return -EINVAL; 1203 return -EINVAL;
1410 } 1204 }
1411 1205
1412 } 1206 }
1207
1413 if (changed & IEEE80211_CONF_CHANGE_PS) { 1208 if (changed & IEEE80211_CONF_CHANGE_PS) {
1414 if (conf->flags & IEEE80211_CONF_PS) { 1209 if (conf->flags & IEEE80211_CONF_PS) {
1415 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP); 1210 ath9k_htc_setpower(priv, ATH9K_PM_NETWORK_SLEEP);
@@ -1421,14 +1216,18 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1421 } 1216 }
1422 } 1217 }
1423 1218
1219 if (changed & IEEE80211_CONF_CHANGE_POWER) {
1220 priv->txpowlimit = 2 * conf->power_level;
1221 ath_update_txpow(priv);
1222 }
1223
1424 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1224 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1425 if (conf->flags & IEEE80211_CONF_MONITOR) { 1225 if (conf->flags & IEEE80211_CONF_MONITOR) {
1426 if (ath9k_htc_add_monitor_interface(priv)) 1226 if (ath9k_htc_add_monitor_interface(priv))
1427 ath_print(common, ATH_DBG_FATAL, 1227 ath_err(common, "Failed to set monitor mode\n");
1428 "Failed to set monitor mode\n");
1429 else 1228 else
1430 ath_print(common, ATH_DBG_CONFIG, 1229 ath_dbg(common, ATH_DBG_CONFIG,
1431 "HW opmode set to Monitor mode\n"); 1230 "HW opmode set to Monitor mode\n");
1432 } 1231 }
1433 } 1232 }
1434 1233
@@ -1440,8 +1239,8 @@ static int ath9k_htc_config(struct ieee80211_hw *hw, u32 changed)
1440 } 1239 }
1441 mutex_unlock(&priv->htc_pm_lock); 1240 mutex_unlock(&priv->htc_pm_lock);
1442 1241
1443 ath_print(common, ATH_DBG_CONFIG, 1242 ath_dbg(common, ATH_DBG_CONFIG,
1444 "idle: disabling radio\n"); 1243 "idle: disabling radio\n");
1445 ath9k_htc_radio_disable(hw); 1244 ath9k_htc_radio_disable(hw);
1446 } 1245 }
1447 1246
@@ -1478,8 +1277,8 @@ static void ath9k_htc_configure_filter(struct ieee80211_hw *hw,
1478 rfilt = ath9k_htc_calcrxfilter(priv); 1277 rfilt = ath9k_htc_calcrxfilter(priv);
1479 ath9k_hw_setrxfilter(priv->ah, rfilt); 1278 ath9k_hw_setrxfilter(priv->ah, rfilt);
1480 1279
1481 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG, 1280 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_CONFIG,
1482 "Set HW RX filter: 0x%x\n", rfilt); 1281 "Set HW RX filter: 0x%x\n", rfilt);
1483 1282
1484 ath9k_htc_ps_restore(priv); 1283 ath9k_htc_ps_restore(priv);
1485 mutex_unlock(&priv->mutex); 1284 mutex_unlock(&priv->mutex);
@@ -1542,15 +1341,14 @@ static int ath9k_htc_conf_tx(struct ieee80211_hw *hw, u16 queue,
1542 1341
1543 qnum = get_hw_qnum(queue, priv->hwq_map); 1342 qnum = get_hw_qnum(queue, priv->hwq_map);
1544 1343
1545 ath_print(common, ATH_DBG_CONFIG, 1344 ath_dbg(common, ATH_DBG_CONFIG,
1546 "Configure tx [queue/hwq] [%d/%d], " 1345 "Configure tx [queue/hwq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
1547 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", 1346 queue, qnum, params->aifs, params->cw_min,
1548 queue, qnum, params->aifs, params->cw_min, 1347 params->cw_max, params->txop);
1549 params->cw_max, params->txop);
1550 1348
1551 ret = ath_htc_txq_update(priv, qnum, &qi); 1349 ret = ath_htc_txq_update(priv, qnum, &qi);
1552 if (ret) { 1350 if (ret) {
1553 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n"); 1351 ath_err(common, "TXQ Update failed\n");
1554 goto out; 1352 goto out;
1555 } 1353 }
1556 1354
@@ -1578,7 +1376,7 @@ static int ath9k_htc_set_key(struct ieee80211_hw *hw,
1578 return -ENOSPC; 1376 return -ENOSPC;
1579 1377
1580 mutex_lock(&priv->mutex); 1378 mutex_lock(&priv->mutex);
1581 ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n"); 1379 ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
1582 ath9k_htc_ps_wakeup(priv); 1380 ath9k_htc_ps_wakeup(priv);
1583 1381
1584 switch (cmd) { 1382 switch (cmd) {
@@ -1624,7 +1422,7 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1624 if (changed & BSS_CHANGED_ASSOC) { 1422 if (changed & BSS_CHANGED_ASSOC) {
1625 common->curaid = bss_conf->assoc ? 1423 common->curaid = bss_conf->assoc ?
1626 bss_conf->aid : 0; 1424 bss_conf->aid : 0;
1627 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 1425 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
1628 bss_conf->assoc); 1426 bss_conf->assoc);
1629 1427
1630 if (bss_conf->assoc) { 1428 if (bss_conf->assoc) {
@@ -1641,9 +1439,9 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1641 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN); 1439 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
1642 ath9k_hw_write_associd(ah); 1440 ath9k_hw_write_associd(ah);
1643 1441
1644 ath_print(common, ATH_DBG_CONFIG, 1442 ath_dbg(common, ATH_DBG_CONFIG,
1645 "BSSID: %pM aid: 0x%x\n", 1443 "BSSID: %pM aid: 0x%x\n",
1646 common->curbssid, common->curaid); 1444 common->curbssid, common->curaid);
1647 } 1445 }
1648 1446
1649 if ((changed & BSS_CHANGED_BEACON_INT) || 1447 if ((changed & BSS_CHANGED_BEACON_INT) ||
@@ -1661,8 +1459,8 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1661 } 1459 }
1662 1460
1663 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 1461 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1664 ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", 1462 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
1665 bss_conf->use_short_preamble); 1463 bss_conf->use_short_preamble);
1666 if (bss_conf->use_short_preamble) 1464 if (bss_conf->use_short_preamble)
1667 priv->op_flags |= OP_PREAMBLE_SHORT; 1465 priv->op_flags |= OP_PREAMBLE_SHORT;
1668 else 1466 else
@@ -1670,8 +1468,8 @@ static void ath9k_htc_bss_info_changed(struct ieee80211_hw *hw,
1670 } 1468 }
1671 1469
1672 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 1470 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
1673 ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", 1471 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
1674 bss_conf->use_cts_prot); 1472 bss_conf->use_cts_prot);
1675 if (bss_conf->use_cts_prot && 1473 if (bss_conf->use_cts_prot &&
1676 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 1474 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
1677 priv->op_flags |= OP_PROTECT_ENABLE; 1475 priv->op_flags |= OP_PROTECT_ENABLE;
@@ -1762,8 +1560,7 @@ static int ath9k_htc_ampdu_action(struct ieee80211_hw *hw,
1762 spin_unlock_bh(&priv->tx_lock); 1560 spin_unlock_bh(&priv->tx_lock);
1763 break; 1561 break;
1764 default: 1562 default:
1765 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_FATAL, 1563 ath_err(ath9k_hw_common(priv->ah), "Unknown AMPDU action\n");
1766 "Unknown AMPDU action\n");
1767 } 1564 }
1768 1565
1769 return ret; 1566 return ret;
@@ -1792,7 +1589,6 @@ static void ath9k_htc_sw_scan_complete(struct ieee80211_hw *hw)
1792 spin_lock_bh(&priv->beacon_lock); 1589 spin_lock_bh(&priv->beacon_lock);
1793 priv->op_flags &= ~OP_SCANNING; 1590 priv->op_flags &= ~OP_SCANNING;
1794 spin_unlock_bh(&priv->beacon_lock); 1591 spin_unlock_bh(&priv->beacon_lock);
1795 priv->op_flags |= OP_FULL_RESET;
1796 if (priv->op_flags & OP_ASSOCIATED) { 1592 if (priv->op_flags & OP_ASSOCIATED) {
1797 ath9k_htc_beacon_config(priv, priv->vif); 1593 ath9k_htc_beacon_config(priv, priv->vif);
1798 ath_start_ani(priv); 1594 ath_start_ani(priv);
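Most of the htc_drv_main.c hunks above convert ath_print(common, ATH_DBG_FATAL, ...) calls to ath_err(common, ...) and the remaining ath_print() calls to ath_dbg(common, <mask>, ...), folding the previously split format strings onto single lines in the process. The standalone C sketch below only illustrates the shape of those two wrappers as they are used here — an unconditional error path versus a debug path gated on a per-device mask. The prefixes, mask value and helper names are assumptions made for the sake of a runnable example, not the upstream definitions from the ath headers.

#include <stdio.h>
#include <stdarg.h>

#define ATH_DBG_CONFIG 0x0001	/* example mask bit, value assumed */

struct ath_common {
	unsigned int debug_mask;
	const char *dev_name;
};

static void ath_vprintk(const struct ath_common *common, const char *lvl,
			const char *fmt, va_list args)
{
	fprintf(stderr, "%s%s: ", lvl, common->dev_name);
	vfprintf(stderr, fmt, args);
}

/* Error messages are always emitted. */
static void ath_err(const struct ath_common *common, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	ath_vprintk(common, "error: ", fmt, args);
	va_end(args);
}

/* Debug messages are dropped unless the matching mask bit is enabled. */
static void ath_dbg(const struct ath_common *common, unsigned int mask,
		    const char *fmt, ...)
{
	va_list args;

	if (!(common->debug_mask & mask))
		return;

	va_start(args, fmt);
	ath_vprintk(common, "debug: ", fmt, args);
	va_end(args);
}

int main(void)
{
	struct ath_common common = { .debug_mask = ATH_DBG_CONFIG,
				     .dev_name = "ath9k_htc" };

	ath_err(&common, "Unable to set channel\n");
	ath_dbg(&common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", 2412);
	return 0;
}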
diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
index 29d80ca78393..33f36029fa4f 100644
--- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
+++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c
@@ -20,8 +20,15 @@
20/* TX */ 20/* TX */
21/******/ 21/******/
22 22
23static const int subtype_txq_to_hwq[] = {
24 [WME_AC_BE] = ATH_TXQ_AC_BE,
25 [WME_AC_BK] = ATH_TXQ_AC_BK,
26 [WME_AC_VI] = ATH_TXQ_AC_VI,
27 [WME_AC_VO] = ATH_TXQ_AC_VO,
28};
29
23#define ATH9K_HTC_INIT_TXQ(subtype) do { \ 30#define ATH9K_HTC_INIT_TXQ(subtype) do { \
24 qi.tqi_subtype = subtype; \ 31 qi.tqi_subtype = subtype_txq_to_hwq[subtype]; \
25 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \ 32 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; \
26 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \ 33 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; \
27 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \ 34 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; \
@@ -62,8 +69,8 @@ int ath_htc_txq_update(struct ath9k_htc_priv *priv, int qnum,
62 qi.tqi_readyTime = qinfo->tqi_readyTime; 69 qi.tqi_readyTime = qinfo->tqi_readyTime;
63 70
64 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 71 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
65 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 72 ath_err(ath9k_hw_common(ah),
66 "Unable to update hardware queue %u!\n", qnum); 73 "Unable to update hardware queue %u!\n", qnum);
67 error = -EIO; 74 error = -EIO;
68 } else { 75 } else {
69 ath9k_hw_resettxqueue(ah, qnum); 76 ath9k_hw_resettxqueue(ah, qnum);
@@ -244,7 +251,7 @@ void ath9k_tx_tasklet(unsigned long data)
244 ista = (struct ath9k_htc_sta *)sta->drv_priv; 251 ista = (struct ath9k_htc_sta *)sta->drv_priv;
245 252
246 if (ath9k_htc_check_tx_aggr(priv, ista, tid)) { 253 if (ath9k_htc_check_tx_aggr(priv, ista, tid)) {
247 ieee80211_start_tx_ba_session(sta, tid); 254 ieee80211_start_tx_ba_session(sta, tid, 0);
248 spin_lock_bh(&priv->tx_lock); 255 spin_lock_bh(&priv->tx_lock);
249 ista->tid_state[tid] = AGGR_PROGRESS; 256 ista->tid_state[tid] = AGGR_PROGRESS;
250 spin_unlock_bh(&priv->tx_lock); 257 spin_unlock_bh(&priv->tx_lock);
@@ -263,8 +270,8 @@ void ath9k_tx_tasklet(unsigned long data)
263 if (priv->tx_queues_stop) { 270 if (priv->tx_queues_stop) {
264 priv->tx_queues_stop = false; 271 priv->tx_queues_stop = false;
265 spin_unlock_bh(&priv->tx_lock); 272 spin_unlock_bh(&priv->tx_lock);
266 ath_print(ath9k_hw_common(priv->ah), ATH_DBG_XMIT, 273 ath_dbg(ath9k_hw_common(priv->ah), ATH_DBG_XMIT,
267 "Waking up TX queues\n"); 274 "Waking up TX queues\n");
268 ieee80211_wake_queues(priv->hw); 275 ieee80211_wake_queues(priv->hw);
269 return; 276 return;
270 } 277 }
@@ -289,8 +296,7 @@ void ath9k_htc_txep(void *drv_priv, struct sk_buff *skb,
289 (ep_id == priv->data_vo_ep)) { 296 (ep_id == priv->data_vo_ep)) {
290 skb_pull(skb, sizeof(struct tx_frame_hdr)); 297 skb_pull(skb, sizeof(struct tx_frame_hdr));
291 } else { 298 } else {
292 ath_print(common, ATH_DBG_FATAL, 299 ath_err(common, "Unsupported TX EPID: %d\n", ep_id);
293 "Unsupported TX EPID: %d\n", ep_id);
294 dev_kfree_skb_any(skb); 300 dev_kfree_skb_any(skb);
295 return; 301 return;
296 } 302 }
@@ -330,9 +336,8 @@ bool ath9k_htc_txq_setup(struct ath9k_htc_priv *priv, int subtype)
330 return false; 336 return false;
331 337
332 if (qnum >= ARRAY_SIZE(priv->hwq_map)) { 338 if (qnum >= ARRAY_SIZE(priv->hwq_map)) {
333 ath_print(common, ATH_DBG_FATAL, 339 ath_err(common, "qnum %u out of range, max %zu!\n",
334 "qnum %u out of range, max %u!\n", 340 qnum, ARRAY_SIZE(priv->hwq_map));
335 qnum, (unsigned int)ARRAY_SIZE(priv->hwq_map));
336 ath9k_hw_releasetxqueue(ah, qnum); 341 ath9k_hw_releasetxqueue(ah, qnum);
337 return false; 342 return false;
338 } 343 }
@@ -483,8 +488,7 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
483 __le16 fc; 488 __le16 fc;
484 489
485 if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) { 490 if (skb->len <= HTC_RX_FRAME_HEADER_SIZE) {
486 ath_print(common, ATH_DBG_FATAL, 491 ath_err(common, "Corrupted RX frame, dropping\n");
487 "Corrupted RX frame, dropping\n");
488 goto rx_next; 492 goto rx_next;
489 } 493 }
490 494
@@ -492,10 +496,9 @@ static bool ath9k_rx_prepare(struct ath9k_htc_priv *priv,
492 496
493 if (be16_to_cpu(rxstatus->rs_datalen) - 497 if (be16_to_cpu(rxstatus->rs_datalen) -
494 (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) { 498 (skb->len - HTC_RX_FRAME_HEADER_SIZE) != 0) {
495 ath_print(common, ATH_DBG_FATAL, 499 ath_err(common,
496 "Corrupted RX data len, dropping " 500 "Corrupted RX data len, dropping (dlen: %d, skblen: %d)\n",
497 "(dlen: %d, skblen: %d)\n", 501 rxstatus->rs_datalen, skb->len);
498 rxstatus->rs_datalen, skb->len);
499 goto rx_next; 502 goto rx_next;
500 } 503 }
501 504
@@ -678,8 +681,8 @@ void ath9k_htc_rxep(void *drv_priv, struct sk_buff *skb,
678 spin_unlock(&priv->rx.rxbuflock); 681 spin_unlock(&priv->rx.rxbuflock);
679 682
680 if (rxbuf == NULL) { 683 if (rxbuf == NULL) {
681 ath_print(common, ATH_DBG_ANY, 684 ath_dbg(common, ATH_DBG_ANY,
682 "No free RX buffer\n"); 685 "No free RX buffer\n");
683 goto err; 686 goto err;
684 } 687 }
685 688
@@ -721,8 +724,7 @@ int ath9k_rx_init(struct ath9k_htc_priv *priv)
721 for (i = 0; i < ATH9K_HTC_RXBUF; i++) { 724 for (i = 0; i < ATH9K_HTC_RXBUF; i++) {
722 rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL); 725 rxbuf = kzalloc(sizeof(struct ath9k_htc_rxbuf), GFP_KERNEL);
723 if (rxbuf == NULL) { 726 if (rxbuf == NULL) {
724 ath_print(common, ATH_DBG_FATAL, 727 ath_err(common, "Unable to allocate RX buffers\n");
725 "Unable to allocate RX buffers\n");
726 goto err; 728 goto err;
727 } 729 }
728 list_add_tail(&rxbuf->list, &priv->rx.rxbuf); 730 list_add_tail(&rxbuf->list, &priv->rx.rxbuf);
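The htc_drv_txrx.c diff above introduces a subtype_txq_to_hwq[] table and makes ATH9K_HTC_INIT_TXQ() look the hardware queue subtype up through it instead of passing the mac80211 queue index straight through. The sketch below replays that lookup with the ATH_TXQ_AC_* values added to hw.h later in this patch; the numeric order of the WME_AC_* constants is an assumption, which is harmless here because the table is filled by designated initializers, exactly as in the hunk.

#include <stdio.h>

/* WME access categories; this numeric order is an assumption. */
enum wme_ac { WME_AC_BE, WME_AC_BK, WME_AC_VI, WME_AC_VO, WME_NUM_AC };

/* Hardware queue subtypes, values as added to hw.h by this patch. */
enum ath_hw_txq_subtype {
	ATH_TXQ_AC_BE = 0,
	ATH_TXQ_AC_BK = 1,
	ATH_TXQ_AC_VI = 2,
	ATH_TXQ_AC_VO = 3,
};

/* Same designated-initializer mapping as the table added above. */
static const int subtype_txq_to_hwq[] = {
	[WME_AC_BE] = ATH_TXQ_AC_BE,
	[WME_AC_BK] = ATH_TXQ_AC_BK,
	[WME_AC_VI] = ATH_TXQ_AC_VI,
	[WME_AC_VO] = ATH_TXQ_AC_VO,
};

int main(void)
{
	int ac;

	for (ac = 0; ac < WME_NUM_AC; ac++)
		printf("AC %d -> hw queue subtype %d\n",
		       ac, subtype_txq_to_hwq[ac]);
	return 0;
}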
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.c b/drivers/net/wireless/ath/ath9k/htc_hst.c
index 861ec9269309..c41ab8c30161 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.c
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.c
@@ -462,9 +462,10 @@ void ath9k_htc_hw_free(struct htc_target *htc)
462} 462}
463 463
464int ath9k_htc_hw_init(struct htc_target *target, 464int ath9k_htc_hw_init(struct htc_target *target,
465 struct device *dev, u16 devid, char *product) 465 struct device *dev, u16 devid,
466 char *product, u32 drv_info)
466{ 467{
467 if (ath9k_htc_probe_device(target, dev, devid, product)) { 468 if (ath9k_htc_probe_device(target, dev, devid, product, drv_info)) {
468 printk(KERN_ERR "Failed to initialize the device\n"); 469 printk(KERN_ERR "Failed to initialize the device\n");
469 return -ENODEV; 470 return -ENODEV;
470 } 471 }
diff --git a/drivers/net/wireless/ath/ath9k/htc_hst.h b/drivers/net/wireless/ath/ath9k/htc_hst.h
index 07b6509d5896..ecd018798c47 100644
--- a/drivers/net/wireless/ath/ath9k/htc_hst.h
+++ b/drivers/net/wireless/ath/ath9k/htc_hst.h
@@ -77,20 +77,6 @@ struct htc_config_pipe_msg {
77 u8 credits; 77 u8 credits;
78} __packed; 78} __packed;
79 79
80struct htc_packet {
81 void *pktcontext;
82 u8 *buf;
83 u8 *buf_payload;
84 u32 buflen;
85 u32 payload_len;
86
87 int endpoint;
88 int status;
89
90 void *context;
91 u32 reserved;
92};
93
94struct htc_ep_callbacks { 80struct htc_ep_callbacks {
95 void *priv; 81 void *priv;
96 void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok); 82 void (*tx) (void *, struct sk_buff *, enum htc_endpoint_id, bool txok);
@@ -123,11 +109,6 @@ struct htc_endpoint {
123#define HTC_CONTROL_BUFFER_SIZE \ 109#define HTC_CONTROL_BUFFER_SIZE \
124 (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr)) 110 (HTC_MAX_CONTROL_MESSAGE_LENGTH + sizeof(struct htc_frame_hdr))
125 111
126struct htc_control_buf {
127 struct htc_packet htc_pkt;
128 u8 buf[HTC_CONTROL_BUFFER_SIZE];
129};
130
131#define HTC_OP_START_WAIT BIT(0) 112#define HTC_OP_START_WAIT BIT(0)
132#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1) 113#define HTC_OP_CONFIG_PIPE_CREDITS BIT(1)
133 114
@@ -239,7 +220,8 @@ struct htc_target *ath9k_htc_hw_alloc(void *hif_handle,
239 struct device *dev); 220 struct device *dev);
240void ath9k_htc_hw_free(struct htc_target *htc); 221void ath9k_htc_hw_free(struct htc_target *htc);
241int ath9k_htc_hw_init(struct htc_target *target, 222int ath9k_htc_hw_init(struct htc_target *target,
242 struct device *dev, u16 devid, char *product); 223 struct device *dev, u16 devid, char *product,
224 u32 drv_info);
243void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug); 225void ath9k_htc_hw_deinit(struct htc_target *target, bool hot_unplug);
244 226
245#endif /* HTC_HST_H */ 227#endif /* HTC_HST_H */
diff --git a/drivers/net/wireless/ath/ath9k/hw-ops.h b/drivers/net/wireless/ath/ath9k/hw-ops.h
index 0a4ad348b699..c8f254fe0f0b 100644
--- a/drivers/net/wireless/ath/ath9k/hw-ops.h
+++ b/drivers/net/wireless/ath/ath9k/hw-ops.h
@@ -223,11 +223,6 @@ static inline void ath9k_hw_rfbus_done(struct ath_hw *ah)
223 return ath9k_hw_private_ops(ah)->rfbus_done(ah); 223 return ath9k_hw_private_ops(ah)->rfbus_done(ah);
224} 224}
225 225
226static inline void ath9k_enable_rfkill(struct ath_hw *ah)
227{
228 return ath9k_hw_private_ops(ah)->enable_rfkill(ah);
229}
230
231static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah) 226static inline void ath9k_hw_restore_chainmask(struct ath_hw *ah)
232{ 227{
233 if (!ath9k_hw_private_ops(ah)->restore_chainmask) 228 if (!ath9k_hw_private_ops(ah)->restore_chainmask)
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index c7fbe25cc128..fde978665e07 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -54,13 +54,6 @@ static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
54 ath9k_hw_private_ops(ah)->init_mode_regs(ah); 54 ath9k_hw_private_ops(ah)->init_mode_regs(ah);
55} 55}
56 56
57static bool ath9k_hw_macversion_supported(struct ath_hw *ah)
58{
59 struct ath_hw_private_ops *priv_ops = ath9k_hw_private_ops(ah);
60
61 return priv_ops->macversion_supported(ah->hw_version.macVersion);
62}
63
64static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah, 57static u32 ath9k_hw_compute_pll_control(struct ath_hw *ah,
65 struct ath9k_channel *chan) 58 struct ath9k_channel *chan)
66{ 59{
@@ -129,9 +122,9 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
129 udelay(AH_TIME_QUANTUM); 122 udelay(AH_TIME_QUANTUM);
130 } 123 }
131 124
132 ath_print(ath9k_hw_common(ah), ATH_DBG_ANY, 125 ath_dbg(ath9k_hw_common(ah), ATH_DBG_ANY,
133 "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", 126 "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
134 timeout, reg, REG_READ(ah, reg), mask, val); 127 timeout, reg, REG_READ(ah, reg), mask, val);
135 128
136 return false; 129 return false;
137} 130}
@@ -211,8 +204,8 @@ u16 ath9k_hw_computetxtime(struct ath_hw *ah,
211 } 204 }
212 break; 205 break;
213 default: 206 default:
214 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 207 ath_err(ath9k_hw_common(ah),
215 "Unknown phy %u (rate ix %u)\n", phy, rateix); 208 "Unknown phy %u (rate ix %u)\n", phy, rateix);
216 txTime = 0; 209 txTime = 0;
217 break; 210 break;
218 } 211 }
@@ -284,11 +277,9 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
284 277
285static void ath9k_hw_disablepcie(struct ath_hw *ah) 278static void ath9k_hw_disablepcie(struct ath_hw *ah)
286{ 279{
287 if (AR_SREV_9100(ah)) 280 if (!AR_SREV_5416(ah))
288 return; 281 return;
289 282
290 ENABLE_REGWRITE_BUFFER(ah);
291
292 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00); 283 REG_WRITE(ah, AR_PCIE_SERDES, 0x9248fc00);
293 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924); 284 REG_WRITE(ah, AR_PCIE_SERDES, 0x24924924);
294 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029); 285 REG_WRITE(ah, AR_PCIE_SERDES, 0x28000029);
@@ -300,8 +291,6 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
300 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007); 291 REG_WRITE(ah, AR_PCIE_SERDES, 0x000e1007);
301 292
302 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000); 293 REG_WRITE(ah, AR_PCIE_SERDES2, 0x00000000);
303
304 REGWRITE_BUFFER_FLUSH(ah);
305} 294}
306 295
307/* This should work for all families including legacy */ 296/* This should work for all families including legacy */
@@ -310,10 +299,9 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
310 struct ath_common *common = ath9k_hw_common(ah); 299 struct ath_common *common = ath9k_hw_common(ah);
311 u32 regAddr[2] = { AR_STA_ID0 }; 300 u32 regAddr[2] = { AR_STA_ID0 };
312 u32 regHold[2]; 301 u32 regHold[2];
313 u32 patternData[4] = { 0x55555555, 302 static const u32 patternData[4] = {
314 0xaaaaaaaa, 303 0x55555555, 0xaaaaaaaa, 0x66666666, 0x99999999
315 0x66666666, 304 };
316 0x99999999 };
317 int i, j, loop_max; 305 int i, j, loop_max;
318 306
319 if (!AR_SREV_9300_20_OR_LATER(ah)) { 307 if (!AR_SREV_9300_20_OR_LATER(ah)) {
@@ -332,11 +320,9 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
332 REG_WRITE(ah, addr, wrData); 320 REG_WRITE(ah, addr, wrData);
333 rdData = REG_READ(ah, addr); 321 rdData = REG_READ(ah, addr);
334 if (rdData != wrData) { 322 if (rdData != wrData) {
335 ath_print(common, ATH_DBG_FATAL, 323 ath_err(common,
336 "address test failed " 324 "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
337 "addr: 0x%08x - wr:0x%08x != " 325 addr, wrData, rdData);
338 "rd:0x%08x\n",
339 addr, wrData, rdData);
340 return false; 326 return false;
341 } 327 }
342 } 328 }
@@ -345,11 +331,9 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
345 REG_WRITE(ah, addr, wrData); 331 REG_WRITE(ah, addr, wrData);
346 rdData = REG_READ(ah, addr); 332 rdData = REG_READ(ah, addr);
347 if (wrData != rdData) { 333 if (wrData != rdData) {
348 ath_print(common, ATH_DBG_FATAL, 334 ath_err(common,
349 "address test failed " 335 "address test failed addr: 0x%08x - wr:0x%08x != rd:0x%08x\n",
350 "addr: 0x%08x - wr:0x%08x != " 336 addr, wrData, rdData);
351 "rd:0x%08x\n",
352 addr, wrData, rdData);
353 return false; 337 return false;
354 } 338 }
355 } 339 }
@@ -419,17 +403,12 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
419 ah->hw_version.magic = AR5416_MAGIC; 403 ah->hw_version.magic = AR5416_MAGIC;
420 ah->hw_version.subvendorid = 0; 404 ah->hw_version.subvendorid = 0;
421 405
422 ah->ah_flags = 0;
423 if (!AR_SREV_9100(ah))
424 ah->ah_flags = AH_USE_EEPROM;
425
426 ah->atim_window = 0; 406 ah->atim_window = 0;
427 ah->sta_id1_defaults = 407 ah->sta_id1_defaults =
428 AR_STA_ID1_CRPT_MIC_ENABLE | 408 AR_STA_ID1_CRPT_MIC_ENABLE |
429 AR_STA_ID1_MCAST_KSRCH; 409 AR_STA_ID1_MCAST_KSRCH;
430 ah->beacon_interval = 100;
431 ah->enable_32kHz_clock = DONT_USE_32KHZ; 410 ah->enable_32kHz_clock = DONT_USE_32KHZ;
432 ah->slottime = (u32) -1; 411 ah->slottime = 20;
433 ah->globaltxtimeout = (u32) -1; 412 ah->globaltxtimeout = (u32) -1;
434 ah->power_mode = ATH9K_PM_UNDEFINED; 413 ah->power_mode = ATH9K_PM_UNDEFINED;
435} 414}
@@ -440,7 +419,7 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
440 u32 sum; 419 u32 sum;
441 int i; 420 int i;
442 u16 eeval; 421 u16 eeval;
443 u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW }; 422 static const u32 EEP_MAC[] = { EEP_MAC_LSW, EEP_MAC_MID, EEP_MAC_MSW };
444 423
445 sum = 0; 424 sum = 0;
446 for (i = 0; i < 3; i++) { 425 for (i = 0; i < 3; i++) {
@@ -474,16 +453,15 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
474 if (ecode != 0) 453 if (ecode != 0)
475 return ecode; 454 return ecode;
476 455
477 ath_print(ath9k_hw_common(ah), ATH_DBG_CONFIG, 456 ath_dbg(ath9k_hw_common(ah), ATH_DBG_CONFIG,
478 "Eeprom VER: %d, REV: %d\n", 457 "Eeprom VER: %d, REV: %d\n",
479 ah->eep_ops->get_eeprom_ver(ah), 458 ah->eep_ops->get_eeprom_ver(ah),
480 ah->eep_ops->get_eeprom_rev(ah)); 459 ah->eep_ops->get_eeprom_rev(ah));
481 460
482 ecode = ath9k_hw_rf_alloc_ext_banks(ah); 461 ecode = ath9k_hw_rf_alloc_ext_banks(ah);
483 if (ecode) { 462 if (ecode) {
484 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 463 ath_err(ath9k_hw_common(ah),
485 "Failed allocating banks for " 464 "Failed allocating banks for external radio\n");
486 "external radio\n");
487 ath9k_hw_rf_free_ext_banks(ah); 465 ath9k_hw_rf_free_ext_banks(ah);
488 return ecode; 466 return ecode;
489 } 467 }
@@ -514,8 +492,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
514 ah->hw_version.macVersion = AR_SREV_VERSION_9100; 492 ah->hw_version.macVersion = AR_SREV_VERSION_9100;
515 493
516 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 494 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
517 ath_print(common, ATH_DBG_FATAL, 495 ath_err(common, "Couldn't reset chip\n");
518 "Couldn't reset chip\n");
519 return -EIO; 496 return -EIO;
520 } 497 }
521 498
@@ -525,7 +502,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
525 ath9k_hw_attach_ops(ah); 502 ath9k_hw_attach_ops(ah);
526 503
527 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 504 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
528 ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n"); 505 ath_err(common, "Couldn't wakeup chip\n");
529 return -EIO; 506 return -EIO;
530 } 507 }
531 508
@@ -541,7 +518,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
541 } 518 }
542 } 519 }
543 520
544 ath_print(common, ATH_DBG_RESET, "serialize_regmode is %d\n", 521 ath_dbg(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
545 ah->config.serialize_regmode); 522 ah->config.serialize_regmode);
546 523
547 if (AR_SREV_9285(ah) || AR_SREV_9271(ah)) 524 if (AR_SREV_9285(ah) || AR_SREV_9271(ah))
@@ -549,11 +526,22 @@ static int __ath9k_hw_init(struct ath_hw *ah)
549 else 526 else
550 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD; 527 ah->config.max_txtrig_level = MAX_TX_FIFO_THRESHOLD;
551 528
552 if (!ath9k_hw_macversion_supported(ah)) { 529 switch (ah->hw_version.macVersion) {
553 ath_print(common, ATH_DBG_FATAL, 530 case AR_SREV_VERSION_5416_PCI:
554 "Mac Chip Rev 0x%02x.%x is not supported by " 531 case AR_SREV_VERSION_5416_PCIE:
555 "this driver\n", ah->hw_version.macVersion, 532 case AR_SREV_VERSION_9160:
556 ah->hw_version.macRev); 533 case AR_SREV_VERSION_9100:
534 case AR_SREV_VERSION_9280:
535 case AR_SREV_VERSION_9285:
536 case AR_SREV_VERSION_9287:
537 case AR_SREV_VERSION_9271:
538 case AR_SREV_VERSION_9300:
539 case AR_SREV_VERSION_9485:
540 break;
541 default:
542 ath_err(common,
543 "Mac Chip Rev 0x%02x.%x is not supported by this driver\n",
544 ah->hw_version.macVersion, ah->hw_version.macRev);
557 return -EOPNOTSUPP; 545 return -EOPNOTSUPP;
558 } 546 }
559 547
@@ -599,8 +587,7 @@ static int __ath9k_hw_init(struct ath_hw *ah)
599 587
600 r = ath9k_hw_init_macaddr(ah); 588 r = ath9k_hw_init_macaddr(ah);
601 if (r) { 589 if (r) {
602 ath_print(common, ATH_DBG_FATAL, 590 ath_err(common, "Failed to initialize MAC address\n");
603 "Failed to initialize MAC address\n");
604 return r; 591 return r;
605 } 592 }
606 593
@@ -634,21 +621,21 @@ int ath9k_hw_init(struct ath_hw *ah)
634 case AR9287_DEVID_PCIE: 621 case AR9287_DEVID_PCIE:
635 case AR2427_DEVID_PCIE: 622 case AR2427_DEVID_PCIE:
636 case AR9300_DEVID_PCIE: 623 case AR9300_DEVID_PCIE:
624 case AR9300_DEVID_AR9485_PCIE:
637 break; 625 break;
638 default: 626 default:
639 if (common->bus_ops->ath_bus_type == ATH_USB) 627 if (common->bus_ops->ath_bus_type == ATH_USB)
640 break; 628 break;
641 ath_print(common, ATH_DBG_FATAL, 629 ath_err(common, "Hardware device ID 0x%04x not supported\n",
642 "Hardware device ID 0x%04x not supported\n", 630 ah->hw_version.devid);
643 ah->hw_version.devid);
644 return -EOPNOTSUPP; 631 return -EOPNOTSUPP;
645 } 632 }
646 633
647 ret = __ath9k_hw_init(ah); 634 ret = __ath9k_hw_init(ah);
648 if (ret) { 635 if (ret) {
649 ath_print(common, ATH_DBG_FATAL, 636 ath_err(common,
650 "Unable to initialize hardware; " 637 "Unable to initialize hardware; initialization status: %d\n",
651 "initialization status: %d\n", ret); 638 ret);
652 return ret; 639 return ret;
653 } 640 }
654 641
@@ -680,7 +667,12 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
680static void ath9k_hw_init_pll(struct ath_hw *ah, 667static void ath9k_hw_init_pll(struct ath_hw *ah,
681 struct ath9k_channel *chan) 668 struct ath9k_channel *chan)
682{ 669{
683 u32 pll = ath9k_hw_compute_pll_control(ah, chan); 670 u32 pll;
671
672 if (AR_SREV_9485(ah))
673 REG_WRITE(ah, AR_RTC_PLL_CONTROL2, 0x886666);
674
675 pll = ath9k_hw_compute_pll_control(ah, chan);
684 676
685 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 677 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
686 678
@@ -772,8 +764,8 @@ static void ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
772static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) 764static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
773{ 765{
774 if (tu > 0xFFFF) { 766 if (tu > 0xFFFF) {
775 ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT, 767 ath_dbg(ath9k_hw_common(ah), ATH_DBG_XMIT,
776 "bad global tx timeout %u\n", tu); 768 "bad global tx timeout %u\n", tu);
777 ah->globaltxtimeout = (u32) -1; 769 ah->globaltxtimeout = (u32) -1;
778 return false; 770 return false;
779 } else { 771 } else {
@@ -790,8 +782,8 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
790 int slottime; 782 int slottime;
791 int sifstime; 783 int sifstime;
792 784
793 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n", 785 ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
794 ah->misc_mode); 786 ah->misc_mode);
795 787
796 if (ah->misc_mode != 0) 788 if (ah->misc_mode != 0)
797 REG_WRITE(ah, AR_PCU_MISC, 789 REG_WRITE(ah, AR_PCU_MISC,
@@ -816,7 +808,7 @@ void ath9k_hw_init_global_settings(struct ath_hw *ah)
816 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ) 808 if (conf->channel && conf->channel->band == IEEE80211_BAND_2GHZ)
817 acktimeout += 64 - sifstime - ah->slottime; 809 acktimeout += 64 - sifstime - ah->slottime;
818 810
819 ath9k_hw_setslottime(ah, slottime); 811 ath9k_hw_setslottime(ah, ah->slottime);
820 ath9k_hw_set_ack_timeout(ah, acktimeout); 812 ath9k_hw_set_ack_timeout(ah, acktimeout);
821 ath9k_hw_set_cts_timeout(ah, acktimeout); 813 ath9k_hw_set_cts_timeout(ah, acktimeout);
822 if (ah->globaltxtimeout != (u32) -1) 814 if (ah->globaltxtimeout != (u32) -1)
@@ -1034,8 +1026,8 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1034 1026
1035 REG_WRITE(ah, AR_RTC_RC, 0); 1027 REG_WRITE(ah, AR_RTC_RC, 0);
1036 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { 1028 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
1037 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 1029 ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
1038 "RTC stuck in MAC reset\n"); 1030 "RTC stuck in MAC reset\n");
1039 return false; 1031 return false;
1040 } 1032 }
1041 1033
@@ -1081,8 +1073,8 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1081 AR_RTC_STATUS_M, 1073 AR_RTC_STATUS_M,
1082 AR_RTC_STATUS_ON, 1074 AR_RTC_STATUS_ON,
1083 AH_WAIT_TIMEOUT)) { 1075 AH_WAIT_TIMEOUT)) {
1084 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 1076 ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
1085 "RTC not waking up\n"); 1077 "RTC not waking up\n");
1086 return false; 1078 return false;
1087 } 1079 }
1088 1080
@@ -1142,16 +1134,14 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1142 1134
1143 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1135 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
1144 if (ath9k_hw_numtxpending(ah, qnum)) { 1136 if (ath9k_hw_numtxpending(ah, qnum)) {
1145 ath_print(common, ATH_DBG_QUEUE, 1137 ath_dbg(common, ATH_DBG_QUEUE,
1146 "Transmit frames pending on " 1138 "Transmit frames pending on queue %d\n", qnum);
1147 "queue %d\n", qnum);
1148 return false; 1139 return false;
1149 } 1140 }
1150 } 1141 }
1151 1142
1152 if (!ath9k_hw_rfbus_req(ah)) { 1143 if (!ath9k_hw_rfbus_req(ah)) {
1153 ath_print(common, ATH_DBG_FATAL, 1144 ath_err(common, "Could not kill baseband RX\n");
1154 "Could not kill baseband RX\n");
1155 return false; 1145 return false;
1156 } 1146 }
1157 1147
@@ -1159,8 +1149,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1159 1149
1160 r = ath9k_hw_rf_set_freq(ah, chan); 1150 r = ath9k_hw_rf_set_freq(ah, chan);
1161 if (r) { 1151 if (r) {
1162 ath_print(common, ATH_DBG_FATAL, 1152 ath_err(common, "Failed to set channel\n");
1163 "Failed to set channel\n");
1164 return false; 1153 return false;
1165 } 1154 }
1166 ath9k_hw_set_clockrate(ah); 1155 ath9k_hw_set_clockrate(ah);
@@ -1170,7 +1159,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1170 channel->max_antenna_gain * 2, 1159 channel->max_antenna_gain * 2,
1171 channel->max_power * 2, 1160 channel->max_power * 2,
1172 min((u32) MAX_RATE_POWER, 1161 min((u32) MAX_RATE_POWER,
1173 (u32) regulatory->power_limit)); 1162 (u32) regulatory->power_limit), false);
1174 1163
1175 ath9k_hw_rfbus_done(ah); 1164 ath9k_hw_rfbus_done(ah);
1176 1165
@@ -1227,7 +1216,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1227 if (!ah->chip_fullsleep) { 1216 if (!ah->chip_fullsleep) {
1228 ath9k_hw_abortpcurecv(ah); 1217 ath9k_hw_abortpcurecv(ah);
1229 if (!ath9k_hw_stopdmarecv(ah)) { 1218 if (!ath9k_hw_stopdmarecv(ah)) {
1230 ath_print(common, ATH_DBG_XMIT, 1219 ath_dbg(common, ATH_DBG_XMIT,
1231 "Failed to stop receive dma\n"); 1220 "Failed to stop receive dma\n");
1232 bChannelChange = false; 1221 bChannelChange = false;
1233 } 1222 }
@@ -1283,6 +1272,8 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1283 1272
1284 ath9k_hw_mark_phy_inactive(ah); 1273 ath9k_hw_mark_phy_inactive(ah);
1285 1274
1275 ah->paprd_table_write_done = false;
1276
1286 /* Only required on the first reset */ 1277 /* Only required on the first reset */
1287 if (AR_SREV_9271(ah) && ah->htc_reset_init) { 1278 if (AR_SREV_9271(ah) && ah->htc_reset_init) {
1288 REG_WRITE(ah, 1279 REG_WRITE(ah,
@@ -1292,7 +1283,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1292 } 1283 }
1293 1284
1294 if (!ath9k_hw_chip_reset(ah, chan)) { 1285 if (!ath9k_hw_chip_reset(ah, chan)) {
1295 ath_print(common, ATH_DBG_FATAL, "Chip reset failed\n"); 1286 ath_err(common, "Chip reset failed\n");
1296 return -EINVAL; 1287 return -EINVAL;
1297 } 1288 }
1298 1289
@@ -1394,7 +1385,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1394 ath9k_hw_init_qos(ah); 1385 ath9k_hw_init_qos(ah);
1395 1386
1396 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT) 1387 if (ah->caps.hw_caps & ATH9K_HW_CAP_RFSILENT)
1397 ath9k_enable_rfkill(ah); 1388 ath9k_hw_cfg_gpio_input(ah, ah->rfkill_gpio);
1398 1389
1399 ath9k_hw_init_global_settings(ah); 1390 ath9k_hw_init_global_settings(ah);
1400 1391
@@ -1439,13 +1430,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
1439 u32 mask; 1430 u32 mask;
1440 mask = REG_READ(ah, AR_CFG); 1431 mask = REG_READ(ah, AR_CFG);
1441 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { 1432 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
1442 ath_print(common, ATH_DBG_RESET, 1433 ath_dbg(common, ATH_DBG_RESET,
1443 "CFG Byte Swap Set 0x%x\n", mask); 1434 "CFG Byte Swap Set 0x%x\n", mask);
1444 } else { 1435 } else {
1445 mask = 1436 mask =
1446 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; 1437 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
1447 REG_WRITE(ah, AR_CFG, mask); 1438 REG_WRITE(ah, AR_CFG, mask);
1448 ath_print(common, ATH_DBG_RESET, 1439 ath_dbg(common, ATH_DBG_RESET,
1449 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); 1440 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
1450 } 1441 }
1451 } else { 1442 } else {
@@ -1573,9 +1564,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
1573 AR_RTC_FORCE_WAKE_EN); 1564 AR_RTC_FORCE_WAKE_EN);
1574 } 1565 }
1575 if (i == 0) { 1566 if (i == 0) {
1576 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 1567 ath_err(ath9k_hw_common(ah),
1577 "Failed to wakeup in %uus\n", 1568 "Failed to wakeup in %uus\n",
1578 POWER_UP_TIME / 20); 1569 POWER_UP_TIME / 20);
1579 return false; 1570 return false;
1580 } 1571 }
1581 } 1572 }
@@ -1599,8 +1590,8 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
1599 if (ah->power_mode == mode) 1590 if (ah->power_mode == mode)
1600 return status; 1591 return status;
1601 1592
1602 ath_print(common, ATH_DBG_RESET, "%s -> %s\n", 1593 ath_dbg(common, ATH_DBG_RESET, "%s -> %s\n",
1603 modes[ah->power_mode], modes[mode]); 1594 modes[ah->power_mode], modes[mode]);
1604 1595
1605 switch (mode) { 1596 switch (mode) {
1606 case ATH9K_PM_AWAKE: 1597 case ATH9K_PM_AWAKE:
@@ -1614,12 +1605,20 @@ bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
1614 ath9k_set_power_network_sleep(ah, setChip); 1605 ath9k_set_power_network_sleep(ah, setChip);
1615 break; 1606 break;
1616 default: 1607 default:
1617 ath_print(common, ATH_DBG_FATAL, 1608 ath_err(common, "Unknown power mode %u\n", mode);
1618 "Unknown power mode %u\n", mode);
1619 return false; 1609 return false;
1620 } 1610 }
1621 ah->power_mode = mode; 1611 ah->power_mode = mode;
1622 1612
1613 /*
1614 * XXX: If this warning never comes up after a while then
1615 * simply keep the ATH_DBG_WARN_ON_ONCE() but make
1616 * ath9k_hw_setpower() return type void.
1617 */
1618
1619 if (!(ah->ah_flags & AH_UNPLUGGED))
1620 ATH_DBG_WARN_ON_ONCE(!status);
1621
1623 return status; 1622 return status;
1624} 1623}
1625EXPORT_SYMBOL(ath9k_hw_setpower); 1624EXPORT_SYMBOL(ath9k_hw_setpower);
@@ -1632,17 +1631,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1632{ 1631{
1633 int flags = 0; 1632 int flags = 0;
1634 1633
1635 ah->beacon_interval = beacon_period;
1636
1637 ENABLE_REGWRITE_BUFFER(ah); 1634 ENABLE_REGWRITE_BUFFER(ah);
1638 1635
1639 switch (ah->opmode) { 1636 switch (ah->opmode) {
1640 case NL80211_IFTYPE_STATION:
1641 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(next_beacon));
1642 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
1643 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
1644 flags |= AR_TBTT_TIMER_EN;
1645 break;
1646 case NL80211_IFTYPE_ADHOC: 1637 case NL80211_IFTYPE_ADHOC:
1647 case NL80211_IFTYPE_MESH_POINT: 1638 case NL80211_IFTYPE_MESH_POINT:
1648 REG_SET_BIT(ah, AR_TXCFG, 1639 REG_SET_BIT(ah, AR_TXCFG,
@@ -1666,17 +1657,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
1666 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 1657 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
1667 break; 1658 break;
1668 default: 1659 default:
1669 if (ah->is_monitoring) { 1660 ath_dbg(ath9k_hw_common(ah), ATH_DBG_BEACON,
1670 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, 1661 "%s: unsupported opmode: %d\n",
1671 TU_TO_USEC(next_beacon)); 1662 __func__, ah->opmode);
1672 REG_WRITE(ah, AR_NEXT_DMA_BEACON_ALERT, 0xffff);
1673 REG_WRITE(ah, AR_NEXT_SWBA, 0x7ffff);
1674 flags |= AR_TBTT_TIMER_EN;
1675 break;
1676 }
1677 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
1678 "%s: unsupported opmode: %d\n",
1679 __func__, ah->opmode);
1680 return; 1663 return;
1681 break; 1664 break;
1682 } 1665 }
@@ -1732,10 +1715,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
1732 else 1715 else
1733 nextTbtt = bs->bs_nexttbtt; 1716 nextTbtt = bs->bs_nexttbtt;
1734 1717
1735 ath_print(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim); 1718 ath_dbg(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
1736 ath_print(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt); 1719 ath_dbg(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
1737 ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); 1720 ath_dbg(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
1738 ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); 1721 ath_dbg(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
1739 1722
1740 ENABLE_REGWRITE_BUFFER(ah); 1723 ENABLE_REGWRITE_BUFFER(ah);
1741 1724
@@ -1781,7 +1764,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1781 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw; 1764 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
1782 1765
1783 u16 capField = 0, eeval; 1766 u16 capField = 0, eeval;
1784 u8 ant_div_ctl1; 1767 u8 ant_div_ctl1, tx_chainmask, rx_chainmask;
1785 1768
1786 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0); 1769 eeval = ah->eep_ops->get_eeprom(ah, EEP_REG_0);
1787 regulatory->current_rd = eeval; 1770 regulatory->current_rd = eeval;
@@ -1800,14 +1783,14 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1800 regulatory->current_rd += 5; 1783 regulatory->current_rd += 5;
1801 else if (regulatory->current_rd == 0x41) 1784 else if (regulatory->current_rd == 0x41)
1802 regulatory->current_rd = 0x43; 1785 regulatory->current_rd = 0x43;
1803 ath_print(common, ATH_DBG_REGULATORY, 1786 ath_dbg(common, ATH_DBG_REGULATORY,
1804 "regdomain mapped to 0x%x\n", regulatory->current_rd); 1787 "regdomain mapped to 0x%x\n", regulatory->current_rd);
1805 } 1788 }
1806 1789
1807 eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE); 1790 eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
1808 if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) { 1791 if ((eeval & (AR5416_OPFLAGS_11G | AR5416_OPFLAGS_11A)) == 0) {
1809 ath_print(common, ATH_DBG_FATAL, 1792 ath_err(common,
1810 "no band has been marked as supported in EEPROM.\n"); 1793 "no band has been marked as supported in EEPROM\n");
1811 return -EINVAL; 1794 return -EINVAL;
1812 } 1795 }
1813 1796
@@ -1833,6 +1816,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1833 1816
1834 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA; 1817 ah->misc_mode |= AR_PCU_MIC_NEW_LOC_ENA;
1835 1818
1819 /* enable key search for every frame in an aggregate */
1820 if (AR_SREV_9300_20_OR_LATER(ah))
1821 ah->misc_mode |= AR_PCU_ALWAYS_PERFORM_KEYSEARCH;
1822
1836 pCap->low_2ghz_chan = 2312; 1823 pCap->low_2ghz_chan = 2312;
1837 pCap->high_2ghz_chan = 2732; 1824 pCap->high_2ghz_chan = 2732;
1838 1825
@@ -1921,13 +1908,7 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1921 AR_SREV_5416(ah)) 1908 AR_SREV_5416(ah))
1922 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND; 1909 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
1923 1910
1924 pCap->num_antcfg_5ghz = 1911 if (AR_SREV_9280_20_OR_LATER(ah) && common->btcoex_enabled) {
1925 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ);
1926 pCap->num_antcfg_2ghz =
1927 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
1928
1929 if (AR_SREV_9280_20_OR_LATER(ah) &&
1930 ath9k_hw_btcoex_supported(ah)) {
1931 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO; 1912 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
1932 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO; 1913 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
1933 1914
@@ -1942,8 +1923,10 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1942 } 1923 }
1943 1924
1944 if (AR_SREV_9300_20_OR_LATER(ah)) { 1925 if (AR_SREV_9300_20_OR_LATER(ah)) {
1945 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_LDPC | 1926 pCap->hw_caps |= ATH9K_HW_CAP_EDMA | ATH9K_HW_CAP_FASTCLOCK;
1946 ATH9K_HW_CAP_FASTCLOCK; 1927 if (!AR_SREV_9485(ah))
1928 pCap->hw_caps |= ATH9K_HW_CAP_LDPC;
1929
1947 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH; 1930 pCap->rx_hp_qdepth = ATH9K_HW_RX_HP_QDEPTH;
1948 pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH; 1931 pCap->rx_lp_qdepth = ATH9K_HW_RX_LP_QDEPTH;
1949 pCap->rx_status_len = sizeof(struct ar9003_rxs); 1932 pCap->rx_status_len = sizeof(struct ar9003_rxs);
@@ -1963,6 +1946,9 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1963 if (AR_SREV_9300_20_OR_LATER(ah)) 1946 if (AR_SREV_9300_20_OR_LATER(ah))
1964 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED; 1947 pCap->hw_caps |= ATH9K_HW_CAP_RAC_SUPPORTED;
1965 1948
1949 if (AR_SREV_9300_20_OR_LATER(ah))
1950 ah->ent_mode = REG_READ(ah, AR_ENT_OTP);
1951
1966 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah)) 1952 if (AR_SREV_9287_11_OR_LATER(ah) || AR_SREV_9271(ah))
1967 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20; 1953 pCap->hw_caps |= ATH9K_HW_CAP_SGI_20;
1968 1954
@@ -1973,6 +1959,29 @@ int ath9k_hw_fill_cap_info(struct ath_hw *ah)
1973 if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1)) 1959 if ((ant_div_ctl1 & 0x1) && ((ant_div_ctl1 >> 3) & 0x1))
1974 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB; 1960 pCap->hw_caps |= ATH9K_HW_CAP_ANT_DIV_COMB;
1975 } 1961 }
1962 if (AR_SREV_9300_20_OR_LATER(ah)) {
1963 if (ah->eep_ops->get_eeprom(ah, EEP_CHAIN_MASK_REDUCE))
1964 pCap->hw_caps |= ATH9K_HW_CAP_APM;
1965 }
1966
1967
1968
1969 if (AR_SREV_9485_10(ah)) {
1970 pCap->pcie_lcr_extsync_en = true;
1971 pCap->pcie_lcr_offset = 0x80;
1972 }
1973
1974 tx_chainmask = pCap->tx_chainmask;
1975 rx_chainmask = pCap->rx_chainmask;
1976 while (tx_chainmask || rx_chainmask) {
1977 if (tx_chainmask & BIT(0))
1978 pCap->max_txchains++;
1979 if (rx_chainmask & BIT(0))
1980 pCap->max_rxchains++;
1981
1982 tx_chainmask >>= 1;
1983 rx_chainmask >>= 1;
1984 }
1976 1985
1977 return 0; 1986 return 0;
1978} 1987}
@@ -2177,7 +2186,7 @@ bool ath9k_hw_disable(struct ath_hw *ah)
2177} 2186}
2178EXPORT_SYMBOL(ath9k_hw_disable); 2187EXPORT_SYMBOL(ath9k_hw_disable);
2179 2188
2180void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit) 2189void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test)
2181{ 2190{
2182 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 2191 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
2183 struct ath9k_channel *chan = ah->curchan; 2192 struct ath9k_channel *chan = ah->curchan;
@@ -2190,7 +2199,7 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
2190 channel->max_antenna_gain * 2, 2199 channel->max_antenna_gain * 2,
2191 channel->max_power * 2, 2200 channel->max_power * 2,
2192 min((u32) MAX_RATE_POWER, 2201 min((u32) MAX_RATE_POWER,
2193 (u32) regulatory->power_limit)); 2202 (u32) regulatory->power_limit), test);
2194} 2203}
2195EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit); 2204EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
2196 2205
@@ -2250,8 +2259,8 @@ void ath9k_hw_reset_tsf(struct ath_hw *ah)
2250{ 2259{
2251 if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, 2260 if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
2252 AH_TSF_WRITE_TIMEOUT)) 2261 AH_TSF_WRITE_TIMEOUT))
2253 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, 2262 ath_dbg(ath9k_hw_common(ah), ATH_DBG_RESET,
2254 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); 2263 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
2255 2264
2256 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); 2265 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
2257} 2266}
@@ -2324,11 +2333,10 @@ static u32 rightmost_index(struct ath_gen_timer_table *timer_table, u32 *mask)
2324 return timer_table->gen_timer_index[b]; 2333 return timer_table->gen_timer_index[b];
2325} 2334}
2326 2335
2327u32 ath9k_hw_gettsf32(struct ath_hw *ah) 2336static u32 ath9k_hw_gettsf32(struct ath_hw *ah)
2328{ 2337{
2329 return REG_READ(ah, AR_TSF_L32); 2338 return REG_READ(ah, AR_TSF_L32);
2330} 2339}
2331EXPORT_SYMBOL(ath9k_hw_gettsf32);
2332 2340
2333struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 2341struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
2334 void (*trigger)(void *), 2342 void (*trigger)(void *),
@@ -2342,9 +2350,9 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
2342 timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); 2350 timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
2343 2351
2344 if (timer == NULL) { 2352 if (timer == NULL) {
2345 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 2353 ath_err(ath9k_hw_common(ah),
2346 "Failed to allocate memory" 2354 "Failed to allocate memory for hw timer[%d]\n",
2347 "for hw timer[%d]\n", timer_index); 2355 timer_index);
2348 return NULL; 2356 return NULL;
2349 } 2357 }
2350 2358
@@ -2373,9 +2381,9 @@ void ath9k_hw_gen_timer_start(struct ath_hw *ah,
2373 2381
2374 tsf = ath9k_hw_gettsf32(ah); 2382 tsf = ath9k_hw_gettsf32(ah);
2375 2383
2376 ath_print(ath9k_hw_common(ah), ATH_DBG_HWTIMER, 2384 ath_dbg(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
2377 "curent tsf %x period %x" 2385 "current tsf %x period %x timer_next %x\n",
2378 "timer_next %x\n", tsf, timer_period, timer_next); 2386 tsf, timer_period, timer_next);
2379 2387
2380 /* 2388 /*
2381 * Pull timer_next forward if the current TSF already passed it 2389 * Pull timer_next forward if the current TSF already passed it
@@ -2455,8 +2463,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
2455 index = rightmost_index(timer_table, &thresh_mask); 2463 index = rightmost_index(timer_table, &thresh_mask);
2456 timer = timer_table->timers[index]; 2464 timer = timer_table->timers[index];
2457 BUG_ON(!timer); 2465 BUG_ON(!timer);
2458 ath_print(common, ATH_DBG_HWTIMER, 2466 ath_dbg(common, ATH_DBG_HWTIMER,
2459 "TSF overflow for Gen timer %d\n", index); 2467 "TSF overflow for Gen timer %d\n", index);
2460 timer->overflow(timer->arg); 2468 timer->overflow(timer->arg);
2461 } 2469 }
2462 2470
@@ -2464,8 +2472,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
2464 index = rightmost_index(timer_table, &trigger_mask); 2472 index = rightmost_index(timer_table, &trigger_mask);
2465 timer = timer_table->timers[index]; 2473 timer = timer_table->timers[index];
2466 BUG_ON(!timer); 2474 BUG_ON(!timer);
2467 ath_print(common, ATH_DBG_HWTIMER, 2475 ath_dbg(common, ATH_DBG_HWTIMER,
2468 "Gen timer[%d] trigger\n", index); 2476 "Gen timer[%d] trigger\n", index);
2469 timer->trigger(timer->arg); 2477 timer->trigger(timer->arg);
2470 } 2478 }
2471} 2479}
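Near the end of the hw.c diff, ath9k_hw_fill_cap_info() gains a loop that shifts tx_chainmask and rx_chainmask right one bit at a time to fill the new max_txchains/max_rxchains capability fields. The sketch below is just that bit-counting idiom lifted into a runnable userspace form; the sample masks are illustrative.

#include <stdio.h>

/* Shift the chainmask right one bit at a time, counting set bits. */
static unsigned int count_chains(unsigned int chainmask)
{
	unsigned int chains = 0;

	while (chainmask) {
		if (chainmask & 1)
			chains++;
		chainmask >>= 1;
	}
	return chains;
}

int main(void)
{
	/* 0x7: chains 0-2, 0x5: chains 0 and 2, 0x1: a single chain */
	printf("0x7 -> %u, 0x5 -> %u, 0x1 -> %u\n",
	       count_chains(0x7), count_chains(0x5), count_chains(0x1));
	return 0;
}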
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index d47d1b4b6002..5a3dfec45e96 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -30,7 +30,6 @@
30#include "btcoex.h" 30#include "btcoex.h"
31 31
32#include "../regd.h" 32#include "../regd.h"
33#include "../debug.h"
34 33
35#define ATHEROS_VENDOR_ID 0x168c 34#define ATHEROS_VENDOR_ID 0x168c
36 35
@@ -44,6 +43,7 @@
44#define AR9287_DEVID_PCI 0x002d 43#define AR9287_DEVID_PCI 0x002d
45#define AR9287_DEVID_PCIE 0x002e 44#define AR9287_DEVID_PCIE 0x002e
46#define AR9300_DEVID_PCIE 0x0030 45#define AR9300_DEVID_PCIE 0x0030
46#define AR9300_DEVID_AR9485_PCIE 0x0032
47 47
48#define AR5416_AR9100_DEVID 0x000b 48#define AR5416_AR9100_DEVID 0x000b
49 49
@@ -157,6 +157,13 @@
157#define PAPRD_GAIN_TABLE_ENTRIES 32 157#define PAPRD_GAIN_TABLE_ENTRIES 32
158#define PAPRD_TABLE_SZ 24 158#define PAPRD_TABLE_SZ 24
159 159
160enum ath_hw_txq_subtype {
161 ATH_TXQ_AC_BE = 0,
162 ATH_TXQ_AC_BK = 1,
163 ATH_TXQ_AC_VI = 2,
164 ATH_TXQ_AC_VO = 3,
165};
166
160enum ath_ini_subsys { 167enum ath_ini_subsys {
161 ATH_INI_PRE = 0, 168 ATH_INI_PRE = 0,
162 ATH_INI_CORE, 169 ATH_INI_CORE,
@@ -180,6 +187,7 @@ enum ath9k_hw_caps {
180 ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12), 187 ATH9K_HW_CAP_ANT_DIV_COMB = BIT(12),
181 ATH9K_HW_CAP_2GHZ = BIT(13), 188 ATH9K_HW_CAP_2GHZ = BIT(13),
182 ATH9K_HW_CAP_5GHZ = BIT(14), 189 ATH9K_HW_CAP_5GHZ = BIT(14),
190 ATH9K_HW_CAP_APM = BIT(15),
183}; 191};
184 192
185struct ath9k_hw_capabilities { 193struct ath9k_hw_capabilities {
@@ -191,16 +199,18 @@ struct ath9k_hw_capabilities {
191 u16 rts_aggr_limit; 199 u16 rts_aggr_limit;
192 u8 tx_chainmask; 200 u8 tx_chainmask;
193 u8 rx_chainmask; 201 u8 rx_chainmask;
202 u8 max_txchains;
203 u8 max_rxchains;
194 u16 tx_triglevel_max; 204 u16 tx_triglevel_max;
195 u16 reg_cap; 205 u16 reg_cap;
196 u8 num_gpio_pins; 206 u8 num_gpio_pins;
197 u8 num_antcfg_2ghz;
198 u8 num_antcfg_5ghz;
199 u8 rx_hp_qdepth; 207 u8 rx_hp_qdepth;
200 u8 rx_lp_qdepth; 208 u8 rx_lp_qdepth;
201 u8 rx_status_len; 209 u8 rx_status_len;
202 u8 tx_desc_len; 210 u8 tx_desc_len;
203 u8 txs_len; 211 u8 txs_len;
212 u16 pcie_lcr_offset;
213 bool pcie_lcr_extsync_en;
204}; 214};
205 215
206struct ath9k_ops_config { 216struct ath9k_ops_config {
@@ -226,7 +236,6 @@ struct ath9k_ops_config {
226#define SPUR_DISABLE 0 236#define SPUR_DISABLE 0
227#define SPUR_ENABLE_IOCTL 1 237#define SPUR_ENABLE_IOCTL 1
228#define SPUR_ENABLE_EEPROM 2 238#define SPUR_ENABLE_EEPROM 2
229#define AR_EEPROM_MODAL_SPURS 5
230#define AR_SPUR_5413_1 1640 239#define AR_SPUR_5413_1 1640
231#define AR_SPUR_5413_2 1200 240#define AR_SPUR_5413_2 1200
232#define AR_NO_SPUR 0x8000 241#define AR_NO_SPUR 0x8000
@@ -434,6 +443,7 @@ struct ath9k_hw_version {
434 u16 analog5GhzRev; 443 u16 analog5GhzRev;
435 u16 analog2GhzRev; 444 u16 analog2GhzRev;
436 u16 subsysid; 445 u16 subsysid;
446 enum ath_usb_dev usbdev;
437}; 447};
438 448
439/* Generic TSF timer definitions */ 449/* Generic TSF timer definitions */
@@ -478,6 +488,40 @@ struct ath_hw_antcomb_conf {
478}; 488};
479 489
480/** 490/**
491 * struct ath_hw_radar_conf - radar detection initialization parameters
492 *
493 * @pulse_inband: threshold for checking the ratio of in-band power
494 * to total power for short radar pulses (half dB steps)
495 * @pulse_inband_step: threshold for checking an in-band power to total
496 * power ratio increase for short radar pulses (half dB steps)
497 * @pulse_height: threshold for detecting the beginning of a short
498 * radar pulse (dB step)
499 * @pulse_rssi: threshold for detecting if a short radar pulse is
500 * gone (dB step)
501 * @pulse_maxlen: maximum pulse length (0.8 us steps)
502 *
503 * @radar_rssi: RSSI threshold for starting long radar detection (dB steps)
504 * @radar_inband: threshold for checking the ratio of in-band power
505 * to total power for long radar pulses (half dB steps)
506 * @fir_power: threshold for detecting the end of a long radar pulse (dB)
507 *
508 * @ext_channel: enable extension channel radar detection
509 */
510struct ath_hw_radar_conf {
511 unsigned int pulse_inband;
512 unsigned int pulse_inband_step;
513 unsigned int pulse_height;
514 unsigned int pulse_rssi;
515 unsigned int pulse_maxlen;
516
517 unsigned int radar_rssi;
518 unsigned int radar_inband;
519 int fir_power;
520
521 bool ext_channel;
522};
523
524/**
481 * struct ath_hw_private_ops - callbacks used internally by hardware code 525 * struct ath_hw_private_ops - callbacks used internally by hardware code
482 * 526 *
483 * This structure contains private callbacks designed to only be used internally 527 * This structure contains private callbacks designed to only be used internally
@@ -488,7 +532,6 @@ struct ath_hw_antcomb_conf {
488 * 532 *
489 * @init_mode_regs: Initializes mode registers 533 * @init_mode_regs: Initializes mode registers
490 * @init_mode_gain_regs: Initialize TX/RX gain registers 534 * @init_mode_gain_regs: Initialize TX/RX gain registers
491 * @macversion_supported: If this specific mac revision is supported
492 * 535 *
493 * @rf_set_freq: change frequency 536 * @rf_set_freq: change frequency
494 * @spur_mitigate_freq: spur mitigation 537 * @spur_mitigate_freq: spur mitigation
@@ -510,7 +553,6 @@ struct ath_hw_private_ops {
510 553
511 void (*init_mode_regs)(struct ath_hw *ah); 554 void (*init_mode_regs)(struct ath_hw *ah);
512 void (*init_mode_gain_regs)(struct ath_hw *ah); 555 void (*init_mode_gain_regs)(struct ath_hw *ah);
513 bool (*macversion_supported)(u32 macversion);
514 void (*setup_calibration)(struct ath_hw *ah, 556 void (*setup_calibration)(struct ath_hw *ah,
515 struct ath9k_cal_list *currCal); 557 struct ath9k_cal_list *currCal);
516 558
@@ -534,7 +576,6 @@ struct ath_hw_private_ops {
534 void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan); 576 void (*set_delta_slope)(struct ath_hw *ah, struct ath9k_channel *chan);
535 bool (*rfbus_req)(struct ath_hw *ah); 577 bool (*rfbus_req)(struct ath_hw *ah);
536 void (*rfbus_done)(struct ath_hw *ah); 578 void (*rfbus_done)(struct ath_hw *ah);
537 void (*enable_rfkill)(struct ath_hw *ah);
538 void (*restore_chainmask)(struct ath_hw *ah); 579 void (*restore_chainmask)(struct ath_hw *ah);
539 void (*set_diversity)(struct ath_hw *ah, bool value); 580 void (*set_diversity)(struct ath_hw *ah, bool value);
540 u32 (*compute_pll_control)(struct ath_hw *ah, 581 u32 (*compute_pll_control)(struct ath_hw *ah,
@@ -542,6 +583,8 @@ struct ath_hw_private_ops {
542 bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd, 583 bool (*ani_control)(struct ath_hw *ah, enum ath9k_ani_cmd cmd,
543 int param); 584 int param);
544 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]); 585 void (*do_getnf)(struct ath_hw *ah, int16_t nfarray[NUM_NF_READINGS]);
586 void (*set_radar_params)(struct ath_hw *ah,
587 struct ath_hw_radar_conf *conf);
545 588
546 /* ANI */ 589 /* ANI */
547 void (*ani_cache_ini_regs)(struct ath_hw *ah); 590 void (*ani_cache_ini_regs)(struct ath_hw *ah);
@@ -603,6 +646,10 @@ struct ath_nf_limits {
603 s16 nominal; 646 s16 nominal;
604}; 647};
605 648
649/* ah_flags */
650#define AH_USE_EEPROM 0x1
651#define AH_UNPLUGGED 0x2 /* The card has been physically removed. */
652
606struct ath_hw { 653struct ath_hw {
607 struct ieee80211_hw *hw; 654 struct ieee80211_hw *hw;
608 struct ath_common common; 655 struct ath_common common;
@@ -718,9 +765,7 @@ struct ath_hw {
718 u32 *bank6Temp; 765 u32 *bank6Temp;
719 766
720 u8 txpower_limit; 767 u8 txpower_limit;
721 int16_t txpower_indexoffset;
722 int coverage_class; 768 int coverage_class;
723 u32 beacon_interval;
724 u32 slottime; 769 u32 slottime;
725 u32 globaltxtimeout; 770 u32 globaltxtimeout;
726 771
@@ -740,6 +785,8 @@ struct ath_hw {
740 u8 txchainmask; 785 u8 txchainmask;
741 u8 rxchainmask; 786 u8 rxchainmask;
742 787
788 struct ath_hw_radar_conf radar_conf;
789
743 u32 originalGain[22]; 790 u32 originalGain[22];
744 int initPDADC; 791 int initPDADC;
745 int PDADCdelta; 792 int PDADCdelta;
@@ -789,6 +836,11 @@ struct ath_hw {
789 u32 bb_watchdog_last_status; 836 u32 bb_watchdog_last_status;
790 u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */ 837 u32 bb_watchdog_timeout_ms; /* in ms, 0 to disable */
791 838
839 unsigned int paprd_target_power;
840 unsigned int paprd_training_power;
841 unsigned int paprd_ratemask;
842 unsigned int paprd_ratemask_ht40;
843 bool paprd_table_write_done;
792 u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES]; 844 u32 paprd_gain_table_entries[PAPRD_GAIN_TABLE_ENTRIES];
793 u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES]; 845 u8 paprd_gain_table_index[PAPRD_GAIN_TABLE_ENTRIES];
794 /* 846 /*
@@ -797,6 +849,9 @@ struct ath_hw {
797 * this register when in sleep states. 849 * this register when in sleep states.
798 */ 850 */
799 u32 WARegVal; 851 u32 WARegVal;
852
853 /* Enterprise mode cap */
854 u32 ent_mode;
800}; 855};
801 856
802static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 857static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
@@ -819,10 +874,9 @@ static inline struct ath_hw_ops *ath9k_hw_ops(struct ath_hw *ah)
819 return &ah->ops; 874 return &ah->ops;
820} 875}
821 876
822static inline int sign_extend(int val, const int nbits)
823{
824 int order = BIT(nbits-1);
825 return (val ^ order) - order;
826}
 877static inline u8 get_streams(int mask)
 878{
 879 return !!(mask & BIT(0)) + !!(mask & BIT(1)) + !!(mask & BIT(2));
 880}
827 881
828/* Initialization, Detach, Reset */ 882/* Initialization, Detach, Reset */
@@ -861,7 +915,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hw *ah);
861void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits); 915void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits);
862bool ath9k_hw_phy_disable(struct ath_hw *ah); 916bool ath9k_hw_phy_disable(struct ath_hw *ah);
863bool ath9k_hw_disable(struct ath_hw *ah); 917bool ath9k_hw_disable(struct ath_hw *ah);
864void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit); 918void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit, bool test);
865void ath9k_hw_setopmode(struct ath_hw *ah); 919void ath9k_hw_setopmode(struct ath_hw *ah);
866void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); 920void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
867void ath9k_hw_setbssidmask(struct ath_hw *ah); 921void ath9k_hw_setbssidmask(struct ath_hw *ah);
@@ -893,7 +947,6 @@ void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
893 947
894void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer); 948void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
895void ath_gen_timer_isr(struct ath_hw *hw); 949void ath_gen_timer_isr(struct ath_hw *hw);
896u32 ath9k_hw_gettsf32(struct ath_hw *ah);
897 950
898void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len); 951void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
899 952
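
The hw.h hunk above replaces the generic sign_extend() helper with get_streams(), which turns a chainmask into a stream count. Below is a minimal standalone sketch of that logic; BIT() is redefined here only so the example builds outside the kernel tree, and the test values are illustrative.

#include <stdio.h>

#define BIT(n) (1U << (n))

/* Same logic as the get_streams() helper added above: count how many of
 * the first three chain bits are set in a chainmask. */
static unsigned char get_streams(int mask)
{
	return !!(mask & BIT(0)) + !!(mask & BIT(1)) + !!(mask & BIT(2));
}

int main(void)
{
	/* 0x7 = chains 0,1,2 -> 3 streams; 0x5 = chains 0,2 -> 2 streams */
	printf("mask 0x7 -> %d streams\n", get_streams(0x7));
	printf("mask 0x5 -> %d streams\n", get_streams(0x5));
	printf("mask 0x1 -> %d streams\n", get_streams(0x1));
	return 0;
}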
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 14b8ab386daf..767d8b86f1e1 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -29,17 +29,27 @@ static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
29module_param_named(debug, ath9k_debug, uint, 0); 29module_param_named(debug, ath9k_debug, uint, 0);
30MODULE_PARM_DESC(debug, "Debugging mask"); 30MODULE_PARM_DESC(debug, "Debugging mask");
31 31
32int modparam_nohwcrypt; 32int ath9k_modparam_nohwcrypt;
33module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 33module_param_named(nohwcrypt, ath9k_modparam_nohwcrypt, int, 0444);
34MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 34MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
35 35
36int led_blink; 36int led_blink;
37module_param_named(blink, led_blink, int, 0444); 37module_param_named(blink, led_blink, int, 0444);
38MODULE_PARM_DESC(blink, "Enable LED blink on activity"); 38MODULE_PARM_DESC(blink, "Enable LED blink on activity");
39 39
40static int ath9k_btcoex_enable;
41module_param_named(btcoex_enable, ath9k_btcoex_enable, int, 0444);
42MODULE_PARM_DESC(btcoex_enable, "Enable wifi-BT coexistence");
43
44int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
45module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
46MODULE_PARM_DESC(pmqos, "User specified PM-QOS value");
47
48bool is_ath9k_unloaded;
40/* We use the hw_value as an index into our private channel structure */ 49/* We use the hw_value as an index into our private channel structure */
41 50
42#define CHAN2G(_freq, _idx) { \ 51#define CHAN2G(_freq, _idx) { \
52 .band = IEEE80211_BAND_2GHZ, \
43 .center_freq = (_freq), \ 53 .center_freq = (_freq), \
44 .hw_value = (_idx), \ 54 .hw_value = (_idx), \
45 .max_power = 20, \ 55 .max_power = 20, \
@@ -206,7 +216,9 @@ static void setup_ht_cap(struct ath_softc *sc,
206 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 216 ht_info->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
207 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 217 ht_info->ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
208 218
209 if (AR_SREV_9300_20_OR_LATER(ah)) 219 if (AR_SREV_9485(ah))
220 max_streams = 1;
221 else if (AR_SREV_9300_20_OR_LATER(ah))
210 max_streams = 3; 222 max_streams = 3;
211 else 223 else
212 max_streams = 2; 224 max_streams = 2;
@@ -222,9 +234,9 @@ static void setup_ht_cap(struct ath_softc *sc,
222 tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams); 234 tx_streams = ath9k_cmn_count_streams(common->tx_chainmask, max_streams);
223 rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams); 235 rx_streams = ath9k_cmn_count_streams(common->rx_chainmask, max_streams);
224 236
225 ath_print(common, ATH_DBG_CONFIG, 237 ath_dbg(common, ATH_DBG_CONFIG,
226 "TX streams %d, RX streams: %d\n", 238 "TX streams %d, RX streams: %d\n",
227 tx_streams, rx_streams); 239 tx_streams, rx_streams);
228 240
229 if (tx_streams != rx_streams) { 241 if (tx_streams != rx_streams) {
230 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 242 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
@@ -267,8 +279,8 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
267 struct ath_buf *bf; 279 struct ath_buf *bf;
268 int i, bsize, error, desc_len; 280 int i, bsize, error, desc_len;
269 281
270 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", 282 ath_dbg(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
271 name, nbuf, ndesc); 283 name, nbuf, ndesc);
272 284
273 INIT_LIST_HEAD(head); 285 INIT_LIST_HEAD(head);
274 286
@@ -279,8 +291,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
279 291
280 /* ath_desc must be a multiple of DWORDs */ 292 /* ath_desc must be a multiple of DWORDs */
281 if ((desc_len % 4) != 0) { 293 if ((desc_len % 4) != 0) {
282 ath_print(common, ATH_DBG_FATAL, 294 ath_err(common, "ath_desc not DWORD aligned\n");
283 "ath_desc not DWORD aligned\n");
284 BUG_ON((desc_len % 4) != 0); 295 BUG_ON((desc_len % 4) != 0);
285 error = -ENOMEM; 296 error = -ENOMEM;
286 goto fail; 297 goto fail;
@@ -314,9 +325,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
314 goto fail; 325 goto fail;
315 } 326 }
316 ds = (u8 *) dd->dd_desc; 327 ds = (u8 *) dd->dd_desc;
317 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", 328 ath_dbg(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
318 name, ds, (u32) dd->dd_desc_len, 329 name, ds, (u32) dd->dd_desc_len,
319 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); 330 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
320 331
321 /* allocate buffers */ 332 /* allocate buffers */
322 bsize = sizeof(struct ath_buf) * nbuf; 333 bsize = sizeof(struct ath_buf) * nbuf;
@@ -362,7 +373,7 @@ fail:
362#undef DS2PHYS 373#undef DS2PHYS
363} 374}
364 375
365static void ath9k_init_crypto(struct ath_softc *sc) 376void ath9k_init_crypto(struct ath_softc *sc)
366{ 377{
367 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 378 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
368 int i = 0; 379 int i = 0;
@@ -370,9 +381,9 @@ static void ath9k_init_crypto(struct ath_softc *sc)
370 /* Get the hardware key cache size. */ 381 /* Get the hardware key cache size. */
371 common->keymax = sc->sc_ah->caps.keycache_size; 382 common->keymax = sc->sc_ah->caps.keycache_size;
372 if (common->keymax > ATH_KEYMAX) { 383 if (common->keymax > ATH_KEYMAX) {
373 ath_print(common, ATH_DBG_ANY, 384 ath_dbg(common, ATH_DBG_ANY,
374 "Warning, using only %u entries in %u key cache\n", 385 "Warning, using only %u entries in %u key cache\n",
375 ATH_KEYMAX, common->keymax); 386 ATH_KEYMAX, common->keymax);
376 common->keymax = ATH_KEYMAX; 387 common->keymax = ATH_KEYMAX;
377 } 388 }
378 389
@@ -395,7 +406,8 @@ static void ath9k_init_crypto(struct ath_softc *sc)
395 406
396static int ath9k_init_btcoex(struct ath_softc *sc) 407static int ath9k_init_btcoex(struct ath_softc *sc)
397{ 408{
398 int r, qnum; 409 struct ath_txq *txq;
410 int r;
399 411
400 switch (sc->sc_ah->btcoex_hw.scheme) { 412 switch (sc->sc_ah->btcoex_hw.scheme) {
401 case ATH_BTCOEX_CFG_NONE: 413 case ATH_BTCOEX_CFG_NONE:
@@ -408,8 +420,8 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
408 r = ath_init_btcoex_timer(sc); 420 r = ath_init_btcoex_timer(sc);
409 if (r) 421 if (r)
410 return -1; 422 return -1;
411 qnum = sc->tx.hwq_map[WME_AC_BE]; 423 txq = sc->tx.txq_map[WME_AC_BE];
412 ath9k_hw_init_btcoex_hw(sc->sc_ah, qnum); 424 ath9k_hw_init_btcoex_hw(sc->sc_ah, txq->axq_qnum);
413 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 425 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
414 break; 426 break;
415 default: 427 default:
@@ -422,59 +434,18 @@ static int ath9k_init_btcoex(struct ath_softc *sc)
422 434
423static int ath9k_init_queues(struct ath_softc *sc) 435static int ath9k_init_queues(struct ath_softc *sc)
424{ 436{
425 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
426 int i = 0; 437 int i = 0;
427 438
428 for (i = 0; i < ARRAY_SIZE(sc->tx.hwq_map); i++)
429 sc->tx.hwq_map[i] = -1;
430
431 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah); 439 sc->beacon.beaconq = ath9k_hw_beaconq_setup(sc->sc_ah);
432 if (sc->beacon.beaconq == -1) {
433 ath_print(common, ATH_DBG_FATAL,
434 "Unable to setup a beacon xmit queue\n");
435 goto err;
436 }
437
438 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0); 440 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
439 if (sc->beacon.cabq == NULL) {
440 ath_print(common, ATH_DBG_FATAL,
441 "Unable to setup CAB xmit queue\n");
442 goto err;
443 }
444 441
445 sc->config.cabqReadytime = ATH_CABQ_READY_TIME; 442 sc->config.cabqReadytime = ATH_CABQ_READY_TIME;
446 ath_cabq_update(sc); 443 ath_cabq_update(sc);
447 444
448 if (!ath_tx_setup(sc, WME_AC_BK)) { 445 for (i = 0; i < WME_NUM_AC; i++)
449 ath_print(common, ATH_DBG_FATAL, 446 sc->tx.txq_map[i] = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, i);
450 "Unable to setup xmit queue for BK traffic\n");
451 goto err;
452 }
453
454 if (!ath_tx_setup(sc, WME_AC_BE)) {
455 ath_print(common, ATH_DBG_FATAL,
456 "Unable to setup xmit queue for BE traffic\n");
457 goto err;
458 }
459 if (!ath_tx_setup(sc, WME_AC_VI)) {
460 ath_print(common, ATH_DBG_FATAL,
461 "Unable to setup xmit queue for VI traffic\n");
462 goto err;
463 }
464 if (!ath_tx_setup(sc, WME_AC_VO)) {
465 ath_print(common, ATH_DBG_FATAL,
466 "Unable to setup xmit queue for VO traffic\n");
467 goto err;
468 }
469 447
470 return 0; 448 return 0;
471
472err:
473 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
474 if (ATH_TXQ_SETUP(sc, i))
475 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
476
477 return -EIO;
478} 449}
479 450
480static int ath9k_init_channels_rates(struct ath_softc *sc) 451static int ath9k_init_channels_rates(struct ath_softc *sc)
@@ -570,6 +541,9 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
570 ah->hw_version.subsysid = subsysid; 541 ah->hw_version.subsysid = subsysid;
571 sc->sc_ah = ah; 542 sc->sc_ah = ah;
572 543
544 if (!sc->dev->platform_data)
545 ah->ah_flags |= AH_USE_EEPROM;
546
573 common = ath9k_hw_common(ah); 547 common = ath9k_hw_common(ah);
574 common->ops = &ath9k_common_ops; 548 common->ops = &ath9k_common_ops;
575 common->bus_ops = bus_ops; 549 common->bus_ops = bus_ops;
@@ -577,10 +551,10 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
577 common->hw = sc->hw; 551 common->hw = sc->hw;
578 common->priv = sc; 552 common->priv = sc;
579 common->debug_mask = ath9k_debug; 553 common->debug_mask = ath9k_debug;
554 common->btcoex_enabled = ath9k_btcoex_enable == 1;
580 spin_lock_init(&common->cc_lock); 555 spin_lock_init(&common->cc_lock);
581 556
582 spin_lock_init(&sc->wiphy_lock); 557 spin_lock_init(&sc->wiphy_lock);
583 spin_lock_init(&sc->sc_resetlock);
584 spin_lock_init(&sc->sc_serial_rw); 558 spin_lock_init(&sc->sc_serial_rw);
585 spin_lock_init(&sc->sc_pm_lock); 559 spin_lock_init(&sc->sc_pm_lock);
586 mutex_init(&sc->mutex); 560 mutex_init(&sc->mutex);
@@ -600,13 +574,6 @@ static int ath9k_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
600 if (ret) 574 if (ret)
601 goto err_hw; 575 goto err_hw;
602 576
603 ret = ath9k_init_debug(ah);
604 if (ret) {
605 ath_print(common, ATH_DBG_FATAL,
606 "Unable to create debugfs files\n");
607 goto err_debug;
608 }
609
610 ret = ath9k_init_queues(sc); 577 ret = ath9k_init_queues(sc);
611 if (ret) 578 if (ret)
612 goto err_queues; 579 goto err_queues;
@@ -629,8 +596,6 @@ err_btcoex:
629 if (ATH_TXQ_SETUP(sc, i)) 596 if (ATH_TXQ_SETUP(sc, i))
630 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 597 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
631err_queues: 598err_queues:
632 ath9k_exit_debug(ah);
633err_debug:
634 ath9k_hw_deinit(ah); 599 ath9k_hw_deinit(ah);
635err_hw: 600err_hw:
636 tasklet_kill(&sc->intr_tq); 601 tasklet_kill(&sc->intr_tq);
@@ -642,6 +607,37 @@ err_hw:
642 return ret; 607 return ret;
643} 608}
644 609
610static void ath9k_init_band_txpower(struct ath_softc *sc, int band)
611{
612 struct ieee80211_supported_band *sband;
613 struct ieee80211_channel *chan;
614 struct ath_hw *ah = sc->sc_ah;
615 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
616 int i;
617
618 sband = &sc->sbands[band];
619 for (i = 0; i < sband->n_channels; i++) {
620 chan = &sband->channels[i];
621 ah->curchan = &ah->channels[chan->hw_value];
622 ath9k_cmn_update_ichannel(ah->curchan, chan, NL80211_CHAN_HT20);
623 ath9k_hw_set_txpowerlimit(ah, MAX_RATE_POWER, true);
624 chan->max_power = reg->max_power_level / 2;
625 }
626}
627
628static void ath9k_init_txpower_limits(struct ath_softc *sc)
629{
630 struct ath_hw *ah = sc->sc_ah;
631 struct ath9k_channel *curchan = ah->curchan;
632
633 if (ah->caps.hw_caps & ATH9K_HW_CAP_2GHZ)
634 ath9k_init_band_txpower(sc, IEEE80211_BAND_2GHZ);
635 if (ah->caps.hw_caps & ATH9K_HW_CAP_5GHZ)
636 ath9k_init_band_txpower(sc, IEEE80211_BAND_5GHZ);
637
638 ah->curchan = curchan;
639}
640
645void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 641void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
646{ 642{
647 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 643 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
@@ -657,7 +653,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
657 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 653 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT)
658 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION; 654 hw->flags |= IEEE80211_HW_AMPDU_AGGREGATION;
659 655
660 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || modparam_nohwcrypt) 656 if (AR_SREV_9160_10_OR_LATER(sc->sc_ah) || ath9k_modparam_nohwcrypt)
661 hw->flags |= IEEE80211_HW_MFP_CAPABLE; 657 hw->flags |= IEEE80211_HW_MFP_CAPABLE;
662 658
663 hw->wiphy->interface_modes = 659 hw->wiphy->interface_modes =
@@ -705,6 +701,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
705 const struct ath_bus_ops *bus_ops) 701 const struct ath_bus_ops *bus_ops)
706{ 702{
707 struct ieee80211_hw *hw = sc->hw; 703 struct ieee80211_hw *hw = sc->hw;
704 struct ath_wiphy *aphy = hw->priv;
708 struct ath_common *common; 705 struct ath_common *common;
709 struct ath_hw *ah; 706 struct ath_hw *ah;
710 int error = 0; 707 int error = 0;
@@ -737,11 +734,19 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
737 if (error != 0) 734 if (error != 0)
738 goto error_rx; 735 goto error_rx;
739 736
737 ath9k_init_txpower_limits(sc);
738
740 /* Register with mac80211 */ 739 /* Register with mac80211 */
741 error = ieee80211_register_hw(hw); 740 error = ieee80211_register_hw(hw);
742 if (error) 741 if (error)
743 goto error_register; 742 goto error_register;
744 743
744 error = ath9k_init_debug(ah);
745 if (error) {
746 ath_err(common, "Unable to create debugfs files\n");
747 goto error_world;
748 }
749
745 /* Handle world regulatory */ 750 /* Handle world regulatory */
746 if (!ath_is_world_regd(reg)) { 751 if (!ath_is_world_regd(reg)) {
747 error = regulatory_hint(hw->wiphy, reg->alpha2); 752 error = regulatory_hint(hw->wiphy, reg->alpha2);
@@ -754,6 +759,7 @@ int ath9k_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
754 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work); 759 INIT_WORK(&sc->chan_work, ath9k_wiphy_chan_work);
755 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work); 760 INIT_DELAYED_WORK(&sc->wiphy_work, ath9k_wiphy_work);
756 sc->wiphy_scheduler_int = msecs_to_jiffies(500); 761 sc->wiphy_scheduler_int = msecs_to_jiffies(500);
762 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
757 763
758 ath_init_leds(sc); 764 ath_init_leds(sc);
759 ath_start_rfkill_poll(sc); 765 ath_start_rfkill_poll(sc);
@@ -799,7 +805,6 @@ static void ath9k_deinit_softc(struct ath_softc *sc)
799 if (ATH_TXQ_SETUP(sc, i)) 805 if (ATH_TXQ_SETUP(sc, i))
800 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 806 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
801 807
802 ath9k_exit_debug(sc->sc_ah);
803 ath9k_hw_deinit(sc->sc_ah); 808 ath9k_hw_deinit(sc->sc_ah);
804 809
805 tasklet_kill(&sc->intr_tq); 810 tasklet_kill(&sc->intr_tq);
@@ -866,20 +871,12 @@ static int __init ath9k_init(void)
866 goto err_out; 871 goto err_out;
867 } 872 }
868 873
869 error = ath9k_debug_create_root();
870 if (error) {
871 printk(KERN_ERR
872 "ath9k: Unable to create debugfs root: %d\n",
873 error);
874 goto err_rate_unregister;
875 }
876
877 error = ath_pci_init(); 874 error = ath_pci_init();
878 if (error < 0) { 875 if (error < 0) {
879 printk(KERN_ERR 876 printk(KERN_ERR
880 "ath9k: No PCI devices found, driver not installed.\n"); 877 "ath9k: No PCI devices found, driver not installed.\n");
881 error = -ENODEV; 878 error = -ENODEV;
882 goto err_remove_root; 879 goto err_rate_unregister;
883 } 880 }
884 881
885 error = ath_ahb_init(); 882 error = ath_ahb_init();
@@ -893,8 +890,6 @@ static int __init ath9k_init(void)
893 err_pci_exit: 890 err_pci_exit:
894 ath_pci_exit(); 891 ath_pci_exit();
895 892
896 err_remove_root:
897 ath9k_debug_remove_root();
898 err_rate_unregister: 893 err_rate_unregister:
899 ath_rate_control_unregister(); 894 ath_rate_control_unregister();
900 err_out: 895 err_out:
@@ -904,9 +899,9 @@ module_init(ath9k_init);
904 899
905static void __exit ath9k_exit(void) 900static void __exit ath9k_exit(void)
906{ 901{
902 is_ath9k_unloaded = true;
907 ath_ahb_exit(); 903 ath_ahb_exit();
908 ath_pci_exit(); 904 ath_pci_exit();
909 ath9k_debug_remove_root();
910 ath_rate_control_unregister(); 905 ath_rate_control_unregister();
911 printk(KERN_INFO "%s: Driver unloaded\n", dev_info); 906 printk(KERN_INFO "%s: Driver unloaded\n", dev_info);
912} 907}
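
The init.c changes above drop the per-AC ath_tx_setup() calls and the hwq_map array in favour of one loop that fills tx.txq_map[] indexed by WME access class. The following simplified userspace sketch shows only that mapping pattern; the enum values, struct members and queue numbers are illustrative stand-ins, not the driver's real definitions.

#include <stdio.h>

/* Illustrative stand-ins only; these are not the kernel's definitions. */
enum { WME_AC_VO, WME_AC_VI, WME_AC_BE, WME_AC_BK, WME_NUM_AC };

struct txq {
	int axq_qnum;
};

static struct txq hw_queues[10];
static int next_qnum = 1;	/* pretend queue 0 is already reserved */

/* Stand-in for ath_txq_setup(): hand out the next free hardware queue. */
static struct txq *txq_setup(int subtype)
{
	struct txq *q = &hw_queues[next_qnum];

	(void)subtype;
	q->axq_qnum = next_qnum++;
	return q;
}

int main(void)
{
	struct txq *txq_map[WME_NUM_AC];
	int i;

	/* The patched ath9k_init_queues() uses one loop of this shape in
	 * place of four per-AC setup calls and the old hwq_map array. */
	for (i = 0; i < WME_NUM_AC; i++)
		txq_map[i] = txq_setup(i);

	for (i = 0; i < WME_NUM_AC; i++)
		printf("AC %d -> hardware queue %d\n", i, txq_map[i]->axq_qnum);
	return 0;
}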
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index c996963ab339..180170d3ce25 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -20,11 +20,11 @@
20static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, 20static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
21 struct ath9k_tx_queue_info *qi) 21 struct ath9k_tx_queue_info *qi)
22{ 22{
23 ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT, 23 ath_dbg(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
24 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", 24 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
25 ah->txok_interrupt_mask, ah->txerr_interrupt_mask, 25 ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
26 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, 26 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
27 ah->txurn_interrupt_mask); 27 ah->txurn_interrupt_mask);
28 28
29 ENABLE_REGWRITE_BUFFER(ah); 29 ENABLE_REGWRITE_BUFFER(ah);
30 30
@@ -56,8 +56,8 @@ EXPORT_SYMBOL(ath9k_hw_puttxbuf);
56 56
57void ath9k_hw_txstart(struct ath_hw *ah, u32 q) 57void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
58{ 58{
59 ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE, 59 ath_dbg(ath9k_hw_common(ah), ATH_DBG_QUEUE,
60 "Enable TXE on queue: %u\n", q); 60 "Enable TXE on queue: %u\n", q);
61 REG_WRITE(ah, AR_Q_TXE, 1 << q); 61 REG_WRITE(ah, AR_Q_TXE, 1 << q);
62} 62}
63EXPORT_SYMBOL(ath9k_hw_txstart); 63EXPORT_SYMBOL(ath9k_hw_txstart);
@@ -117,12 +117,11 @@ EXPORT_SYMBOL(ath9k_hw_numtxpending);
117bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) 117bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
118{ 118{
119 u32 txcfg, curLevel, newLevel; 119 u32 txcfg, curLevel, newLevel;
120 enum ath9k_int omask;
121 120
122 if (ah->tx_trig_level >= ah->config.max_txtrig_level) 121 if (ah->tx_trig_level >= ah->config.max_txtrig_level)
123 return false; 122 return false;
124 123
125 omask = ath9k_hw_set_interrupts(ah, ah->imask & ~ATH9K_INT_GLOBAL); 124 ath9k_hw_disable_interrupts(ah);
126 125
127 txcfg = REG_READ(ah, AR_TXCFG); 126 txcfg = REG_READ(ah, AR_TXCFG);
128 curLevel = MS(txcfg, AR_FTRIG); 127 curLevel = MS(txcfg, AR_FTRIG);
@@ -136,7 +135,7 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
136 REG_WRITE(ah, AR_TXCFG, 135 REG_WRITE(ah, AR_TXCFG,
137 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG)); 136 (txcfg & ~AR_FTRIG) | SM(newLevel, AR_FTRIG));
138 137
139 ath9k_hw_set_interrupts(ah, omask); 138 ath9k_hw_enable_interrupts(ah);
140 139
141 ah->tx_trig_level = newLevel; 140 ah->tx_trig_level = newLevel;
142 141
@@ -155,15 +154,15 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
155 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; 154 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
156 155
157 if (q >= pCap->total_queues) { 156 if (q >= pCap->total_queues) {
158 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, " 157 ath_dbg(common, ATH_DBG_QUEUE,
159 "invalid queue: %u\n", q); 158 "Stopping TX DMA, invalid queue: %u\n", q);
160 return false; 159 return false;
161 } 160 }
162 161
163 qi = &ah->txq[q]; 162 qi = &ah->txq[q];
164 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 163 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
165 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, " 164 ath_dbg(common, ATH_DBG_QUEUE,
166 "inactive queue: %u\n", q); 165 "Stopping TX DMA, inactive queue: %u\n", q);
167 return false; 166 return false;
168 } 167 }
169 168
@@ -176,9 +175,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
176 } 175 }
177 176
178 if (ath9k_hw_numtxpending(ah, q)) { 177 if (ath9k_hw_numtxpending(ah, q)) {
179 ath_print(common, ATH_DBG_QUEUE, 178 ath_dbg(common, ATH_DBG_QUEUE,
180 "%s: Num of pending TX Frames %d on Q %d\n", 179 "%s: Num of pending TX Frames %d on Q %d\n",
181 __func__, ath9k_hw_numtxpending(ah, q), q); 180 __func__, ath9k_hw_numtxpending(ah, q), q);
182 181
183 for (j = 0; j < 2; j++) { 182 for (j = 0; j < 2; j++) {
184 tsfLow = REG_READ(ah, AR_TSF_L32); 183 tsfLow = REG_READ(ah, AR_TSF_L32);
@@ -192,9 +191,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
192 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) 191 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
193 break; 192 break;
194 193
195 ath_print(common, ATH_DBG_QUEUE, 194 ath_dbg(common, ATH_DBG_QUEUE,
196 "TSF has moved while trying to set " 195 "TSF has moved while trying to set quiet time TSF: 0x%08x\n",
197 "quiet time TSF: 0x%08x\n", tsfLow); 196 tsfLow);
198 } 197 }
199 198
200 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 199 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
@@ -205,9 +204,8 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
205 wait = wait_time; 204 wait = wait_time;
206 while (ath9k_hw_numtxpending(ah, q)) { 205 while (ath9k_hw_numtxpending(ah, q)) {
207 if ((--wait) == 0) { 206 if ((--wait) == 0) {
208 ath_print(common, ATH_DBG_FATAL, 207 ath_err(common,
209 "Failed to stop TX DMA in 100 " 208 "Failed to stop TX DMA in 100 msec after killing last frame\n");
210 "msec after killing last frame\n");
211 break; 209 break;
212 } 210 }
213 udelay(ATH9K_TIME_QUANTUM); 211 udelay(ATH9K_TIME_QUANTUM);
@@ -240,19 +238,19 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
240 struct ath9k_tx_queue_info *qi; 238 struct ath9k_tx_queue_info *qi;
241 239
242 if (q >= pCap->total_queues) { 240 if (q >= pCap->total_queues) {
243 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, " 241 ath_dbg(common, ATH_DBG_QUEUE,
244 "invalid queue: %u\n", q); 242 "Set TXQ properties, invalid queue: %u\n", q);
245 return false; 243 return false;
246 } 244 }
247 245
248 qi = &ah->txq[q]; 246 qi = &ah->txq[q];
249 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 247 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
250 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, " 248 ath_dbg(common, ATH_DBG_QUEUE,
251 "inactive queue: %u\n", q); 249 "Set TXQ properties, inactive queue: %u\n", q);
252 return false; 250 return false;
253 } 251 }
254 252
255 ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q); 253 ath_dbg(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
256 254
257 qi->tqi_ver = qinfo->tqi_ver; 255 qi->tqi_ver = qinfo->tqi_ver;
258 qi->tqi_subtype = qinfo->tqi_subtype; 256 qi->tqi_subtype = qinfo->tqi_subtype;
@@ -311,15 +309,15 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
311 struct ath9k_tx_queue_info *qi; 309 struct ath9k_tx_queue_info *qi;
312 310
313 if (q >= pCap->total_queues) { 311 if (q >= pCap->total_queues) {
314 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, " 312 ath_dbg(common, ATH_DBG_QUEUE,
315 "invalid queue: %u\n", q); 313 "Get TXQ properties, invalid queue: %u\n", q);
316 return false; 314 return false;
317 } 315 }
318 316
319 qi = &ah->txq[q]; 317 qi = &ah->txq[q];
320 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 318 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
321 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, " 319 ath_dbg(common, ATH_DBG_QUEUE,
322 "inactive queue: %u\n", q); 320 "Get TXQ properties, inactive queue: %u\n", q);
323 return false; 321 return false;
324 } 322 }
325 323
@@ -369,23 +367,20 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
369 ATH9K_TX_QUEUE_INACTIVE) 367 ATH9K_TX_QUEUE_INACTIVE)
370 break; 368 break;
371 if (q == pCap->total_queues) { 369 if (q == pCap->total_queues) {
372 ath_print(common, ATH_DBG_FATAL, 370 ath_err(common, "No available TX queue\n");
373 "No available TX queue\n");
374 return -1; 371 return -1;
375 } 372 }
376 break; 373 break;
377 default: 374 default:
378 ath_print(common, ATH_DBG_FATAL, 375 ath_err(common, "Invalid TX queue type: %u\n", type);
379 "Invalid TX queue type: %u\n", type);
380 return -1; 376 return -1;
381 } 377 }
382 378
383 ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q); 379 ath_dbg(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
384 380
385 qi = &ah->txq[q]; 381 qi = &ah->txq[q];
386 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { 382 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
387 ath_print(common, ATH_DBG_FATAL, 383 ath_err(common, "TX queue: %u already active\n", q);
388 "TX queue: %u already active\n", q);
389 return -1; 384 return -1;
390 } 385 }
391 memset(qi, 0, sizeof(struct ath9k_tx_queue_info)); 386 memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -417,18 +412,18 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
417 struct ath9k_tx_queue_info *qi; 412 struct ath9k_tx_queue_info *qi;
418 413
419 if (q >= pCap->total_queues) { 414 if (q >= pCap->total_queues) {
420 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, " 415 ath_dbg(common, ATH_DBG_QUEUE,
421 "invalid queue: %u\n", q); 416 "Release TXQ, invalid queue: %u\n", q);
422 return false; 417 return false;
423 } 418 }
424 qi = &ah->txq[q]; 419 qi = &ah->txq[q];
425 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 420 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
426 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, " 421 ath_dbg(common, ATH_DBG_QUEUE,
427 "inactive queue: %u\n", q); 422 "Release TXQ, inactive queue: %u\n", q);
428 return false; 423 return false;
429 } 424 }
430 425
431 ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q); 426 ath_dbg(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
432 427
433 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; 428 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
434 ah->txok_interrupt_mask &= ~(1 << q); 429 ah->txok_interrupt_mask &= ~(1 << q);
@@ -451,19 +446,19 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
451 u32 cwMin, chanCwMin, value; 446 u32 cwMin, chanCwMin, value;
452 447
453 if (q >= pCap->total_queues) { 448 if (q >= pCap->total_queues) {
454 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, " 449 ath_dbg(common, ATH_DBG_QUEUE,
455 "invalid queue: %u\n", q); 450 "Reset TXQ, invalid queue: %u\n", q);
456 return false; 451 return false;
457 } 452 }
458 453
459 qi = &ah->txq[q]; 454 qi = &ah->txq[q];
460 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 455 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
461 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, " 456 ath_dbg(common, ATH_DBG_QUEUE,
462 "inactive queue: %u\n", q); 457 "Reset TXQ, inactive queue: %u\n", q);
463 return true; 458 return true;
464 } 459 }
465 460
466 ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q); 461 ath_dbg(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
467 462
468 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { 463 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
469 if (chan && IS_CHAN_B(chan)) 464 if (chan && IS_CHAN_B(chan))
@@ -697,15 +692,16 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
697 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) { 692 if ((ads.ds_rxstatus8 & AR_RxFrameOK) == 0) {
698 if (ads.ds_rxstatus8 & AR_CRCErr) 693 if (ads.ds_rxstatus8 & AR_CRCErr)
699 rs->rs_status |= ATH9K_RXERR_CRC; 694 rs->rs_status |= ATH9K_RXERR_CRC;
700 else if (ads.ds_rxstatus8 & AR_PHYErr) { 695 if (ads.ds_rxstatus8 & AR_PHYErr) {
701 rs->rs_status |= ATH9K_RXERR_PHY; 696 rs->rs_status |= ATH9K_RXERR_PHY;
702 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode); 697 phyerr = MS(ads.ds_rxstatus8, AR_PHYErrCode);
703 rs->rs_phyerr = phyerr; 698 rs->rs_phyerr = phyerr;
704 } else if (ads.ds_rxstatus8 & AR_DecryptCRCErr) 699 }
700 if (ads.ds_rxstatus8 & AR_DecryptCRCErr)
705 rs->rs_status |= ATH9K_RXERR_DECRYPT; 701 rs->rs_status |= ATH9K_RXERR_DECRYPT;
706 else if (ads.ds_rxstatus8 & AR_MichaelErr) 702 if (ads.ds_rxstatus8 & AR_MichaelErr)
707 rs->rs_status |= ATH9K_RXERR_MIC; 703 rs->rs_status |= ATH9K_RXERR_MIC;
708 else if (ads.ds_rxstatus8 & AR_KeyMiss) 704 if (ads.ds_rxstatus8 & AR_KeyMiss)
709 rs->rs_status |= ATH9K_RXERR_DECRYPT; 705 rs->rs_status |= ATH9K_RXERR_DECRYPT;
710 } 706 }
711 707
@@ -735,9 +731,9 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
735 AR_DIAG_RX_ABORT)); 731 AR_DIAG_RX_ABORT));
736 732
737 reg = REG_READ(ah, AR_OBS_BUS_1); 733 reg = REG_READ(ah, AR_OBS_BUS_1);
738 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL, 734 ath_err(ath9k_hw_common(ah),
739 "RX failed to go idle in 10 ms RXSM=0x%x\n", 735 "RX failed to go idle in 10 ms RXSM=0x%x\n",
740 reg); 736 reg);
741 737
742 return false; 738 return false;
743 } 739 }
@@ -766,14 +762,6 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning)
766} 762}
767EXPORT_SYMBOL(ath9k_hw_startpcureceive); 763EXPORT_SYMBOL(ath9k_hw_startpcureceive);
768 764
769void ath9k_hw_stoppcurecv(struct ath_hw *ah)
770{
771 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_DIS);
772
773 ath9k_hw_disable_mib_counters(ah);
774}
775EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
776
777void ath9k_hw_abortpcurecv(struct ath_hw *ah) 765void ath9k_hw_abortpcurecv(struct ath_hw *ah)
778{ 766{
779 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS); 767 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_RX_ABORT | AR_DIAG_RX_DIS);
@@ -799,12 +787,11 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
799 } 787 }
800 788
801 if (i == 0) { 789 if (i == 0) {
802 ath_print(common, ATH_DBG_FATAL, 790 ath_err(common,
803 "DMA failed to stop in %d ms " 791 "DMA failed to stop in %d ms AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
804 "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 792 AH_RX_STOP_DMA_TIMEOUT / 1000,
805 AH_RX_STOP_DMA_TIMEOUT / 1000, 793 REG_READ(ah, AR_CR),
806 REG_READ(ah, AR_CR), 794 REG_READ(ah, AR_DIAG_SW));
807 REG_READ(ah, AR_DIAG_SW));
808 return false; 795 return false;
809 } else { 796 } else {
810 return true; 797 return true;
@@ -848,28 +835,59 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
848} 835}
849EXPORT_SYMBOL(ath9k_hw_intrpend); 836EXPORT_SYMBOL(ath9k_hw_intrpend);
850 837
851enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, 838void ath9k_hw_disable_interrupts(struct ath_hw *ah)
852 enum ath9k_int ints) 839{
840 struct ath_common *common = ath9k_hw_common(ah);
841
842 ath_dbg(common, ATH_DBG_INTERRUPT, "disable IER\n");
843 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
844 (void) REG_READ(ah, AR_IER);
845 if (!AR_SREV_9100(ah)) {
846 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
847 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
848
849 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
850 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
851 }
852}
853EXPORT_SYMBOL(ath9k_hw_disable_interrupts);
854
855void ath9k_hw_enable_interrupts(struct ath_hw *ah)
856{
857 struct ath_common *common = ath9k_hw_common(ah);
858
859 if (!(ah->imask & ATH9K_INT_GLOBAL))
860 return;
861
862 ath_dbg(common, ATH_DBG_INTERRUPT, "enable IER\n");
863 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
864 if (!AR_SREV_9100(ah)) {
865 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
866 AR_INTR_MAC_IRQ);
867 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
868
869
870 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
871 AR_INTR_SYNC_DEFAULT);
872 REG_WRITE(ah, AR_INTR_SYNC_MASK,
873 AR_INTR_SYNC_DEFAULT);
874 }
875 ath_dbg(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
876 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
877}
878EXPORT_SYMBOL(ath9k_hw_enable_interrupts);
879
880void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
853{ 881{
854 enum ath9k_int omask = ah->imask; 882 enum ath9k_int omask = ah->imask;
855 u32 mask, mask2; 883 u32 mask, mask2;
856 struct ath9k_hw_capabilities *pCap = &ah->caps; 884 struct ath9k_hw_capabilities *pCap = &ah->caps;
857 struct ath_common *common = ath9k_hw_common(ah); 885 struct ath_common *common = ath9k_hw_common(ah);
858 886
859 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 887 if (!(ints & ATH9K_INT_GLOBAL))
888 ath9k_hw_enable_interrupts(ah);
860 889
861 if (omask & ATH9K_INT_GLOBAL) { 890 ath_dbg(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
862 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
863 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
864 (void) REG_READ(ah, AR_IER);
865 if (!AR_SREV_9100(ah)) {
866 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 0);
867 (void) REG_READ(ah, AR_INTR_ASYNC_ENABLE);
868
869 REG_WRITE(ah, AR_INTR_SYNC_ENABLE, 0);
870 (void) REG_READ(ah, AR_INTR_SYNC_ENABLE);
871 }
872 }
873 891
874 /* TODO: global int Ref count */ 892 /* TODO: global int Ref count */
875 mask = ints & ATH9K_INT_COMMON; 893 mask = ints & ATH9K_INT_COMMON;
@@ -930,7 +948,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
930 mask2 |= AR_IMR_S2_CST; 948 mask2 |= AR_IMR_S2_CST;
931 } 949 }
932 950
933 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); 951 ath_dbg(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
934 REG_WRITE(ah, AR_IMR, mask); 952 REG_WRITE(ah, AR_IMR, mask);
935 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC | 953 ah->imrs2_reg &= ~(AR_IMR_S2_TIM | AR_IMR_S2_DTIM | AR_IMR_S2_DTIMSYNC |
936 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO | 954 AR_IMR_S2_CABEND | AR_IMR_S2_CABTO |
@@ -945,24 +963,8 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah,
945 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER); 963 REG_CLR_BIT(ah, AR_IMR_S5, AR_IMR_S5_TIM_TIMER);
946 } 964 }
947 965
948 if (ints & ATH9K_INT_GLOBAL) { 966 ath9k_hw_enable_interrupts(ah);
949 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
950 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
951 if (!AR_SREV_9100(ah)) {
952 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
953 AR_INTR_MAC_IRQ);
954 REG_WRITE(ah, AR_INTR_ASYNC_MASK, AR_INTR_MAC_IRQ);
955
956
957 REG_WRITE(ah, AR_INTR_SYNC_ENABLE,
958 AR_INTR_SYNC_DEFAULT);
959 REG_WRITE(ah, AR_INTR_SYNC_MASK,
960 AR_INTR_SYNC_DEFAULT);
961 }
962 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
963 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
964 }
965 967
966 return omask; 968 return;
967} 969}
968EXPORT_SYMBOL(ath9k_hw_set_interrupts); 970EXPORT_SYMBOL(ath9k_hw_set_interrupts);
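
The mac.c changes split the enable/disable side effects out of ath9k_hw_set_interrupts() into dedicated ath9k_hw_enable_interrupts()/ath9k_hw_disable_interrupts() helpers, so callers such as ath9k_hw_updatetxtriglevel() no longer save and restore an interrupt mask. A toy model of the resulting calling pattern follows; the functions and the ier variable are userspace stand-ins, not the driver's register accessors.

#include <stdio.h>

static int ier = 1;	/* models the top-level AR_IER enable bit */

static void hw_disable_interrupts(void) { ier = 0; }
static void hw_enable_interrupts(void)  { ier = 1; }

/* Stand-in for ath9k_hw_updatetxtriglevel(): the register update is now
 * bracketed by dedicated disable/enable helpers rather than by saving and
 * restoring the previous mask through set_interrupts(). */
static void update_tx_trig_level(void)
{
	hw_disable_interrupts();
	printf("reprogramming AR_TXCFG with IER=%d\n", ier);
	hw_enable_interrupts();
}

int main(void)
{
	update_tx_trig_level();
	printf("after the update: IER=%d\n", ier);
	return 0;
}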
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index 7c1a34d64f6d..7512f97e8f49 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -104,13 +104,11 @@ struct ath_tx_status {
104 u32 ts_tstamp; 104 u32 ts_tstamp;
105 u16 ts_seqnum; 105 u16 ts_seqnum;
106 u8 ts_status; 106 u8 ts_status;
107 u8 ts_ratecode;
108 u8 ts_rateindex; 107 u8 ts_rateindex;
109 int8_t ts_rssi; 108 int8_t ts_rssi;
110 u8 ts_shortretry; 109 u8 ts_shortretry;
111 u8 ts_longretry; 110 u8 ts_longretry;
112 u8 ts_virtcol; 111 u8 ts_virtcol;
113 u8 ts_antenna;
114 u8 ts_flags; 112 u8 ts_flags;
115 int8_t ts_rssi_ctl0; 113 int8_t ts_rssi_ctl0;
116 int8_t ts_rssi_ctl1; 114 int8_t ts_rssi_ctl1;
@@ -121,7 +119,6 @@ struct ath_tx_status {
121 u8 qid; 119 u8 qid;
122 u16 desc_id; 120 u16 desc_id;
123 u8 tid; 121 u8 tid;
124 u8 pad[2];
125 u32 ba_low; 122 u32 ba_low;
126 u32 ba_high; 123 u32 ba_high;
127 u32 evm0; 124 u32 evm0;
@@ -240,7 +237,7 @@ struct ath_desc {
240 u32 ds_ctl1; 237 u32 ds_ctl1;
241 u32 ds_hw[20]; 238 u32 ds_hw[20];
242 void *ds_vdata; 239 void *ds_vdata;
243} __packed; 240} __packed __aligned(4);
244 241
245#define ATH9K_TXDESC_CLRDMASK 0x0001 242#define ATH9K_TXDESC_CLRDMASK 0x0001
246#define ATH9K_TXDESC_NOACK 0x0002 243#define ATH9K_TXDESC_NOACK 0x0002
@@ -310,7 +307,7 @@ struct ar5416_desc {
310 u32 status8; 307 u32 status8;
311 } rx; 308 } rx;
312 } u; 309 } u;
313} __packed; 310} __packed __aligned(4);
314 311
315#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds)) 312#define AR5416DESC(_ds) ((struct ar5416_desc *)(_ds))
316#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds)) 313#define AR5416DESC_CONST(_ds) ((const struct ar5416_desc *)(_ds))
@@ -669,6 +666,7 @@ enum ath9k_key_type {
669 666
670struct ath_hw; 667struct ath_hw;
671struct ath9k_channel; 668struct ath9k_channel;
669enum ath9k_int;
672 670
673u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q); 671u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q);
674void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp); 672void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp);
@@ -693,15 +691,15 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
693bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set); 691bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set);
694void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp); 692void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp);
695void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning); 693void ath9k_hw_startpcureceive(struct ath_hw *ah, bool is_scanning);
696void ath9k_hw_stoppcurecv(struct ath_hw *ah);
697void ath9k_hw_abortpcurecv(struct ath_hw *ah); 694void ath9k_hw_abortpcurecv(struct ath_hw *ah);
698bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 695bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
699int ath9k_hw_beaconq_setup(struct ath_hw *ah); 696int ath9k_hw_beaconq_setup(struct ath_hw *ah);
700 697
701/* Interrupt Handling */ 698/* Interrupt Handling */
702bool ath9k_hw_intrpend(struct ath_hw *ah); 699bool ath9k_hw_intrpend(struct ath_hw *ah);
703enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, 700void ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints);
704 enum ath9k_int ints); 701void ath9k_hw_enable_interrupts(struct ath_hw *ah);
702void ath9k_hw_disable_interrupts(struct ath_hw *ah);
705 703
706void ar9002_hw_attach_mac_ops(struct ath_hw *ah); 704void ar9002_hw_attach_mac_ops(struct ath_hw *ah);
707 705
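
The mac.h hunks add __aligned(4) to the packed descriptor structs, which keeps the packed layout but lets the compiler assume 4-byte alignment again. The small standalone demonstration below uses GCC attributes on reduced struct members chosen purely for illustration.

#include <stdio.h>
#include <stdint.h>

/* Reduced descriptor layouts, chosen only to show the attribute effect. */
struct desc_packed {
	uint32_t ds_link;
	uint32_t ds_data;
} __attribute__((packed));

struct desc_packed_aligned {
	uint32_t ds_link;
	uint32_t ds_data;
} __attribute__((packed, aligned(4)));

int main(void)
{
	/* Same size either way, but only the second type promises 4-byte
	 * alignment, so the compiler may use word loads and stores again. */
	printf("packed:            size=%zu align=%zu\n",
	       sizeof(struct desc_packed),
	       __alignof__(struct desc_packed));
	printf("packed+aligned(4): size=%zu align=%zu\n",
	       sizeof(struct desc_packed_aligned),
	       __alignof__(struct desc_packed_aligned));
	return 0;
}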
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index c0c3464d3a86..f90a6ca94a76 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -23,7 +23,7 @@ static void ath_update_txpow(struct ath_softc *sc)
23 struct ath_hw *ah = sc->sc_ah; 23 struct ath_hw *ah = sc->sc_ah;
24 24
25 if (sc->curtxpow != sc->config.txpowlimit) { 25 if (sc->curtxpow != sc->config.txpowlimit) {
26 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit); 26 ath9k_hw_set_txpowerlimit(ah, sc->config.txpowlimit, false);
27 /* read back in case value is clamped */ 27 /* read back in case value is clamped */
28 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit; 28 sc->curtxpow = ath9k_hw_regulatory(ah)->power_limit;
29 } 29 }
@@ -234,6 +234,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
234 234
235 ath9k_ps_wakeup(sc); 235 ath9k_ps_wakeup(sc);
236 236
237 spin_lock_bh(&sc->sc_pcu_lock);
238
237 /* 239 /*
238 * This is only performed if the channel settings have 240 * This is only performed if the channel settings have
239 * actually changed. 241 * actually changed.
@@ -243,11 +245,9 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
243 * hardware at the new frequency, and then re-enable 245 * hardware at the new frequency, and then re-enable
244 * the relevant bits of the h/w. 246 * the relevant bits of the h/w.
245 */ 247 */
246 ath9k_hw_set_interrupts(ah, 0); 248 ath9k_hw_disable_interrupts(ah);
247 stopped = ath_drain_all_txq(sc, false); 249 stopped = ath_drain_all_txq(sc, false);
248 250
249 spin_lock_bh(&sc->rx.pcu_lock);
250
251 if (!ath_stoprecv(sc)) 251 if (!ath_stoprecv(sc))
252 stopped = false; 252 stopped = false;
253 253
@@ -261,46 +261,39 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
261 if (!(sc->sc_flags & SC_OP_OFFCHANNEL)) 261 if (!(sc->sc_flags & SC_OP_OFFCHANNEL))
262 caldata = &aphy->caldata; 262 caldata = &aphy->caldata;
263 263
264 ath_print(common, ATH_DBG_CONFIG, 264 ath_dbg(common, ATH_DBG_CONFIG,
265 "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n", 265 "(%u MHz) -> (%u MHz), conf_is_ht40: %d fastcc: %d\n",
266 sc->sc_ah->curchan->channel, 266 sc->sc_ah->curchan->channel,
267 channel->center_freq, conf_is_ht40(conf), 267 channel->center_freq, conf_is_ht40(conf),
268 fastcc); 268 fastcc);
269
270 spin_lock_bh(&sc->sc_resetlock);
271 269
272 r = ath9k_hw_reset(ah, hchan, caldata, fastcc); 270 r = ath9k_hw_reset(ah, hchan, caldata, fastcc);
273 if (r) { 271 if (r) {
274 ath_print(common, ATH_DBG_FATAL, 272 ath_err(common,
275 "Unable to reset channel (%u MHz), " 273 "Unable to reset channel (%u MHz), reset status %d\n",
276 "reset status %d\n", 274 channel->center_freq, r);
277 channel->center_freq, r);
278 spin_unlock_bh(&sc->sc_resetlock);
279 spin_unlock_bh(&sc->rx.pcu_lock);
280 goto ps_restore; 275 goto ps_restore;
281 } 276 }
282 spin_unlock_bh(&sc->sc_resetlock);
283 277
284 if (ath_startrecv(sc) != 0) { 278 if (ath_startrecv(sc) != 0) {
285 ath_print(common, ATH_DBG_FATAL, 279 ath_err(common, "Unable to restart recv logic\n");
286 "Unable to restart recv logic\n");
287 r = -EIO; 280 r = -EIO;
288 spin_unlock_bh(&sc->rx.pcu_lock);
289 goto ps_restore; 281 goto ps_restore;
290 } 282 }
291 283
292 spin_unlock_bh(&sc->rx.pcu_lock);
293
294 ath_update_txpow(sc); 284 ath_update_txpow(sc);
295 ath9k_hw_set_interrupts(ah, ah->imask); 285 ath9k_hw_set_interrupts(ah, ah->imask);
296 286
297 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) { 287 if (!(sc->sc_flags & (SC_OP_OFFCHANNEL))) {
298 ath_beacon_config(sc, NULL); 288 if (sc->sc_flags & SC_OP_BEACONS)
289 ath_beacon_config(sc, NULL);
299 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 290 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
300 ath_start_ani(common); 291 ath_start_ani(common);
301 } 292 }
302 293
303 ps_restore: 294 ps_restore:
295 spin_unlock_bh(&sc->sc_pcu_lock);
296
304 ath9k_ps_restore(sc); 297 ath9k_ps_restore(sc);
305 return r; 298 return r;
306} 299}
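
The ath_set_channel() hunk above serializes the whole stop/reset/restart sequence under the new sc_pcu_lock instead of the old rx.pcu_lock plus sc_resetlock pairs. A rough userspace sketch of the resulting locking shape follows; the mutex and the step functions are placeholders for the corresponding driver calls.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for sc->sc_pcu_lock; the step functions below are placeholders
 * for the corresponding driver calls in ath_set_channel(). */
static pthread_mutex_t pcu_lock = PTHREAD_MUTEX_INITIALIZER;

static void disable_interrupts(void) { puts("  disable interrupts"); }
static void drain_tx_queues(void)    { puts("  drain TX queues"); }
static void stop_rx(void)            { puts("  stop RX"); }
static void reset_hw(void)           { puts("  reset hardware on the new channel"); }
static void start_rx(void)           { puts("  restart RX"); }
static void enable_interrupts(void)  { puts("  re-enable interrupts"); }

static void set_channel(void)
{
	/* One lock now covers the whole sequence. */
	pthread_mutex_lock(&pcu_lock);
	disable_interrupts();
	drain_tx_queues();
	stop_rx();
	reset_hw();
	start_rx();
	enable_interrupts();
	pthread_mutex_unlock(&pcu_lock);
}

int main(void)
{
	puts("channel change:");
	set_channel();
	return 0;
}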
@@ -328,6 +321,42 @@ static void ath_paprd_activate(struct ath_softc *sc)
328 ath9k_ps_restore(sc); 321 ath9k_ps_restore(sc);
329} 322}
330 323
324static bool ath_paprd_send_frame(struct ath_softc *sc, struct sk_buff *skb, int chain)
325{
326 struct ieee80211_hw *hw = sc->hw;
327 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
328 struct ath_tx_control txctl;
329 int time_left;
330
331 memset(&txctl, 0, sizeof(txctl));
332 txctl.txq = sc->tx.txq_map[WME_AC_BE];
333
334 memset(tx_info, 0, sizeof(*tx_info));
335 tx_info->band = hw->conf.channel->band;
336 tx_info->flags |= IEEE80211_TX_CTL_NO_ACK;
337 tx_info->control.rates[0].idx = 0;
338 tx_info->control.rates[0].count = 1;
339 tx_info->control.rates[0].flags = IEEE80211_TX_RC_MCS;
340 tx_info->control.rates[1].idx = -1;
341
342 init_completion(&sc->paprd_complete);
343 sc->paprd_pending = true;
344 txctl.paprd = BIT(chain);
345 if (ath_tx_start(hw, skb, &txctl) != 0)
346 return false;
347
348 time_left = wait_for_completion_timeout(&sc->paprd_complete,
349 msecs_to_jiffies(ATH_PAPRD_TIMEOUT));
350 sc->paprd_pending = false;
351
352 if (!time_left)
353 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CALIBRATE,
354 "Timeout waiting for paprd training on TX chain %d\n",
355 chain);
356
357 return !!time_left;
358}
359
331void ath_paprd_calibrate(struct work_struct *work) 360void ath_paprd_calibrate(struct work_struct *work)
332{ 361{
333 struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work); 362 struct ath_softc *sc = container_of(work, struct ath_softc, paprd_work);
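
The new ath_paprd_send_frame() helper above factors the transmit-and-wait logic out of ath_paprd_calibrate(), leaving the caller with a per-chain loop that skips chains missing from tx_chainmask and aborts on a timeout. A simplified standalone sketch of that loop follows; the timeout on chain 2 is simulated for illustration.

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1U << (n))
#define MAX_CHAINS 3

/* Stand-in for ath_paprd_send_frame(); the timeout on chain 2 is
 * simulated so the early-exit path is visible. */
static bool send_training_frame(int chain)
{
	return chain != 2;
}

int main(void)
{
	unsigned int tx_chainmask = BIT(0) | BIT(2);	/* chains 0 and 2 populated */
	int chain;

	for (chain = 0; chain < MAX_CHAINS; chain++) {
		if (!(tx_chainmask & BIT(chain)))
			continue;	/* chain absent from the mask */

		printf("training chain %d\n", chain);
		if (!send_training_frame(chain)) {
			printf("chain %d timed out, abandoning PAPRD\n", chain);
			break;
		}
	}
	return 0;
}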
@@ -335,28 +364,23 @@ void ath_paprd_calibrate(struct work_struct *work)
335 struct ath_hw *ah = sc->sc_ah; 364 struct ath_hw *ah = sc->sc_ah;
336 struct ieee80211_hdr *hdr; 365 struct ieee80211_hdr *hdr;
337 struct sk_buff *skb = NULL; 366 struct sk_buff *skb = NULL;
338 struct ieee80211_tx_info *tx_info;
339 int band = hw->conf.channel->band;
340 struct ieee80211_supported_band *sband = &sc->sbands[band];
341 struct ath_tx_control txctl;
342 struct ath9k_hw_cal_data *caldata = ah->caldata; 367 struct ath9k_hw_cal_data *caldata = ah->caldata;
343 struct ath_common *common = ath9k_hw_common(ah); 368 struct ath_common *common = ath9k_hw_common(ah);
344 int qnum, ftype; 369 int ftype;
345 int chain_ok = 0; 370 int chain_ok = 0;
346 int chain; 371 int chain;
347 int len = 1800; 372 int len = 1800;
348 int time_left;
349 int i;
350 373
351 if (!caldata) 374 if (!caldata)
352 return; 375 return;
353 376
377 if (ar9003_paprd_init_table(ah) < 0)
378 return;
379
354 skb = alloc_skb(len, GFP_KERNEL); 380 skb = alloc_skb(len, GFP_KERNEL);
355 if (!skb) 381 if (!skb)
356 return; 382 return;
357 383
358 tx_info = IEEE80211_SKB_CB(skb);
359
360 skb_put(skb, len); 384 skb_put(skb, len);
361 memset(skb->data, 0, len); 385 memset(skb->data, 0, len);
362 hdr = (struct ieee80211_hdr *)skb->data; 386 hdr = (struct ieee80211_hdr *)skb->data;
@@ -367,40 +391,25 @@ void ath_paprd_calibrate(struct work_struct *work)
367 memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN); 391 memcpy(hdr->addr2, hw->wiphy->perm_addr, ETH_ALEN);
368 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN); 392 memcpy(hdr->addr3, hw->wiphy->perm_addr, ETH_ALEN);
369 393
370 memset(&txctl, 0, sizeof(txctl));
371 qnum = sc->tx.hwq_map[WME_AC_BE];
372 txctl.txq = &sc->tx.txq[qnum];
373
374 ath9k_ps_wakeup(sc); 394 ath9k_ps_wakeup(sc);
375 ar9003_paprd_init_table(ah);
376 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) { 395 for (chain = 0; chain < AR9300_MAX_CHAINS; chain++) {
377 if (!(common->tx_chainmask & BIT(chain))) 396 if (!(common->tx_chainmask & BIT(chain)))
378 continue; 397 continue;
379 398
380 chain_ok = 0; 399 chain_ok = 0;
381 memset(tx_info, 0, sizeof(*tx_info));
382 tx_info->band = band;
383 400
384 for (i = 0; i < 4; i++) {
385 tx_info->control.rates[i].idx = sband->n_bitrates - 1;
386 tx_info->control.rates[i].count = 6;
387 }
 401 ath_dbg(common, ATH_DBG_CALIBRATE,
 402 "Sending PAPRD frame for thermal measurement "
 403 "on chain %d\n", chain);
 404 if (!ath_paprd_send_frame(sc, skb, chain))
405 goto fail_paprd;
388 406
389 init_completion(&sc->paprd_complete);
390 ar9003_paprd_setup_gain_table(ah, chain); 407 ar9003_paprd_setup_gain_table(ah, chain);
391 txctl.paprd = BIT(chain);
392 if (ath_tx_start(hw, skb, &txctl) != 0)
393 break;
394 408
395 time_left = wait_for_completion_timeout(&sc->paprd_complete, 409 ath_dbg(common, ATH_DBG_CALIBRATE,
396 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)); 410 "Sending PAPRD training frame on chain %d\n", chain);
397 if (!time_left) { 411 if (!ath_paprd_send_frame(sc, skb, chain))
398 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
399 "Timeout waiting for paprd training on "
400 "TX chain %d\n",
401 chain);
402 goto fail_paprd; 412 goto fail_paprd;
403 }
404 413
405 if (!ar9003_paprd_is_done(ah)) 414 if (!ar9003_paprd_is_done(ah))
406 break; 415 break;
@@ -457,7 +466,7 @@ void ath_ani_calibrate(unsigned long data)
457 /* Long calibration runs independently of short calibration. */ 466 /* Long calibration runs independently of short calibration. */
458 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) { 467 if ((timestamp - common->ani.longcal_timer) >= long_cal_interval) {
459 longcal = true; 468 longcal = true;
460 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies); 469 ath_dbg(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
461 common->ani.longcal_timer = timestamp; 470 common->ani.longcal_timer = timestamp;
462 } 471 }
463 472
@@ -465,8 +474,8 @@ void ath_ani_calibrate(unsigned long data)
465 if (!common->ani.caldone) { 474 if (!common->ani.caldone) {
466 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) { 475 if ((timestamp - common->ani.shortcal_timer) >= short_cal_interval) {
467 shortcal = true; 476 shortcal = true;
468 ath_print(common, ATH_DBG_ANI, 477 ath_dbg(common, ATH_DBG_ANI,
469 "shortcal @%lu\n", jiffies); 478 "shortcal @%lu\n", jiffies);
470 common->ani.shortcal_timer = timestamp; 479 common->ani.shortcal_timer = timestamp;
471 common->ani.resetcal_timer = timestamp; 480 common->ani.resetcal_timer = timestamp;
472 } 481 }
@@ -525,49 +534,25 @@ set_timer:
525 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) { 534 if ((sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_PAPRD) && ah->caldata) {
526 if (!ah->caldata->paprd_done) 535 if (!ah->caldata->paprd_done)
527 ieee80211_queue_work(sc->hw, &sc->paprd_work); 536 ieee80211_queue_work(sc->hw, &sc->paprd_work);
528 else 537 else if (!ah->paprd_table_write_done)
529 ath_paprd_activate(sc); 538 ath_paprd_activate(sc);
530 } 539 }
531} 540}
532 541
533/*
534 * Update tx/rx chainmask. For legacy association,
535 * hard code chainmask to 1x1, for 11n association, use
536 * the chainmask configuration, for bt coexistence, use
537 * the chainmask configuration even in legacy mode.
538 */
539void ath_update_chainmask(struct ath_softc *sc, int is_ht)
540{
541 struct ath_hw *ah = sc->sc_ah;
542 struct ath_common *common = ath9k_hw_common(ah);
543
544 if ((sc->sc_flags & SC_OP_OFFCHANNEL) || is_ht ||
545 (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) {
546 common->tx_chainmask = ah->caps.tx_chainmask;
547 common->rx_chainmask = ah->caps.rx_chainmask;
548 } else {
549 common->tx_chainmask = 1;
550 common->rx_chainmask = 1;
551 }
552
553 ath_print(common, ATH_DBG_CONFIG,
554 "tx chmask: %d, rx chmask: %d\n",
555 common->tx_chainmask,
556 common->rx_chainmask);
557}
558
559static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) 542static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
560{ 543{
561 struct ath_node *an; 544 struct ath_node *an;
562 545 struct ath_hw *ah = sc->sc_ah;
563 an = (struct ath_node *)sta->drv_priv; 546 an = (struct ath_node *)sta->drv_priv;
564 547
548 if ((ah->caps.hw_caps) & ATH9K_HW_CAP_APM)
549 sc->sc_flags |= SC_OP_ENABLE_APM;
550
565 if (sc->sc_flags & SC_OP_TXAGGR) { 551 if (sc->sc_flags & SC_OP_TXAGGR) {
566 ath_tx_node_init(sc, an); 552 ath_tx_node_init(sc, an);
567 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 553 an->maxampdu = 1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
568 sta->ht_cap.ampdu_factor); 554 sta->ht_cap.ampdu_factor);
569 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density); 555 an->mpdudensity = parse_mpdudensity(sta->ht_cap.ampdu_density);
570 an->last_rssi = ATH_RSSI_DUMMY_MARKER;
571 } 556 }
572} 557}
573 558
@@ -615,6 +600,8 @@ void ath9k_tasklet(unsigned long data)
615 return; 600 return;
616 } 601 }
617 602
603 spin_lock(&sc->sc_pcu_lock);
604
618 if (!ath9k_hw_check_alive(ah)) 605 if (!ath9k_hw_check_alive(ah))
619 ieee80211_queue_work(sc->hw, &sc->hw_check_work); 606 ieee80211_queue_work(sc->hw, &sc->hw_check_work);
620 607
@@ -625,15 +612,12 @@ void ath9k_tasklet(unsigned long data)
625 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN); 612 rxmask = (ATH9K_INT_RX | ATH9K_INT_RXEOL | ATH9K_INT_RXORN);
626 613
627 if (status & rxmask) { 614 if (status & rxmask) {
628 spin_lock_bh(&sc->rx.pcu_lock);
629
630 /* Check for high priority Rx first */ 615 /* Check for high priority Rx first */
631 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) && 616 if ((ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) &&
632 (status & ATH9K_INT_RXHP)) 617 (status & ATH9K_INT_RXHP))
633 ath_rx_tasklet(sc, 0, true); 618 ath_rx_tasklet(sc, 0, true);
634 619
635 ath_rx_tasklet(sc, 0, false); 620 ath_rx_tasklet(sc, 0, false);
636 spin_unlock_bh(&sc->rx.pcu_lock);
637 } 621 }
638 622
639 if (status & ATH9K_INT_TX) { 623 if (status & ATH9K_INT_TX) {
@@ -648,8 +632,8 @@ void ath9k_tasklet(unsigned long data)
648 * TSF sync does not look correct; remain awake to sync with 632 * TSF sync does not look correct; remain awake to sync with
649 * the next Beacon. 633 * the next Beacon.
650 */ 634 */
651 ath_print(common, ATH_DBG_PS, 635 ath_dbg(common, ATH_DBG_PS,
652 "TSFOOR - Sync with next Beacon\n"); 636 "TSFOOR - Sync with next Beacon\n");
653 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC; 637 sc->ps_flags |= PS_WAIT_FOR_BEACON | PS_BEACON_SYNC;
654 } 638 }
655 639
@@ -658,7 +642,9 @@ void ath9k_tasklet(unsigned long data)
658 ath_gen_timer_isr(sc->sc_ah); 642 ath_gen_timer_isr(sc->sc_ah);
659 643
660 /* re-enable hardware interrupt */ 644 /* re-enable hardware interrupt */
661 ath9k_hw_set_interrupts(ah, ah->imask); 645 ath9k_hw_enable_interrupts(ah);
646
647 spin_unlock(&sc->sc_pcu_lock);
662 ath9k_ps_restore(sc); 648 ath9k_ps_restore(sc);
663} 649}
664 650
@@ -757,7 +743,7 @@ irqreturn_t ath_isr(int irq, void *dev)
757 * interrupt; otherwise it will continue to 743 * interrupt; otherwise it will continue to
758 * fire. 744 * fire.
759 */ 745 */
760 ath9k_hw_set_interrupts(ah, 0); 746 ath9k_hw_disable_interrupts(ah);
761 /* 747 /*
762 * Let the hal handle the event. We assume 748 * Let the hal handle the event. We assume
763 * it will clear whatever condition caused 749 * it will clear whatever condition caused
@@ -766,11 +752,13 @@ irqreturn_t ath_isr(int irq, void *dev)
766 spin_lock(&common->cc_lock); 752 spin_lock(&common->cc_lock);
767 ath9k_hw_proc_mib_event(ah); 753 ath9k_hw_proc_mib_event(ah);
768 spin_unlock(&common->cc_lock); 754 spin_unlock(&common->cc_lock);
769 ath9k_hw_set_interrupts(ah, ah->imask); 755 ath9k_hw_enable_interrupts(ah);
770 } 756 }
771 757
772 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 758 if (!(ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
773 if (status & ATH9K_INT_TIM_TIMER) { 759 if (status & ATH9K_INT_TIM_TIMER) {
760 if (ATH_DBG_WARN_ON_ONCE(sc->ps_idle))
761 goto chip_reset;
774 /* Clear RxAbort bit so that we can 762 /* Clear RxAbort bit so that we can
775 * receive frames */ 763 * receive frames */
776 ath9k_setpower(sc, ATH9K_PM_AWAKE); 764 ath9k_setpower(sc, ATH9K_PM_AWAKE);
@@ -783,8 +771,8 @@ chip_reset:
783 ath_debug_stat_interrupt(sc, status); 771 ath_debug_stat_interrupt(sc, status);
784 772
785 if (sched) { 773 if (sched) {
786 /* turn off every interrupt except SWBA */ 774 /* turn off every interrupt */
787 ath9k_hw_set_interrupts(ah, (ah->imask & ATH9K_INT_SWBA)); 775 ath9k_hw_disable_interrupts(ah);
788 tasklet_schedule(&sc->intr_tq); 776 tasklet_schedule(&sc->intr_tq);
789 } 777 }
790 778
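The interrupt hunks above follow a common mitigation pattern: the ISR masks further hardware interrupts and defers the real work to a tasklet, which re-enables them once processing is done under the new sc_pcu_lock. A minimal sketch of that flow, using only the helpers visible in these hunks (ath9k_hw_disable_interrupts(), tasklet_schedule(), ath9k_hw_enable_interrupts()); the status decoding and error paths of the real ath_isr()/ath9k_tasklet() are omitted.

	/*
	 * Sketch only, assuming the driver's own ath9k.h context; this is not
	 * the full ISR/tasklet, just the re-arming pattern the patch settles on.
	 */
	static irqreturn_t isr_sketch(int irq, void *dev)
	{
		struct ath_softc *sc = dev;

		/* Mask the chip so the line stops firing until the tasklet has run. */
		ath9k_hw_disable_interrupts(sc->sc_ah);
		tasklet_schedule(&sc->intr_tq);

		return IRQ_HANDLED;
	}

	static void tasklet_sketch(unsigned long data)
	{
		struct ath_softc *sc = (struct ath_softc *)data;

		spin_lock(&sc->sc_pcu_lock);
		/* ... service RX/TX status here ... */
		ath9k_hw_enable_interrupts(sc->sc_ah);	/* re-arm the hardware */
		spin_unlock(&sc->sc_pcu_lock);
	}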
@@ -836,16 +824,18 @@ static u32 ath_get_extchanmode(struct ath_softc *sc,
836} 824}
837 825
838static void ath9k_bss_assoc_info(struct ath_softc *sc, 826static void ath9k_bss_assoc_info(struct ath_softc *sc,
827 struct ieee80211_hw *hw,
839 struct ieee80211_vif *vif, 828 struct ieee80211_vif *vif,
840 struct ieee80211_bss_conf *bss_conf) 829 struct ieee80211_bss_conf *bss_conf)
841{ 830{
831 struct ath_wiphy *aphy = hw->priv;
842 struct ath_hw *ah = sc->sc_ah; 832 struct ath_hw *ah = sc->sc_ah;
843 struct ath_common *common = ath9k_hw_common(ah); 833 struct ath_common *common = ath9k_hw_common(ah);
844 834
845 if (bss_conf->assoc) { 835 if (bss_conf->assoc) {
846 ath_print(common, ATH_DBG_CONFIG, 836 ath_dbg(common, ATH_DBG_CONFIG,
847 "Bss Info ASSOC %d, bssid: %pM\n", 837 "Bss Info ASSOC %d, bssid: %pM\n",
848 bss_conf->aid, common->curbssid); 838 bss_conf->aid, common->curbssid);
849 839
850 /* New association, store aid */ 840 /* New association, store aid */
851 common->curaid = bss_conf->aid; 841 common->curaid = bss_conf->aid;
@@ -862,12 +852,13 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
862 ath_beacon_config(sc, vif); 852 ath_beacon_config(sc, vif);
863 853
864 /* Reset rssi stats */ 854 /* Reset rssi stats */
855 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
865 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER; 856 sc->sc_ah->stats.avgbrssi = ATH_RSSI_DUMMY_MARKER;
866 857
867 sc->sc_flags |= SC_OP_ANI_RUN; 858 sc->sc_flags |= SC_OP_ANI_RUN;
868 ath_start_ani(common); 859 ath_start_ani(common);
869 } else { 860 } else {
870 ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n"); 861 ath_dbg(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
871 common->curaid = 0; 862 common->curaid = 0;
872 /* Stop ANI */ 863 /* Stop ANI */
873 sc->sc_flags &= ~SC_OP_ANI_RUN; 864 sc->sc_flags &= ~SC_OP_ANI_RUN;
@@ -883,31 +874,25 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
883 int r; 874 int r;
884 875
885 ath9k_ps_wakeup(sc); 876 ath9k_ps_wakeup(sc);
877 spin_lock_bh(&sc->sc_pcu_lock);
878
886 ath9k_hw_configpcipowersave(ah, 0, 0); 879 ath9k_hw_configpcipowersave(ah, 0, 0);
887 880
888 if (!ah->curchan) 881 if (!ah->curchan)
889 ah->curchan = ath_get_curchannel(sc, sc->hw); 882 ah->curchan = ath_get_curchannel(sc, sc->hw);
890 883
891 spin_lock_bh(&sc->rx.pcu_lock);
892 spin_lock_bh(&sc->sc_resetlock);
893 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 884 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
894 if (r) { 885 if (r) {
895 ath_print(common, ATH_DBG_FATAL, 886 ath_err(common,
896 "Unable to reset channel (%u MHz), " 887 "Unable to reset channel (%u MHz), reset status %d\n",
897 "reset status %d\n", 888 channel->center_freq, r);
898 channel->center_freq, r);
899 } 889 }
900 spin_unlock_bh(&sc->sc_resetlock);
901 890
902 ath_update_txpow(sc); 891 ath_update_txpow(sc);
903 if (ath_startrecv(sc) != 0) { 892 if (ath_startrecv(sc) != 0) {
904 ath_print(common, ATH_DBG_FATAL, 893 ath_err(common, "Unable to restart recv logic\n");
905 "Unable to restart recv logic\n"); 894 goto out;
906 spin_unlock_bh(&sc->rx.pcu_lock);
907 return;
908 } 895 }
909 spin_unlock_bh(&sc->rx.pcu_lock);
910
911 if (sc->sc_flags & SC_OP_BEACONS) 896 if (sc->sc_flags & SC_OP_BEACONS)
912 ath_beacon_config(sc, NULL); /* restart beacons */ 897 ath_beacon_config(sc, NULL); /* restart beacons */
913 898
@@ -920,6 +905,9 @@ void ath_radio_enable(struct ath_softc *sc, struct ieee80211_hw *hw)
920 ath9k_hw_set_gpio(ah, ah->led_pin, 0); 905 ath9k_hw_set_gpio(ah, ah->led_pin, 0);
921 906
922 ieee80211_wake_queues(hw); 907 ieee80211_wake_queues(hw);
908out:
909 spin_unlock_bh(&sc->sc_pcu_lock);
910
923 ath9k_ps_restore(sc); 911 ath9k_ps_restore(sc);
924} 912}
925 913
@@ -930,6 +918,8 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
930 int r; 918 int r;
931 919
932 ath9k_ps_wakeup(sc); 920 ath9k_ps_wakeup(sc);
921 spin_lock_bh(&sc->sc_pcu_lock);
922
933 ieee80211_stop_queues(hw); 923 ieee80211_stop_queues(hw);
934 924
935 /* 925 /*
@@ -942,34 +932,30 @@ void ath_radio_disable(struct ath_softc *sc, struct ieee80211_hw *hw)
942 } 932 }
943 933
944 /* Disable interrupts */ 934 /* Disable interrupts */
945 ath9k_hw_set_interrupts(ah, 0); 935 ath9k_hw_disable_interrupts(ah);
946 936
947 ath_drain_all_txq(sc, false); /* clear pending tx frames */ 937 ath_drain_all_txq(sc, false); /* clear pending tx frames */
948 938
949 spin_lock_bh(&sc->rx.pcu_lock);
950
951 ath_stoprecv(sc); /* turn off frame recv */ 939 ath_stoprecv(sc); /* turn off frame recv */
952 ath_flushrecv(sc); /* flush recv queue */ 940 ath_flushrecv(sc); /* flush recv queue */
953 941
954 if (!ah->curchan) 942 if (!ah->curchan)
955 ah->curchan = ath_get_curchannel(sc, hw); 943 ah->curchan = ath_get_curchannel(sc, hw);
956 944
957 spin_lock_bh(&sc->sc_resetlock);
958 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false); 945 r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
959 if (r) { 946 if (r) {
960 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 947 ath_err(ath9k_hw_common(sc->sc_ah),
961 "Unable to reset channel (%u MHz), " 948 "Unable to reset channel (%u MHz), reset status %d\n",
962 "reset status %d\n", 949 channel->center_freq, r);
963 channel->center_freq, r);
964 } 950 }
965 spin_unlock_bh(&sc->sc_resetlock);
966 951
967 ath9k_hw_phy_disable(ah); 952 ath9k_hw_phy_disable(ah);
968 953
969 spin_unlock_bh(&sc->rx.pcu_lock);
970
971 ath9k_hw_configpcipowersave(ah, 1, 1); 954 ath9k_hw_configpcipowersave(ah, 1, 1);
955
956 spin_unlock_bh(&sc->sc_pcu_lock);
972 ath9k_ps_restore(sc); 957 ath9k_ps_restore(sc);
958
973 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); 959 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
974} 960}
975 961
@@ -983,28 +969,23 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
983 /* Stop ANI */ 969 /* Stop ANI */
984 del_timer_sync(&common->ani.timer); 970 del_timer_sync(&common->ani.timer);
985 971
972 spin_lock_bh(&sc->sc_pcu_lock);
973
986 ieee80211_stop_queues(hw); 974 ieee80211_stop_queues(hw);
987 975
988 ath9k_hw_set_interrupts(ah, 0); 976 ath9k_hw_disable_interrupts(ah);
989 ath_drain_all_txq(sc, retry_tx); 977 ath_drain_all_txq(sc, retry_tx);
990 978
991 spin_lock_bh(&sc->rx.pcu_lock);
992
993 ath_stoprecv(sc); 979 ath_stoprecv(sc);
994 ath_flushrecv(sc); 980 ath_flushrecv(sc);
995 981
996 spin_lock_bh(&sc->sc_resetlock);
997 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false); 982 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, ah->caldata, false);
998 if (r) 983 if (r)
999 ath_print(common, ATH_DBG_FATAL, 984 ath_err(common,
1000 "Unable to reset hardware; reset status %d\n", r); 985 "Unable to reset hardware; reset status %d\n", r);
1001 spin_unlock_bh(&sc->sc_resetlock);
1002 986
1003 if (ath_startrecv(sc) != 0) 987 if (ath_startrecv(sc) != 0)
1004 ath_print(common, ATH_DBG_FATAL, 988 ath_err(common, "Unable to start recv logic\n");
1005 "Unable to start recv logic\n");
1006
1007 spin_unlock_bh(&sc->rx.pcu_lock);
1008 989
1009 /* 990 /*
1010 * We may be doing a reset in response to a request 991 * We may be doing a reset in response to a request
@@ -1030,6 +1011,7 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1030 } 1011 }
1031 1012
1032 ieee80211_wake_queues(hw); 1013 ieee80211_wake_queues(hw);
1014 spin_unlock_bh(&sc->sc_pcu_lock);
1033 1015
1034 /* Start ANI */ 1016 /* Start ANI */
1035 ath_start_ani(common); 1017 ath_start_ani(common);
@@ -1037,56 +1019,6 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1037 return r; 1019 return r;
1038} 1020}
1039 1021
1040static int ath_get_hal_qnum(u16 queue, struct ath_softc *sc)
1041{
1042 int qnum;
1043
1044 switch (queue) {
1045 case 0:
1046 qnum = sc->tx.hwq_map[WME_AC_VO];
1047 break;
1048 case 1:
1049 qnum = sc->tx.hwq_map[WME_AC_VI];
1050 break;
1051 case 2:
1052 qnum = sc->tx.hwq_map[WME_AC_BE];
1053 break;
1054 case 3:
1055 qnum = sc->tx.hwq_map[WME_AC_BK];
1056 break;
1057 default:
1058 qnum = sc->tx.hwq_map[WME_AC_BE];
1059 break;
1060 }
1061
1062 return qnum;
1063}
1064
1065int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc)
1066{
1067 int qnum;
1068
1069 switch (queue) {
1070 case WME_AC_VO:
1071 qnum = 0;
1072 break;
1073 case WME_AC_VI:
1074 qnum = 1;
1075 break;
1076 case WME_AC_BE:
1077 qnum = 2;
1078 break;
1079 case WME_AC_BK:
1080 qnum = 3;
1081 break;
1082 default:
1083 qnum = -1;
1084 break;
1085 }
1086
1087 return qnum;
1088}
1089
1090/* XXX: Remove me once we don't depend on ath9k_channel for all 1022/* XXX: Remove me once we don't depend on ath9k_channel for all
1091 * this redundant data */ 1023 * this redundant data */
1092void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw, 1024void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
@@ -1125,9 +1057,9 @@ static int ath9k_start(struct ieee80211_hw *hw)
1125 struct ath9k_channel *init_channel; 1057 struct ath9k_channel *init_channel;
1126 int r; 1058 int r;
1127 1059
1128 ath_print(common, ATH_DBG_CONFIG, 1060 ath_dbg(common, ATH_DBG_CONFIG,
1129 "Starting driver with initial channel: %d MHz\n", 1061 "Starting driver with initial channel: %d MHz\n",
1130 curchan->center_freq); 1062 curchan->center_freq);
1131 1063
1132 mutex_lock(&sc->mutex); 1064 mutex_lock(&sc->mutex);
1133 1065
@@ -1168,19 +1100,15 @@ static int ath9k_start(struct ieee80211_hw *hw)
1168 * be followed by initialization of the appropriate bits 1100 * be followed by initialization of the appropriate bits
1169 * and then setup of the interrupt mask. 1101 * and then setup of the interrupt mask.
1170 */ 1102 */
1171 spin_lock_bh(&sc->rx.pcu_lock); 1103 spin_lock_bh(&sc->sc_pcu_lock);
1172 spin_lock_bh(&sc->sc_resetlock);
1173 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false); 1104 r = ath9k_hw_reset(ah, init_channel, ah->caldata, false);
1174 if (r) { 1105 if (r) {
1175 ath_print(common, ATH_DBG_FATAL, 1106 ath_err(common,
1176 "Unable to reset hardware; reset status %d " 1107 "Unable to reset hardware; reset status %d (freq %u MHz)\n",
1177 "(freq %u MHz)\n", r, 1108 r, curchan->center_freq);
1178 curchan->center_freq); 1109 spin_unlock_bh(&sc->sc_pcu_lock);
1179 spin_unlock_bh(&sc->sc_resetlock);
1180 spin_unlock_bh(&sc->rx.pcu_lock);
1181 goto mutex_unlock; 1110 goto mutex_unlock;
1182 } 1111 }
1183 spin_unlock_bh(&sc->sc_resetlock);
1184 1112
1185 /* 1113 /*
1186 * This is needed only to setup initial state 1114 * This is needed only to setup initial state
@@ -1196,13 +1124,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
1196 * here except setup the interrupt mask. 1124 * here except setup the interrupt mask.
1197 */ 1125 */
1198 if (ath_startrecv(sc) != 0) { 1126 if (ath_startrecv(sc) != 0) {
1199 ath_print(common, ATH_DBG_FATAL, 1127 ath_err(common, "Unable to start recv logic\n");
1200 "Unable to start recv logic\n");
1201 r = -EIO; 1128 r = -EIO;
1202 spin_unlock_bh(&sc->rx.pcu_lock); 1129 spin_unlock_bh(&sc->sc_pcu_lock);
1203 goto mutex_unlock; 1130 goto mutex_unlock;
1204 } 1131 }
1205 spin_unlock_bh(&sc->rx.pcu_lock); 1132 spin_unlock_bh(&sc->sc_pcu_lock);
1206 1133
1207 /* Setup our intr mask. */ 1134 /* Setup our intr mask. */
1208 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL | 1135 ah->imask = ATH9K_INT_TX | ATH9K_INT_RXEOL |
@@ -1244,7 +1171,14 @@ static int ath9k_start(struct ieee80211_hw *hw)
1244 ath9k_btcoex_timer_resume(sc); 1171 ath9k_btcoex_timer_resume(sc);
1245 } 1172 }
1246 1173
1247 pm_qos_update_request(&sc->pm_qos_req, 55); 1174 /* User has the option to provide pm-qos value as a module
1175 * parameter rather than using the default value of
1176 * 'ATH9K_PM_QOS_DEFAULT_VALUE'.
1177 */
1178 pm_qos_update_request(&sc->pm_qos_req, ath9k_pm_qos_value);
1179
1180 if (ah->caps.pcie_lcr_extsync_en && common->bus_ops->extn_synch_en)
1181 common->bus_ops->extn_synch_en(common);
1248 1182
1249mutex_unlock: 1183mutex_unlock:
1250 mutex_unlock(&sc->mutex); 1184 mutex_unlock(&sc->mutex);
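The pm-qos hunk above replaces the hard-coded latency value 55 with ath9k_pm_qos_value. A hedged sketch of how such a module parameter could be declared; the actual declaration lives outside this hunk, and the parameter name, permissions and description shown here are assumptions for illustration only.

	/* Assumed declaration; only ath9k_pm_qos_value and
	 * ATH9K_PM_QOS_DEFAULT_VALUE appear in the hunk above. */
	static int ath9k_pm_qos_value = ATH9K_PM_QOS_DEFAULT_VALUE;
	module_param_named(pmqos, ath9k_pm_qos_value, int, S_IRUSR | S_IRGRP | S_IROTH);
	MODULE_PARM_DESC(pmqos, "User-specified PM-QOS latency value");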
@@ -1255,19 +1189,16 @@ mutex_unlock:
1255static int ath9k_tx(struct ieee80211_hw *hw, 1189static int ath9k_tx(struct ieee80211_hw *hw,
1256 struct sk_buff *skb) 1190 struct sk_buff *skb)
1257{ 1191{
1258 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1259 struct ath_wiphy *aphy = hw->priv; 1192 struct ath_wiphy *aphy = hw->priv;
1260 struct ath_softc *sc = aphy->sc; 1193 struct ath_softc *sc = aphy->sc;
1261 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1194 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1262 struct ath_tx_control txctl; 1195 struct ath_tx_control txctl;
1263 int padpos, padsize;
1264 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; 1196 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1265 int qnum;
1266 1197
1267 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { 1198 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
1268 ath_print(common, ATH_DBG_XMIT, 1199 ath_dbg(common, ATH_DBG_XMIT,
1269 "ath9k: %s: TX in unexpected wiphy state " 1200 "ath9k: %s: TX in unexpected wiphy state %d\n",
1270 "%d\n", wiphy_name(hw->wiphy), aphy->state); 1201 wiphy_name(hw->wiphy), aphy->state);
1271 goto exit; 1202 goto exit;
1272 } 1203 }
1273 1204
@@ -1279,8 +1210,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1279 if (ieee80211_is_data(hdr->frame_control) && 1210 if (ieee80211_is_data(hdr->frame_control) &&
1280 !ieee80211_is_nullfunc(hdr->frame_control) && 1211 !ieee80211_is_nullfunc(hdr->frame_control) &&
1281 !ieee80211_has_pm(hdr->frame_control)) { 1212 !ieee80211_has_pm(hdr->frame_control)) {
1282 ath_print(common, ATH_DBG_PS, "Add PM=1 for a TX frame " 1213 ath_dbg(common, ATH_DBG_PS,
1283 "while in PS mode\n"); 1214 "Add PM=1 for a TX frame while in PS mode\n");
1284 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 1215 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
1285 } 1216 }
1286 } 1217 }
@@ -1295,12 +1226,12 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1295 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 1226 if (!(sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
1296 ath9k_hw_setrxabort(sc->sc_ah, 0); 1227 ath9k_hw_setrxabort(sc->sc_ah, 0);
1297 if (ieee80211_is_pspoll(hdr->frame_control)) { 1228 if (ieee80211_is_pspoll(hdr->frame_control)) {
1298 ath_print(common, ATH_DBG_PS, 1229 ath_dbg(common, ATH_DBG_PS,
1299 "Sending PS-Poll to pick a buffered frame\n"); 1230 "Sending PS-Poll to pick a buffered frame\n");
1300 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA; 1231 sc->ps_flags |= PS_WAIT_FOR_PSPOLL_DATA;
1301 } else { 1232 } else {
1302 ath_print(common, ATH_DBG_PS, 1233 ath_dbg(common, ATH_DBG_PS,
1303 "Wake up to complete TX\n"); 1234 "Wake up to complete TX\n");
1304 sc->ps_flags |= PS_WAIT_FOR_TX_ACK; 1235 sc->ps_flags |= PS_WAIT_FOR_TX_ACK;
1305 } 1236 }
1306 /* 1237 /*
@@ -1312,36 +1243,12 @@ static int ath9k_tx(struct ieee80211_hw *hw,
1312 } 1243 }
1313 1244
1314 memset(&txctl, 0, sizeof(struct ath_tx_control)); 1245 memset(&txctl, 0, sizeof(struct ath_tx_control));
1246 txctl.txq = sc->tx.txq_map[skb_get_queue_mapping(skb)];
1315 1247
1316 /* 1248 ath_dbg(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
1317 * As a temporary workaround, assign seq# here; this will likely need
1318 * to be cleaned up to work better with Beacon transmission and virtual
1319 * BSSes.
1320 */
1321 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) {
1322 if (info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)
1323 sc->tx.seq_no += 0x10;
1324 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1325 hdr->seq_ctrl |= cpu_to_le16(sc->tx.seq_no);
1326 }
1327
1328 /* Add the padding after the header if this is not already done */
1329 padpos = ath9k_cmn_padpos(hdr->frame_control);
1330 padsize = padpos & 3;
1331 if (padsize && skb->len>padpos) {
1332 if (skb_headroom(skb) < padsize)
1333 return -1;
1334 skb_push(skb, padsize);
1335 memmove(skb->data, skb->data + padsize, padpos);
1336 }
1337
1338 qnum = ath_get_hal_qnum(skb_get_queue_mapping(skb), sc);
1339 txctl.txq = &sc->tx.txq[qnum];
1340
1341 ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
1342 1249
1343 if (ath_tx_start(hw, skb, &txctl) != 0) { 1250 if (ath_tx_start(hw, skb, &txctl) != 0) {
1344 ath_print(common, ATH_DBG_XMIT, "TX failed\n"); 1251 ath_dbg(common, ATH_DBG_XMIT, "TX failed\n");
1345 goto exit; 1252 goto exit;
1346 } 1253 }
1347 1254
@@ -1381,7 +1288,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1381 } 1288 }
1382 1289
1383 if (sc->sc_flags & SC_OP_INVALID) { 1290 if (sc->sc_flags & SC_OP_INVALID) {
1384 ath_print(common, ATH_DBG_ANY, "Device not present\n"); 1291 ath_dbg(common, ATH_DBG_ANY, "Device not present\n");
1385 mutex_unlock(&sc->mutex); 1292 mutex_unlock(&sc->mutex);
1386 return; 1293 return;
1387 } 1294 }
@@ -1400,26 +1307,30 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1400 ath9k_btcoex_timer_pause(sc); 1307 ath9k_btcoex_timer_pause(sc);
1401 } 1308 }
1402 1309
1310 spin_lock_bh(&sc->sc_pcu_lock);
1311
1403 /* make sure h/w will not generate any interrupt 1312 /* make sure h/w will not generate any interrupt
1404 * before setting the invalid flag. */ 1313 * before setting the invalid flag. */
1405 ath9k_hw_set_interrupts(ah, 0); 1314 ath9k_hw_disable_interrupts(ah);
1406 1315
1407 spin_lock_bh(&sc->rx.pcu_lock);
1408 if (!(sc->sc_flags & SC_OP_INVALID)) { 1316 if (!(sc->sc_flags & SC_OP_INVALID)) {
1409 ath_drain_all_txq(sc, false); 1317 ath_drain_all_txq(sc, false);
1410 ath_stoprecv(sc); 1318 ath_stoprecv(sc);
1411 ath9k_hw_phy_disable(ah); 1319 ath9k_hw_phy_disable(ah);
1412 } else 1320 } else
1413 sc->rx.rxlink = NULL; 1321 sc->rx.rxlink = NULL;
1414 spin_unlock_bh(&sc->rx.pcu_lock);
1415 1322
1416 /* disable HAL and put h/w to sleep */ 1323 /* disable HAL and put h/w to sleep */
1417 ath9k_hw_disable(ah); 1324 ath9k_hw_disable(ah);
1418 ath9k_hw_configpcipowersave(ah, 1, 1); 1325 ath9k_hw_configpcipowersave(ah, 1, 1);
1326
1327 spin_unlock_bh(&sc->sc_pcu_lock);
1328
1419 ath9k_ps_restore(sc); 1329 ath9k_ps_restore(sc);
1420 1330
1421 /* Finally, put the chip in FULL SLEEP mode */ 1331 sc->ps_idle = true;
1422 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP); 1332 ath9k_set_wiphy_idle(aphy, true);
1333 ath_radio_disable(sc, hw);
1423 1334
1424 sc->sc_flags |= SC_OP_INVALID; 1335 sc->sc_flags |= SC_OP_INVALID;
1425 1336
@@ -1427,7 +1338,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
1427 1338
1428 mutex_unlock(&sc->mutex); 1339 mutex_unlock(&sc->mutex);
1429 1340
1430 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n"); 1341 ath_dbg(common, ATH_DBG_CONFIG, "Driver halt\n");
1431} 1342}
1432 1343
1433static int ath9k_add_interface(struct ieee80211_hw *hw, 1344static int ath9k_add_interface(struct ieee80211_hw *hw,
@@ -1460,14 +1371,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1460 ic_opmode = vif->type; 1371 ic_opmode = vif->type;
1461 break; 1372 break;
1462 default: 1373 default:
1463 ath_print(common, ATH_DBG_FATAL, 1374 ath_err(common, "Interface type %d not yet supported\n",
1464 "Interface type %d not yet supported\n", vif->type); 1375 vif->type);
1465 ret = -EOPNOTSUPP; 1376 ret = -EOPNOTSUPP;
1466 goto out; 1377 goto out;
1467 } 1378 }
1468 1379
1469 ath_print(common, ATH_DBG_CONFIG, 1380 ath_dbg(common, ATH_DBG_CONFIG,
1470 "Attach a VIF of type: %d\n", ic_opmode); 1381 "Attach a VIF of type: %d\n", ic_opmode);
1471 1382
1472 /* Set the VIF opmode */ 1383 /* Set the VIF opmode */
1473 avp->av_opmode = ic_opmode; 1384 avp->av_opmode = ic_opmode;
@@ -1513,15 +1424,83 @@ out:
1513 return ret; 1424 return ret;
1514} 1425}
1515 1426
1427static void ath9k_reclaim_beacon(struct ath_softc *sc,
1428 struct ieee80211_vif *vif)
1429{
1430 struct ath_vif *avp = (void *)vif->drv_priv;
1431
1432 /* Disable SWBA interrupt */
1433 sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
1434 ath9k_ps_wakeup(sc);
1435 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
1436 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1437 tasklet_kill(&sc->bcon_tasklet);
1438 ath9k_ps_restore(sc);
1439
1440 ath_beacon_return(sc, avp);
1441 sc->sc_flags &= ~SC_OP_BEACONS;
1442
1443 if (sc->nbcnvifs > 0) {
1444 /* Re-enable beaconing */
1445 sc->sc_ah->imask |= ATH9K_INT_SWBA;
1446 ath9k_ps_wakeup(sc);
1447 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
1448 ath9k_ps_restore(sc);
1449 }
1450}
1451
1452static int ath9k_change_interface(struct ieee80211_hw *hw,
1453 struct ieee80211_vif *vif,
1454 enum nl80211_iftype new_type,
1455 bool p2p)
1456{
1457 struct ath_wiphy *aphy = hw->priv;
1458 struct ath_softc *sc = aphy->sc;
1459 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1460 int ret = 0;
1461
1462 ath_dbg(common, ATH_DBG_CONFIG, "Change Interface\n");
1463 mutex_lock(&sc->mutex);
1464
1465 switch (new_type) {
1466 case NL80211_IFTYPE_AP:
1467 case NL80211_IFTYPE_ADHOC:
1468 if (sc->nbcnvifs >= ATH_BCBUF) {
1469 ath_err(common, "No beacon slot available\n");
1470 ret = -ENOBUFS;
1471 goto out;
1472 }
1473 break;
1474 case NL80211_IFTYPE_STATION:
1475 /* Stop ANI */
1476 sc->sc_flags &= ~SC_OP_ANI_RUN;
1477 del_timer_sync(&common->ani.timer);
1478 if ((vif->type == NL80211_IFTYPE_AP) ||
1479 (vif->type == NL80211_IFTYPE_ADHOC))
1480 ath9k_reclaim_beacon(sc, vif);
1481 break;
1482 default:
1483 ath_err(common, "Interface type %d not yet supported\n",
1484 vif->type);
1485 ret = -ENOTSUPP;
1486 goto out;
1487 }
1488 vif->type = new_type;
1489 vif->p2p = p2p;
1490
1491out:
1492 mutex_unlock(&sc->mutex);
1493 return ret;
1494}
1495
1516static void ath9k_remove_interface(struct ieee80211_hw *hw, 1496static void ath9k_remove_interface(struct ieee80211_hw *hw,
1517 struct ieee80211_vif *vif) 1497 struct ieee80211_vif *vif)
1518{ 1498{
1519 struct ath_wiphy *aphy = hw->priv; 1499 struct ath_wiphy *aphy = hw->priv;
1520 struct ath_softc *sc = aphy->sc; 1500 struct ath_softc *sc = aphy->sc;
1521 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1501 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1522 struct ath_vif *avp = (void *)vif->drv_priv;
1523 1502
1524 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n"); 1503 ath_dbg(common, ATH_DBG_CONFIG, "Detach Interface\n");
1525 1504
1526 mutex_lock(&sc->mutex); 1505 mutex_lock(&sc->mutex);
1527 1506
@@ -1532,26 +1511,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1532 /* Reclaim beacon resources */ 1511 /* Reclaim beacon resources */
1533 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) || 1512 if ((sc->sc_ah->opmode == NL80211_IFTYPE_AP) ||
1534 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) || 1513 (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) ||
1535 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT)) { 1514 (sc->sc_ah->opmode == NL80211_IFTYPE_MESH_POINT))
1536 /* Disable SWBA interrupt */ 1515 ath9k_reclaim_beacon(sc, vif);
1537 sc->sc_ah->imask &= ~ATH9K_INT_SWBA;
1538 ath9k_ps_wakeup(sc);
1539 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
1540 ath9k_hw_stoptxdma(sc->sc_ah, sc->beacon.beaconq);
1541 ath9k_ps_restore(sc);
1542 tasklet_kill(&sc->bcon_tasklet);
1543 }
1544
1545 ath_beacon_return(sc, avp);
1546 sc->sc_flags &= ~SC_OP_BEACONS;
1547
1548 if (sc->nbcnvifs) {
1549 /* Re-enable SWBA interrupt */
1550 sc->sc_ah->imask |= ATH9K_INT_SWBA;
1551 ath9k_ps_wakeup(sc);
1552 ath9k_hw_set_interrupts(sc->sc_ah, sc->sc_ah->imask);
1553 ath9k_ps_restore(sc);
1554 }
1555 1516
1556 sc->nvifs--; 1517 sc->nvifs--;
1557 1518
@@ -1631,8 +1592,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1631 if (enable_radio) { 1592 if (enable_radio) {
1632 sc->ps_idle = false; 1593 sc->ps_idle = false;
1633 ath_radio_enable(sc, hw); 1594 ath_radio_enable(sc, hw);
1634 ath_print(common, ATH_DBG_CONFIG, 1595 ath_dbg(common, ATH_DBG_CONFIG,
1635 "not-idle: enabling radio\n"); 1596 "not-idle: enabling radio\n");
1636 } 1597 }
1637 } 1598 }
1638 1599
@@ -1654,12 +1615,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1654 1615
1655 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 1616 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
1656 if (conf->flags & IEEE80211_CONF_MONITOR) { 1617 if (conf->flags & IEEE80211_CONF_MONITOR) {
1657 ath_print(common, ATH_DBG_CONFIG, 1618 ath_dbg(common, ATH_DBG_CONFIG,
1658 "Monitor mode is enabled\n"); 1619 "Monitor mode is enabled\n");
1659 sc->sc_ah->is_monitoring = true; 1620 sc->sc_ah->is_monitoring = true;
1660 } else { 1621 } else {
1661 ath_print(common, ATH_DBG_CONFIG, 1622 ath_dbg(common, ATH_DBG_CONFIG,
1662 "Monitor mode is disabled\n"); 1623 "Monitor mode is disabled\n");
1663 sc->sc_ah->is_monitoring = false; 1624 sc->sc_ah->is_monitoring = false;
1664 } 1625 }
1665 } 1626 }
@@ -1691,14 +1652,12 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1691 goto skip_chan_change; 1652 goto skip_chan_change;
1692 } 1653 }
1693 1654
1694 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n", 1655 ath_dbg(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
1695 curchan->center_freq); 1656 curchan->center_freq);
1696 1657
1697 /* XXX: remove me eventually */ 1658 /* XXX: remove me eventually */
1698 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]); 1659 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
1699 1660
1700 ath_update_chainmask(sc, conf_is_ht(conf));
1701
1702 /* update survey stats for the old channel before switching */ 1661 /* update survey stats for the old channel before switching */
1703 spin_lock_irqsave(&common->cc_lock, flags); 1662 spin_lock_irqsave(&common->cc_lock, flags);
1704 ath_update_survey_stats(sc); 1663 ath_update_survey_stats(sc);
@@ -1725,8 +1684,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
1725 } 1684 }
1726 1685
1727 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { 1686 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
1728 ath_print(common, ATH_DBG_FATAL, 1687 ath_err(common, "Unable to set channel\n");
1729 "Unable to set channel\n");
1730 mutex_unlock(&sc->mutex); 1688 mutex_unlock(&sc->mutex);
1731 return -EINVAL; 1689 return -EINVAL;
1732 } 1690 }
@@ -1751,7 +1709,7 @@ skip_chan_change:
1751 spin_unlock_bh(&sc->wiphy_lock); 1709 spin_unlock_bh(&sc->wiphy_lock);
1752 1710
1753 if (disable_radio) { 1711 if (disable_radio) {
1754 ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n"); 1712 ath_dbg(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
1755 sc->ps_idle = true; 1713 sc->ps_idle = true;
1756 ath_radio_disable(sc, hw); 1714 ath_radio_disable(sc, hw);
1757 } 1715 }
@@ -1790,8 +1748,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
1790 ath9k_hw_setrxfilter(sc->sc_ah, rfilt); 1748 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
1791 ath9k_ps_restore(sc); 1749 ath9k_ps_restore(sc);
1792 1750
1793 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, 1751 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
1794 "Set HW RX filter: 0x%x\n", rfilt); 1752 "Set HW RX filter: 0x%x\n", rfilt);
1795} 1753}
1796 1754
1797static int ath9k_sta_add(struct ieee80211_hw *hw, 1755static int ath9k_sta_add(struct ieee80211_hw *hw,
@@ -1824,12 +1782,15 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1824 struct ath_wiphy *aphy = hw->priv; 1782 struct ath_wiphy *aphy = hw->priv;
1825 struct ath_softc *sc = aphy->sc; 1783 struct ath_softc *sc = aphy->sc;
1826 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1784 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1785 struct ath_txq *txq;
1827 struct ath9k_tx_queue_info qi; 1786 struct ath9k_tx_queue_info qi;
1828 int ret = 0, qnum; 1787 int ret = 0;
1829 1788
1830 if (queue >= WME_NUM_AC) 1789 if (queue >= WME_NUM_AC)
1831 return 0; 1790 return 0;
1832 1791
1792 txq = sc->tx.txq_map[queue];
1793
1833 mutex_lock(&sc->mutex); 1794 mutex_lock(&sc->mutex);
1834 1795
1835 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info)); 1796 memset(&qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -1838,20 +1799,18 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
1838 qi.tqi_cwmin = params->cw_min; 1799 qi.tqi_cwmin = params->cw_min;
1839 qi.tqi_cwmax = params->cw_max; 1800 qi.tqi_cwmax = params->cw_max;
1840 qi.tqi_burstTime = params->txop; 1801 qi.tqi_burstTime = params->txop;
1841 qnum = ath_get_hal_qnum(queue, sc);
1842 1802
1843 ath_print(common, ATH_DBG_CONFIG, 1803 ath_dbg(common, ATH_DBG_CONFIG,
1844 "Configure tx [queue/halq] [%d/%d], " 1804 "Configure tx [queue/halq] [%d/%d], aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
1845 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", 1805 queue, txq->axq_qnum, params->aifs, params->cw_min,
1846 queue, qnum, params->aifs, params->cw_min, 1806 params->cw_max, params->txop);
1847 params->cw_max, params->txop);
1848 1807
1849 ret = ath_txq_update(sc, qnum, &qi); 1808 ret = ath_txq_update(sc, txq->axq_qnum, &qi);
1850 if (ret) 1809 if (ret)
1851 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n"); 1810 ath_err(common, "TXQ Update failed\n");
1852 1811
1853 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC) 1812 if (sc->sc_ah->opmode == NL80211_IFTYPE_ADHOC)
1854 if ((qnum == sc->tx.hwq_map[WME_AC_BE]) && !ret) 1813 if (queue == WME_AC_BE && !ret)
1855 ath_beaconq_config(sc); 1814 ath_beaconq_config(sc);
1856 1815
1857 mutex_unlock(&sc->mutex); 1816 mutex_unlock(&sc->mutex);
@@ -1870,12 +1829,12 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
1870 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1829 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1871 int ret = 0; 1830 int ret = 0;
1872 1831
1873 if (modparam_nohwcrypt) 1832 if (ath9k_modparam_nohwcrypt)
1874 return -ENOSPC; 1833 return -ENOSPC;
1875 1834
1876 mutex_lock(&sc->mutex); 1835 mutex_lock(&sc->mutex);
1877 ath9k_ps_wakeup(sc); 1836 ath9k_ps_wakeup(sc);
1878 ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n"); 1837 ath_dbg(common, ATH_DBG_CONFIG, "Set HW Key\n");
1879 1838
1880 switch (cmd) { 1839 switch (cmd) {
1881 case SET_KEY: 1840 case SET_KEY:
@@ -1930,13 +1889,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1930 /* Set aggregation protection mode parameters */ 1889 /* Set aggregation protection mode parameters */
1931 sc->config.ath_aggr_prot = 0; 1890 sc->config.ath_aggr_prot = 0;
1932 1891
1933 /* Only legacy IBSS for now */ 1892 ath_dbg(common, ATH_DBG_CONFIG, "BSSID: %pM aid: 0x%x\n",
1934 if (vif->type == NL80211_IFTYPE_ADHOC) 1893 common->curbssid, common->curaid);
1935 ath_update_chainmask(sc, 0);
1936
1937 ath_print(common, ATH_DBG_CONFIG,
1938 "BSSID: %pM aid: 0x%x\n",
1939 common->curbssid, common->curaid);
1940 1894
1941 /* need to reconfigure the beacon */ 1895 /* need to reconfigure the beacon */
1942 sc->sc_flags &= ~SC_OP_BEACONS ; 1896 sc->sc_flags &= ~SC_OP_BEACONS ;
@@ -1992,8 +1946,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
1992 } 1946 }
1993 1947
1994 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 1948 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
1995 ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", 1949 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
1996 bss_conf->use_short_preamble); 1950 bss_conf->use_short_preamble);
1997 if (bss_conf->use_short_preamble) 1951 if (bss_conf->use_short_preamble)
1998 sc->sc_flags |= SC_OP_PREAMBLE_SHORT; 1952 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
1999 else 1953 else
@@ -2001,8 +1955,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2001 } 1955 }
2002 1956
2003 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 1957 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2004 ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", 1958 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
2005 bss_conf->use_cts_prot); 1959 bss_conf->use_cts_prot);
2006 if (bss_conf->use_cts_prot && 1960 if (bss_conf->use_cts_prot &&
2007 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 1961 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
2008 sc->sc_flags |= SC_OP_PROTECT_ENABLE; 1962 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
@@ -2011,9 +1965,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2011 } 1965 }
2012 1966
2013 if (changed & BSS_CHANGED_ASSOC) { 1967 if (changed & BSS_CHANGED_ASSOC) {
2014 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 1968 ath_dbg(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2015 bss_conf->assoc); 1969 bss_conf->assoc);
2016 ath9k_bss_assoc_info(sc, vif, bss_conf); 1970 ath9k_bss_assoc_info(sc, hw, vif, bss_conf);
2017 } 1971 }
2018 1972
2019 mutex_unlock(&sc->mutex); 1973 mutex_unlock(&sc->mutex);
@@ -2026,7 +1980,9 @@ static u64 ath9k_get_tsf(struct ieee80211_hw *hw)
2026 struct ath_softc *sc = aphy->sc; 1980 struct ath_softc *sc = aphy->sc;
2027 1981
2028 mutex_lock(&sc->mutex); 1982 mutex_lock(&sc->mutex);
1983 ath9k_ps_wakeup(sc);
2029 tsf = ath9k_hw_gettsf64(sc->sc_ah); 1984 tsf = ath9k_hw_gettsf64(sc->sc_ah);
1985 ath9k_ps_restore(sc);
2030 mutex_unlock(&sc->mutex); 1986 mutex_unlock(&sc->mutex);
2031 1987
2032 return tsf; 1988 return tsf;
@@ -2038,7 +1994,9 @@ static void ath9k_set_tsf(struct ieee80211_hw *hw, u64 tsf)
2038 struct ath_softc *sc = aphy->sc; 1994 struct ath_softc *sc = aphy->sc;
2039 1995
2040 mutex_lock(&sc->mutex); 1996 mutex_lock(&sc->mutex);
1997 ath9k_ps_wakeup(sc);
2041 ath9k_hw_settsf64(sc->sc_ah, tsf); 1998 ath9k_hw_settsf64(sc->sc_ah, tsf);
1999 ath9k_ps_restore(sc);
2042 mutex_unlock(&sc->mutex); 2000 mutex_unlock(&sc->mutex);
2043} 2001}
2044 2002
@@ -2076,6 +2034,9 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2076 case IEEE80211_AMPDU_RX_STOP: 2034 case IEEE80211_AMPDU_RX_STOP:
2077 break; 2035 break;
2078 case IEEE80211_AMPDU_TX_START: 2036 case IEEE80211_AMPDU_TX_START:
2037 if (!(sc->sc_flags & SC_OP_TXAGGR))
2038 return -EOPNOTSUPP;
2039
2079 ath9k_ps_wakeup(sc); 2040 ath9k_ps_wakeup(sc);
2080 ret = ath_tx_aggr_start(sc, sta, tid, ssn); 2041 ret = ath_tx_aggr_start(sc, sta, tid, ssn);
2081 if (!ret) 2042 if (!ret)
@@ -2094,8 +2055,7 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2094 ath9k_ps_restore(sc); 2055 ath9k_ps_restore(sc);
2095 break; 2056 break;
2096 default: 2057 default:
2097 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 2058 ath_err(ath9k_hw_common(sc->sc_ah), "Unknown AMPDU action\n");
2098 "Unknown AMPDU action\n");
2099 } 2059 }
2100 2060
2101 local_bh_enable(); 2061 local_bh_enable();
@@ -2195,6 +2155,7 @@ struct ieee80211_ops ath9k_ops = {
2195 .start = ath9k_start, 2155 .start = ath9k_start,
2196 .stop = ath9k_stop, 2156 .stop = ath9k_stop,
2197 .add_interface = ath9k_add_interface, 2157 .add_interface = ath9k_add_interface,
2158 .change_interface = ath9k_change_interface,
2198 .remove_interface = ath9k_remove_interface, 2159 .remove_interface = ath9k_remove_interface,
2199 .config = ath9k_config, 2160 .config = ath9k_config,
2200 .configure_filter = ath9k_configure_filter, 2161 .configure_filter = ath9k_configure_filter,
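Taken together, the main.c hunks fold the old rx.pcu_lock/sc_resetlock pair into a single sc_pcu_lock that serializes PCU start/stop, chip reset and interrupt re-arming. A condensed sketch of the resulting reset sequence, assuming the helper names shown above; the real paths (ath_reset(), ath_radio_enable()/ath_radio_disable()) also restart beacons and ANI and carry fuller error handling.

	/* Condensed sketch of the post-change locking around a chip reset. */
	static int reset_sketch(struct ath_softc *sc)
	{
		struct ath_hw *ah = sc->sc_ah;
		int r;

		spin_lock_bh(&sc->sc_pcu_lock);	/* one lock now covers PCU, reset and RX */

		ath9k_hw_disable_interrupts(ah);
		ath_drain_all_txq(sc, false);
		ath_stoprecv(sc);

		r = ath9k_hw_reset(ah, ah->curchan, ah->caldata, false);
		if (!r && ath_startrecv(sc) != 0)
			r = -EIO;

		ath9k_hw_enable_interrupts(ah);
		spin_unlock_bh(&sc->sc_pcu_lock);

		return r;
	}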
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index b5b651413e77..78ef1f13386f 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/ath9k_platform.h>
19#include "ath9k.h" 20#include "ath9k.h"
20 21
21static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = { 22static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
@@ -29,6 +30,7 @@ static DEFINE_PCI_DEVICE_TABLE(ath_pci_id_table) = {
29 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */ 30 { PCI_VDEVICE(ATHEROS, 0x002D) }, /* PCI */
30 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */ 31 { PCI_VDEVICE(ATHEROS, 0x002E) }, /* PCI-E */
31 { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */ 32 { PCI_VDEVICE(ATHEROS, 0x0030) }, /* PCI-E AR9300 */
33 { PCI_VDEVICE(ATHEROS, 0x0032) }, /* PCI-E AR9485 */
32 { 0 } 34 { 0 }
33}; 35};
34 36
@@ -53,21 +55,35 @@ static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
53 55
54static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data) 56static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
55{ 57{
56 struct ath_hw *ah = (struct ath_hw *) common->ah; 58 struct ath_softc *sc = (struct ath_softc *) common->priv;
57 59 struct ath9k_platform_data *pdata = sc->dev->platform_data;
58 common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); 60
59 61 if (pdata) {
60 if (!ath9k_hw_wait(ah, 62 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
61 AR_EEPROM_STATUS_DATA, 63 ath_err(common,
62 AR_EEPROM_STATUS_DATA_BUSY | 64 "%s: eeprom read failed, offset %08x is out of range\n",
63 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0, 65 __func__, off);
64 AH_WAIT_TIMEOUT)) { 66 }
65 return false; 67
68 *data = pdata->eeprom_data[off];
69 } else {
70 struct ath_hw *ah = (struct ath_hw *) common->ah;
71
72 common->ops->read(ah, AR5416_EEPROM_OFFSET +
73 (off << AR5416_EEPROM_S));
74
75 if (!ath9k_hw_wait(ah,
76 AR_EEPROM_STATUS_DATA,
77 AR_EEPROM_STATUS_DATA_BUSY |
78 AR_EEPROM_STATUS_DATA_PROT_ACCESS, 0,
79 AH_WAIT_TIMEOUT)) {
80 return false;
81 }
82
83 *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
84 AR_EEPROM_STATUS_DATA_VAL);
66 } 85 }
67 86
68 *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
69 AR_EEPROM_STATUS_DATA_VAL);
70
71 return true; 87 return true;
72} 88}
73 89
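The eeprom hunk above lets ath_pci_eeprom_read() serve reads from platform data instead of touching the chip when a board supplies one. A hedged sketch of how board code might provide that data; the function and variable names are made up for illustration, and only struct ath9k_platform_data, its eeprom_data[] field and the linux/ath9k_platform.h header come from the hunk itself.

	#include <linux/ath9k_platform.h>
	#include <linux/pci.h>
	#include <linux/string.h>

	static struct ath9k_platform_data board_ath9k_pdata;

	static void board_attach_ath9k_caldata(struct pci_dev *pdev, const u16 *cal)
	{
		/* eeprom_data[] is indexed by 16-bit word offset, matching
		 * what ath_pci_eeprom_read() expects. */
		memcpy(board_ath9k_pdata.eeprom_data, cal,
		       sizeof(board_ath9k_pdata.eeprom_data));
		pdev->dev.platform_data = &board_ath9k_pdata;
	}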
@@ -80,7 +96,7 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
80 struct pci_dev *pdev = to_pci_dev(sc->dev); 96 struct pci_dev *pdev = to_pci_dev(sc->dev);
81 u8 aspm; 97 u8 aspm;
82 98
83 if (!pdev->is_pcie) 99 if (!pci_is_pcie(pdev))
84 return; 100 return;
85 101
86 pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm); 102 pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
@@ -88,11 +104,23 @@ static void ath_pci_bt_coex_prep(struct ath_common *common)
88 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm); 104 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
89} 105}
90 106
107static void ath_pci_extn_synch_enable(struct ath_common *common)
108{
109 struct ath_softc *sc = (struct ath_softc *) common->priv;
110 struct pci_dev *pdev = to_pci_dev(sc->dev);
111 u8 lnkctl;
112
113 pci_read_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, &lnkctl);
114 lnkctl |= PCI_EXP_LNKCTL_ES;
115 pci_write_config_byte(pdev, sc->sc_ah->caps.pcie_lcr_offset, lnkctl);
116}
117
91static const struct ath_bus_ops ath_pci_bus_ops = { 118static const struct ath_bus_ops ath_pci_bus_ops = {
92 .ath_bus_type = ATH_PCI, 119 .ath_bus_type = ATH_PCI,
93 .read_cachesize = ath_pci_read_cachesize, 120 .read_cachesize = ath_pci_read_cachesize,
94 .eeprom_read = ath_pci_eeprom_read, 121 .eeprom_read = ath_pci_eeprom_read,
95 .bt_coex_prep = ath_pci_bt_coex_prep, 122 .bt_coex_prep = ath_pci_bt_coex_prep,
123 .extn_synch_en = ath_pci_extn_synch_enable,
96}; 124};
97 125
98static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 126static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -236,6 +264,8 @@ static void ath_pci_remove(struct pci_dev *pdev)
236 struct ath_softc *sc = aphy->sc; 264 struct ath_softc *sc = aphy->sc;
237 void __iomem *mem = sc->mem; 265 void __iomem *mem = sc->mem;
238 266
267 if (!is_ath9k_unloaded)
268 sc->sc_ah->ah_flags |= AH_UNPLUGGED;
239 ath9k_deinit_device(sc); 269 ath9k_deinit_device(sc);
240 free_irq(sc->irq, sc); 270 free_irq(sc->irq, sc);
241 ieee80211_free_hw(sc->hw); 271 ieee80211_free_hw(sc->hw);
@@ -247,34 +277,25 @@ static void ath_pci_remove(struct pci_dev *pdev)
247 277
248#ifdef CONFIG_PM 278#ifdef CONFIG_PM
249 279
250static int ath_pci_suspend(struct pci_dev *pdev, pm_message_t state) 280static int ath_pci_suspend(struct device *device)
251{ 281{
282 struct pci_dev *pdev = to_pci_dev(device);
252 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 283 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
253 struct ath_wiphy *aphy = hw->priv; 284 struct ath_wiphy *aphy = hw->priv;
254 struct ath_softc *sc = aphy->sc; 285 struct ath_softc *sc = aphy->sc;
255 286
256 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 287 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
257 288
258 pci_save_state(pdev);
259 pci_disable_device(pdev);
260 pci_set_power_state(pdev, PCI_D3hot);
261
262 return 0; 289 return 0;
263} 290}
264 291
265static int ath_pci_resume(struct pci_dev *pdev) 292static int ath_pci_resume(struct device *device)
266{ 293{
294 struct pci_dev *pdev = to_pci_dev(device);
267 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 295 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
268 struct ath_wiphy *aphy = hw->priv; 296 struct ath_wiphy *aphy = hw->priv;
269 struct ath_softc *sc = aphy->sc; 297 struct ath_softc *sc = aphy->sc;
270 u32 val; 298 u32 val;
271 int err;
272
273 pci_restore_state(pdev);
274
275 err = pci_enable_device(pdev);
276 if (err)
277 return err;
278 299
279 /* 300 /*
280 * Suspend/Resume resets the PCI configuration space, so we have to 301 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -290,10 +311,38 @@ static int ath_pci_resume(struct pci_dev *pdev)
290 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 311 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
291 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1); 312 ath9k_hw_set_gpio(sc->sc_ah, sc->sc_ah->led_pin, 1);
292 313
314 /*
315 * Reset key cache to sane defaults (all entries cleared) instead of
316 * semi-random values after suspend/resume.
317 */
318 ath9k_ps_wakeup(sc);
319 ath9k_init_crypto(sc);
320 ath9k_ps_restore(sc);
321
322 sc->ps_idle = true;
323 ath9k_set_wiphy_idle(aphy, true);
324 ath_radio_disable(sc, hw);
325
293 return 0; 326 return 0;
294} 327}
295 328
296#endif /* CONFIG_PM */ 329static const struct dev_pm_ops ath9k_pm_ops = {
330 .suspend = ath_pci_suspend,
331 .resume = ath_pci_resume,
332 .freeze = ath_pci_suspend,
333 .thaw = ath_pci_resume,
334 .poweroff = ath_pci_suspend,
335 .restore = ath_pci_resume,
336};
337
338#define ATH9K_PM_OPS (&ath9k_pm_ops)
339
340#else /* !CONFIG_PM */
341
342#define ATH9K_PM_OPS NULL
343
344#endif /* !CONFIG_PM */
345
297 346
298MODULE_DEVICE_TABLE(pci, ath_pci_id_table); 347MODULE_DEVICE_TABLE(pci, ath_pci_id_table);
299 348
@@ -302,10 +351,7 @@ static struct pci_driver ath_pci_driver = {
302 .id_table = ath_pci_id_table, 351 .id_table = ath_pci_id_table,
303 .probe = ath_pci_probe, 352 .probe = ath_pci_probe,
304 .remove = ath_pci_remove, 353 .remove = ath_pci_remove,
305#ifdef CONFIG_PM 354 .driver.pm = ATH9K_PM_OPS,
306 .suspend = ath_pci_suspend,
307 .resume = ath_pci_resume,
308#endif /* CONFIG_PM */
309}; 355};
310 356
311int ath_pci_init(void) 357int ath_pci_init(void)
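The suspend/resume hunks above move ath9k from the legacy pci_driver .suspend/.resume hooks to dev_pm_ops, letting the PCI core take over config-space save/restore and power-state changes. Since both callbacks are plain system-sleep handlers, the open-coded table could, assuming the helper is available in the target kernel, also be generated with SIMPLE_DEV_PM_OPS(); a sketch that is equivalent as far as the six callbacks shown above are concerned:

	#ifdef CONFIG_PM
	/* Expands to a dev_pm_ops whose suspend/resume/freeze/thaw/poweroff/restore
	 * hooks all point at the two callbacks, matching the open-coded table above. */
	static SIMPLE_DEV_PM_OPS(ath9k_pm_ops, ath_pci_suspend, ath_pci_resume);
	#define ATH9K_PM_OPS	(&ath9k_pm_ops)
	#else
	#define ATH9K_PM_OPS	NULL
	#endif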
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index 17969af842f6..5e3d7496986e 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -19,6 +19,7 @@
19 19
20#define CHANSEL_DIV 15 20#define CHANSEL_DIV 15
21#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV) 21#define CHANSEL_2G(_freq) (((_freq) * 0x10000) / CHANSEL_DIV)
22#define CHANSEL_2G_9485(_freq) ((((_freq) * 0x10000) - 215) / CHANSEL_DIV)
22#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV) 23#define CHANSEL_5G(_freq) (((_freq) * 0x8000) / CHANSEL_DIV)
23 24
24#define AR_PHY_BASE 0x9800 25#define AR_PHY_BASE 0x9800
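The new CHANSEL_2G_9485() variant subtracts a small fixed offset (215) before the divide, so AR9485 parts program a slightly different 2 GHz channel-select word than other chips. A tiny stand-alone sketch of the arithmetic, reusing the macro definitions from this hunk; the frequency value is chosen only as an example.

	#include <stdio.h>

	#define CHANSEL_DIV		15
	#define CHANSEL_2G(_freq)	(((_freq) * 0x10000) / CHANSEL_DIV)
	#define CHANSEL_2G_9485(_freq)	((((_freq) * 0x10000) - 215) / CHANSEL_DIV)

	int main(void)
	{
		unsigned long freq = 2412;	/* 2.4 GHz channel 1 */

		printf("CHANSEL_2G(%lu)      = 0x%lx\n", freq, CHANSEL_2G(freq));
		printf("CHANSEL_2G_9485(%lu) = 0x%lx\n", freq, CHANSEL_2G_9485(freq));
		return 0;
	}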
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 89978d71617f..e45147820eae 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -381,25 +381,6 @@ static const struct ath_rate_table ar5416_11g_ratetable = {
381static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table, 381static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
382 struct ieee80211_tx_rate *rate); 382 struct ieee80211_tx_rate *rate);
383 383
384static inline int8_t median(int8_t a, int8_t b, int8_t c)
385{
386 if (a >= b) {
387 if (b >= c)
388 return b;
389 else if (a > c)
390 return c;
391 else
392 return a;
393 } else {
394 if (a >= c)
395 return a;
396 else if (b >= c)
397 return c;
398 else
399 return b;
400 }
401}
402
403static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table, 384static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
404 struct ath_rate_priv *ath_rc_priv) 385 struct ath_rate_priv *ath_rc_priv)
405{ 386{
@@ -419,7 +400,7 @@ static void ath_rc_sort_validrates(const struct ath_rate_table *rate_table,
419 } 400 }
420} 401}
421 402
422static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv) 403static void ath_rc_init_valid_rate_idx(struct ath_rate_priv *ath_rc_priv)
423{ 404{
424 u8 i; 405 u8 i;
425 406
@@ -427,7 +408,7 @@ static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv)
427 ath_rc_priv->valid_rate_index[i] = 0; 408 ath_rc_priv->valid_rate_index[i] = 0;
428} 409}
429 410
430static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv, 411static inline void ath_rc_set_valid_rate_idx(struct ath_rate_priv *ath_rc_priv,
431 u8 index, int valid_tx_rate) 412 u8 index, int valid_tx_rate)
432{ 413{
433 BUG_ON(index > ath_rc_priv->rate_table_size); 414 BUG_ON(index > ath_rc_priv->rate_table_size);
@@ -508,7 +489,7 @@ static u8 ath_rc_init_validrates(struct ath_rate_priv *ath_rc_priv,
508 489
509 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i; 490 ath_rc_priv->valid_phy_rateidx[phy][valid_rate_count] = i;
510 ath_rc_priv->valid_phy_ratecnt[phy] += 1; 491 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
511 ath_rc_set_valid_txmask(ath_rc_priv, i, 1); 492 ath_rc_set_valid_rate_idx(ath_rc_priv, i, 1);
512 hi = i; 493 hi = i;
513 } 494 }
514 } 495 }
@@ -551,7 +532,7 @@ static u8 ath_rc_setvalid_rates(struct ath_rate_priv *ath_rc_priv,
551 ath_rc_priv->valid_phy_rateidx[phy] 532 ath_rc_priv->valid_phy_rateidx[phy]
552 [valid_rate_count] = j; 533 [valid_rate_count] = j;
553 ath_rc_priv->valid_phy_ratecnt[phy] += 1; 534 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
554 ath_rc_set_valid_txmask(ath_rc_priv, j, 1); 535 ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
555 hi = A_MAX(hi, j); 536 hi = A_MAX(hi, j);
556 } 537 }
557 } 538 }
@@ -587,7 +568,7 @@ static u8 ath_rc_setvalid_htrates(struct ath_rate_priv *ath_rc_priv,
587 ath_rc_priv->valid_phy_rateidx[phy] 568 ath_rc_priv->valid_phy_rateidx[phy]
588 [ath_rc_priv->valid_phy_ratecnt[phy]] = j; 569 [ath_rc_priv->valid_phy_ratecnt[phy]] = j;
589 ath_rc_priv->valid_phy_ratecnt[phy] += 1; 570 ath_rc_priv->valid_phy_ratecnt[phy] += 1;
590 ath_rc_set_valid_txmask(ath_rc_priv, j, 1); 571 ath_rc_set_valid_rate_idx(ath_rc_priv, j, 1);
591 hi = A_MAX(hi, j); 572 hi = A_MAX(hi, j);
592 } 573 }
593 } 574 }
@@ -883,7 +864,7 @@ static bool ath_rc_update_per(struct ath_softc *sc,
883 bool state_change = false; 864 bool state_change = false;
884 int count, n_bad_frames; 865 int count, n_bad_frames;
885 u8 last_per; 866 u8 last_per;
886 static u32 nretry_to_per_lookup[10] = { 867 static const u32 nretry_to_per_lookup[10] = {
887 100 * 0 / 1, 868 100 * 0 / 1,
888 100 * 1 / 4, 869 100 * 1 / 4,
889 100 * 1 / 2, 870 100 * 1 / 2,
@@ -1106,13 +1087,13 @@ static int ath_rc_get_rateindex(const struct ath_rate_table *rate_table,
1106 struct ieee80211_tx_rate *rate) 1087 struct ieee80211_tx_rate *rate)
1107{ 1088{
1108 int rix = 0, i = 0; 1089 int rix = 0, i = 0;
1109 int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 }; 1090 static const int mcs_rix_off[] = { 7, 15, 20, 21, 22, 23 };
1110 1091
1111 if (!(rate->flags & IEEE80211_TX_RC_MCS)) 1092 if (!(rate->flags & IEEE80211_TX_RC_MCS))
1112 return rate->idx; 1093 return rate->idx;
1113 1094
1114 while (rate->idx > mcs_rix_off[i] && 1095 while (rate->idx > mcs_rix_off[i] &&
1115 i < sizeof(mcs_rix_off)/sizeof(int)) { 1096 i < ARRAY_SIZE(mcs_rix_off)) {
1116 rix++; i++; 1097 rix++; i++;
1117 } 1098 }
1118 1099
@@ -1203,7 +1184,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1203 return &ar5416_11na_ratetable; 1184 return &ar5416_11na_ratetable;
1204 return &ar5416_11a_ratetable; 1185 return &ar5416_11a_ratetable;
1205 default: 1186 default:
1206 ath_print(common, ATH_DBG_CONFIG, "Invalid band\n"); 1187 ath_dbg(common, ATH_DBG_CONFIG, "Invalid band\n");
1207 return NULL; 1188 return NULL;
1208 } 1189 }
1209} 1190}
@@ -1229,7 +1210,7 @@ static void ath_rc_init(struct ath_softc *sc,
1229 } 1210 }
1230 1211
1231 /* Determine the valid rates */ 1212 /* Determine the valid rates */
1232 ath_rc_init_valid_txmask(ath_rc_priv); 1213 ath_rc_init_valid_rate_idx(ath_rc_priv);
1233 1214
1234 for (i = 0; i < WLAN_RC_PHY_MAX; i++) { 1215 for (i = 0; i < WLAN_RC_PHY_MAX; i++) {
1235 for (j = 0; j < MAX_TX_RATE_PHY; j++) 1216 for (j = 0; j < MAX_TX_RATE_PHY; j++)
@@ -1278,9 +1259,9 @@ static void ath_rc_init(struct ath_softc *sc,
1278 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; 1259 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
1279 ath_rc_priv->rate_table = rate_table; 1260 ath_rc_priv->rate_table = rate_table;
1280 1261
1281 ath_print(common, ATH_DBG_CONFIG, 1262 ath_dbg(common, ATH_DBG_CONFIG,
1282 "RC Initialized with capabilities: 0x%x\n", 1263 "RC Initialized with capabilities: 0x%x\n",
1283 ath_rc_priv->ht_cap); 1264 ath_rc_priv->ht_cap);
1284} 1265}
1285 1266
1286static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, 1267static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1340,7 +1321,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1340 struct ath_rate_priv *ath_rc_priv = priv_sta; 1321 struct ath_rate_priv *ath_rc_priv = priv_sta;
1341 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1322 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1342 struct ieee80211_hdr *hdr; 1323 struct ieee80211_hdr *hdr;
1343 int final_ts_idx = 0, tx_status = 0, is_underrun = 0; 1324 int final_ts_idx = 0, tx_status = 0;
1344 int long_retry = 0; 1325 int long_retry = 0;
1345 __le16 fc; 1326 __le16 fc;
1346 int i; 1327 int i;
@@ -1373,32 +1354,17 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1373 tx_info->status.ampdu_len = 1; 1354 tx_info->status.ampdu_len = 1;
1374 } 1355 }
1375 1356
1376 /* 1357 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK))
1377 * If an underrun error is seen assume it as an excessive retry only
1378 * if max frame trigger level has been reached (2 KB for single stream,
1379 * and 4 KB for dual stream). Adjust the long retry as if the frame was
1380 * tried hw->max_rate_tries times to affect how ratectrl updates PER for
1381 * the failed rate. In case of congestion on the bus penalizing these
1382 * type of underruns should help hardware actually transmit new frames
1383 * successfully by eventually preferring slower rates. This itself
1384 * should also alleviate congestion on the bus.
1385 */
1386 if ((tx_info->pad[0] & ATH_TX_INFO_UNDERRUN) &&
1387 (sc->sc_ah->tx_trig_level >= ath_rc_priv->tx_triglevel_max)) {
1388 tx_status = 1;
1389 is_underrun = 1;
1390 }
1391
1392 if (tx_info->pad[0] & ATH_TX_INFO_XRETRY)
1393 tx_status = 1; 1358 tx_status = 1;
1394 1359
1395 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status, 1360 ath_rc_tx_status(sc, ath_rc_priv, tx_info, final_ts_idx, tx_status,
1396 (is_underrun) ? sc->hw->max_rate_tries : long_retry); 1361 long_retry);
1397 1362
1398 /* Check if aggregation has to be enabled for this tid */ 1363 /* Check if aggregation has to be enabled for this tid */
1399 if (conf_is_ht(&sc->hw->conf) && 1364 if (conf_is_ht(&sc->hw->conf) &&
1400 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) { 1365 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
1401 if (ieee80211_is_data_qos(fc)) { 1366 if (ieee80211_is_data_qos(fc) &&
1367 skb_get_queue_mapping(skb) != IEEE80211_AC_VO) {
1402 u8 *qc, tid; 1368 u8 *qc, tid;
1403 struct ath_node *an; 1369 struct ath_node *an;
1404 1370
@@ -1407,7 +1373,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband,
1407 an = (struct ath_node *)sta->drv_priv; 1373 an = (struct ath_node *)sta->drv_priv;
1408 1374
1409 if(ath_tx_aggr_check(sc, an, tid)) 1375 if(ath_tx_aggr_check(sc, an, tid))
1410 ieee80211_start_tx_ba_session(sta, tid); 1376 ieee80211_start_tx_ba_session(sta, tid, 0);
1411 } 1377 }
1412 } 1378 }
1413 1379
@@ -1444,12 +1410,12 @@ static void ath_rate_init(void *priv, struct ieee80211_supported_band *sband,
1444 ath_rc_priv->neg_ht_rates.rs_nrates = j; 1410 ath_rc_priv->neg_ht_rates.rs_nrates = j;
1445 } 1411 }
1446 1412
1447 is_cw40 = sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40; 1413 is_cw40 = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
1448 1414
1449 if (is_cw40) 1415 if (is_cw40)
1450 is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40; 1416 is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_40);
1451 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20) 1417 else if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_SGI_20)
1452 is_sgi = sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20; 1418 is_sgi = !!(sta->ht_cap.cap & IEEE80211_HT_CAP_SGI_20);
1453 1419
1454 /* Choose rate table first */ 1420 /* Choose rate table first */
1455 1421
@@ -1468,10 +1434,8 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1468 struct ath_rate_priv *ath_rc_priv = priv_sta; 1434 struct ath_rate_priv *ath_rc_priv = priv_sta;
1469 const struct ath_rate_table *rate_table = NULL; 1435 const struct ath_rate_table *rate_table = NULL;
1470 bool oper_cw40 = false, oper_sgi; 1436 bool oper_cw40 = false, oper_sgi;
1471 bool local_cw40 = (ath_rc_priv->ht_cap & WLAN_RC_40_FLAG) ? 1437 bool local_cw40 = !!(ath_rc_priv->ht_cap & WLAN_RC_40_FLAG);
1472 true : false; 1438 bool local_sgi = !!(ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG);
1473 bool local_sgi = (ath_rc_priv->ht_cap & WLAN_RC_SGI_FLAG) ?
1474 true : false;
1475 1439
1476 /* FIXME: Handle AP mode later when we support CWM */ 1440 /* FIXME: Handle AP mode later when we support CWM */
1477 1441
@@ -1499,9 +1463,9 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1499 oper_cw40, oper_sgi); 1463 oper_cw40, oper_sgi);
1500 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1464 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1501 1465
1502 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, 1466 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
1503 "Operating HT Bandwidth changed to: %d\n", 1467 "Operating HT Bandwidth changed to: %d\n",
1504 sc->hw->conf.channel_type); 1468 sc->hw->conf.channel_type);
1505 } 1469 }
1506 } 1470 }
1507} 1471}
@@ -1612,13 +1576,11 @@ static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp
1612 1576
1613 rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp); 1577 rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
1614 if (!rate_priv) { 1578 if (!rate_priv) {
1615 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 1579 ath_err(ath9k_hw_common(sc->sc_ah),
1616 "Unable to allocate private rc structure\n"); 1580 "Unable to allocate private rc structure\n");
1617 return NULL; 1581 return NULL;
1618 } 1582 }
1619 1583
1620 rate_priv->tx_triglevel_max = sc->sc_ah->caps.tx_triglevel_max;
1621
1622 return rate_priv; 1584 return rate_priv;
1623} 1585}
1624 1586
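Two idioms recur in the rc.c hunks above: lookup tables become static const so they live in read-only data instead of being rebuilt on each call, and open-coded sizeof arithmetic is replaced by ARRAY_SIZE(), which stays correct if the element type changes. A small illustrative sketch; the table contents and function name are arbitrary, not part of the patch.

	#include <linux/kernel.h>	/* ARRAY_SIZE() */

	static const int example_table[] = { 7, 15, 20, 21, 22, 23 };

	static int example_lookup(int idx)
	{
		int i;

		/* Bound the walk with ARRAY_SIZE() rather than sizeof()/sizeof(int). */
		for (i = 0; i < ARRAY_SIZE(example_table); i++)
			if (example_table[i] >= idx)
				return i;

		return -1;
	}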
diff --git a/drivers/net/wireless/ath/ath9k/rc.h b/drivers/net/wireless/ath/ath9k/rc.h
index 2f46a2266ba1..5d984b8acdb1 100644
--- a/drivers/net/wireless/ath/ath9k/rc.h
+++ b/drivers/net/wireless/ath/ath9k/rc.h
@@ -195,7 +195,6 @@ struct ath_rc_stats {
195 * @rate_max_phy: phy index for the max rate 195 * @rate_max_phy: phy index for the max rate
196 * @per: PER for every valid rate in % 196 * @per: PER for every valid rate in %
197 * @probe_interval: interval for ratectrl to probe for other rates 197 * @probe_interval: interval for ratectrl to probe for other rates
198 * @prev_data_rix: rate idx of last data frame
199 * @ht_cap: HT capabilities 198 * @ht_cap: HT capabilities
200 * @neg_rates: Negotiated rates 199 * @neg_rates: Negotiated rates
201 * @neg_ht_rates: Negotiated HT rates 200 * @neg_ht_rates: Negotiated HT rates
@@ -214,22 +213,14 @@ struct ath_rate_priv {
214 u32 probe_time; 213 u32 probe_time;
215 u32 per_down_time; 214 u32 per_down_time;
216 u32 probe_interval; 215 u32 probe_interval;
217 u32 prev_data_rix;
218 u32 tx_triglevel_max;
219 struct ath_rateset neg_rates; 216 struct ath_rateset neg_rates;
220 struct ath_rateset neg_ht_rates; 217 struct ath_rateset neg_ht_rates;
221 struct ath_rate_softc *asc;
222 const struct ath_rate_table *rate_table; 218 const struct ath_rate_table *rate_table;
223 219
224 struct dentry *debugfs_rcstats; 220 struct dentry *debugfs_rcstats;
225 struct ath_rc_stats rcstats[RATE_TABLE_SIZE]; 221 struct ath_rc_stats rcstats[RATE_TABLE_SIZE];
226}; 222};
227 223
228#define ATH_TX_INFO_FRAME_TYPE_INTERNAL (1 << 0)
229#define ATH_TX_INFO_FRAME_TYPE_PAUSE (1 << 1)
230#define ATH_TX_INFO_XRETRY (1 << 3)
231#define ATH_TX_INFO_UNDERRUN (1 << 4)
232
233enum ath9k_internal_frame_type { 224enum ath9k_internal_frame_type {
234 ATH9K_IFT_NOT_INTERNAL, 225 ATH9K_IFT_NOT_INTERNAL,
235 ATH9K_IFT_PAUSE, 226 ATH9K_IFT_PAUSE,
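The rc.c hunks above swap the "(x & FLAG) ? true : false" ternaries for the !! double-negation idiom when filling bool variables from capability bitmasks. A minimal standalone sketch of that idiom follows; the flag values are invented purely for illustration and are not the driver's definitions.

    #include <stdbool.h>
    #include <stdio.h>

    #define WLAN_RC_40_FLAG  0x02     /* illustrative values only */
    #define WLAN_RC_SGI_FLAG 0x04

    int main(void)
    {
        unsigned int ht_cap = WLAN_RC_40_FLAG;   /* pretend capability word */

        /* !! collapses any non-zero bit-test result to exactly 1, so the
         * value can be assigned straight to a bool without a ternary */
        bool cw40 = !!(ht_cap & WLAN_RC_40_FLAG);
        bool sgi  = !!(ht_cap & WLAN_RC_SGI_FLAG);

        printf("cw40=%d sgi=%d\n", cw40, sgi);
        return 0;
    }

The two forms are equivalent; the shorter one simply avoids repeating the condition and cannot accidentally truncate a high flag bit when the result lands in a narrower type.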
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index fdc2ec52b42f..b2497b8601e5 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -165,7 +165,7 @@ static void ath_rx_addbuffer_edma(struct ath_softc *sc,
165 u32 nbuf = 0; 165 u32 nbuf = 0;
166 166
167 if (list_empty(&sc->rx.rxbuf)) { 167 if (list_empty(&sc->rx.rxbuf)) {
168 ath_print(common, ATH_DBG_QUEUE, "No free rx buf available\n"); 168 ath_dbg(common, ATH_DBG_QUEUE, "No free rx buf available\n");
169 return; 169 return;
170 } 170 }
171 171
@@ -269,7 +269,7 @@ static int ath_rx_edma_init(struct ath_softc *sc, int nbufs)
269 dev_kfree_skb_any(skb); 269 dev_kfree_skb_any(skb);
270 bf->bf_mpdu = NULL; 270 bf->bf_mpdu = NULL;
271 bf->bf_buf_addr = 0; 271 bf->bf_buf_addr = 0;
272 ath_print(common, ATH_DBG_FATAL, 272 ath_err(common,
273 "dma_mapping_error() on RX init\n"); 273 "dma_mapping_error() on RX init\n");
274 error = -ENOMEM; 274 error = -ENOMEM;
275 goto rx_init_fail; 275 goto rx_init_fail;
@@ -317,7 +317,7 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
317 struct ath_buf *bf; 317 struct ath_buf *bf;
318 int error = 0; 318 int error = 0;
319 319
320 spin_lock_init(&sc->rx.pcu_lock); 320 spin_lock_init(&sc->sc_pcu_lock);
321 sc->sc_flags &= ~SC_OP_RXFLUSH; 321 sc->sc_flags &= ~SC_OP_RXFLUSH;
322 spin_lock_init(&sc->rx.rxbuflock); 322 spin_lock_init(&sc->rx.rxbuflock);
323 323
@@ -327,17 +327,17 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
327 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 327 common->rx_bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
328 min(common->cachelsz, (u16)64)); 328 min(common->cachelsz, (u16)64));
329 329
330 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 330 ath_dbg(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
331 common->cachelsz, common->rx_bufsize); 331 common->cachelsz, common->rx_bufsize);
332 332
333 /* Initialize rx descriptors */ 333 /* Initialize rx descriptors */
334 334
335 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 335 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
336 "rx", nbufs, 1, 0); 336 "rx", nbufs, 1, 0);
337 if (error != 0) { 337 if (error != 0) {
338 ath_print(common, ATH_DBG_FATAL, 338 ath_err(common,
339 "failed to allocate rx descriptors: %d\n", 339 "failed to allocate rx descriptors: %d\n",
340 error); 340 error);
341 goto err; 341 goto err;
342 } 342 }
343 343
@@ -358,8 +358,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
358 dev_kfree_skb_any(skb); 358 dev_kfree_skb_any(skb);
359 bf->bf_mpdu = NULL; 359 bf->bf_mpdu = NULL;
360 bf->bf_buf_addr = 0; 360 bf->bf_buf_addr = 0;
361 ath_print(common, ATH_DBG_FATAL, 361 ath_err(common,
362 "dma_mapping_error() on RX init\n"); 362 "dma_mapping_error() on RX init\n");
363 error = -ENOMEM; 363 error = -ENOMEM;
364 goto err; 364 goto err;
365 } 365 }
@@ -528,6 +528,13 @@ bool ath_stoprecv(struct ath_softc *sc)
528 sc->rx.rxlink = NULL; 528 sc->rx.rxlink = NULL;
529 spin_unlock_bh(&sc->rx.rxbuflock); 529 spin_unlock_bh(&sc->rx.rxbuflock);
530 530
531 if (!(ah->ah_flags & AH_UNPLUGGED) &&
532 unlikely(!stopped)) {
533 ath_err(ath9k_hw_common(sc->sc_ah),
534 "Could not stop RX, we could be "
535 "confusing the DMA engine when we start RX up\n");
536 ATH_DBG_WARN_ON_ONCE(!stopped);
537 }
531 return stopped; 538 return stopped;
532} 539}
533 540
@@ -588,9 +595,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
588 595
589 if (sc->ps_flags & PS_BEACON_SYNC) { 596 if (sc->ps_flags & PS_BEACON_SYNC) {
590 sc->ps_flags &= ~PS_BEACON_SYNC; 597 sc->ps_flags &= ~PS_BEACON_SYNC;
591 ath_print(common, ATH_DBG_PS, 598 ath_dbg(common, ATH_DBG_PS,
592 "Reconfigure Beacon timers based on " 599 "Reconfigure Beacon timers based on timestamp from the AP\n");
593 "timestamp from the AP\n");
594 ath_beacon_config(sc, NULL); 600 ath_beacon_config(sc, NULL);
595 } 601 }
596 602
@@ -602,8 +608,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
602 * a backup trigger for returning into NETWORK SLEEP state, 608 * a backup trigger for returning into NETWORK SLEEP state,
603 * so we are waiting for it as well. 609 * so we are waiting for it as well.
604 */ 610 */
605 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating " 611 ath_dbg(common, ATH_DBG_PS,
606 "buffered broadcast/multicast frame(s)\n"); 612 "Received DTIM beacon indicating buffered broadcast/multicast frame(s)\n");
607 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON; 613 sc->ps_flags |= PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON;
608 return; 614 return;
609 } 615 }
@@ -615,8 +621,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
615 * been delivered. 621 * been delivered.
616 */ 622 */
617 sc->ps_flags &= ~PS_WAIT_FOR_CAB; 623 sc->ps_flags &= ~PS_WAIT_FOR_CAB;
618 ath_print(common, ATH_DBG_PS, 624 ath_dbg(common, ATH_DBG_PS,
619 "PS wait for CAB frames timed out\n"); 625 "PS wait for CAB frames timed out\n");
620 } 626 }
621} 627}
622 628
@@ -641,15 +647,14 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
641 * point. 647 * point.
642 */ 648 */
643 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON); 649 sc->ps_flags &= ~(PS_WAIT_FOR_CAB | PS_WAIT_FOR_BEACON);
644 ath_print(common, ATH_DBG_PS, 650 ath_dbg(common, ATH_DBG_PS,
645 "All PS CAB frames received, back to sleep\n"); 651 "All PS CAB frames received, back to sleep\n");
646 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) && 652 } else if ((sc->ps_flags & PS_WAIT_FOR_PSPOLL_DATA) &&
647 !is_multicast_ether_addr(hdr->addr1) && 653 !is_multicast_ether_addr(hdr->addr1) &&
648 !ieee80211_has_morefrags(hdr->frame_control)) { 654 !ieee80211_has_morefrags(hdr->frame_control)) {
649 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA; 655 sc->ps_flags &= ~PS_WAIT_FOR_PSPOLL_DATA;
650 ath_print(common, ATH_DBG_PS, 656 ath_dbg(common, ATH_DBG_PS,
651 "Going back to sleep after having received " 657 "Going back to sleep after having received PS-Poll data (0x%lx)\n",
652 "PS-Poll data (0x%lx)\n",
653 sc->ps_flags & (PS_WAIT_FOR_BEACON | 658 sc->ps_flags & (PS_WAIT_FOR_BEACON |
654 PS_WAIT_FOR_CAB | 659 PS_WAIT_FOR_CAB |
655 PS_WAIT_FOR_PSPOLL_DATA | 660 PS_WAIT_FOR_PSPOLL_DATA |
@@ -658,8 +663,7 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
658} 663}
659 664
660static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw, 665static void ath_rx_send_to_mac80211(struct ieee80211_hw *hw,
661 struct ath_softc *sc, struct sk_buff *skb, 666 struct ath_softc *sc, struct sk_buff *skb)
662 struct ieee80211_rx_status *rxs)
663{ 667{
664 struct ieee80211_hdr *hdr; 668 struct ieee80211_hdr *hdr;
665 669
@@ -958,8 +962,9 @@ static int ath9k_process_rate(struct ath_common *common,
958 * No valid hardware bitrate found -- we should not get here 962 * No valid hardware bitrate found -- we should not get here
959 * because hardware has already validated this frame as OK. 963 * because hardware has already validated this frame as OK.
960 */ 964 */
961 ath_print(common, ATH_DBG_XMIT, "unsupported hw bitrate detected " 965 ath_dbg(common, ATH_DBG_XMIT,
962 "0x%02x using 1 Mbit\n", rx_stats->rs_rate); 966 "unsupported hw bitrate detected 0x%02x using 1 Mbit\n",
967 rx_stats->rs_rate);
963 968
964 return -EINVAL; 969 return -EINVAL;
965} 970}
@@ -969,36 +974,23 @@ static void ath9k_process_rssi(struct ath_common *common,
969 struct ieee80211_hdr *hdr, 974 struct ieee80211_hdr *hdr,
970 struct ath_rx_status *rx_stats) 975 struct ath_rx_status *rx_stats)
971{ 976{
977 struct ath_wiphy *aphy = hw->priv;
972 struct ath_hw *ah = common->ah; 978 struct ath_hw *ah = common->ah;
973 struct ieee80211_sta *sta; 979 int last_rssi;
974 struct ath_node *an;
975 int last_rssi = ATH_RSSI_DUMMY_MARKER;
976 __le16 fc; 980 __le16 fc;
977 981
982 if (ah->opmode != NL80211_IFTYPE_STATION)
983 return;
984
978 fc = hdr->frame_control; 985 fc = hdr->frame_control;
986 if (!ieee80211_is_beacon(fc) ||
987 compare_ether_addr(hdr->addr3, common->curbssid))
988 return;
979 989
980 rcu_read_lock(); 990 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD && !rx_stats->rs_moreaggr)
981 /* 991 ATH_RSSI_LPF(aphy->last_rssi, rx_stats->rs_rssi);
982 * XXX: use ieee80211_find_sta! This requires quite a bit of work
983 * under the current ath9k virtual wiphy implementation as we have
984 * no way of tying a vif to wiphy. Typically vifs are attached to
985 * at least one sdata of a wiphy on mac80211 but with ath9k virtual
986 * wiphy you'd have to iterate over every wiphy and each sdata.
987 */
988 if (is_multicast_ether_addr(hdr->addr1))
989 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, NULL);
990 else
991 sta = ieee80211_find_sta_by_ifaddr(hw, hdr->addr2, hdr->addr1);
992
993 if (sta) {
994 an = (struct ath_node *) sta->drv_priv;
995 if (rx_stats->rs_rssi != ATH9K_RSSI_BAD &&
996 !rx_stats->rs_moreaggr)
997 ATH_RSSI_LPF(an->last_rssi, rx_stats->rs_rssi);
998 last_rssi = an->last_rssi;
999 }
1000 rcu_read_unlock();
1001 992
993 last_rssi = aphy->last_rssi;
1002 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER)) 994 if (likely(last_rssi != ATH_RSSI_DUMMY_MARKER))
1003 rx_stats->rs_rssi = ATH_EP_RND(last_rssi, 995 rx_stats->rs_rssi = ATH_EP_RND(last_rssi,
1004 ATH_RSSI_EP_MULTIPLIER); 996 ATH_RSSI_EP_MULTIPLIER);
@@ -1006,8 +998,7 @@ static void ath9k_process_rssi(struct ath_common *common,
1006 rx_stats->rs_rssi = 0; 998 rx_stats->rs_rssi = 0;
1007 999
1008 /* Update Beacon RSSI, this is used by ANI. */ 1000 /* Update Beacon RSSI, this is used by ANI. */
1009 if (ieee80211_is_beacon(fc)) 1001 ah->stats.avgbrssi = rx_stats->rs_rssi;
1010 ah->stats.avgbrssi = rx_stats->rs_rssi;
1011} 1002}
1012 1003
1013/* 1004/*
@@ -1637,7 +1628,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1637 struct ath_hw *ah = sc->sc_ah; 1628 struct ath_hw *ah = sc->sc_ah;
1638 struct ath_common *common = ath9k_hw_common(ah); 1629 struct ath_common *common = ath9k_hw_common(ah);
1639 /* 1630 /*
1640 * The hw can techncically differ from common->hw when using ath9k 1631 * The hw can technically differ from common->hw when using ath9k
1641 * virtual wiphy so to account for that we iterate over the active 1632 * virtual wiphy so to account for that we iterate over the active
1642 * wiphys and find the appropriate wiphy and therefore hw. 1633 * wiphys and find the appropriate wiphy and therefore hw.
1643 */ 1634 */
@@ -1744,9 +1735,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1744 dev_kfree_skb_any(requeue_skb); 1735 dev_kfree_skb_any(requeue_skb);
1745 bf->bf_mpdu = NULL; 1736 bf->bf_mpdu = NULL;
1746 bf->bf_buf_addr = 0; 1737 bf->bf_buf_addr = 0;
1747 ath_print(common, ATH_DBG_FATAL, 1738 ath_err(common, "dma_mapping_error() on RX\n");
1748 "dma_mapping_error() on RX\n"); 1739 ath_rx_send_to_mac80211(hw, sc, skb);
1749 ath_rx_send_to_mac80211(hw, sc, skb, rxs);
1750 break; 1740 break;
1751 } 1741 }
1752 1742
@@ -1762,17 +1752,18 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush, bool hp)
1762 } 1752 }
1763 1753
1764 spin_lock_irqsave(&sc->sc_pm_lock, flags); 1754 spin_lock_irqsave(&sc->sc_pm_lock, flags);
1765 if (unlikely(ath9k_check_auto_sleep(sc) || 1755
1766 (sc->ps_flags & (PS_WAIT_FOR_BEACON | 1756 if ((sc->ps_flags & (PS_WAIT_FOR_BEACON |
1767 PS_WAIT_FOR_CAB | 1757 PS_WAIT_FOR_CAB |
1768 PS_WAIT_FOR_PSPOLL_DATA)))) 1758 PS_WAIT_FOR_PSPOLL_DATA)) ||
1759 unlikely(ath9k_check_auto_sleep(sc)))
1769 ath_rx_ps(sc, skb); 1760 ath_rx_ps(sc, skb);
1770 spin_unlock_irqrestore(&sc->sc_pm_lock, flags); 1761 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
1771 1762
1772 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB) 1763 if (ah->caps.hw_caps & ATH9K_HW_CAP_ANT_DIV_COMB)
1773 ath_ant_comb_scan(sc, &rs); 1764 ath_ant_comb_scan(sc, &rs);
1774 1765
1775 ath_rx_send_to_mac80211(hw, sc, skb, rxs); 1766 ath_rx_send_to_mac80211(hw, sc, skb);
1776 1767
1777requeue: 1768requeue:
1778 if (edma) { 1769 if (edma) {
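The ath9k_process_rssi() hunk above drops the per-station lookup under rcu_read_lock() and instead keeps one low-pass-filtered beacon RSSI per wiphy (aphy->last_rssi, seeded with ATH_RSSI_DUMMY_MARKER in virtual.c further down). The ATH_RSSI_LPF and ATH_EP_RND macro bodies are not shown in this diff; the sketch below is only a rough standalone illustration of the kind of fixed-point exponential smoothing such helpers typically perform, with constants that are guesses rather than the driver's.

    #include <stdio.h>

    #define RSSI_MULT   16    /* keep the average scaled for fractional precision */
    #define RSSI_WEIGHT 8     /* mix 1/8 of each new sample into the average */

    static int rssi_avg = -1; /* scaled running average, -1 means "unset" */

    static void rssi_lpf(int sample)
    {
        if (rssi_avg < 0)
            rssi_avg = sample * RSSI_MULT;
        else
            rssi_avg += (sample * RSSI_MULT - rssi_avg) / RSSI_WEIGHT;
    }

    static int rssi_value(void)
    {
        /* round back down to an unscaled value */
        return (rssi_avg + RSSI_MULT / 2) / RSSI_MULT;
    }

    int main(void)
    {
        int samples[] = { 40, 42, 38, 45, 44 };
        unsigned int i;

        for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
            rssi_lpf(samples[i]);
        printf("smoothed rssi ~ %d\n", rssi_value());
        return 0;
    }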
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index 2c6a22fbb0f0..4df5659c6c16 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -787,6 +787,8 @@
787#define AR_SREV_REVISION_9271_11 1 787#define AR_SREV_REVISION_9271_11 1
788#define AR_SREV_VERSION_9300 0x1c0 788#define AR_SREV_VERSION_9300 0x1c0
789#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */ 789#define AR_SREV_REVISION_9300_20 2 /* 2.0 and 2.1 */
790#define AR_SREV_VERSION_9485 0x240
791#define AR_SREV_REVISION_9485_10 0
790 792
791#define AR_SREV_5416(_ah) \ 793#define AR_SREV_5416(_ah) \
792 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \ 794 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_5416_PCI) || \
@@ -859,20 +861,24 @@
859 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \ 861 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9300) && \
860 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20))) 862 ((_ah)->hw_version.macRev >= AR_SREV_REVISION_9300_20)))
861 863
864#define AR_SREV_9485(_ah) \
865 (((_ah)->hw_version.macVersion == AR_SREV_VERSION_9485))
866#define AR_SREV_9485_10(_ah) \
867 (AR_SREV_9485(_ah) && \
868 ((_ah)->hw_version.macRev == AR_SREV_REVISION_9485_10))
869
862#define AR_SREV_9285E_20(_ah) \ 870#define AR_SREV_9285E_20(_ah) \
863 (AR_SREV_9285_12_OR_LATER(_ah) && \ 871 (AR_SREV_9285_12_OR_LATER(_ah) && \
864 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1)) 872 ((REG_READ(_ah, AR_AN_SYNTH9) & 0x7) == 0x1))
865 873
866#define AR_DEVID_7010(_ah) \ 874enum ath_usb_dev {
867 (((_ah)->hw_version.devid == 0x7010) || \ 875 AR9280_USB = 1, /* AR7010 + AR9280, UB94 */
868 ((_ah)->hw_version.devid == 0x7015) || \ 876 AR9287_USB = 2, /* AR7010 + AR9287, UB95 */
869 ((_ah)->hw_version.devid == 0x9018) || \ 877};
870 ((_ah)->hw_version.devid == 0xA704) || \
871 ((_ah)->hw_version.devid == 0x1200))
872 878
873#define AR9287_HTC_DEVID(_ah) \ 879#define AR_DEVID_7010(_ah) \
874 (((_ah)->hw_version.devid == 0x7015) || \ 880 (((_ah)->hw_version.usbdev == AR9280_USB) || \
875 ((_ah)->hw_version.devid == 0x1200)) 881 ((_ah)->hw_version.usbdev == AR9287_USB))
876 882
877#define AR_RADIO_SREV_MAJOR 0xf0 883#define AR_RADIO_SREV_MAJOR 0xf0
878#define AR_RAD5133_SREV_MAJOR 0xc0 884#define AR_RAD5133_SREV_MAJOR 0xc0
@@ -1074,6 +1080,9 @@ enum {
1074#define AR_INTR_PRIO_ASYNC_MASK 0x40c8 1080#define AR_INTR_PRIO_ASYNC_MASK 0x40c8
1075#define AR_INTR_PRIO_SYNC_MASK 0x40cc 1081#define AR_INTR_PRIO_SYNC_MASK 0x40cc
1076#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4 1082#define AR_INTR_PRIO_ASYNC_ENABLE 0x40d4
1083#define AR_ENT_OTP 0x40d8
1084#define AR_ENT_OTP_CHAIN2_DISABLE 0x00020000
1085#define AR_ENT_OTP_MPSD 0x00800000
1077 1086
1078#define AR_RTC_9300_PLL_DIV 0x000003ff 1087#define AR_RTC_9300_PLL_DIV 0x000003ff
1079#define AR_RTC_9300_PLL_DIV_S 0 1088#define AR_RTC_9300_PLL_DIV_S 0
@@ -1111,6 +1120,8 @@ enum {
1111#define AR_RTC_PLL_CONTROL \ 1120#define AR_RTC_PLL_CONTROL \
1112 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014) 1121 ((AR_SREV_9100(ah)) ? (AR_RTC_BASE + 0x0014) : 0x7014)
1113 1122
1123#define AR_RTC_PLL_CONTROL2 0x703c
1124
1114#define AR_RTC_PLL_DIV 0x0000001f 1125#define AR_RTC_PLL_DIV 0x0000001f
1115#define AR_RTC_PLL_DIV_S 0 1126#define AR_RTC_PLL_DIV_S 0
1116#define AR_RTC_PLL_DIV2 0x00000020 1127#define AR_RTC_PLL_DIV2 0x00000020
@@ -1574,6 +1585,7 @@ enum {
1574#define AR_PCU_TBTT_PROTECT 0x00200000 1585#define AR_PCU_TBTT_PROTECT 0x00200000
1575#define AR_PCU_CLEAR_VMF 0x01000000 1586#define AR_PCU_CLEAR_VMF 0x01000000
1576#define AR_PCU_CLEAR_BA_VALID 0x04000000 1587#define AR_PCU_CLEAR_BA_VALID 0x04000000
1588#define AR_PCU_ALWAYS_PERFORM_KEYSEARCH 0x10000000
1577 1589
1578#define AR_PCU_BT_ANT_PREVENT_RX 0x00100000 1590#define AR_PCU_BT_ANT_PREVENT_RX 0x00100000
1579#define AR_PCU_BT_ANT_PREVENT_RX_S 20 1591#define AR_PCU_BT_ANT_PREVENT_RX_S 20
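The reg.h hunk above replaces the open-coded devid lists behind AR_DEVID_7010() and AR9287_HTC_DEVID() with a small ath_usb_dev enum, so the macro compares a single hw_version.usbdev classification that is presumably filled in once at probe time. A standalone sketch of that pattern follows; the device IDs, names and the probe helper are invented for illustration.

    #include <stdio.h>

    enum usb_dev_family { DEV_UNKNOWN = 0, DEV_FAMILY_A, DEV_FAMILY_B };

    struct hw_version {
        unsigned int devid;
        enum usb_dev_family usbdev;
    };

    /* done once when the device is identified */
    static void classify_devid(struct hw_version *v)
    {
        switch (v->devid) {
        case 0x7010:
        case 0x7015:
            v->usbdev = DEV_FAMILY_A;
            break;
        case 0x1200:
            v->usbdev = DEV_FAMILY_B;
            break;
        default:
            v->usbdev = DEV_UNKNOWN;
        }
    }

    /* later checks compare the one enum instead of re-listing every id */
    #define IS_FAMILY_A(v) ((v)->usbdev == DEV_FAMILY_A)

    int main(void)
    {
        struct hw_version v = { .devid = 0x7015, .usbdev = DEV_UNKNOWN };
        classify_devid(&v);
        printf("family A? %d\n", IS_FAMILY_A(&v));
        return 0;
    }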
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index ec7cf5ee56bc..2dc7095e56d1 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -107,6 +107,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
107 aphy->sc = sc; 107 aphy->sc = sc;
108 aphy->hw = hw; 108 aphy->hw = hw;
109 sc->sec_wiphy[i] = aphy; 109 sc->sec_wiphy[i] = aphy;
110 aphy->last_rssi = ATH_RSSI_DUMMY_MARKER;
110 spin_unlock_bh(&sc->wiphy_lock); 111 spin_unlock_bh(&sc->wiphy_lock);
111 112
112 memcpy(addr, common->macaddr, ETH_ALEN); 113 memcpy(addr, common->macaddr, ETH_ALEN);
@@ -186,7 +187,7 @@ static int ath9k_send_nullfunc(struct ath_wiphy *aphy,
186 info->control.rates[1].idx = -1; 187 info->control.rates[1].idx = -1;
187 188
188 memset(&txctl, 0, sizeof(struct ath_tx_control)); 189 memset(&txctl, 0, sizeof(struct ath_tx_control));
189 txctl.txq = &sc->tx.txq[sc->tx.hwq_map[WME_AC_VO]]; 190 txctl.txq = sc->tx.txq_map[WME_AC_VO];
190 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE; 191 txctl.frame_type = ps ? ATH9K_IFT_PAUSE : ATH9K_IFT_UNPAUSE;
191 192
192 if (ath_tx_start(aphy->hw, skb, &txctl) != 0) 193 if (ath_tx_start(aphy->hw, skb, &txctl) != 0)
@@ -287,7 +288,6 @@ void ath9k_wiphy_chan_work(struct work_struct *work)
287 /* sync hw configuration for hw code */ 288 /* sync hw configuration for hw code */
288 common->hw = aphy->hw; 289 common->hw = aphy->hw;
289 290
290 ath_update_chainmask(sc, sc->chan_is_ht);
291 if (ath_set_channel(sc, aphy->hw, 291 if (ath_set_channel(sc, aphy->hw,
292 &sc->sc_ah->channels[sc->chan_idx]) < 0) { 292 &sc->sc_ah->channels[sc->chan_idx]) < 0) {
293 printk(KERN_DEBUG "ath9k: Failed to set channel for new " 293 printk(KERN_DEBUG "ath9k: Failed to set channel for new "
@@ -304,13 +304,12 @@ void ath9k_wiphy_chan_work(struct work_struct *work)
304 * ath9k version of ieee80211_tx_status() for TX frames that are generated 304 * ath9k version of ieee80211_tx_status() for TX frames that are generated
305 * internally in the driver. 305 * internally in the driver.
306 */ 306 */
307void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb) 307void ath9k_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ftype)
308{ 308{
309 struct ath_wiphy *aphy = hw->priv; 309 struct ath_wiphy *aphy = hw->priv;
310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 310 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
311 311
312 if ((tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_PAUSE) && 312 if (ftype == ATH9K_IFT_PAUSE && aphy->state == ATH_WIPHY_PAUSING) {
313 aphy->state == ATH_WIPHY_PAUSING) {
314 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) { 313 if (!(tx_info->flags & IEEE80211_TX_STAT_ACK)) {
315 printk(KERN_DEBUG "ath9k: %s: no ACK for pause " 314 printk(KERN_DEBUG "ath9k: %s: no ACK for pause "
316 "frame\n", wiphy_name(hw->wiphy)); 315 "frame\n", wiphy_name(hw->wiphy));
@@ -656,10 +655,9 @@ void ath9k_set_wiphy_idle(struct ath_wiphy *aphy, bool idle)
656 struct ath_softc *sc = aphy->sc; 655 struct ath_softc *sc = aphy->sc;
657 656
658 aphy->idle = idle; 657 aphy->idle = idle;
659 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG, 658 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
660 "Marking %s as %s\n", 659 "Marking %s as %sidle\n",
661 wiphy_name(aphy->hw->wiphy), 660 wiphy_name(aphy->hw->wiphy), idle ? "" : "not-");
662 idle ? "idle" : "not-idle");
663} 661}
664/* Only bother starting a queue on an active virtual wiphy */ 662/* Only bother starting a queue on an active virtual wiphy */
665bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue) 663bool ath_mac80211_start_queue(struct ath_softc *sc, u16 skb_queue)
diff --git a/drivers/net/wireless/ath/ath9k/wmi.c b/drivers/net/wireless/ath/ath9k/wmi.c
index 93a8bda09c25..dc862f5e1162 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.c
+++ b/drivers/net/wireless/ath/ath9k/wmi.c
@@ -120,17 +120,27 @@ void ath9k_deinit_wmi(struct ath9k_htc_priv *priv)
120 kfree(priv->wmi); 120 kfree(priv->wmi);
121} 121}
122 122
123void ath9k_wmi_tasklet(unsigned long data) 123void ath9k_swba_tasklet(unsigned long data)
124{ 124{
125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data; 125 struct ath9k_htc_priv *priv = (struct ath9k_htc_priv *)data;
126 struct ath_common *common = ath9k_hw_common(priv->ah); 126 struct ath_common *common = ath9k_hw_common(priv->ah);
127 127
128 ath_print(common, ATH_DBG_WMI, "SWBA Event received\n"); 128 ath_dbg(common, ATH_DBG_WMI, "SWBA Event received\n");
129 129
130 ath9k_htc_swba(priv, priv->wmi->beacon_pending); 130 ath9k_htc_swba(priv, priv->wmi->beacon_pending);
131 131
132} 132}
133 133
134void ath9k_fatal_work(struct work_struct *work)
135{
136 struct ath9k_htc_priv *priv = container_of(work, struct ath9k_htc_priv,
137 fatal_work);
138 struct ath_common *common = ath9k_hw_common(priv->ah);
139
140 ath_dbg(common, ATH_DBG_FATAL, "FATAL Event received, resetting device\n");
141 ath9k_htc_reset(priv);
142}
143
134static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb) 144static void ath9k_wmi_rsp_callback(struct wmi *wmi, struct sk_buff *skb)
135{ 145{
136 skb_pull(skb, sizeof(struct wmi_cmd_hdr)); 146 skb_pull(skb, sizeof(struct wmi_cmd_hdr));
@@ -163,7 +173,11 @@ static void ath9k_wmi_ctrl_rx(void *priv, struct sk_buff *skb,
163 switch (cmd_id) { 173 switch (cmd_id) {
164 case WMI_SWBA_EVENTID: 174 case WMI_SWBA_EVENTID:
165 wmi->beacon_pending = *(u8 *)wmi_event; 175 wmi->beacon_pending = *(u8 *)wmi_event;
166 tasklet_schedule(&wmi->drv_priv->wmi_tasklet); 176 tasklet_schedule(&wmi->drv_priv->swba_tasklet);
177 break;
178 case WMI_FATAL_EVENTID:
179 ieee80211_queue_work(wmi->drv_priv->hw,
180 &wmi->drv_priv->fatal_work);
167 break; 181 break;
168 case WMI_TXRATE_EVENTID: 182 case WMI_TXRATE_EVENTID:
169#ifdef CONFIG_ATH9K_HTC_DEBUGFS 183#ifdef CONFIG_ATH9K_HTC_DEBUGFS
@@ -250,7 +264,7 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
250 int time_left, ret = 0; 264 int time_left, ret = 0;
251 unsigned long flags; 265 unsigned long flags;
252 266
253 if (wmi->drv_priv->op_flags & OP_UNPLUGGED) 267 if (ah->ah_flags & AH_UNPLUGGED)
254 return 0; 268 return 0;
255 269
256 skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC); 270 skb = alloc_skb(headroom + cmd_len, GFP_ATOMIC);
@@ -286,9 +300,9 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
286 300
287 time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout); 301 time_left = wait_for_completion_timeout(&wmi->cmd_wait, timeout);
288 if (!time_left) { 302 if (!time_left) {
289 ath_print(common, ATH_DBG_WMI, 303 ath_dbg(common, ATH_DBG_WMI,
290 "Timeout waiting for WMI command: %s\n", 304 "Timeout waiting for WMI command: %s\n",
291 wmi_cmd_to_name(cmd_id)); 305 wmi_cmd_to_name(cmd_id));
292 mutex_unlock(&wmi->op_mutex); 306 mutex_unlock(&wmi->op_mutex);
293 return -ETIMEDOUT; 307 return -ETIMEDOUT;
294 } 308 }
@@ -298,8 +312,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
298 return 0; 312 return 0;
299 313
300out: 314out:
301 ath_print(common, ATH_DBG_WMI, 315 ath_dbg(common, ATH_DBG_WMI,
302 "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id)); 316 "WMI failure for: %s\n", wmi_cmd_to_name(cmd_id));
303 mutex_unlock(&wmi->op_mutex); 317 mutex_unlock(&wmi->op_mutex);
304 kfree_skb(skb); 318 kfree_skb(skb);
305 319
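The wmi.c hunks above rename the SWBA handler to ath9k_swba_tasklet() and route the new WMI_FATAL_EVENTID through ieee80211_queue_work() into ath9k_fatal_work(), presumably because the device reset it triggers needs process context rather than the atomic tasklet path. The plain-C sketch below only illustrates that fast-path/deferred-path split; the event IDs and the one-slot "queue" are invented stand-ins, not kernel interfaces.

    #include <stdio.h>

    static void handle_swba(void)  { printf("SWBA: push the pending beacon now\n"); }
    static void handle_fatal(void) { printf("FATAL: full device reset (may sleep)\n"); }

    /* toy stand-in for queueing work to run later in process context */
    static void (*deferred_work)(void);

    static void dispatch_event(int event_id)
    {
        switch (event_id) {
        case 1:                         /* beacon event: cheap, atomic-safe */
            handle_swba();
            break;
        case 2:                         /* fatal event: defer, handler may sleep */
            deferred_work = handle_fatal;
            break;
        }
    }

    int main(void)
    {
        dispatch_event(1);
        dispatch_event(2);
        if (deferred_work)              /* later, outside the interrupt path */
            deferred_work();
        return 0;
    }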
diff --git a/drivers/net/wireless/ath/ath9k/wmi.h b/drivers/net/wireless/ath/ath9k/wmi.h
index ac61074af8ac..42084277522d 100644
--- a/drivers/net/wireless/ath/ath9k/wmi.h
+++ b/drivers/net/wireless/ath/ath9k/wmi.h
@@ -117,7 +117,8 @@ int ath9k_wmi_cmd(struct wmi *wmi, enum wmi_cmd_id cmd_id,
117 u8 *cmd_buf, u32 cmd_len, 117 u8 *cmd_buf, u32 cmd_len,
118 u8 *rsp_buf, u32 rsp_len, 118 u8 *rsp_buf, u32 rsp_len,
119 u32 timeout); 119 u32 timeout);
120void ath9k_wmi_tasklet(unsigned long data); 120void ath9k_swba_tasklet(unsigned long data);
121void ath9k_fatal_work(struct work_struct *work);
121 122
122#define WMI_CMD(_wmi_cmd) \ 123#define WMI_CMD(_wmi_cmd) \
123 do { \ 124 do { \
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index aff04789f794..332d1feb5c18 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -48,19 +48,17 @@ static u16 bits_per_symbol[][2] = {
48 48
49#define IS_HT_RATE(_rate) ((_rate) & 0x80) 49#define IS_HT_RATE(_rate) ((_rate) & 0x80)
50 50
51static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq, 51static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
52 struct ath_atx_tid *tid, 52 struct ath_atx_tid *tid,
53 struct list_head *bf_head); 53 struct list_head *bf_head);
54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf, 54static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
55 struct ath_txq *txq, struct list_head *bf_q, 55 struct ath_txq *txq, struct list_head *bf_q,
56 struct ath_tx_status *ts, int txok, int sendbar); 56 struct ath_tx_status *ts, int txok, int sendbar);
57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq, 57static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
58 struct list_head *head); 58 struct list_head *head);
59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf); 59static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len);
60static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
61 struct ath_tx_status *ts, int txok);
62static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 60static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
63 int nbad, int txok, bool update_rc); 61 int nframes, int nbad, int txok, bool update_rc);
64static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 62static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
65 int seqno); 63 int seqno);
66 64
@@ -124,7 +122,7 @@ static void ath_tx_queue_tid(struct ath_txq *txq, struct ath_atx_tid *tid)
124 122
125static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 123static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
126{ 124{
127 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 125 struct ath_txq *txq = tid->ac->txq;
128 126
129 WARN_ON(!tid->paused); 127 WARN_ON(!tid->paused);
130 128
@@ -140,12 +138,21 @@ unlock:
140 spin_unlock_bh(&txq->axq_lock); 138 spin_unlock_bh(&txq->axq_lock);
141} 139}
142 140
141static struct ath_frame_info *get_frame_info(struct sk_buff *skb)
142{
143 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
144 BUILD_BUG_ON(sizeof(struct ath_frame_info) >
145 sizeof(tx_info->rate_driver_data));
146 return (struct ath_frame_info *) &tx_info->rate_driver_data[0];
147}
148
143static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid) 149static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
144{ 150{
145 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 151 struct ath_txq *txq = tid->ac->txq;
146 struct ath_buf *bf; 152 struct ath_buf *bf;
147 struct list_head bf_head; 153 struct list_head bf_head;
148 struct ath_tx_status ts; 154 struct ath_tx_status ts;
155 struct ath_frame_info *fi;
149 156
150 INIT_LIST_HEAD(&bf_head); 157 INIT_LIST_HEAD(&bf_head);
151 158
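The hunk above introduces get_frame_info(), which overlays the driver-private struct ath_frame_info on the rate_driver_data scratch area of the mac80211 tx info and guards the size with BUILD_BUG_ON. A standalone sketch of that overlay-plus-compile-time-check idiom follows, with toy types standing in for the mac80211 structures and a C11 static assert playing the role of BUILD_BUG_ON.

    #include <stdio.h>
    #include <string.h>

    #define SCRATCH_SLOTS 3

    struct control_block {
        void *rate_driver_data[SCRATCH_SLOTS];  /* scratch area owned by the driver */
    };

    struct frame_info {
        unsigned short seqno;
        unsigned char retries;
        unsigned char keyix;
        unsigned int framelen;
    };

    _Static_assert(sizeof(struct frame_info) <= SCRATCH_SLOTS * sizeof(void *),
                   "frame_info must fit in the scratch area");

    static struct frame_info *get_frame_info(struct control_block *cb)
    {
        return (struct frame_info *)&cb->rate_driver_data[0];
    }

    int main(void)
    {
        struct control_block cb;
        struct frame_info *fi;

        memset(&cb, 0, sizeof(cb));
        fi = get_frame_info(&cb);
        fi->seqno = 42;
        fi->retries = 1;
        printf("seqno=%u retries=%u\n", fi->seqno, fi->retries);
        return 0;
    }

Keeping the per-frame state in the frame's own control block is what lets the later hunks replace bf->bf_seqno, bf->bf_retries and bf->bf_frmlen with fi->seqno, fi->retries and fi->framelen.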
@@ -156,12 +163,15 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
156 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 163 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
157 list_move_tail(&bf->list, &bf_head); 164 list_move_tail(&bf->list, &bf_head);
158 165
159 if (bf_isretried(bf)) { 166 spin_unlock_bh(&txq->axq_lock);
160 ath_tx_update_baw(sc, tid, bf->bf_seqno); 167 fi = get_frame_info(bf->bf_mpdu);
168 if (fi->retries) {
169 ath_tx_update_baw(sc, tid, fi->seqno);
161 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 170 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
162 } else { 171 } else {
163 ath_tx_send_ht_normal(sc, txq, tid, &bf_head); 172 ath_tx_send_normal(sc, txq, tid, &bf_head);
164 } 173 }
174 spin_lock_bh(&txq->axq_lock);
165 } 175 }
166 176
167 spin_unlock_bh(&txq->axq_lock); 177 spin_unlock_bh(&txq->axq_lock);
@@ -184,14 +194,11 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
184} 194}
185 195
186static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid, 196static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
187 struct ath_buf *bf) 197 u16 seqno)
188{ 198{
189 int index, cindex; 199 int index, cindex;
190 200
191 if (bf_isretried(bf)) 201 index = ATH_BA_INDEX(tid->seq_start, seqno);
192 return;
193
194 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
195 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 202 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
196 __set_bit(cindex, tid->tx_buf); 203 __set_bit(cindex, tid->tx_buf);
197 204
@@ -215,6 +222,7 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
215 struct ath_buf *bf; 222 struct ath_buf *bf;
216 struct list_head bf_head; 223 struct list_head bf_head;
217 struct ath_tx_status ts; 224 struct ath_tx_status ts;
225 struct ath_frame_info *fi;
218 226
219 memset(&ts, 0, sizeof(ts)); 227 memset(&ts, 0, sizeof(ts));
220 INIT_LIST_HEAD(&bf_head); 228 INIT_LIST_HEAD(&bf_head);
@@ -226,8 +234,9 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
226 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 234 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
227 list_move_tail(&bf->list, &bf_head); 235 list_move_tail(&bf->list, &bf_head);
228 236
229 if (bf_isretried(bf)) 237 fi = get_frame_info(bf->bf_mpdu);
230 ath_tx_update_baw(sc, tid, bf->bf_seqno); 238 if (fi->retries)
239 ath_tx_update_baw(sc, tid, fi->seqno);
231 240
232 spin_unlock(&txq->axq_lock); 241 spin_unlock(&txq->axq_lock);
233 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 242 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
@@ -239,16 +248,15 @@ static void ath_tid_drain(struct ath_softc *sc, struct ath_txq *txq,
239} 248}
240 249
241static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq, 250static void ath_tx_set_retry(struct ath_softc *sc, struct ath_txq *txq,
242 struct ath_buf *bf) 251 struct sk_buff *skb)
243{ 252{
244 struct sk_buff *skb; 253 struct ath_frame_info *fi = get_frame_info(skb);
245 struct ieee80211_hdr *hdr; 254 struct ieee80211_hdr *hdr;
246 255
247 bf->bf_state.bf_type |= BUF_RETRY;
248 bf->bf_retries++;
249 TX_STAT_INC(txq->axq_qnum, a_retries); 256 TX_STAT_INC(txq->axq_qnum, a_retries);
257 if (fi->retries++ > 0)
258 return;
250 259
251 skb = bf->bf_mpdu;
252 hdr = (struct ieee80211_hdr *)skb->data; 260 hdr = (struct ieee80211_hdr *)skb->data;
253 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY); 261 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_RETRY);
254} 262}
@@ -298,9 +306,41 @@ static struct ath_buf* ath_clone_txbuf(struct ath_softc *sc, struct ath_buf *bf)
298 return tbf; 306 return tbf;
299} 307}
300 308
309static void ath_tx_count_frames(struct ath_softc *sc, struct ath_buf *bf,
310 struct ath_tx_status *ts, int txok,
311 int *nframes, int *nbad)
312{
313 struct ath_frame_info *fi;
314 u16 seq_st = 0;
315 u32 ba[WME_BA_BMP_SIZE >> 5];
316 int ba_index;
317 int isaggr = 0;
318
319 *nbad = 0;
320 *nframes = 0;
321
322 isaggr = bf_isaggr(bf);
323 if (isaggr) {
324 seq_st = ts->ts_seqnum;
325 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
326 }
327
328 while (bf) {
329 fi = get_frame_info(bf->bf_mpdu);
330 ba_index = ATH_BA_INDEX(seq_st, fi->seqno);
331
332 (*nframes)++;
333 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
334 (*nbad)++;
335
336 bf = bf->bf_next;
337 }
338}
339
340
301static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq, 341static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
302 struct ath_buf *bf, struct list_head *bf_q, 342 struct ath_buf *bf, struct list_head *bf_q,
303 struct ath_tx_status *ts, int txok) 343 struct ath_tx_status *ts, int txok, bool retry)
304{ 344{
305 struct ath_node *an = NULL; 345 struct ath_node *an = NULL;
306 struct sk_buff *skb; 346 struct sk_buff *skb;
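The new ath_tx_count_frames() above walks the subframes of an aggregate and counts how many are missing from the block-ack bitmap, replacing the removed ath_tx_num_badfrms() and feeding both nframes and nbad into ath_tx_rc_status(). A standalone sketch of that bitmap walk follows; the window size and helpers are simplified stand-ins for the WME_BA_BMP_SIZE and ATH_BA_* macros.

    #include <stdio.h>

    #define BA_WINDOW 64    /* must stay a power of two for the index mask */

    static int ba_index(unsigned int seq_start, unsigned int seqno)
    {
        return (seqno - seq_start) & (BA_WINDOW - 1);
    }

    static int ba_isset(const unsigned int *ba, int idx)
    {
        return (ba[idx >> 5] >> (idx & 31)) & 1;
    }

    int main(void)
    {
        unsigned int ba[BA_WINDOW / 32] = { 0 };
        unsigned int seq_start = 100;
        unsigned int seqnos[] = { 100, 101, 102, 103 };
        unsigned int i;
        int nframes = 0, nbad = 0;

        /* pretend the receiver acked 100, 102 and 103 but not 101 */
        ba[0] |= 1u << ba_index(seq_start, 100);
        ba[0] |= 1u << ba_index(seq_start, 102);
        ba[0] |= 1u << ba_index(seq_start, 103);

        for (i = 0; i < sizeof(seqnos) / sizeof(seqnos[0]); i++) {
            nframes++;
            if (!ba_isset(ba, ba_index(seq_start, seqnos[i])))
                nbad++;
        }

        printf("nframes=%d nbad=%d\n", nframes, nbad);  /* 4 and 1 */
        return 0;
    }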
@@ -316,7 +356,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
316 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0; 356 int isaggr, txfail, txpending, sendbar = 0, needreset = 0, nbad = 0;
317 bool rc_update = true; 357 bool rc_update = true;
318 struct ieee80211_tx_rate rates[4]; 358 struct ieee80211_tx_rate rates[4];
359 struct ath_frame_info *fi;
319 int nframes; 360 int nframes;
361 u8 tidno;
320 362
321 skb = bf->bf_mpdu; 363 skb = bf->bf_mpdu;
322 hdr = (struct ieee80211_hdr *)skb->data; 364 hdr = (struct ieee80211_hdr *)skb->data;
@@ -325,7 +367,6 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
325 hw = bf->aphy->hw; 367 hw = bf->aphy->hw;
326 368
327 memcpy(rates, tx_info->control.rates, sizeof(rates)); 369 memcpy(rates, tx_info->control.rates, sizeof(rates));
328 nframes = bf->bf_nframes;
329 370
330 rcu_read_lock(); 371 rcu_read_lock();
331 372
@@ -342,7 +383,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
342 !bf->bf_stale || bf_next != NULL) 383 !bf->bf_stale || bf_next != NULL)
343 list_move_tail(&bf->list, &bf_head); 384 list_move_tail(&bf->list, &bf_head);
344 385
345 ath_tx_rc_status(bf, ts, 1, 0, false); 386 ath_tx_rc_status(bf, ts, 1, 1, 0, false);
346 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 387 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
347 0, 0); 388 0, 0);
348 389
@@ -352,14 +393,15 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
352 } 393 }
353 394
354 an = (struct ath_node *)sta->drv_priv; 395 an = (struct ath_node *)sta->drv_priv;
355 tid = ATH_AN_2_TID(an, bf->bf_tidno); 396 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
397 tid = ATH_AN_2_TID(an, tidno);
356 398
357 /* 399 /*
358 * The hardware occasionally sends a tx status for the wrong TID. 400 * The hardware occasionally sends a tx status for the wrong TID.
359 * In this case, the BA status cannot be considered valid and all 401 * In this case, the BA status cannot be considered valid and all
360 * subframes need to be retransmitted 402 * subframes need to be retransmitted
361 */ 403 */
362 if (bf->bf_tidno != ts->tid) 404 if (tidno != ts->tid)
363 txok = false; 405 txok = false;
364 406
365 isaggr = bf_isaggr(bf); 407 isaggr = bf_isaggr(bf);
@@ -385,15 +427,16 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
385 INIT_LIST_HEAD(&bf_pending); 427 INIT_LIST_HEAD(&bf_pending);
386 INIT_LIST_HEAD(&bf_head); 428 INIT_LIST_HEAD(&bf_head);
387 429
388 nbad = ath_tx_num_badfrms(sc, bf, ts, txok); 430 ath_tx_count_frames(sc, bf, ts, txok, &nframes, &nbad);
389 while (bf) { 431 while (bf) {
390 txfail = txpending = 0; 432 txfail = txpending = 0;
391 bf_next = bf->bf_next; 433 bf_next = bf->bf_next;
392 434
393 skb = bf->bf_mpdu; 435 skb = bf->bf_mpdu;
394 tx_info = IEEE80211_SKB_CB(skb); 436 tx_info = IEEE80211_SKB_CB(skb);
437 fi = get_frame_info(skb);
395 438
396 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, bf->bf_seqno))) { 439 if (ATH_BA_ISSET(ba, ATH_BA_INDEX(seq_st, fi->seqno))) {
397 /* transmit completion, subframe is 440 /* transmit completion, subframe is
398 * acked by block ack */ 441 * acked by block ack */
399 acked_cnt++; 442 acked_cnt++;
@@ -401,10 +444,9 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
401 /* transmit completion */ 444 /* transmit completion */
402 acked_cnt++; 445 acked_cnt++;
403 } else { 446 } else {
404 if (!(tid->state & AGGR_CLEANUP) && 447 if (!(tid->state & AGGR_CLEANUP) && retry) {
405 !bf_last->bf_tx_aborted) { 448 if (fi->retries < ATH_MAX_SW_RETRIES) {
406 if (bf->bf_retries < ATH_MAX_SW_RETRIES) { 449 ath_tx_set_retry(sc, txq, bf->bf_mpdu);
407 ath_tx_set_retry(sc, txq, bf);
408 txpending = 1; 450 txpending = 1;
409 } else { 451 } else {
410 bf->bf_state.bf_type |= BUF_XRETRY; 452 bf->bf_state.bf_type |= BUF_XRETRY;
@@ -442,16 +484,15 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
442 * block-ack window 484 * block-ack window
443 */ 485 */
444 spin_lock_bh(&txq->axq_lock); 486 spin_lock_bh(&txq->axq_lock);
445 ath_tx_update_baw(sc, tid, bf->bf_seqno); 487 ath_tx_update_baw(sc, tid, fi->seqno);
446 spin_unlock_bh(&txq->axq_lock); 488 spin_unlock_bh(&txq->axq_lock);
447 489
448 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) { 490 if (rc_update && (acked_cnt == 1 || txfail_cnt == 1)) {
449 memcpy(tx_info->control.rates, rates, sizeof(rates)); 491 memcpy(tx_info->control.rates, rates, sizeof(rates));
450 bf->bf_nframes = nframes; 492 ath_tx_rc_status(bf, ts, nframes, nbad, txok, true);
451 ath_tx_rc_status(bf, ts, nbad, txok, true);
452 rc_update = false; 493 rc_update = false;
453 } else { 494 } else {
454 ath_tx_rc_status(bf, ts, nbad, txok, false); 495 ath_tx_rc_status(bf, ts, nframes, nbad, txok, false);
455 } 496 }
456 497
457 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts, 498 ath_tx_complete_buf(sc, bf, txq, &bf_head, ts,
@@ -470,14 +511,13 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
470 */ 511 */
471 if (!tbf) { 512 if (!tbf) {
472 spin_lock_bh(&txq->axq_lock); 513 spin_lock_bh(&txq->axq_lock);
473 ath_tx_update_baw(sc, tid, 514 ath_tx_update_baw(sc, tid, fi->seqno);
474 bf->bf_seqno);
475 spin_unlock_bh(&txq->axq_lock); 515 spin_unlock_bh(&txq->axq_lock);
476 516
477 bf->bf_state.bf_type |= 517 bf->bf_state.bf_type |=
478 BUF_XRETRY; 518 BUF_XRETRY;
479 ath_tx_rc_status(bf, ts, nbad, 519 ath_tx_rc_status(bf, ts, nframes,
480 0, false); 520 nbad, 0, false);
481 ath_tx_complete_buf(sc, bf, txq, 521 ath_tx_complete_buf(sc, bf, txq,
482 &bf_head, 522 &bf_head,
483 ts, 0, 0); 523 ts, 0, 0);
@@ -611,6 +651,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
611 u16 minlen; 651 u16 minlen;
612 u8 flags, rix; 652 u8 flags, rix;
613 int width, streams, half_gi, ndelim, mindelim; 653 int width, streams, half_gi, ndelim, mindelim;
654 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
614 655
615 /* Select standard number of delimiters based on frame length alone */ 656 /* Select standard number of delimiters based on frame length alone */
616 ndelim = ATH_AGGR_GET_NDELIM(frmlen); 657 ndelim = ATH_AGGR_GET_NDELIM(frmlen);
@@ -621,7 +662,7 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
621 * TODO - this could be improved to be dependent on the rate. 662 * TODO - this could be improved to be dependent on the rate.
622 * The hardware can keep up at lower rates, but not higher rates 663 * The hardware can keep up at lower rates, but not higher rates
623 */ 664 */
624 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) 665 if (fi->keyix != ATH9K_TXKEYIX_INVALID)
625 ndelim += ATH_AGGR_ENCRYPTDELIM; 666 ndelim += ATH_AGGR_ENCRYPTDELIM;
626 667
627 /* 668 /*
@@ -665,7 +706,8 @@ static int ath_compute_num_delims(struct ath_softc *sc, struct ath_atx_tid *tid,
665static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc, 706static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
666 struct ath_txq *txq, 707 struct ath_txq *txq,
667 struct ath_atx_tid *tid, 708 struct ath_atx_tid *tid,
668 struct list_head *bf_q) 709 struct list_head *bf_q,
710 int *aggr_len)
669{ 711{
670#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4) 712#define PADBYTES(_len) ((4 - ((_len) % 4)) % 4)
671 struct ath_buf *bf, *bf_first, *bf_prev = NULL; 713 struct ath_buf *bf, *bf_first, *bf_prev = NULL;
@@ -674,14 +716,16 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
674 al_delta, h_baw = tid->baw_size / 2; 716 al_delta, h_baw = tid->baw_size / 2;
675 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE; 717 enum ATH_AGGR_STATUS status = ATH_AGGR_DONE;
676 struct ieee80211_tx_info *tx_info; 718 struct ieee80211_tx_info *tx_info;
719 struct ath_frame_info *fi;
677 720
678 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list); 721 bf_first = list_first_entry(&tid->buf_q, struct ath_buf, list);
679 722
680 do { 723 do {
681 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 724 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
725 fi = get_frame_info(bf->bf_mpdu);
682 726
683 /* do not step over block-ack window */ 727 /* do not step over block-ack window */
684 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno)) { 728 if (!BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno)) {
685 status = ATH_AGGR_BAW_CLOSED; 729 status = ATH_AGGR_BAW_CLOSED;
686 break; 730 break;
687 } 731 }
@@ -692,7 +736,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
692 } 736 }
693 737
694 /* do not exceed aggregation limit */ 738 /* do not exceed aggregation limit */
695 al_delta = ATH_AGGR_DELIM_SZ + bf->bf_frmlen; 739 al_delta = ATH_AGGR_DELIM_SZ + fi->framelen;
696 740
697 if (nframes && 741 if (nframes &&
698 (aggr_limit < (al + bpad + al_delta + prev_al))) { 742 (aggr_limit < (al + bpad + al_delta + prev_al))) {
@@ -719,14 +763,15 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
719 * Get the delimiters needed to meet the MPDU 763 * Get the delimiters needed to meet the MPDU
720 * density for this node. 764 * density for this node.
721 */ 765 */
722 ndelim = ath_compute_num_delims(sc, tid, bf_first, bf->bf_frmlen); 766 ndelim = ath_compute_num_delims(sc, tid, bf_first, fi->framelen);
723 bpad = PADBYTES(al_delta) + (ndelim << 2); 767 bpad = PADBYTES(al_delta) + (ndelim << 2);
724 768
725 bf->bf_next = NULL; 769 bf->bf_next = NULL;
726 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0); 770 ath9k_hw_set_desc_link(sc->sc_ah, bf->bf_desc, 0);
727 771
728 /* link buffers of this frame to the aggregate */ 772 /* link buffers of this frame to the aggregate */
729 ath_tx_addto_baw(sc, tid, bf); 773 if (!fi->retries)
774 ath_tx_addto_baw(sc, tid, fi->seqno);
730 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim); 775 ath9k_hw_set11n_aggr_middle(sc->sc_ah, bf->bf_desc, ndelim);
731 list_move_tail(&bf->list, bf_q); 776 list_move_tail(&bf->list, bf_q);
732 if (bf_prev) { 777 if (bf_prev) {
@@ -738,8 +783,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
738 783
739 } while (!list_empty(&tid->buf_q)); 784 } while (!list_empty(&tid->buf_q));
740 785
741 bf_first->bf_al = al; 786 *aggr_len = al;
742 bf_first->bf_nframes = nframes;
743 787
744 return status; 788 return status;
745#undef PADBYTES 789#undef PADBYTES
@@ -750,7 +794,9 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
750{ 794{
751 struct ath_buf *bf; 795 struct ath_buf *bf;
752 enum ATH_AGGR_STATUS status; 796 enum ATH_AGGR_STATUS status;
797 struct ath_frame_info *fi;
753 struct list_head bf_q; 798 struct list_head bf_q;
799 int aggr_len;
754 800
755 do { 801 do {
756 if (list_empty(&tid->buf_q)) 802 if (list_empty(&tid->buf_q))
@@ -758,7 +804,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
758 804
759 INIT_LIST_HEAD(&bf_q); 805 INIT_LIST_HEAD(&bf_q);
760 806
761 status = ath_tx_form_aggr(sc, txq, tid, &bf_q); 807 status = ath_tx_form_aggr(sc, txq, tid, &bf_q, &aggr_len);
762 808
763 /* 809 /*
764 * no frames picked up to be aggregated; 810 * no frames picked up to be aggregated;
@@ -771,18 +817,20 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
771 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list); 817 bf->bf_lastbf = list_entry(bf_q.prev, struct ath_buf, list);
772 818
773 /* if only one frame, send as non-aggregate */ 819 /* if only one frame, send as non-aggregate */
774 if (bf->bf_nframes == 1) { 820 if (bf == bf->bf_lastbf) {
821 fi = get_frame_info(bf->bf_mpdu);
822
775 bf->bf_state.bf_type &= ~BUF_AGGR; 823 bf->bf_state.bf_type &= ~BUF_AGGR;
776 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc); 824 ath9k_hw_clr11n_aggr(sc->sc_ah, bf->bf_desc);
777 ath_buf_set_rate(sc, bf); 825 ath_buf_set_rate(sc, bf, fi->framelen);
778 ath_tx_txqaddbuf(sc, txq, &bf_q); 826 ath_tx_txqaddbuf(sc, txq, &bf_q);
779 continue; 827 continue;
780 } 828 }
781 829
782 /* setup first desc of aggregate */ 830 /* setup first desc of aggregate */
783 bf->bf_state.bf_type |= BUF_AGGR; 831 bf->bf_state.bf_type |= BUF_AGGR;
784 ath_buf_set_rate(sc, bf); 832 ath_buf_set_rate(sc, bf, aggr_len);
785 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, bf->bf_al); 833 ath9k_hw_set11n_aggr_first(sc->sc_ah, bf->bf_desc, aggr_len);
786 834
787 /* anchor last desc of aggregate */ 835 /* anchor last desc of aggregate */
788 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc); 836 ath9k_hw_set11n_aggr_last(sc->sc_ah, bf->bf_lastbf->bf_desc);
@@ -790,7 +838,7 @@ static void ath_tx_sched_aggr(struct ath_softc *sc, struct ath_txq *txq,
790 ath_tx_txqaddbuf(sc, txq, &bf_q); 838 ath_tx_txqaddbuf(sc, txq, &bf_q);
791 TX_STAT_INC(txq->axq_qnum, a_aggr); 839 TX_STAT_INC(txq->axq_qnum, a_aggr);
792 840
793 } while (txq->axq_depth < ATH_AGGR_MIN_QDEPTH && 841 } while (txq->axq_ampdu_depth < ATH_AGGR_MIN_QDEPTH &&
794 status != ATH_AGGR_BAW_CLOSED); 842 status != ATH_AGGR_BAW_CLOSED);
795} 843}
796 844
@@ -817,7 +865,7 @@ void ath_tx_aggr_stop(struct ath_softc *sc, struct ieee80211_sta *sta, u16 tid)
817{ 865{
818 struct ath_node *an = (struct ath_node *)sta->drv_priv; 866 struct ath_node *an = (struct ath_node *)sta->drv_priv;
819 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid); 867 struct ath_atx_tid *txtid = ATH_AN_2_TID(an, tid);
820 struct ath_txq *txq = &sc->tx.txq[txtid->ac->qnum]; 868 struct ath_txq *txq = txtid->ac->txq;
821 869
822 if (txtid->state & AGGR_CLEANUP) 870 if (txtid->state & AGGR_CLEANUP)
823 return; 871 return;
@@ -888,10 +936,16 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
888 struct ath_hw *ah = sc->sc_ah; 936 struct ath_hw *ah = sc->sc_ah;
889 struct ath_common *common = ath9k_hw_common(ah); 937 struct ath_common *common = ath9k_hw_common(ah);
890 struct ath9k_tx_queue_info qi; 938 struct ath9k_tx_queue_info qi;
939 static const int subtype_txq_to_hwq[] = {
940 [WME_AC_BE] = ATH_TXQ_AC_BE,
941 [WME_AC_BK] = ATH_TXQ_AC_BK,
942 [WME_AC_VI] = ATH_TXQ_AC_VI,
943 [WME_AC_VO] = ATH_TXQ_AC_VO,
944 };
891 int qnum, i; 945 int qnum, i;
892 946
893 memset(&qi, 0, sizeof(qi)); 947 memset(&qi, 0, sizeof(qi));
894 qi.tqi_subtype = subtype; 948 qi.tqi_subtype = subtype_txq_to_hwq[subtype];
895 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT; 949 qi.tqi_aifs = ATH9K_TXQ_USEDEFAULT;
896 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT; 950 qi.tqi_cwmin = ATH9K_TXQ_USEDEFAULT;
897 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT; 951 qi.tqi_cwmax = ATH9K_TXQ_USEDEFAULT;
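The hunk above stops passing the caller's WME access category straight into tqi_subtype and instead routes it through the designated-initializer table subtype_txq_to_hwq[], so the software AC numbering and the hardware queue subtype numbering no longer have to match. A standalone sketch of that table pattern follows; the enum values below are invented, not the driver's.

    #include <stdio.h>

    enum wme_ac { WME_AC_BE, WME_AC_BK, WME_AC_VI, WME_AC_VO, WME_NUM_AC };
    enum hw_txq { HWQ_BK = 0, HWQ_BE = 1, HWQ_VI = 2, HWQ_VO = 3 };

    /* designated initializers keep the mapping readable even if either
     * enum is reordered or renumbered later */
    static const int ac_to_hwq[WME_NUM_AC] = {
        [WME_AC_BE] = HWQ_BE,
        [WME_AC_BK] = HWQ_BK,
        [WME_AC_VI] = HWQ_VI,
        [WME_AC_VO] = HWQ_VO,
    };

    int main(void)
    {
        int ac;

        for (ac = 0; ac < WME_NUM_AC; ac++)
            printf("ac %d -> hw queue subtype %d\n", ac, ac_to_hwq[ac]);
        return 0;
    }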
@@ -931,22 +985,21 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
931 return NULL; 985 return NULL;
932 } 986 }
933 if (qnum >= ARRAY_SIZE(sc->tx.txq)) { 987 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
934 ath_print(common, ATH_DBG_FATAL, 988 ath_err(common, "qnum %u out of range, max %zu!\n",
935 "qnum %u out of range, max %u!\n", 989 qnum, ARRAY_SIZE(sc->tx.txq));
936 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
937 ath9k_hw_releasetxqueue(ah, qnum); 990 ath9k_hw_releasetxqueue(ah, qnum);
938 return NULL; 991 return NULL;
939 } 992 }
940 if (!ATH_TXQ_SETUP(sc, qnum)) { 993 if (!ATH_TXQ_SETUP(sc, qnum)) {
941 struct ath_txq *txq = &sc->tx.txq[qnum]; 994 struct ath_txq *txq = &sc->tx.txq[qnum];
942 995
943 txq->axq_class = subtype;
944 txq->axq_qnum = qnum; 996 txq->axq_qnum = qnum;
945 txq->axq_link = NULL; 997 txq->axq_link = NULL;
946 INIT_LIST_HEAD(&txq->axq_q); 998 INIT_LIST_HEAD(&txq->axq_q);
947 INIT_LIST_HEAD(&txq->axq_acq); 999 INIT_LIST_HEAD(&txq->axq_acq);
948 spin_lock_init(&txq->axq_lock); 1000 spin_lock_init(&txq->axq_lock);
949 txq->axq_depth = 0; 1001 txq->axq_depth = 0;
1002 txq->axq_ampdu_depth = 0;
950 txq->axq_tx_inprogress = false; 1003 txq->axq_tx_inprogress = false;
951 sc->tx.txqsetup |= 1<<qnum; 1004 sc->tx.txqsetup |= 1<<qnum;
952 1005
@@ -985,8 +1038,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
985 qi.tqi_readyTime = qinfo->tqi_readyTime; 1038 qi.tqi_readyTime = qinfo->tqi_readyTime;
986 1039
987 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 1040 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
988 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 1041 ath_err(ath9k_hw_common(sc->sc_ah),
989 "Unable to update hardware queue %u!\n", qnum); 1042 "Unable to update hardware queue %u!\n", qnum);
990 error = -EIO; 1043 error = -EIO;
991 } else { 1044 } else {
992 ath9k_hw_resettxqueue(ah, qnum); 1045 ath9k_hw_resettxqueue(ah, qnum);
@@ -1016,6 +1069,12 @@ int ath_cabq_update(struct ath_softc *sc)
1016 return 0; 1069 return 0;
1017} 1070}
1018 1071
1072static bool bf_is_ampdu_not_probing(struct ath_buf *bf)
1073{
1074 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(bf->bf_mpdu);
1075 return bf_isampdu(bf) && !(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE);
1076}
1077
1019/* 1078/*
1020 * Drain a given TX queue (could be Beacon or Data) 1079 * Drain a given TX queue (could be Beacon or Data)
1021 * 1080 *
@@ -1062,8 +1121,6 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1062 } 1121 }
1063 1122
1064 lastbf = bf->bf_lastbf; 1123 lastbf = bf->bf_lastbf;
1065 if (!retry_tx)
1066 lastbf->bf_tx_aborted = true;
1067 1124
1068 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1125 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1069 list_cut_position(&bf_head, 1126 list_cut_position(&bf_head,
@@ -1076,11 +1133,13 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1076 } 1133 }
1077 1134
1078 txq->axq_depth--; 1135 txq->axq_depth--;
1079 1136 if (bf_is_ampdu_not_probing(bf))
1137 txq->axq_ampdu_depth--;
1080 spin_unlock_bh(&txq->axq_lock); 1138 spin_unlock_bh(&txq->axq_lock);
1081 1139
1082 if (bf_isampdu(bf)) 1140 if (bf_isampdu(bf))
1083 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0); 1141 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, 0,
1142 retry_tx);
1084 else 1143 else
1085 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0); 1144 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, 0, 0);
1086 } 1145 }
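The hunks above add bf_is_ampdu_not_probing() and a second counter, axq_ampdu_depth, kept in step with axq_depth but counting only non-probing A-MPDU frames, which the aggregation scheduler then checks instead of the total queue depth. Below is a toy standalone sketch of maintaining such a filtered counter alongside the main one; the struct layout is invented for illustration.

    #include <stdio.h>

    struct frame {
        int is_ampdu;
        int is_probe;
    };

    struct txq {
        int depth;        /* everything queued */
        int ampdu_depth;  /* only non-probing aggregates */
    };

    static int counts_as_ampdu(const struct frame *f)
    {
        return f->is_ampdu && !f->is_probe;
    }

    static void enqueue(struct txq *q, const struct frame *f)
    {
        q->depth++;
        if (counts_as_ampdu(f))
            q->ampdu_depth++;
    }

    static void complete(struct txq *q, const struct frame *f)
    {
        q->depth--;
        if (counts_as_ampdu(f))
            q->ampdu_depth--;
    }

    int main(void)
    {
        struct txq q = { 0, 0 };
        struct frame aggr = { 1, 0 }, legacy = { 0, 0 };

        enqueue(&q, &aggr);
        enqueue(&q, &legacy);
        printf("depth=%d ampdu_depth=%d\n", q.depth, q.ampdu_depth); /* 2 1 */
        complete(&q, &aggr);
        printf("depth=%d ampdu_depth=%d\n", q.depth, q.ampdu_depth); /* 1 0 */
        return 0;
    }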
@@ -1101,7 +1160,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1101 1160
1102 if (bf_isampdu(bf)) 1161 if (bf_isampdu(bf))
1103 ath_tx_complete_aggr(sc, txq, bf, &bf_head, 1162 ath_tx_complete_aggr(sc, txq, bf, &bf_head,
1104 &ts, 0); 1163 &ts, 0, retry_tx);
1105 else 1164 else
1106 ath_tx_complete_buf(sc, bf, txq, &bf_head, 1165 ath_tx_complete_buf(sc, bf, txq, &bf_head,
1107 &ts, 0, 0); 1166 &ts, 0, 0);
@@ -1143,7 +1202,7 @@ bool ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1143 } 1202 }
1144 1203
1145 if (npend) 1204 if (npend)
1146 ath_print(common, ATH_DBG_FATAL, "Failed to stop TX DMA!\n"); 1205 ath_err(common, "Failed to stop TX DMA!\n");
1147 1206
1148 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) { 1207 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) {
1149 if (ATH_TXQ_SETUP(sc, i)) 1208 if (ATH_TXQ_SETUP(sc, i))
@@ -1202,24 +1261,6 @@ void ath_txq_schedule(struct ath_softc *sc, struct ath_txq *txq)
1202 } 1261 }
1203} 1262}
1204 1263
1205int ath_tx_setup(struct ath_softc *sc, int haltype)
1206{
1207 struct ath_txq *txq;
1208
1209 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
1210 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1211 "HAL AC %u out of range, max %zu!\n",
1212 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1213 return 0;
1214 }
1215 txq = ath_txq_setup(sc, ATH9K_TX_QUEUE_DATA, haltype);
1216 if (txq != NULL) {
1217 sc->tx.hwq_map[haltype] = txq->axq_qnum;
1218 return 1;
1219 } else
1220 return 0;
1221}
1222
1223/***********/ 1264/***********/
1224/* TX, DMA */ 1265/* TX, DMA */
1225/***********/ 1266/***********/
@@ -1245,8 +1286,8 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1245 1286
1246 bf = list_first_entry(head, struct ath_buf, list); 1287 bf = list_first_entry(head, struct ath_buf, list);
1247 1288
1248 ath_print(common, ATH_DBG_QUEUE, 1289 ath_dbg(common, ATH_DBG_QUEUE,
1249 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); 1290 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
1250 1291
1251 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) { 1292 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) {
1252 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) { 1293 if (txq->axq_depth >= ATH_TXFIFO_DEPTH) {
@@ -1254,47 +1295,45 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1254 return; 1295 return;
1255 } 1296 }
1256 if (!list_empty(&txq->txq_fifo[txq->txq_headidx])) 1297 if (!list_empty(&txq->txq_fifo[txq->txq_headidx]))
1257 ath_print(common, ATH_DBG_XMIT, 1298 ath_dbg(common, ATH_DBG_XMIT,
1258 "Initializing tx fifo %d which " 1299 "Initializing tx fifo %d which is non-empty\n",
1259 "is non-empty\n", 1300 txq->txq_headidx);
1260 txq->txq_headidx);
1261 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]); 1301 INIT_LIST_HEAD(&txq->txq_fifo[txq->txq_headidx]);
1262 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]); 1302 list_splice_init(head, &txq->txq_fifo[txq->txq_headidx]);
1263 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH); 1303 INCR(txq->txq_headidx, ATH_TXFIFO_DEPTH);
1264 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1304 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1265 ath_print(common, ATH_DBG_XMIT, 1305 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1266 "TXDP[%u] = %llx (%p)\n", 1306 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1267 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1268 } else { 1307 } else {
1269 list_splice_tail_init(head, &txq->axq_q); 1308 list_splice_tail_init(head, &txq->axq_q);
1270 1309
1271 if (txq->axq_link == NULL) { 1310 if (txq->axq_link == NULL) {
1272 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1311 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1273 ath_print(common, ATH_DBG_XMIT, 1312 ath_dbg(common, ATH_DBG_XMIT, "TXDP[%u] = %llx (%p)\n",
1274 "TXDP[%u] = %llx (%p)\n", 1313 txq->axq_qnum, ito64(bf->bf_daddr),
1275 txq->axq_qnum, ito64(bf->bf_daddr), 1314 bf->bf_desc);
1276 bf->bf_desc);
1277 } else { 1315 } else {
1278 *txq->axq_link = bf->bf_daddr; 1316 *txq->axq_link = bf->bf_daddr;
1279 ath_print(common, ATH_DBG_XMIT, 1317 ath_dbg(common, ATH_DBG_XMIT,
1280 "link[%u] (%p)=%llx (%p)\n", 1318 "link[%u] (%p)=%llx (%p)\n",
1281 txq->axq_qnum, txq->axq_link, 1319 txq->axq_qnum, txq->axq_link,
1282 ito64(bf->bf_daddr), bf->bf_desc); 1320 ito64(bf->bf_daddr), bf->bf_desc);
1283 } 1321 }
1284 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc, 1322 ath9k_hw_get_desc_link(ah, bf->bf_lastbf->bf_desc,
1285 &txq->axq_link); 1323 &txq->axq_link);
1286 ath9k_hw_txstart(ah, txq->axq_qnum); 1324 ath9k_hw_txstart(ah, txq->axq_qnum);
1287 } 1325 }
1288 txq->axq_depth++; 1326 txq->axq_depth++;
1327 if (bf_is_ampdu_not_probing(bf))
1328 txq->axq_ampdu_depth++;
1289} 1329}
1290 1330
1291static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid, 1331static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1292 struct list_head *bf_head, 1332 struct ath_buf *bf, struct ath_tx_control *txctl)
1293 struct ath_tx_control *txctl)
1294{ 1333{
1295 struct ath_buf *bf; 1334 struct ath_frame_info *fi = get_frame_info(bf->bf_mpdu);
1335 struct list_head bf_head;
1296 1336
1297 bf = list_first_entry(bf_head, struct ath_buf, list);
1298 bf->bf_state.bf_type |= BUF_AMPDU; 1337 bf->bf_state.bf_type |= BUF_AMPDU;
1299 TX_STAT_INC(txctl->txq->axq_qnum, a_queued); 1338 TX_STAT_INC(txctl->txq->axq_qnum, a_queued);
1300 1339
@@ -1306,56 +1345,47 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1306 * - h/w queue depth exceeds low water mark 1345 * - h/w queue depth exceeds low water mark
1307 */ 1346 */
1308 if (!list_empty(&tid->buf_q) || tid->paused || 1347 if (!list_empty(&tid->buf_q) || tid->paused ||
1309 !BAW_WITHIN(tid->seq_start, tid->baw_size, bf->bf_seqno) || 1348 !BAW_WITHIN(tid->seq_start, tid->baw_size, fi->seqno) ||
1310 txctl->txq->axq_depth >= ATH_AGGR_MIN_QDEPTH) { 1349 txctl->txq->axq_ampdu_depth >= ATH_AGGR_MIN_QDEPTH) {
1311 /* 1350 /*
1312 * Add this frame to software queue for scheduling later 1351 * Add this frame to software queue for scheduling later
1313 * for aggregation. 1352 * for aggregation.
1314 */ 1353 */
1315 list_move_tail(&bf->list, &tid->buf_q); 1354 list_add_tail(&bf->list, &tid->buf_q);
1316 ath_tx_queue_tid(txctl->txq, tid); 1355 ath_tx_queue_tid(txctl->txq, tid);
1317 return; 1356 return;
1318 } 1357 }
1319 1358
1359 INIT_LIST_HEAD(&bf_head);
1360 list_add(&bf->list, &bf_head);
1361
1320 /* Add sub-frame to BAW */ 1362 /* Add sub-frame to BAW */
1321 ath_tx_addto_baw(sc, tid, bf); 1363 if (!fi->retries)
1364 ath_tx_addto_baw(sc, tid, fi->seqno);
1322 1365
1323 /* Queue to h/w without aggregation */ 1366 /* Queue to h/w without aggregation */
1324 bf->bf_nframes = 1;
1325 bf->bf_lastbf = bf; 1367 bf->bf_lastbf = bf;
1326 ath_buf_set_rate(sc, bf); 1368 ath_buf_set_rate(sc, bf, fi->framelen);
1327 ath_tx_txqaddbuf(sc, txctl->txq, bf_head); 1369 ath_tx_txqaddbuf(sc, txctl->txq, &bf_head);
1328} 1370}
1329 1371
1330static void ath_tx_send_ht_normal(struct ath_softc *sc, struct ath_txq *txq, 1372static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1331 struct ath_atx_tid *tid, 1373 struct ath_atx_tid *tid,
1332 struct list_head *bf_head) 1374 struct list_head *bf_head)
1333{ 1375{
1376 struct ath_frame_info *fi;
1334 struct ath_buf *bf; 1377 struct ath_buf *bf;
1335 1378
1336 bf = list_first_entry(bf_head, struct ath_buf, list); 1379 bf = list_first_entry(bf_head, struct ath_buf, list);
1337 bf->bf_state.bf_type &= ~BUF_AMPDU; 1380 bf->bf_state.bf_type &= ~BUF_AMPDU;
1338 1381
1339 /* update starting sequence number for subsequent ADDBA request */ 1382 /* update starting sequence number for subsequent ADDBA request */
1340 INCR(tid->seq_start, IEEE80211_SEQ_MAX); 1383 if (tid)
1341 1384 INCR(tid->seq_start, IEEE80211_SEQ_MAX);
1342 bf->bf_nframes = 1;
1343 bf->bf_lastbf = bf;
1344 ath_buf_set_rate(sc, bf);
1345 ath_tx_txqaddbuf(sc, txq, bf_head);
1346 TX_STAT_INC(txq->axq_qnum, queued);
1347}
1348
1349static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1350 struct list_head *bf_head)
1351{
1352 struct ath_buf *bf;
1353
1354 bf = list_first_entry(bf_head, struct ath_buf, list);
1355 1385
1356 bf->bf_lastbf = bf; 1386 bf->bf_lastbf = bf;
1357 bf->bf_nframes = 1; 1387 fi = get_frame_info(bf->bf_mpdu);
1358 ath_buf_set_rate(sc, bf); 1388 ath_buf_set_rate(sc, bf, fi->framelen);
1359 ath_tx_txqaddbuf(sc, txq, bf_head); 1389 ath_tx_txqaddbuf(sc, txq, bf_head);
1360 TX_STAT_INC(txq->axq_qnum, queued); 1390 TX_STAT_INC(txq->axq_qnum, queued);
1361} 1391}
@@ -1383,40 +1413,52 @@ static enum ath9k_pkt_type get_hw_packet_type(struct sk_buff *skb)
1383 return htype; 1413 return htype;
1384} 1414}
1385 1415
1386static void assign_aggr_tid_seqno(struct sk_buff *skb, 1416static void setup_frame_info(struct ieee80211_hw *hw, struct sk_buff *skb,
1387 struct ath_buf *bf) 1417 int framelen)
1388{ 1418{
1419 struct ath_wiphy *aphy = hw->priv;
1420 struct ath_softc *sc = aphy->sc;
1389 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1421 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1422 struct ieee80211_sta *sta = tx_info->control.sta;
1423 struct ieee80211_key_conf *hw_key = tx_info->control.hw_key;
1390 struct ieee80211_hdr *hdr; 1424 struct ieee80211_hdr *hdr;
1425 struct ath_frame_info *fi = get_frame_info(skb);
1391 struct ath_node *an; 1426 struct ath_node *an;
1392 struct ath_atx_tid *tid; 1427 struct ath_atx_tid *tid;
1393 __le16 fc; 1428 enum ath9k_key_type keytype;
1394 u8 *qc; 1429 u16 seqno = 0;
1430 u8 tidno;
1395 1431
1396 if (!tx_info->control.sta) 1432 keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1397 return;
1398 1433
1399 an = (struct ath_node *)tx_info->control.sta->drv_priv;
1400 hdr = (struct ieee80211_hdr *)skb->data; 1434 hdr = (struct ieee80211_hdr *)skb->data;
1401 fc = hdr->frame_control; 1435 if (sta && ieee80211_is_data_qos(hdr->frame_control) &&
1436 conf_is_ht(&hw->conf) && (sc->sc_flags & SC_OP_TXAGGR)) {
1402 1437
1403 if (ieee80211_is_data_qos(fc)) { 1438 an = (struct ath_node *) sta->drv_priv;
1404 qc = ieee80211_get_qos_ctl(hdr); 1439 tidno = ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
1405 bf->bf_tidno = qc[0] & 0xf; 1440
1441 /*
1442 * Override seqno set by upper layer with the one
1443 * in tx aggregation state.
1444 */
1445 tid = ATH_AN_2_TID(an, tidno);
1446 seqno = tid->seq_next;
1447 hdr->seq_ctrl = cpu_to_le16(seqno << IEEE80211_SEQ_SEQ_SHIFT);
1448 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1406 } 1449 }
1407 1450
1408 /* 1451 memset(fi, 0, sizeof(*fi));
1409 * For HT capable stations, we save tidno for later use. 1452 if (hw_key)
1410 * We also override seqno set by upper layer with the one 1453 fi->keyix = hw_key->hw_key_idx;
1411 * in tx aggregation state. 1454 else
1412 */ 1455 fi->keyix = ATH9K_TXKEYIX_INVALID;
1413 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1456 fi->keytype = keytype;
1414 hdr->seq_ctrl = cpu_to_le16(tid->seq_next << IEEE80211_SEQ_SEQ_SHIFT); 1457 fi->framelen = framelen;
1415 bf->bf_seqno = tid->seq_next; 1458 fi->seqno = seqno;
1416 INCR(tid->seq_next, IEEE80211_SEQ_MAX);
1417} 1459}
1418 1460
1419static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc) 1461static int setup_tx_flags(struct sk_buff *skb)
1420{ 1462{
1421 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1463 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1422 int flags = 0; 1464 int flags = 0;
@@ -1427,7 +1469,7 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
1427 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK) 1469 if (tx_info->flags & IEEE80211_TX_CTL_NO_ACK)
1428 flags |= ATH9K_TXDESC_NOACK; 1470 flags |= ATH9K_TXDESC_NOACK;
1429 1471
1430 if (use_ldpc) 1472 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1431 flags |= ATH9K_TXDESC_LDPC; 1473 flags |= ATH9K_TXDESC_LDPC;
1432 1474
1433 return flags; 1475 return flags;
@@ -1439,13 +1481,11 @@ static int setup_tx_flags(struct sk_buff *skb, bool use_ldpc)
1439 * width - 0 for 20 MHz, 1 for 40 MHz 1481 * width - 0 for 20 MHz, 1 for 40 MHz
1440 * half_gi - to use 4us v/s 3.6 us for symbol time 1482 * half_gi - to use 4us v/s 3.6 us for symbol time
1441 */ 1483 */
1442static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf, 1484static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, int pktlen,
1443 int width, int half_gi, bool shortPreamble) 1485 int width, int half_gi, bool shortPreamble)
1444{ 1486{
1445 u32 nbits, nsymbits, duration, nsymbols; 1487 u32 nbits, nsymbits, duration, nsymbols;
1446 int streams, pktlen; 1488 int streams;
1447
1448 pktlen = bf_isaggr(bf) ? bf->bf_al : bf->bf_frmlen;
1449 1489
1450 /* find number of symbols: PLCP + data */ 1490 /* find number of symbols: PLCP + data */
1451 streams = HT_RC_2_STREAMS(rix); 1491 streams = HT_RC_2_STREAMS(rix);
@@ -1464,7 +1504,19 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1464 return duration; 1504 return duration;
1465} 1505}
1466 1506
1467static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) 1507u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1508{
1509 struct ath_hw *ah = sc->sc_ah;
1510 struct ath9k_channel *curchan = ah->curchan;
1511 if ((sc->sc_flags & SC_OP_ENABLE_APM) &&
1512 (curchan->channelFlags & CHANNEL_5GHZ) &&
1513 (chainmask == 0x7) && (rate < 0x90))
1514 return 0x3;
1515 else
1516 return chainmask;
1517}
1518
1519static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf, int len)
1468{ 1520{
1469 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1521 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1470 struct ath9k_11n_rate_series series[4]; 1522 struct ath9k_11n_rate_series series[4];
@@ -1504,7 +1556,6 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1504 1556
1505 rix = rates[i].idx; 1557 rix = rates[i].idx;
1506 series[i].Tries = rates[i].count; 1558 series[i].Tries = rates[i].count;
1507 series[i].ChSel = common->tx_chainmask;
1508 1559
1509 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) || 1560 if ((sc->config.ath_aggr_prot && bf_isaggr(bf)) ||
1510 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) { 1561 (rates[i].flags & IEEE80211_TX_RC_USE_RTS_CTS)) {
@@ -1527,14 +1578,16 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1527 if (rates[i].flags & IEEE80211_TX_RC_MCS) { 1578 if (rates[i].flags & IEEE80211_TX_RC_MCS) {
1528 /* MCS rates */ 1579 /* MCS rates */
1529 series[i].Rate = rix | 0x80; 1580 series[i].Rate = rix | 0x80;
1530 series[i].PktDuration = ath_pkt_duration(sc, rix, bf, 1581 series[i].ChSel = ath_txchainmask_reduction(sc,
1582 common->tx_chainmask, series[i].Rate);
1583 series[i].PktDuration = ath_pkt_duration(sc, rix, len,
1531 is_40, is_sgi, is_sp); 1584 is_40, is_sgi, is_sp);
1532 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC)) 1585 if (rix < 8 && (tx_info->flags & IEEE80211_TX_CTL_STBC))
1533 series[i].RateFlags |= ATH9K_RATESERIES_STBC; 1586 series[i].RateFlags |= ATH9K_RATESERIES_STBC;
1534 continue; 1587 continue;
1535 } 1588 }
1536 1589
1537 /* legcay rates */ 1590 /* legacy rates */
1538 if ((tx_info->band == IEEE80211_BAND_2GHZ) && 1591 if ((tx_info->band == IEEE80211_BAND_2GHZ) &&
1539 !(rate->flags & IEEE80211_RATE_ERP_G)) 1592 !(rate->flags & IEEE80211_RATE_ERP_G))
1540 phy = WLAN_RC_PHY_CCK; 1593 phy = WLAN_RC_PHY_CCK;
@@ -1550,12 +1603,18 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1550 is_sp = false; 1603 is_sp = false;
1551 } 1604 }
1552 1605
1606 if (bf->bf_state.bfs_paprd)
1607 series[i].ChSel = common->tx_chainmask;
1608 else
1609 series[i].ChSel = ath_txchainmask_reduction(sc,
1610 common->tx_chainmask, series[i].Rate);
1611
1553 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah, 1612 series[i].PktDuration = ath9k_hw_computetxtime(sc->sc_ah,
1554 phy, rate->bitrate * 100, bf->bf_frmlen, rix, is_sp); 1613 phy, rate->bitrate * 100, len, rix, is_sp);
1555 } 1614 }
1556 1615
1557 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */ 1616 /* For AR5416 - RTS cannot be followed by a frame larger than 8K */
1558 if (bf_isaggr(bf) && (bf->bf_al > sc->sc_ah->caps.rts_aggr_limit)) 1617 if (bf_isaggr(bf) && (len > sc->sc_ah->caps.rts_aggr_limit))
1559 flags &= ~ATH9K_TXDESC_RTSENA; 1618 flags &= ~ATH9K_TXDESC_RTSENA;
1560 1619
1561 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */ 1620 /* ATH9K_TXDESC_RTSENA and ATH9K_TXDESC_CTSENA are mutually exclusive. */
@@ -1572,67 +1631,29 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1572 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192); 1631 ath9k_hw_set11n_burstduration(sc->sc_ah, bf->bf_desc, 8192);
1573} 1632}
1574 1633
1575static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf, 1634static struct ath_buf *ath_tx_setup_buffer(struct ieee80211_hw *hw,
1576 struct sk_buff *skb, 1635 struct ath_txq *txq,
1577 struct ath_tx_control *txctl) 1636 struct sk_buff *skb)
1578{ 1637{
1579 struct ath_wiphy *aphy = hw->priv; 1638 struct ath_wiphy *aphy = hw->priv;
1580 struct ath_softc *sc = aphy->sc; 1639 struct ath_softc *sc = aphy->sc;
1581 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1640 struct ath_hw *ah = sc->sc_ah;
1582 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1641 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1583 int hdrlen; 1642 struct ath_frame_info *fi = get_frame_info(skb);
1584 __le16 fc; 1643 struct ath_buf *bf;
1585 int padpos, padsize; 1644 struct ath_desc *ds;
1586 bool use_ldpc = false; 1645 int frm_type;
1587 1646
1588 tx_info->pad[0] = 0; 1647 bf = ath_tx_get_buffer(sc);
1589 switch (txctl->frame_type) { 1648 if (!bf) {
1590 case ATH9K_IFT_NOT_INTERNAL: 1649 ath_dbg(common, ATH_DBG_XMIT, "TX buffers are full\n");
1591 break; 1650 return NULL;
1592 case ATH9K_IFT_PAUSE:
1593 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_PAUSE;
1594 /* fall through */
1595 case ATH9K_IFT_UNPAUSE:
1596 tx_info->pad[0] |= ATH_TX_INFO_FRAME_TYPE_INTERNAL;
1597 break;
1598 } 1651 }
1599 hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1600 fc = hdr->frame_control;
1601 1652
1602 ATH_TXBUF_RESET(bf); 1653 ATH_TXBUF_RESET(bf);
1603 1654
1604 bf->aphy = aphy; 1655 bf->aphy = aphy;
1605 bf->bf_frmlen = skb->len + FCS_LEN; 1656 bf->bf_flags = setup_tx_flags(skb);
1606 /* Remove the padding size from bf_frmlen, if any */
1607 padpos = ath9k_cmn_padpos(hdr->frame_control);
1608 padsize = padpos & 3;
1609 if (padsize && skb->len>padpos+padsize) {
1610 bf->bf_frmlen -= padsize;
1611 }
1612
1613 if (!txctl->paprd && conf_is_ht(&hw->conf)) {
1614 bf->bf_state.bf_type |= BUF_HT;
1615 if (tx_info->flags & IEEE80211_TX_CTL_LDPC)
1616 use_ldpc = true;
1617 }
1618
1619 bf->bf_state.bfs_paprd = txctl->paprd;
1620 if (txctl->paprd)
1621 bf->bf_state.bfs_paprd_timestamp = jiffies;
1622 bf->bf_flags = setup_tx_flags(skb, use_ldpc);
1623
1624 bf->bf_keytype = ath9k_cmn_get_hw_crypto_keytype(skb);
1625 if (bf->bf_keytype != ATH9K_KEY_TYPE_CLEAR) {
1626 bf->bf_frmlen += tx_info->control.hw_key->icv_len;
1627 bf->bf_keyix = tx_info->control.hw_key->hw_key_idx;
1628 } else {
1629 bf->bf_keyix = ATH9K_TXKEYIX_INVALID;
1630 }
1631
1632 if (ieee80211_is_data_qos(fc) && bf_isht(bf) &&
1633 (sc->sc_flags & SC_OP_TXAGGR))
1634 assign_aggr_tid_seqno(skb, bf);
1635
1636 bf->bf_mpdu = skb; 1657 bf->bf_mpdu = skb;
1637 1658
1638 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data, 1659 bf->bf_buf_addr = dma_map_single(sc->dev, skb->data,
@@ -1640,42 +1661,19 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1640 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 1661 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
1641 bf->bf_mpdu = NULL; 1662 bf->bf_mpdu = NULL;
1642 bf->bf_buf_addr = 0; 1663 bf->bf_buf_addr = 0;
1643 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL, 1664 ath_err(ath9k_hw_common(sc->sc_ah),
1644 "dma_mapping_error() on TX\n"); 1665 "dma_mapping_error() on TX\n");
1645 return -ENOMEM; 1666 ath_tx_return_buffer(sc, bf);
1667 return NULL;
1646 } 1668 }
1647 1669
1648 bf->bf_tx_aborted = false;
1649
1650 return 0;
1651}
1652
1653/* FIXME: tx power */
1654static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1655 struct ath_tx_control *txctl)
1656{
1657 struct sk_buff *skb = bf->bf_mpdu;
1658 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1659 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1660 struct ath_node *an = NULL;
1661 struct list_head bf_head;
1662 struct ath_desc *ds;
1663 struct ath_atx_tid *tid;
1664 struct ath_hw *ah = sc->sc_ah;
1665 int frm_type;
1666 __le16 fc;
1667
1668 frm_type = get_hw_packet_type(skb); 1670 frm_type = get_hw_packet_type(skb);
1669 fc = hdr->frame_control;
1670
1671 INIT_LIST_HEAD(&bf_head);
1672 list_add_tail(&bf->list, &bf_head);
1673 1671
1674 ds = bf->bf_desc; 1672 ds = bf->bf_desc;
1675 ath9k_hw_set_desc_link(ah, ds, 0); 1673 ath9k_hw_set_desc_link(ah, ds, 0);
1676 1674
1677 ath9k_hw_set11n_txdesc(ah, ds, bf->bf_frmlen, frm_type, MAX_RATE_POWER, 1675 ath9k_hw_set11n_txdesc(ah, ds, fi->framelen, frm_type, MAX_RATE_POWER,
1678 bf->bf_keyix, bf->bf_keytype, bf->bf_flags); 1676 fi->keyix, fi->keytype, bf->bf_flags);
1679 1677
1680 ath9k_hw_filltxdesc(ah, ds, 1678 ath9k_hw_filltxdesc(ah, ds,
1681 skb->len, /* segment length */ 1679 skb->len, /* segment length */
@@ -1683,42 +1681,53 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1683 true, /* last segment */ 1681 true, /* last segment */
1684 ds, /* first descriptor */ 1682 ds, /* first descriptor */
1685 bf->bf_buf_addr, 1683 bf->bf_buf_addr,
1686 txctl->txq->axq_qnum); 1684 txq->axq_qnum);
1685
1686
1687 return bf;
1688}
1687 1689
1688 if (bf->bf_state.bfs_paprd) 1690/* FIXME: tx power */
1689 ar9003_hw_set_paprd_txdesc(ah, ds, bf->bf_state.bfs_paprd); 1691static void ath_tx_start_dma(struct ath_softc *sc, struct ath_buf *bf,
1692 struct ath_tx_control *txctl)
1693{
1694 struct sk_buff *skb = bf->bf_mpdu;
1695 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1696 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1697 struct list_head bf_head;
1698 struct ath_atx_tid *tid = NULL;
1699 u8 tidno;
1690 1700
1691 spin_lock_bh(&txctl->txq->axq_lock); 1701 spin_lock_bh(&txctl->txq->axq_lock);
1692 1702
1693 if (bf_isht(bf) && (sc->sc_flags & SC_OP_TXAGGR) && 1703 if (ieee80211_is_data_qos(hdr->frame_control) && txctl->an) {
1694 tx_info->control.sta) { 1704 tidno = ieee80211_get_qos_ctl(hdr)[0] &
1695 an = (struct ath_node *)tx_info->control.sta->drv_priv; 1705 IEEE80211_QOS_CTL_TID_MASK;
1696 tid = ATH_AN_2_TID(an, bf->bf_tidno); 1706 tid = ATH_AN_2_TID(txctl->an, tidno);
1697 1707
1698 if (!ieee80211_is_data_qos(fc)) { 1708 WARN_ON(tid->ac->txq != txctl->txq);
1699 ath_tx_send_normal(sc, txctl->txq, &bf_head); 1709 }
1700 goto tx_done;
1701 }
1702 1710
1703 if (tx_info->flags & IEEE80211_TX_CTL_AMPDU) { 1711 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && tid) {
1704 /* 1712 /*
1705 * Try aggregation if it's a unicast data frame 1713 * Try aggregation if it's a unicast data frame
1706 * and the destination is HT capable. 1714 * and the destination is HT capable.
1707 */ 1715 */
1708 ath_tx_send_ampdu(sc, tid, &bf_head, txctl); 1716 ath_tx_send_ampdu(sc, tid, bf, txctl);
1709 } else {
1710 /*
1711 * Send this frame as regular when ADDBA
1712 * exchange is neither complete nor pending.
1713 */
1714 ath_tx_send_ht_normal(sc, txctl->txq,
1715 tid, &bf_head);
1716 }
1717 } else { 1717 } else {
1718 ath_tx_send_normal(sc, txctl->txq, &bf_head); 1718 INIT_LIST_HEAD(&bf_head);
1719 list_add_tail(&bf->list, &bf_head);
1720
1721 bf->bf_state.bfs_ftype = txctl->frame_type;
1722 bf->bf_state.bfs_paprd = txctl->paprd;
1723
1724 if (bf->bf_state.bfs_paprd)
1725 ar9003_hw_set_paprd_txdesc(sc->sc_ah, bf->bf_desc,
1726 bf->bf_state.bfs_paprd);
1727
1728 ath_tx_send_normal(sc, txctl->txq, tid, &bf_head);
1719 } 1729 }
1720 1730
1721tx_done:
1722 spin_unlock_bh(&txctl->txq->axq_lock); 1731 spin_unlock_bh(&txctl->txq->axq_lock);
1723} 1732}
1724 1733
@@ -1726,66 +1735,23 @@ tx_done:
1726int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb, 1735int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1727 struct ath_tx_control *txctl) 1736 struct ath_tx_control *txctl)
1728{ 1737{
1738 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1739 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1740 struct ieee80211_sta *sta = info->control.sta;
1729 struct ath_wiphy *aphy = hw->priv; 1741 struct ath_wiphy *aphy = hw->priv;
1730 struct ath_softc *sc = aphy->sc; 1742 struct ath_softc *sc = aphy->sc;
1731 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1732 struct ath_txq *txq = txctl->txq; 1743 struct ath_txq *txq = txctl->txq;
1733 struct ath_buf *bf; 1744 struct ath_buf *bf;
1734 int q, r;
1735
1736 bf = ath_tx_get_buffer(sc);
1737 if (!bf) {
1738 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
1739 return -1;
1740 }
1741
1742 r = ath_tx_setup_buffer(hw, bf, skb, txctl);
1743 if (unlikely(r)) {
1744 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
1745
1746 /* upon ath_tx_processq() this TX queue will be resumed, we
1747 * guarantee this will happen by knowing beforehand that
1748 * we will at least have to run TX completionon one buffer
1749 * on the queue */
1750 spin_lock_bh(&txq->axq_lock);
1751 if (!txq->stopped && txq->axq_depth > 1) {
1752 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1753 txq->stopped = 1;
1754 }
1755 spin_unlock_bh(&txq->axq_lock);
1756
1757 ath_tx_return_buffer(sc, bf);
1758
1759 return r;
1760 }
1761
1762 q = skb_get_queue_mapping(skb);
1763 if (q >= 4)
1764 q = 0;
1765
1766 spin_lock_bh(&txq->axq_lock);
1767 if (++sc->tx.pending_frames[q] > ATH_MAX_QDEPTH && !txq->stopped) {
1768 ath_mac80211_stop_queue(sc, skb_get_queue_mapping(skb));
1769 txq->stopped = 1;
1770 }
1771 spin_unlock_bh(&txq->axq_lock);
1772
1773 ath_tx_start_dma(sc, bf, txctl);
1774
1775 return 0;
1776}
1777
1778void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1779{
1780 struct ath_wiphy *aphy = hw->priv;
1781 struct ath_softc *sc = aphy->sc;
1782 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1783 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
1784 int padpos, padsize; 1745 int padpos, padsize;
1785 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1746 int frmlen = skb->len + FCS_LEN;
1786 struct ath_tx_control txctl; 1747 int q;
1748
1749 /* NOTE: sta can be NULL according to net/mac80211.h */
1750 if (sta)
1751 txctl->an = (struct ath_node *)sta->drv_priv;
1787 1752
1788 memset(&txctl, 0, sizeof(struct ath_tx_control)); 1753 if (info->control.hw_key)
1754 frmlen += info->control.hw_key->icv_len;
1789 1755
1790 /* 1756 /*
1791 * As a temporary workaround, assign seq# here; this will likely need 1757 * As a temporary workaround, assign seq# here; this will likely need
@@ -1802,30 +1768,37 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1802 /* Add the padding after the header if this is not already done */ 1768 /* Add the padding after the header if this is not already done */
1803 padpos = ath9k_cmn_padpos(hdr->frame_control); 1769 padpos = ath9k_cmn_padpos(hdr->frame_control);
1804 padsize = padpos & 3; 1770 padsize = padpos & 3;
1805 if (padsize && skb->len>padpos) { 1771 if (padsize && skb->len > padpos) {
1806 if (skb_headroom(skb) < padsize) { 1772 if (skb_headroom(skb) < padsize)
1807 ath_print(common, ATH_DBG_XMIT, 1773 return -ENOMEM;
1808 "TX CABQ padding failed\n"); 1774
1809 dev_kfree_skb_any(skb);
1810 return;
1811 }
1812 skb_push(skb, padsize); 1775 skb_push(skb, padsize);
1813 memmove(skb->data, skb->data + padsize, padpos); 1776 memmove(skb->data, skb->data + padsize, padpos);
1814 } 1777 }
1815 1778
1816 txctl.txq = sc->beacon.cabq; 1779 setup_frame_info(hw, skb, frmlen);
1817 1780
1818 ath_print(common, ATH_DBG_XMIT, 1781 /*
1819 "transmitting CABQ packet, skb: %p\n", skb); 1782 * At this point, the vif, hw_key and sta pointers in the tx control
1783 * info are no longer valid (overwritten by the ath_frame_info data.
1784 */
1785
1786 bf = ath_tx_setup_buffer(hw, txctl->txq, skb);
1787 if (unlikely(!bf))
1788 return -ENOMEM;
1820 1789
1821 if (ath_tx_start(hw, skb, &txctl) != 0) { 1790 q = skb_get_queue_mapping(skb);
1822 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n"); 1791 spin_lock_bh(&txq->axq_lock);
1823 goto exit; 1792 if (txq == sc->tx.txq_map[q] &&
1793 ++txq->pending_frames > ATH_MAX_QDEPTH && !txq->stopped) {
1794 ath_mac80211_stop_queue(sc, q);
1795 txq->stopped = 1;
1824 } 1796 }
1797 spin_unlock_bh(&txq->axq_lock);
1825 1798
1826 return; 1799 ath_tx_start_dma(sc, bf, txctl);
1827exit: 1800
1828 dev_kfree_skb_any(skb); 1801 return 0;
1829} 1802}
1830 1803
1831/*****************/ 1804/*****************/
@@ -1833,7 +1806,8 @@ exit:
1833/*****************/ 1806/*****************/
1834 1807
1835static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb, 1808static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1836 struct ath_wiphy *aphy, int tx_flags) 1809 struct ath_wiphy *aphy, int tx_flags, int ftype,
1810 struct ath_txq *txq)
1837{ 1811{
1838 struct ieee80211_hw *hw = sc->hw; 1812 struct ieee80211_hw *hw = sc->hw;
1839 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1813 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
@@ -1841,7 +1815,7 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1841 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data; 1815 struct ieee80211_hdr * hdr = (struct ieee80211_hdr *)skb->data;
1842 int q, padpos, padsize; 1816 int q, padpos, padsize;
1843 1817
1844 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1818 ath_dbg(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1845 1819
1846 if (aphy) 1820 if (aphy)
1847 hw = aphy->hw; 1821 hw = aphy->hw;
@@ -1867,24 +1841,24 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1867 1841
1868 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) { 1842 if (sc->ps_flags & PS_WAIT_FOR_TX_ACK) {
1869 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK; 1843 sc->ps_flags &= ~PS_WAIT_FOR_TX_ACK;
1870 ath_print(common, ATH_DBG_PS, 1844 ath_dbg(common, ATH_DBG_PS,
1871 "Going back to sleep after having " 1845 "Going back to sleep after having received TX status (0x%lx)\n",
1872 "received TX status (0x%lx)\n",
1873 sc->ps_flags & (PS_WAIT_FOR_BEACON | 1846 sc->ps_flags & (PS_WAIT_FOR_BEACON |
1874 PS_WAIT_FOR_CAB | 1847 PS_WAIT_FOR_CAB |
1875 PS_WAIT_FOR_PSPOLL_DATA | 1848 PS_WAIT_FOR_PSPOLL_DATA |
1876 PS_WAIT_FOR_TX_ACK)); 1849 PS_WAIT_FOR_TX_ACK));
1877 } 1850 }
1878 1851
1879 if (unlikely(tx_info->pad[0] & ATH_TX_INFO_FRAME_TYPE_INTERNAL)) 1852 if (unlikely(ftype))
1880 ath9k_tx_status(hw, skb); 1853 ath9k_tx_status(hw, skb, ftype);
1881 else { 1854 else {
1882 q = skb_get_queue_mapping(skb); 1855 q = skb_get_queue_mapping(skb);
1883 if (q >= 4) 1856 if (txq == sc->tx.txq_map[q]) {
1884 q = 0; 1857 spin_lock_bh(&txq->axq_lock);
1885 1858 if (WARN_ON(--txq->pending_frames < 0))
1886 if (--sc->tx.pending_frames[q] < 0) 1859 txq->pending_frames = 0;
1887 sc->tx.pending_frames[q] = 0; 1860 spin_unlock_bh(&txq->axq_lock);
1861 }
1888 1862
1889 ieee80211_tx_status(hw, skb); 1863 ieee80211_tx_status(hw, skb);
1890 } 1864 }
@@ -1912,15 +1886,14 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1912 bf->bf_buf_addr = 0; 1886 bf->bf_buf_addr = 0;
1913 1887
1914 if (bf->bf_state.bfs_paprd) { 1888 if (bf->bf_state.bfs_paprd) {
1915 if (time_after(jiffies, 1889 if (!sc->paprd_pending)
1916 bf->bf_state.bfs_paprd_timestamp +
1917 msecs_to_jiffies(ATH_PAPRD_TIMEOUT)))
1918 dev_kfree_skb_any(skb); 1890 dev_kfree_skb_any(skb);
1919 else 1891 else
1920 complete(&sc->paprd_complete); 1892 complete(&sc->paprd_complete);
1921 } else { 1893 } else {
1922 ath_debug_stat_tx(sc, txq, bf, ts); 1894 ath_debug_stat_tx(sc, bf, ts);
1923 ath_tx_complete(sc, skb, bf->aphy, tx_flags); 1895 ath_tx_complete(sc, skb, bf->aphy, tx_flags,
1896 bf->bf_state.bfs_ftype, txq);
1924 } 1897 }
1925 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't 1898 /* At this point, skb (bf->bf_mpdu) is consumed...make sure we don't
1926 * accidentally reference it later. 1899 * accidentally reference it later.
@@ -1935,42 +1908,15 @@ static void ath_tx_complete_buf(struct ath_softc *sc, struct ath_buf *bf,
1935 spin_unlock_irqrestore(&sc->tx.txbuflock, flags); 1908 spin_unlock_irqrestore(&sc->tx.txbuflock, flags);
1936} 1909}
1937 1910
1938static int ath_tx_num_badfrms(struct ath_softc *sc, struct ath_buf *bf,
1939 struct ath_tx_status *ts, int txok)
1940{
1941 u16 seq_st = 0;
1942 u32 ba[WME_BA_BMP_SIZE >> 5];
1943 int ba_index;
1944 int nbad = 0;
1945 int isaggr = 0;
1946
1947 if (bf->bf_lastbf->bf_tx_aborted)
1948 return 0;
1949
1950 isaggr = bf_isaggr(bf);
1951 if (isaggr) {
1952 seq_st = ts->ts_seqnum;
1953 memcpy(ba, &ts->ba_low, WME_BA_BMP_SIZE >> 3);
1954 }
1955
1956 while (bf) {
1957 ba_index = ATH_BA_INDEX(seq_st, bf->bf_seqno);
1958 if (!txok || (isaggr && !ATH_BA_ISSET(ba, ba_index)))
1959 nbad++;
1960
1961 bf = bf->bf_next;
1962 }
1963
1964 return nbad;
1965}
1966
1967static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts, 1911static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1968 int nbad, int txok, bool update_rc) 1912 int nframes, int nbad, int txok, bool update_rc)
1969{ 1913{
1970 struct sk_buff *skb = bf->bf_mpdu; 1914 struct sk_buff *skb = bf->bf_mpdu;
1971 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 1915 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1972 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1916 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1973 struct ieee80211_hw *hw = bf->aphy->hw; 1917 struct ieee80211_hw *hw = bf->aphy->hw;
1918 struct ath_softc *sc = bf->aphy->sc;
1919 struct ath_hw *ah = sc->sc_ah;
1974 u8 i, tx_rateindex; 1920 u8 i, tx_rateindex;
1975 1921
1976 if (txok) 1922 if (txok)
@@ -1984,22 +1930,32 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
1984 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) { 1930 if ((tx_info->flags & IEEE80211_TX_CTL_AMPDU) && update_rc) {
1985 tx_info->flags |= IEEE80211_TX_STAT_AMPDU; 1931 tx_info->flags |= IEEE80211_TX_STAT_AMPDU;
1986 1932
1987 BUG_ON(nbad > bf->bf_nframes); 1933 BUG_ON(nbad > nframes);
1988 1934
1989 tx_info->status.ampdu_len = bf->bf_nframes; 1935 tx_info->status.ampdu_len = nframes;
1990 tx_info->status.ampdu_ack_len = bf->bf_nframes - nbad; 1936 tx_info->status.ampdu_ack_len = nframes - nbad;
1991 } 1937 }
1992 1938
1993 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 && 1939 if ((ts->ts_status & ATH9K_TXERR_FILT) == 0 &&
1994 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) { 1940 (bf->bf_flags & ATH9K_TXDESC_NOACK) == 0 && update_rc) {
1995 if (ieee80211_is_data(hdr->frame_control)) { 1941 /*
1996 if (ts->ts_flags & 1942 * If an underrun error is seen assume it as an excessive
1997 (ATH9K_TX_DATA_UNDERRUN | ATH9K_TX_DELIM_UNDERRUN)) 1943 * retry only if max frame trigger level has been reached
1998 tx_info->pad[0] |= ATH_TX_INFO_UNDERRUN; 1944 * (2 KB for single stream, and 4 KB for dual stream).
1999 if ((ts->ts_status & ATH9K_TXERR_XRETRY) || 1945 * Adjust the long retry as if the frame was tried
2000 (ts->ts_status & ATH9K_TXERR_FIFO)) 1946 * hw->max_rate_tries times to affect how rate control updates
2001 tx_info->pad[0] |= ATH_TX_INFO_XRETRY; 1947 * PER for the failed rate.
2002 } 1948 * In case of congestion on the bus penalizing this type of
1949 * underruns should help hardware actually transmit new frames
1950 * successfully by eventually preferring slower rates.
1951 * This itself should also alleviate congestion on the bus.
1952 */
1953 if (ieee80211_is_data(hdr->frame_control) &&
1954 (ts->ts_flags & (ATH9K_TX_DATA_UNDERRUN |
1955 ATH9K_TX_DELIM_UNDERRUN)) &&
1956 ah->tx_trig_level >= sc->sc_ah->caps.tx_triglevel_max)
1957 tx_info->status.rates[tx_rateindex].count =
1958 hw->max_rate_tries;
2003 } 1959 }
2004 1960
2005 for (i = tx_rateindex + 1; i < hw->max_rates; i++) { 1961 for (i = tx_rateindex + 1; i < hw->max_rates; i++) {
@@ -2010,16 +1966,13 @@ static void ath_tx_rc_status(struct ath_buf *bf, struct ath_tx_status *ts,
2010 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1; 1966 tx_info->status.rates[tx_rateindex].count = ts->ts_longretry + 1;
2011} 1967}
2012 1968
2013static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq) 1969static void ath_wake_mac80211_queue(struct ath_softc *sc, int qnum)
2014{ 1970{
2015 int qnum; 1971 struct ath_txq *txq;
2016
2017 qnum = ath_get_mac80211_qnum(txq->axq_class, sc);
2018 if (qnum == -1)
2019 return;
2020 1972
1973 txq = sc->tx.txq_map[qnum];
2021 spin_lock_bh(&txq->axq_lock); 1974 spin_lock_bh(&txq->axq_lock);
2022 if (txq->stopped && sc->tx.pending_frames[qnum] < ATH_MAX_QDEPTH) { 1975 if (txq->stopped && txq->pending_frames < ATH_MAX_QDEPTH) {
2023 if (ath_mac80211_start_queue(sc, qnum)) 1976 if (ath_mac80211_start_queue(sc, qnum))
2024 txq->stopped = 0; 1977 txq->stopped = 0;
2025 } 1978 }
@@ -2036,10 +1989,11 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2036 struct ath_tx_status ts; 1989 struct ath_tx_status ts;
2037 int txok; 1990 int txok;
2038 int status; 1991 int status;
1992 int qnum;
2039 1993
2040 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 1994 ath_dbg(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
2041 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 1995 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
2042 txq->axq_link); 1996 txq->axq_link);
2043 1997
2044 for (;;) { 1998 for (;;) {
2045 spin_lock_bh(&txq->axq_lock); 1999 spin_lock_bh(&txq->axq_lock);
@@ -2096,6 +2050,9 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2096 txq->axq_tx_inprogress = false; 2050 txq->axq_tx_inprogress = false;
2097 if (bf_held) 2051 if (bf_held)
2098 list_del(&bf_held->list); 2052 list_del(&bf_held->list);
2053
2054 if (bf_is_ampdu_not_probing(bf))
2055 txq->axq_ampdu_depth--;
2099 spin_unlock_bh(&txq->axq_lock); 2056 spin_unlock_bh(&txq->axq_lock);
2100 2057
2101 if (bf_held) 2058 if (bf_held)
@@ -2108,15 +2065,19 @@ static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
2108 */ 2065 */
2109 if (ts.ts_status & ATH9K_TXERR_XRETRY) 2066 if (ts.ts_status & ATH9K_TXERR_XRETRY)
2110 bf->bf_state.bf_type |= BUF_XRETRY; 2067 bf->bf_state.bf_type |= BUF_XRETRY;
2111 ath_tx_rc_status(bf, &ts, txok ? 0 : 1, txok, true); 2068 ath_tx_rc_status(bf, &ts, 1, txok ? 0 : 1, txok, true);
2112 } 2069 }
2113 2070
2071 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2072
2114 if (bf_isampdu(bf)) 2073 if (bf_isampdu(bf))
2115 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok); 2074 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &ts, txok,
2075 true);
2116 else 2076 else
2117 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0); 2077 ath_tx_complete_buf(sc, bf, txq, &bf_head, &ts, txok, 0);
2118 2078
2119 ath_wake_mac80211_queue(sc, txq); 2079 if (txq == sc->tx.txq_map[qnum])
2080 ath_wake_mac80211_queue(sc, qnum);
2120 2081
2121 spin_lock_bh(&txq->axq_lock); 2082 spin_lock_bh(&txq->axq_lock);
2122 if (sc->sc_flags & SC_OP_TXAGGR) 2083 if (sc->sc_flags & SC_OP_TXAGGR)
@@ -2150,8 +2111,8 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2150 } 2111 }
2151 2112
2152 if (needreset) { 2113 if (needreset) {
2153 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET, 2114 ath_dbg(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2154 "tx hung, resetting the chip\n"); 2115 "tx hung, resetting the chip\n");
2155 ath9k_ps_wakeup(sc); 2116 ath9k_ps_wakeup(sc);
2156 ath_reset(sc, true); 2117 ath_reset(sc, true);
2157 ath9k_ps_restore(sc); 2118 ath9k_ps_restore(sc);
@@ -2186,14 +2147,15 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2186 struct list_head bf_head; 2147 struct list_head bf_head;
2187 int status; 2148 int status;
2188 int txok; 2149 int txok;
2150 int qnum;
2189 2151
2190 for (;;) { 2152 for (;;) {
2191 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs); 2153 status = ath9k_hw_txprocdesc(ah, NULL, (void *)&txs);
2192 if (status == -EINPROGRESS) 2154 if (status == -EINPROGRESS)
2193 break; 2155 break;
2194 if (status == -EIO) { 2156 if (status == -EIO) {
2195 ath_print(common, ATH_DBG_XMIT, 2157 ath_dbg(common, ATH_DBG_XMIT,
2196 "Error processing tx status\n"); 2158 "Error processing tx status\n");
2197 break; 2159 break;
2198 } 2160 }
2199 2161
@@ -2219,6 +2181,8 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2219 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH); 2181 INCR(txq->txq_tailidx, ATH_TXFIFO_DEPTH);
2220 txq->axq_depth--; 2182 txq->axq_depth--;
2221 txq->axq_tx_inprogress = false; 2183 txq->axq_tx_inprogress = false;
2184 if (bf_is_ampdu_not_probing(bf))
2185 txq->axq_ampdu_depth--;
2222 spin_unlock_bh(&txq->axq_lock); 2186 spin_unlock_bh(&txq->axq_lock);
2223 2187
2224 txok = !(txs.ts_status & ATH9K_TXERR_MASK); 2188 txok = !(txs.ts_status & ATH9K_TXERR_MASK);
@@ -2226,16 +2190,20 @@ void ath_tx_edma_tasklet(struct ath_softc *sc)
2226 if (!bf_isampdu(bf)) { 2190 if (!bf_isampdu(bf)) {
2227 if (txs.ts_status & ATH9K_TXERR_XRETRY) 2191 if (txs.ts_status & ATH9K_TXERR_XRETRY)
2228 bf->bf_state.bf_type |= BUF_XRETRY; 2192 bf->bf_state.bf_type |= BUF_XRETRY;
2229 ath_tx_rc_status(bf, &txs, txok ? 0 : 1, txok, true); 2193 ath_tx_rc_status(bf, &txs, 1, txok ? 0 : 1, txok, true);
2230 } 2194 }
2231 2195
2196 qnum = skb_get_queue_mapping(bf->bf_mpdu);
2197
2232 if (bf_isampdu(bf)) 2198 if (bf_isampdu(bf))
2233 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs, txok); 2199 ath_tx_complete_aggr(sc, txq, bf, &bf_head, &txs,
2200 txok, true);
2234 else 2201 else
2235 ath_tx_complete_buf(sc, bf, txq, &bf_head, 2202 ath_tx_complete_buf(sc, bf, txq, &bf_head,
2236 &txs, txok, 0); 2203 &txs, txok, 0);
2237 2204
2238 ath_wake_mac80211_queue(sc, txq); 2205 if (txq == sc->tx.txq_map[qnum])
2206 ath_wake_mac80211_queue(sc, qnum);
2239 2207
2240 spin_lock_bh(&txq->axq_lock); 2208 spin_lock_bh(&txq->axq_lock);
2241 if (!list_empty(&txq->txq_fifo_pending)) { 2209 if (!list_empty(&txq->txq_fifo_pending)) {
@@ -2300,16 +2268,16 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2300 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, 2268 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2301 "tx", nbufs, 1, 1); 2269 "tx", nbufs, 1, 1);
2302 if (error != 0) { 2270 if (error != 0) {
2303 ath_print(common, ATH_DBG_FATAL, 2271 ath_err(common,
2304 "Failed to allocate tx descriptors: %d\n", error); 2272 "Failed to allocate tx descriptors: %d\n", error);
2305 goto err; 2273 goto err;
2306 } 2274 }
2307 2275
2308 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, 2276 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2309 "beacon", ATH_BCBUF, 1, 1); 2277 "beacon", ATH_BCBUF, 1, 1);
2310 if (error != 0) { 2278 if (error != 0) {
2311 ath_print(common, ATH_DBG_FATAL, 2279 ath_err(common,
2312 "Failed to allocate beacon descriptors: %d\n", error); 2280 "Failed to allocate beacon descriptors: %d\n", error);
2313 goto err; 2281 goto err;
2314 } 2282 }
2315 2283
@@ -2367,7 +2335,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an)
2367 for (acno = 0, ac = &an->ac[acno]; 2335 for (acno = 0, ac = &an->ac[acno];
2368 acno < WME_NUM_AC; acno++, ac++) { 2336 acno < WME_NUM_AC; acno++, ac++) {
2369 ac->sched = false; 2337 ac->sched = false;
2370 ac->qnum = sc->tx.hwq_map[acno]; 2338 ac->txq = sc->tx.txq_map[acno];
2371 INIT_LIST_HEAD(&ac->tid_q); 2339 INIT_LIST_HEAD(&ac->tid_q);
2372 } 2340 }
2373} 2341}
@@ -2377,17 +2345,13 @@ void ath_tx_node_cleanup(struct ath_softc *sc, struct ath_node *an)
2377 struct ath_atx_ac *ac; 2345 struct ath_atx_ac *ac;
2378 struct ath_atx_tid *tid; 2346 struct ath_atx_tid *tid;
2379 struct ath_txq *txq; 2347 struct ath_txq *txq;
2380 int i, tidno; 2348 int tidno;
2381 2349
2382 for (tidno = 0, tid = &an->tid[tidno]; 2350 for (tidno = 0, tid = &an->tid[tidno];
2383 tidno < WME_NUM_TID; tidno++, tid++) { 2351 tidno < WME_NUM_TID; tidno++, tid++) {
2384 i = tid->ac->qnum;
2385
2386 if (!ATH_TXQ_SETUP(sc, i))
2387 continue;
2388 2352
2389 txq = &sc->tx.txq[i];
2390 ac = tid->ac; 2353 ac = tid->ac;
2354 txq = ac->txq;
2391 2355
2392 spin_lock_bh(&txq->axq_lock); 2356 spin_lock_bh(&txq->axq_lock);
2393 2357
diff --git a/drivers/net/wireless/ath/carl9170/carl9170.h b/drivers/net/wireless/ath/carl9170/carl9170.h
index 6cf0c9ef47aa..d07ff7f2fd92 100644
--- a/drivers/net/wireless/ath/carl9170/carl9170.h
+++ b/drivers/net/wireless/ath/carl9170/carl9170.h
@@ -48,7 +48,7 @@
48#include <linux/usb.h> 48#include <linux/usb.h>
49#ifdef CONFIG_CARL9170_LEDS 49#ifdef CONFIG_CARL9170_LEDS
50#include <linux/leds.h> 50#include <linux/leds.h>
51#endif /* CONFIG_CARL170_LEDS */ 51#endif /* CONFIG_CARL9170_LEDS */
52#ifdef CONFIG_CARL9170_WPC 52#ifdef CONFIG_CARL9170_WPC
53#include <linux/input.h> 53#include <linux/input.h>
54#endif /* CONFIG_CARL9170_WPC */ 54#endif /* CONFIG_CARL9170_WPC */
@@ -215,7 +215,7 @@ enum carl9170_restart_reasons {
215 CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS, 215 CARL9170_RR_TOO_MANY_FIRMWARE_ERRORS,
216 CARL9170_RR_WATCHDOG, 216 CARL9170_RR_WATCHDOG,
217 CARL9170_RR_STUCK_TX, 217 CARL9170_RR_STUCK_TX,
218 CARL9170_RR_SLOW_SYSTEM, 218 CARL9170_RR_UNRESPONSIVE_DEVICE,
219 CARL9170_RR_COMMAND_TIMEOUT, 219 CARL9170_RR_COMMAND_TIMEOUT,
220 CARL9170_RR_TOO_MANY_PHY_ERRORS, 220 CARL9170_RR_TOO_MANY_PHY_ERRORS,
221 CARL9170_RR_LOST_RSP, 221 CARL9170_RR_LOST_RSP,
@@ -287,6 +287,7 @@ struct ar9170 {
287 287
288 /* reset / stuck frames/queue detection */ 288 /* reset / stuck frames/queue detection */
289 struct work_struct restart_work; 289 struct work_struct restart_work;
290 struct work_struct ping_work;
290 unsigned int restart_counter; 291 unsigned int restart_counter;
291 unsigned long queue_stop_timeout[__AR9170_NUM_TXQ]; 292 unsigned long queue_stop_timeout[__AR9170_NUM_TXQ];
292 unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ]; 293 unsigned long max_queue_stop_timeout[__AR9170_NUM_TXQ];
diff --git a/drivers/net/wireless/ath/carl9170/cmd.c b/drivers/net/wireless/ath/carl9170/cmd.c
index c21f3364bfec..cdfc94c371b4 100644
--- a/drivers/net/wireless/ath/carl9170/cmd.c
+++ b/drivers/net/wireless/ath/carl9170/cmd.c
@@ -41,7 +41,7 @@
41 41
42int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val) 42int carl9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
43{ 43{
44 __le32 buf[2] = { 44 const __le32 buf[2] = {
45 cpu_to_le32(reg), 45 cpu_to_le32(reg),
46 cpu_to_le32(val), 46 cpu_to_le32(val),
47 }; 47 };
diff --git a/drivers/net/wireless/ath/carl9170/fwcmd.h b/drivers/net/wireless/ath/carl9170/fwcmd.h
index d552166db505..3680dfc70f46 100644
--- a/drivers/net/wireless/ath/carl9170/fwcmd.h
+++ b/drivers/net/wireless/ath/carl9170/fwcmd.h
@@ -97,13 +97,13 @@ struct carl9170_set_key_cmd {
97 __le16 type; 97 __le16 type;
98 u8 macAddr[6]; 98 u8 macAddr[6];
99 u32 key[4]; 99 u32 key[4];
100} __packed; 100} __packed __aligned(4);
101#define CARL9170_SET_KEY_CMD_SIZE 28 101#define CARL9170_SET_KEY_CMD_SIZE 28
102 102
103struct carl9170_disable_key_cmd { 103struct carl9170_disable_key_cmd {
104 __le16 user; 104 __le16 user;
105 __le16 padding; 105 __le16 padding;
106} __packed; 106} __packed __aligned(4);
107#define CARL9170_DISABLE_KEY_CMD_SIZE 4 107#define CARL9170_DISABLE_KEY_CMD_SIZE 4
108 108
109struct carl9170_u32_list { 109struct carl9170_u32_list {
@@ -206,7 +206,7 @@ struct carl9170_cmd {
206 struct carl9170_rx_filter_cmd rx_filter; 206 struct carl9170_rx_filter_cmd rx_filter;
207 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN]; 207 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
208 } __packed; 208 } __packed;
209} __packed; 209} __packed __aligned(4);
210 210
211#define CARL9170_TX_STATUS_QUEUE 3 211#define CARL9170_TX_STATUS_QUEUE 3
212#define CARL9170_TX_STATUS_QUEUE_S 0 212#define CARL9170_TX_STATUS_QUEUE_S 0
@@ -216,6 +216,7 @@ struct carl9170_cmd {
216#define CARL9170_TX_STATUS_TRIES (7 << CARL9170_TX_STATUS_TRIES_S) 216#define CARL9170_TX_STATUS_TRIES (7 << CARL9170_TX_STATUS_TRIES_S)
217#define CARL9170_TX_STATUS_SUCCESS 0x80 217#define CARL9170_TX_STATUS_SUCCESS 0x80
218 218
219#ifdef __CARL9170FW__
219/* 220/*
220 * NOTE: 221 * NOTE:
221 * Both structs [carl9170_tx_status and _carl9170_tx_status] 222 * Both structs [carl9170_tx_status and _carl9170_tx_status]
@@ -232,6 +233,8 @@ struct carl9170_tx_status {
232 u8 tries:3; 233 u8 tries:3;
233 u8 success:1; 234 u8 success:1;
234} __packed; 235} __packed;
236#endif /* __CARL9170FW__ */
237
235struct _carl9170_tx_status { 238struct _carl9170_tx_status {
236 /* 239 /*
237 * This version should be immune to all alignment bugs. 240 * This version should be immune to all alignment bugs.
@@ -272,13 +275,15 @@ struct carl9170_rsp {
272 struct carl9170_rf_init_result rf_init_res; 275 struct carl9170_rf_init_result rf_init_res;
273 struct carl9170_u32_list rreg_res; 276 struct carl9170_u32_list rreg_res;
274 struct carl9170_u32_list echo; 277 struct carl9170_u32_list echo;
278#ifdef __CARL9170FW__
275 struct carl9170_tx_status tx_status[0]; 279 struct carl9170_tx_status tx_status[0];
280#endif /* __CARL9170FW__ */
276 struct _carl9170_tx_status _tx_status[0]; 281 struct _carl9170_tx_status _tx_status[0];
277 struct carl9170_gpio gpio; 282 struct carl9170_gpio gpio;
278 struct carl9170_tsf_rsp tsf; 283 struct carl9170_tsf_rsp tsf;
279 struct carl9170_psm psm; 284 struct carl9170_psm psm;
280 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN]; 285 u8 data[CARL9170_MAX_CMD_PAYLOAD_LEN];
281 } __packed; 286 } __packed;
282} __packed; 287} __packed __aligned(4);
283 288
284#endif /* __CARL9170_SHARED_FWCMD_H */ 289#endif /* __CARL9170_SHARED_FWCMD_H */
diff --git a/drivers/net/wireless/ath/carl9170/hw.h b/drivers/net/wireless/ath/carl9170/hw.h
index 2f471b3f05af..e85df6edfed3 100644
--- a/drivers/net/wireless/ath/carl9170/hw.h
+++ b/drivers/net/wireless/ath/carl9170/hw.h
@@ -712,7 +712,8 @@ struct ar9170_stream {
712 __le16 tag; 712 __le16 tag;
713 713
714 u8 payload[0]; 714 u8 payload[0];
715}; 715} __packed __aligned(4);
716#define AR9170_STREAM_LEN 4
716 717
717#define AR9170_MAX_ACKTABLE_ENTRIES 8 718#define AR9170_MAX_ACKTABLE_ENTRIES 8
718#define AR9170_MAX_VIRTUAL_MAC 7 719#define AR9170_MAX_VIRTUAL_MAC 7
@@ -736,4 +737,8 @@ struct ar9170_stream {
736 737
737#define MOD_VAL(reg, value, newvalue) \ 738#define MOD_VAL(reg, value, newvalue) \
738 (((value) & ~reg) | (((newvalue) << reg##_S) & reg)) 739 (((value) & ~reg) | (((newvalue) << reg##_S) & reg))
740
741#define GET_VAL(reg, value) \
742 (((value) & reg) >> reg##_S)
743
739#endif /* __CARL9170_SHARED_HW_H */ 744#endif /* __CARL9170_SHARED_HW_H */
diff --git a/drivers/net/wireless/ath/carl9170/mac.c b/drivers/net/wireless/ath/carl9170/mac.c
index 2305bc27151c..385cf508479b 100644
--- a/drivers/net/wireless/ath/carl9170/mac.c
+++ b/drivers/net/wireless/ath/carl9170/mac.c
@@ -205,8 +205,8 @@ int carl9170_init_mac(struct ar9170 *ar)
205 carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105); 205 carl9170_regwrite(AR9170_MAC_REG_BACKOFF_PROTECT, 0x105);
206 206
207 /* Aggregation MAX number and timeout */ 207 /* Aggregation MAX number and timeout */
208 carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0xa); 208 carl9170_regwrite(AR9170_MAC_REG_AMPDU_FACTOR, 0x8000a);
209 carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a00); 209 carl9170_regwrite(AR9170_MAC_REG_AMPDU_DENSITY, 0x140a07);
210 210
211 carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER, 211 carl9170_regwrite(AR9170_MAC_REG_FRAMETYPE_FILTER,
212 AR9170_MAC_FTF_DEFAULTS); 212 AR9170_MAC_FTF_DEFAULTS);
@@ -457,8 +457,9 @@ int carl9170_set_beacon_timers(struct ar9170 *ar)
457 457
458int carl9170_update_beacon(struct ar9170 *ar, const bool submit) 458int carl9170_update_beacon(struct ar9170 *ar, const bool submit)
459{ 459{
460 struct sk_buff *skb; 460 struct sk_buff *skb = NULL;
461 struct carl9170_vif_info *cvif; 461 struct carl9170_vif_info *cvif;
462 struct ieee80211_tx_info *txinfo;
462 __le32 *data, *old = NULL; 463 __le32 *data, *old = NULL;
463 u32 word, off, addr, len; 464 u32 word, off, addr, len;
464 int i = 0, err = 0; 465 int i = 0, err = 0;
@@ -487,7 +488,13 @@ found:
487 488
488 if (!skb) { 489 if (!skb) {
489 err = -ENOMEM; 490 err = -ENOMEM;
490 goto out_unlock; 491 goto err_free;
492 }
493
494 txinfo = IEEE80211_SKB_CB(skb);
495 if (txinfo->control.rates[0].flags & IEEE80211_TX_RC_MCS) {
496 err = -EINVAL;
497 goto err_free;
491 } 498 }
492 499
493 spin_lock_bh(&ar->beacon_lock); 500 spin_lock_bh(&ar->beacon_lock);
@@ -504,11 +511,8 @@ found:
504 wiphy_err(ar->hw->wiphy, "beacon does not " 511 wiphy_err(ar->hw->wiphy, "beacon does not "
505 "fit into device memory!\n"); 512 "fit into device memory!\n");
506 } 513 }
507
508 spin_unlock_bh(&ar->beacon_lock);
509 dev_kfree_skb_any(skb);
510 err = -EINVAL; 514 err = -EINVAL;
511 goto out_unlock; 515 goto err_unlock;
512 } 516 }
513 517
514 if (len > AR9170_MAC_BCN_LENGTH_MAX) { 518 if (len > AR9170_MAC_BCN_LENGTH_MAX) {
@@ -518,22 +522,22 @@ found:
518 AR9170_MAC_BCN_LENGTH_MAX, len); 522 AR9170_MAC_BCN_LENGTH_MAX, len);
519 } 523 }
520 524
521 spin_unlock_bh(&ar->beacon_lock);
522 dev_kfree_skb_any(skb);
523 err = -EMSGSIZE; 525 err = -EMSGSIZE;
524 goto out_unlock; 526 goto err_unlock;
525 } 527 }
526 528
527 carl9170_async_regwrite_begin(ar); 529 i = txinfo->control.rates[0].idx;
530 if (txinfo->band != IEEE80211_BAND_2GHZ)
531 i += 4;
528 532
529 /* XXX: use skb->cb info */ 533 word = __carl9170_ratetable[i].hw_value & 0xf;
530 if (ar->hw->conf.channel->band == IEEE80211_BAND_2GHZ) { 534 if (i < 4)
531 carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, 535 word |= ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400;
532 ((skb->len + FCS_LEN) << (3 + 16)) + 0x0400); 536 else
533 } else { 537 word |= ((skb->len + FCS_LEN) << 16) + 0x0010;
534 carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, 538
535 ((skb->len + FCS_LEN) << 16) + 0x001b); 539 carl9170_async_regwrite_begin(ar);
536 } 540 carl9170_async_regwrite(AR9170_MAC_REG_BCN_PLCP, word);
537 541
538 for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) { 542 for (i = 0; i < DIV_ROUND_UP(skb->len, 4); i++) {
539 /* 543 /*
@@ -557,7 +561,7 @@ found:
557 cvif->beacon = skb; 561 cvif->beacon = skb;
558 spin_unlock_bh(&ar->beacon_lock); 562 spin_unlock_bh(&ar->beacon_lock);
559 if (err) 563 if (err)
560 goto out_unlock; 564 goto err_free;
561 565
562 if (submit) { 566 if (submit) {
563 err = carl9170_bcn_ctrl(ar, cvif->id, 567 err = carl9170_bcn_ctrl(ar, cvif->id,
@@ -565,10 +569,18 @@ found:
565 addr, skb->len + FCS_LEN); 569 addr, skb->len + FCS_LEN);
566 570
567 if (err) 571 if (err)
568 goto out_unlock; 572 goto err_free;
569 } 573 }
570out_unlock: 574out_unlock:
571 rcu_read_unlock(); 575 rcu_read_unlock();
576 return 0;
577
578err_unlock:
579 spin_unlock_bh(&ar->beacon_lock);
580
581err_free:
582 rcu_read_unlock();
583 dev_kfree_skb_any(skb);
572 return err; 584 return err;
573} 585}
574 586
diff --git a/drivers/net/wireless/ath/carl9170/main.c b/drivers/net/wireless/ath/carl9170/main.c
index dc7b30b170d0..870df8c42622 100644
--- a/drivers/net/wireless/ath/carl9170/main.c
+++ b/drivers/net/wireless/ath/carl9170/main.c
@@ -428,6 +428,7 @@ static void carl9170_cancel_worker(struct ar9170 *ar)
428 cancel_delayed_work_sync(&ar->led_work); 428 cancel_delayed_work_sync(&ar->led_work);
429#endif /* CONFIG_CARL9170_LEDS */ 429#endif /* CONFIG_CARL9170_LEDS */
430 cancel_work_sync(&ar->ps_work); 430 cancel_work_sync(&ar->ps_work);
431 cancel_work_sync(&ar->ping_work);
431 cancel_work_sync(&ar->ampdu_work); 432 cancel_work_sync(&ar->ampdu_work);
432} 433}
433 434
@@ -533,6 +534,21 @@ void carl9170_restart(struct ar9170 *ar, const enum carl9170_restart_reasons r)
533 */ 534 */
534} 535}
535 536
537static void carl9170_ping_work(struct work_struct *work)
538{
539 struct ar9170 *ar = container_of(work, struct ar9170, ping_work);
540 int err;
541
542 if (!IS_STARTED(ar))
543 return;
544
545 mutex_lock(&ar->mutex);
546 err = carl9170_echo_test(ar, 0xdeadbeef);
547 if (err)
548 carl9170_restart(ar, CARL9170_RR_UNRESPONSIVE_DEVICE);
549 mutex_unlock(&ar->mutex);
550}
551
536static int carl9170_init_interface(struct ar9170 *ar, 552static int carl9170_init_interface(struct ar9170 *ar,
537 struct ieee80211_vif *vif) 553 struct ieee80211_vif *vif)
538{ 554{
@@ -1614,6 +1630,7 @@ void *carl9170_alloc(size_t priv_size)
1614 skb_queue_head_init(&ar->tx_pending[i]); 1630 skb_queue_head_init(&ar->tx_pending[i]);
1615 } 1631 }
1616 INIT_WORK(&ar->ps_work, carl9170_ps_work); 1632 INIT_WORK(&ar->ps_work, carl9170_ps_work);
1633 INIT_WORK(&ar->ping_work, carl9170_ping_work);
1617 INIT_WORK(&ar->restart_work, carl9170_restart_work); 1634 INIT_WORK(&ar->restart_work, carl9170_restart_work);
1618 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work); 1635 INIT_WORK(&ar->ampdu_work, carl9170_ampdu_work);
1619 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor); 1636 INIT_DELAYED_WORK(&ar->tx_janitor, carl9170_tx_janitor);
@@ -1829,7 +1846,7 @@ int carl9170_register(struct ar9170 *ar)
1829 err = carl9170_led_register(ar); 1846 err = carl9170_led_register(ar);
1830 if (err) 1847 if (err)
1831 goto err_unreg; 1848 goto err_unreg;
1832#endif /* CONFIG_CAR9L170_LEDS */ 1849#endif /* CONFIG_CARL9170_LEDS */
1833 1850
1834#ifdef CONFIG_CARL9170_WPC 1851#ifdef CONFIG_CARL9170_WPC
1835 err = carl9170_register_wps_button(ar); 1852 err = carl9170_register_wps_button(ar);
diff --git a/drivers/net/wireless/ath/carl9170/phy.c b/drivers/net/wireless/ath/carl9170/phy.c
index 89deca37a988..b6b0de600506 100644
--- a/drivers/net/wireless/ath/carl9170/phy.c
+++ b/drivers/net/wireless/ath/carl9170/phy.c
@@ -1029,8 +1029,6 @@ static int carl9170_init_rf_bank4_pwr(struct ar9170 *ar, bool band5ghz,
1029 if (err) 1029 if (err)
1030 return err; 1030 return err;
1031 1031
1032 msleep(20);
1033
1034 return 0; 1032 return 0;
1035} 1033}
1036 1034
@@ -1554,15 +1552,6 @@ static int carl9170_set_power_cal(struct ar9170 *ar, u32 freq,
1554 return carl9170_regwrite_result(); 1552 return carl9170_regwrite_result();
1555} 1553}
1556 1554
1557/* TODO: replace this with sign_extend32(noise, 8) */
1558static int carl9170_calc_noise_dbm(u32 raw_noise)
1559{
1560 if (raw_noise & 0x100)
1561 return ~0x1ff | raw_noise;
1562 else
1563 return raw_noise;
1564}
1565
1566int carl9170_get_noisefloor(struct ar9170 *ar) 1555int carl9170_get_noisefloor(struct ar9170 *ar)
1567{ 1556{
1568 static const u32 phy_regs[] = { 1557 static const u32 phy_regs[] = {
@@ -1578,11 +1567,11 @@ int carl9170_get_noisefloor(struct ar9170 *ar)
1578 return err; 1567 return err;
1579 1568
1580 for (i = 0; i < 2; i++) { 1569 for (i = 0; i < 2; i++) {
1581 ar->noise[i] = carl9170_calc_noise_dbm( 1570 ar->noise[i] = sign_extend32(GET_VAL(
1582 (phy_res[i] >> 19) & 0x1ff); 1571 AR9170_PHY_CCA_MIN_PWR, phy_res[i]), 8);
1583 1572
1584 ar->noise[i + 2] = carl9170_calc_noise_dbm( 1573 ar->noise[i + 2] = sign_extend32(GET_VAL(
1585 (phy_res[i + 2] >> 23) & 0x1ff); 1574 AR9170_PHY_EXT_CCA_MIN_PWR, phy_res[i + 2]), 8);
1586 } 1575 }
1587 1576
1588 return 0; 1577 return 0;
@@ -1669,12 +1658,6 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1669 return err; 1658 return err;
1670 1659
1671 cmd = CARL9170_CMD_RF_INIT; 1660 cmd = CARL9170_CMD_RF_INIT;
1672
1673 msleep(100);
1674
1675 err = carl9170_echo_test(ar, 0xaabbccdd);
1676 if (err)
1677 return err;
1678 } else { 1661 } else {
1679 cmd = CARL9170_CMD_FREQUENCY; 1662 cmd = CARL9170_CMD_FREQUENCY;
1680 } 1663 }
@@ -1685,6 +1668,8 @@ int carl9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1685 1668
1686 err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE, 1669 err = carl9170_write_reg(ar, AR9170_PHY_REG_HEAVY_CLIP_ENABLE,
1687 0x200); 1670 0x200);
1671 if (err)
1672 return err;
1688 1673
1689 err = carl9170_init_rf_bank4_pwr(ar, 1674 err = carl9170_init_rf_bank4_pwr(ar,
1690 channel->band == IEEE80211_BAND_5GHZ, 1675 channel->band == IEEE80211_BAND_5GHZ,
diff --git a/drivers/net/wireless/ath/carl9170/phy.h b/drivers/net/wireless/ath/carl9170/phy.h
index 02c34eb4ebde..024fb42bc787 100644
--- a/drivers/net/wireless/ath/carl9170/phy.h
+++ b/drivers/net/wireless/ath/carl9170/phy.h
@@ -139,8 +139,8 @@
139#define AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000 139#define AR9170_PHY_AGC_CONTROL_NO_UPDATE_NF 0x00020000
140 140
141#define AR9170_PHY_REG_CCA (AR9170_PHY_REG_BASE + 0x0064) 141#define AR9170_PHY_REG_CCA (AR9170_PHY_REG_BASE + 0x0064)
142#define AR9170_PHY_CCA_MINCCA_PWR 0x0ff80000 142#define AR9170_PHY_CCA_MIN_PWR 0x0ff80000
143#define AR9170_PHY_CCA_MINCCA_PWR_S 19 143#define AR9170_PHY_CCA_MIN_PWR_S 19
144#define AR9170_PHY_CCA_THRESH62 0x0007f000 144#define AR9170_PHY_CCA_THRESH62 0x0007f000
145#define AR9170_PHY_CCA_THRESH62_S 12 145#define AR9170_PHY_CCA_THRESH62_S 12
146 146
@@ -338,8 +338,8 @@
338#define AR9170_PHY_EXT_CCA_CYCPWR_THR1_S 9 338#define AR9170_PHY_EXT_CCA_CYCPWR_THR1_S 9
339#define AR9170_PHY_EXT_CCA_THRESH62 0x007f0000 339#define AR9170_PHY_EXT_CCA_THRESH62 0x007f0000
340#define AR9170_PHY_EXT_CCA_THRESH62_S 16 340#define AR9170_PHY_EXT_CCA_THRESH62_S 16
341#define AR9170_PHY_EXT_MINCCA_PWR 0xff800000 341#define AR9170_PHY_EXT_CCA_MIN_PWR 0xff800000
342#define AR9170_PHY_EXT_MINCCA_PWR_S 23 342#define AR9170_PHY_EXT_CCA_MIN_PWR_S 23
343 343
344#define AR9170_PHY_REG_SFCORR_EXT (AR9170_PHY_REG_BASE + 0x01c0) 344#define AR9170_PHY_REG_SFCORR_EXT (AR9170_PHY_REG_BASE + 0x01c0)
345#define AR9170_PHY_SFCORR_EXT_M1_THRESH 0x0000007f 345#define AR9170_PHY_SFCORR_EXT_M1_THRESH 0x0000007f
@@ -546,19 +546,19 @@
546#define AR9170_PHY_FORCE_XPA_CFG_S 0 546#define AR9170_PHY_FORCE_XPA_CFG_S 0
547 547
548#define AR9170_PHY_REG_CH1_CCA (AR9170_PHY_REG_BASE + 0x1064) 548#define AR9170_PHY_REG_CH1_CCA (AR9170_PHY_REG_BASE + 0x1064)
549#define AR9170_PHY_CH1_MINCCA_PWR 0x0ff80000 549#define AR9170_PHY_CH1_CCA_MIN_PWR 0x0ff80000
550#define AR9170_PHY_CH1_MINCCA_PWR_S 19 550#define AR9170_PHY_CH1_CCA_MIN_PWR_S 19
551 551
552#define AR9170_PHY_REG_CH2_CCA (AR9170_PHY_REG_BASE + 0x2064) 552#define AR9170_PHY_REG_CH2_CCA (AR9170_PHY_REG_BASE + 0x2064)
553#define AR9170_PHY_CH2_MINCCA_PWR 0x0ff80000 553#define AR9170_PHY_CH2_CCA_MIN_PWR 0x0ff80000
554#define AR9170_PHY_CH2_MINCCA_PWR_S 19 554#define AR9170_PHY_CH2_CCA_MIN_PWR_S 19
555 555
556#define AR9170_PHY_REG_CH1_EXT_CCA (AR9170_PHY_REG_BASE + 0x11bc) 556#define AR9170_PHY_REG_CH1_EXT_CCA (AR9170_PHY_REG_BASE + 0x11bc)
557#define AR9170_PHY_CH1_EXT_MINCCA_PWR 0xff800000 557#define AR9170_PHY_CH1_EXT_CCA_MIN_PWR 0xff800000
558#define AR9170_PHY_CH1_EXT_MINCCA_PWR_S 23 558#define AR9170_PHY_CH1_EXT_CCA_MIN_PWR_S 23
559 559
560#define AR9170_PHY_REG_CH2_EXT_CCA (AR9170_PHY_REG_BASE + 0x21bc) 560#define AR9170_PHY_REG_CH2_EXT_CCA (AR9170_PHY_REG_BASE + 0x21bc)
561#define AR9170_PHY_CH2_EXT_MINCCA_PWR 0xff800000 561#define AR9170_PHY_CH2_EXT_CCA_MIN_PWR 0xff800000
562#define AR9170_PHY_CH2_EXT_MINCCA_PWR_S 23 562#define AR9170_PHY_CH2_EXT_CCA_MIN_PWR_S 23
563 563
564#endif /* __CARL9170_SHARED_PHY_H */ 564#endif /* __CARL9170_SHARED_PHY_H */
diff --git a/drivers/net/wireless/ath/carl9170/tx.c b/drivers/net/wireless/ath/carl9170/tx.c
index 7e6506a77bbb..6cc58e052d10 100644
--- a/drivers/net/wireless/ath/carl9170/tx.c
+++ b/drivers/net/wireless/ath/carl9170/tx.c
@@ -242,9 +242,11 @@ static void carl9170_tx_release(struct kref *ref)
242 ar->tx_ampdu_schedule = true; 242 ar->tx_ampdu_schedule = true;
243 243
244 if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) { 244 if (txinfo->flags & IEEE80211_TX_STAT_AMPDU) {
245 txinfo->status.ampdu_len = txinfo->pad[0]; 245 struct _carl9170_tx_superframe *super;
246 txinfo->status.ampdu_ack_len = txinfo->pad[1]; 246
247 txinfo->pad[0] = txinfo->pad[1] = 0; 247 super = (void *)skb->data;
248 txinfo->status.ampdu_len = super->s.rix;
249 txinfo->status.ampdu_ack_len = super->s.cnt;
248 } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) { 250 } else if (txinfo->flags & IEEE80211_TX_STAT_ACK) {
249 /* 251 /*
250 * drop redundant tx_status reports: 252 * drop redundant tx_status reports:
@@ -337,7 +339,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
337 u8 tid; 339 u8 tid;
338 340
339 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) || 341 if (!(txinfo->flags & IEEE80211_TX_CTL_AMPDU) ||
340 txinfo->flags & IEEE80211_TX_CTL_INJECTED) 342 txinfo->flags & IEEE80211_TX_CTL_INJECTED ||
343 (!(super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_AGGR))))
341 return; 344 return;
342 345
343 tx_info = IEEE80211_SKB_CB(skb); 346 tx_info = IEEE80211_SKB_CB(skb);
@@ -389,8 +392,8 @@ static void carl9170_tx_status_process_ampdu(struct ar9170 *ar,
389 sta_info->stats[tid].ampdu_ack_len++; 392 sta_info->stats[tid].ampdu_ack_len++;
390 393
391 if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) { 394 if (super->f.mac_control & cpu_to_le16(AR9170_TX_MAC_IMM_BA)) {
392 txinfo->pad[0] = sta_info->stats[tid].ampdu_len; 395 super->s.rix = sta_info->stats[tid].ampdu_len;
393 txinfo->pad[1] = sta_info->stats[tid].ampdu_ack_len; 396 super->s.cnt = sta_info->stats[tid].ampdu_ack_len;
394 txinfo->flags |= IEEE80211_TX_STAT_AMPDU; 397 txinfo->flags |= IEEE80211_TX_STAT_AMPDU;
395 sta_info->stats[tid].clear = true; 398 sta_info->stats[tid].clear = true;
396 } 399 }
@@ -524,6 +527,59 @@ next:
524 } 527 }
525} 528}
526 529
530static void carl9170_tx_ampdu_timeout(struct ar9170 *ar)
531{
532 struct carl9170_sta_tid *iter;
533 struct sk_buff *skb;
534 struct ieee80211_tx_info *txinfo;
535 struct carl9170_tx_info *arinfo;
536 struct _carl9170_tx_superframe *super;
537 struct ieee80211_sta *sta;
538 struct ieee80211_vif *vif;
539 struct ieee80211_hdr *hdr;
540 unsigned int vif_id;
541
542 rcu_read_lock();
543 list_for_each_entry_rcu(iter, &ar->tx_ampdu_list, list) {
544 if (iter->state < CARL9170_TID_STATE_IDLE)
545 continue;
546
547 spin_lock_bh(&iter->lock);
548 skb = skb_peek(&iter->queue);
549 if (!skb)
550 goto unlock;
551
552 txinfo = IEEE80211_SKB_CB(skb);
553 arinfo = (void *)txinfo->rate_driver_data;
554 if (time_is_after_jiffies(arinfo->timeout +
555 msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT)))
556 goto unlock;
557
558 super = (void *) skb->data;
559 hdr = (void *) super->frame_data;
560
561 vif_id = (super->s.misc & CARL9170_TX_SUPER_MISC_VIF_ID) >>
562 CARL9170_TX_SUPER_MISC_VIF_ID_S;
563
564 if (WARN_ON(vif_id >= AR9170_MAX_VIRTUAL_MAC))
565 goto unlock;
566
567 vif = rcu_dereference(ar->vif_priv[vif_id].vif);
568 if (WARN_ON(!vif))
569 goto unlock;
570
571 sta = ieee80211_find_sta(vif, hdr->addr1);
572 if (WARN_ON(!sta))
573 goto unlock;
574
575 ieee80211_stop_tx_ba_session(sta, iter->tid);
576unlock:
577 spin_unlock_bh(&iter->lock);
578
579 }
580 rcu_read_unlock();
581}
582
527void carl9170_tx_janitor(struct work_struct *work) 583void carl9170_tx_janitor(struct work_struct *work)
528{ 584{
529 struct ar9170 *ar = container_of(work, struct ar9170, 585 struct ar9170 *ar = container_of(work, struct ar9170,
@@ -534,6 +590,7 @@ void carl9170_tx_janitor(struct work_struct *work)
534 ar->tx_janitor_last_run = jiffies; 590 ar->tx_janitor_last_run = jiffies;
535 591
536 carl9170_check_queue_stop_timeout(ar); 592 carl9170_check_queue_stop_timeout(ar);
593 carl9170_tx_ampdu_timeout(ar);
537 594
538 if (!atomic_read(&ar->tx_total_queued)) 595 if (!atomic_read(&ar->tx_total_queued))
539 return; 596 return;
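carl9170_tx_ampdu_timeout(), now called from the TX janitor, only looks at the head of each TID queue: if the oldest frame's timestamp plus CARL9170_QUEUE_TIMEOUT is still in the future the queue is considered healthy, otherwise the BA session is torn down. A compressed sketch of that staleness test (the field and constant names come from the hunk above; the wrapper function itself is illustrative only):

#include <linux/jiffies.h>

/* Illustrative only: true once the oldest queued frame has waited longer
 * than CARL9170_QUEUE_TIMEOUT milliseconds. time_is_after_jiffies(t) is
 * the kernel idiom for "t still lies in the future". */
static bool carl9170_tid_is_stale(const struct carl9170_tx_info *arinfo)
{
	unsigned long deadline = arinfo->timeout +
				 msecs_to_jiffies(CARL9170_QUEUE_TIMEOUT);

	return !time_is_after_jiffies(deadline);	/* deadline already passed */
}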
@@ -842,10 +899,8 @@ static int carl9170_tx_prepare(struct ar9170 *ar, struct sk_buff *skb)
842 if (unlikely(!sta || !cvif)) 899 if (unlikely(!sta || !cvif))
843 goto err_out; 900 goto err_out;
844 901
845 factor = min_t(unsigned int, 1u, 902 factor = min_t(unsigned int, 1u, sta->ht_cap.ampdu_factor);
846 info->control.sta->ht_cap.ampdu_factor); 903 density = sta->ht_cap.ampdu_density;
847
848 density = info->control.sta->ht_cap.ampdu_density;
849 904
850 if (density) { 905 if (density) {
851 /* 906 /*
@@ -1206,6 +1261,7 @@ static void carl9170_tx(struct ar9170 *ar)
1206static bool carl9170_tx_ampdu_queue(struct ar9170 *ar, 1261static bool carl9170_tx_ampdu_queue(struct ar9170 *ar,
1207 struct ieee80211_sta *sta, struct sk_buff *skb) 1262 struct ieee80211_sta *sta, struct sk_buff *skb)
1208{ 1263{
1264 struct _carl9170_tx_superframe *super = (void *) skb->data;
1209 struct carl9170_sta_info *sta_info; 1265 struct carl9170_sta_info *sta_info;
1210 struct carl9170_sta_tid *agg; 1266 struct carl9170_sta_tid *agg;
1211 struct sk_buff *iter; 1267 struct sk_buff *iter;
@@ -1274,6 +1330,7 @@ err_unlock:
1274 1330
1275err_unlock_rcu: 1331err_unlock_rcu:
1276 rcu_read_unlock(); 1332 rcu_read_unlock();
1333 super->f.mac_control &= ~cpu_to_le16(AR9170_TX_MAC_AGGR);
1277 carl9170_tx_status(ar, skb, false); 1334 carl9170_tx_status(ar, skb, false);
1278 ar->tx_dropped++; 1335 ar->tx_dropped++;
1279 return false; 1336 return false;
@@ -1302,9 +1359,6 @@ int carl9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1302 */ 1359 */
1303 1360
1304 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1361 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1305 if (WARN_ON_ONCE(!sta))
1306 goto err_free;
1307
1308 run = carl9170_tx_ampdu_queue(ar, sta, skb); 1362 run = carl9170_tx_ampdu_queue(ar, sta, skb);
1309 if (run) 1363 if (run)
1310 carl9170_tx_ampdu(ar); 1364 carl9170_tx_ampdu(ar);
diff --git a/drivers/net/wireless/ath/carl9170/usb.c b/drivers/net/wireless/ath/carl9170/usb.c
index 7504ed14c725..537732e5964f 100644
--- a/drivers/net/wireless/ath/carl9170/usb.c
+++ b/drivers/net/wireless/ath/carl9170/usb.c
@@ -160,8 +160,7 @@ err_acc:
160 160
161static void carl9170_usb_tx_data_complete(struct urb *urb) 161static void carl9170_usb_tx_data_complete(struct urb *urb)
162{ 162{
163 struct ar9170 *ar = (struct ar9170 *) 163 struct ar9170 *ar = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
164 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
165 164
166 if (WARN_ON_ONCE(!ar)) { 165 if (WARN_ON_ONCE(!ar)) {
167 dev_kfree_skb_irq(urb->context); 166 dev_kfree_skb_irq(urb->context);
@@ -433,7 +432,7 @@ static void carl9170_usb_rx_complete(struct urb *urb)
433 * device. 432 * device.
434 */ 433 */
435 434
436 carl9170_restart(ar, CARL9170_RR_SLOW_SYSTEM); 435 ieee80211_queue_work(ar->hw, &ar->ping_work);
437 } 436 }
438 } else { 437 } else {
439 /* 438 /*
@@ -835,7 +834,7 @@ static int carl9170_usb_load_firmware(struct ar9170 *ar)
835 if (err) 834 if (err)
836 goto err_out; 835 goto err_out;
837 836
838 /* firmware restarts cmd counter */ 837 /* now, start the command response counter */
839 ar->cmd_seq = -1; 838 ar->cmd_seq = -1;
840 839
841 return 0; 840 return 0;
@@ -852,7 +851,12 @@ int carl9170_usb_restart(struct ar9170 *ar)
852 if (ar->intf->condition != USB_INTERFACE_BOUND) 851 if (ar->intf->condition != USB_INTERFACE_BOUND)
853 return 0; 852 return 0;
854 853
855 /* Disable command response sequence counter. */ 854 /*
855 * Disable the command response sequence counter check.
856 * We already know that the device/firmware is in a bad state.
857 * So, no extra points are awarded to anyone who reminds the
858 * driver about that.
859 */
856 ar->cmd_seq = -2; 860 ar->cmd_seq = -2;
857 861
858 err = carl9170_reboot(ar); 862 err = carl9170_reboot(ar);
@@ -904,6 +908,15 @@ static int carl9170_usb_init_device(struct ar9170 *ar)
904{ 908{
905 int err; 909 int err;
906 910
911 /*
 912 * The carl9170 firmware lets the driver know when it's
913 * ready for action. But we have to be prepared to gracefully
914 * handle all spurious [flushed] messages after each (re-)boot.
915 * Thus the command response counter remains disabled until it
916 * can be safely synchronized.
917 */
918 ar->cmd_seq = -2;
919
907 err = carl9170_usb_send_rx_irq_urb(ar); 920 err = carl9170_usb_send_rx_irq_urb(ar);
908 if (err) 921 if (err)
909 goto err_out; 922 goto err_out;
@@ -912,14 +925,21 @@ static int carl9170_usb_init_device(struct ar9170 *ar)
912 if (err) 925 if (err)
913 goto err_unrx; 926 goto err_unrx;
914 927
928 err = carl9170_usb_open(ar);
929 if (err)
930 goto err_unrx;
931
915 mutex_lock(&ar->mutex); 932 mutex_lock(&ar->mutex);
916 err = carl9170_usb_load_firmware(ar); 933 err = carl9170_usb_load_firmware(ar);
917 mutex_unlock(&ar->mutex); 934 mutex_unlock(&ar->mutex);
918 if (err) 935 if (err)
919 goto err_unrx; 936 goto err_stop;
920 937
921 return 0; 938 return 0;
922 939
940err_stop:
941 carl9170_usb_stop(ar);
942
923err_unrx: 943err_unrx:
924 carl9170_usb_cancel_urbs(ar); 944 carl9170_usb_cancel_urbs(ar);
925 945
@@ -965,10 +985,6 @@ static void carl9170_usb_firmware_finish(struct ar9170 *ar)
965 if (err) 985 if (err)
966 goto err_freefw; 986 goto err_freefw;
967 987
968 err = carl9170_usb_open(ar);
969 if (err)
970 goto err_unrx;
971
972 err = carl9170_register(ar); 988 err = carl9170_register(ar);
973 989
974 carl9170_usb_stop(ar); 990 carl9170_usb_stop(ar);
@@ -1044,7 +1060,6 @@ static int carl9170_usb_probe(struct usb_interface *intf,
1044 atomic_set(&ar->rx_work_urbs, 0); 1060 atomic_set(&ar->rx_work_urbs, 0);
1045 atomic_set(&ar->rx_anch_urbs, 0); 1061 atomic_set(&ar->rx_anch_urbs, 0);
1046 atomic_set(&ar->rx_pool_urbs, 0); 1062 atomic_set(&ar->rx_pool_urbs, 0);
1047 ar->cmd_seq = -2;
1048 1063
1049 usb_get_dev(ar->udev); 1064 usb_get_dev(ar->udev);
1050 1065
@@ -1091,10 +1106,6 @@ static int carl9170_usb_suspend(struct usb_interface *intf,
1091 1106
1092 carl9170_usb_cancel_urbs(ar); 1107 carl9170_usb_cancel_urbs(ar);
1093 1108
1094 /*
1095 * firmware automatically reboots for usb suspend.
1096 */
1097
1098 return 0; 1109 return 0;
1099} 1110}
1100 1111
@@ -1107,12 +1118,20 @@ static int carl9170_usb_resume(struct usb_interface *intf)
1107 return -ENODEV; 1118 return -ENODEV;
1108 1119
1109 usb_unpoison_anchored_urbs(&ar->rx_anch); 1120 usb_unpoison_anchored_urbs(&ar->rx_anch);
1121 carl9170_set_state(ar, CARL9170_STOPPED);
1110 1122
1111 err = carl9170_usb_init_device(ar); 1123 /*
1112 if (err) 1124 * The USB documentation demands that [for suspend] all traffic
 1113 goto err_unrx; 1125 * to and from the device stop. This would be fine, but
1126 * there's a catch: the device[usb phy] does not come back.
1127 *
1128 * Upon resume the firmware will "kill" itself and the
1129 * boot-code sorts out the magic voodoo.
 1130 * Not very nice, but there's not much that could go wrong.
1131 */
1132 msleep(1100);
1114 1133
1115 err = carl9170_usb_open(ar); 1134 err = carl9170_usb_init_device(ar);
1116 if (err) 1135 if (err)
1117 goto err_unrx; 1136 goto err_unrx;
1118 1137
@@ -1134,6 +1153,7 @@ static struct usb_driver carl9170_driver = {
1134#ifdef CONFIG_PM 1153#ifdef CONFIG_PM
1135 .suspend = carl9170_usb_suspend, 1154 .suspend = carl9170_usb_suspend,
1136 .resume = carl9170_usb_resume, 1155 .resume = carl9170_usb_resume,
1156 .reset_resume = carl9170_usb_resume,
1137#endif /* CONFIG_PM */ 1157#endif /* CONFIG_PM */
1138}; 1158};
1139 1159
diff --git a/drivers/net/wireless/ath/carl9170/version.h b/drivers/net/wireless/ath/carl9170/version.h
index ff53f078a0b5..ee0f84f2a2f6 100644
--- a/drivers/net/wireless/ath/carl9170/version.h
+++ b/drivers/net/wireless/ath/carl9170/version.h
@@ -1,7 +1,7 @@
1#ifndef __CARL9170_SHARED_VERSION_H 1#ifndef __CARL9170_SHARED_VERSION_H
2#define __CARL9170_SHARED_VERSION_H 2#define __CARL9170_SHARED_VERSION_H
3#define CARL9170FW_VERSION_YEAR 10 3#define CARL9170FW_VERSION_YEAR 10
4#define CARL9170FW_VERSION_MONTH 9 4#define CARL9170FW_VERSION_MONTH 10
5#define CARL9170FW_VERSION_DAY 28 5#define CARL9170FW_VERSION_DAY 29
6#define CARL9170FW_VERSION_GIT "1.8.8.3" 6#define CARL9170FW_VERSION_GIT "1.9.0"
7#endif /* __CARL9170_SHARED_VERSION_H */ 7#endif /* __CARL9170_SHARED_VERSION_H */
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
index dacfb234f491..5367b1086e09 100644
--- a/drivers/net/wireless/ath/debug.c
+++ b/drivers/net/wireless/ath/debug.c
@@ -15,21 +15,6 @@
15 */ 15 */
16 16
17#include "ath.h" 17#include "ath.h"
18#include "debug.h"
19
20void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
21{
22 va_list args;
23
24 if (likely(!(common->debug_mask & dbg_mask)))
25 return;
26
27 va_start(args, fmt);
28 printk(KERN_DEBUG "ath: ");
29 vprintk(fmt, args);
30 va_end(args);
31}
32EXPORT_SYMBOL(ath_print);
33 18
34const char *ath_opmode_to_string(enum nl80211_iftype opmode) 19const char *ath_opmode_to_string(enum nl80211_iftype opmode)
35{ 20{
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
deleted file mode 100644
index 64e4af2c2887..000000000000
--- a/drivers/net/wireless/ath/debug.h
+++ /dev/null
@@ -1,90 +0,0 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH_DEBUG_H
18#define ATH_DEBUG_H
19
20#include "ath.h"
21
22/**
23 * enum ath_debug_level - atheros wireless debug level
24 *
25 * @ATH_DBG_RESET: reset processing
26 * @ATH_DBG_QUEUE: hardware queue management
27 * @ATH_DBG_EEPROM: eeprom processing
28 * @ATH_DBG_CALIBRATE: periodic calibration
29 * @ATH_DBG_INTERRUPT: interrupt processing
30 * @ATH_DBG_REGULATORY: regulatory processing
31 * @ATH_DBG_ANI: adaptive noise immunitive processing
32 * @ATH_DBG_XMIT: basic xmit operation
33 * @ATH_DBG_BEACON: beacon handling
34 * @ATH_DBG_CONFIG: configuration of the hardware
35 * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
36 * @ATH_DBG_PS: power save processing
37 * @ATH_DBG_HWTIMER: hardware timer handling
38 * @ATH_DBG_BTCOEX: bluetooth coexistance
39 * @ATH_DBG_BSTUCK: stuck beacons
40 * @ATH_DBG_ANY: enable all debugging
41 *
42 * The debug level is used to control the amount and type of debugging output
43 * we want to see. Each driver has its own method for enabling debugging and
44 * modifying debug level states -- but this is typically done through a
45 * module parameter 'debug' along with a respective 'debug' debugfs file
46 * entry.
47 */
48enum ATH_DEBUG {
49 ATH_DBG_RESET = 0x00000001,
50 ATH_DBG_QUEUE = 0x00000002,
51 ATH_DBG_EEPROM = 0x00000004,
52 ATH_DBG_CALIBRATE = 0x00000008,
53 ATH_DBG_INTERRUPT = 0x00000010,
54 ATH_DBG_REGULATORY = 0x00000020,
55 ATH_DBG_ANI = 0x00000040,
56 ATH_DBG_XMIT = 0x00000080,
57 ATH_DBG_BEACON = 0x00000100,
58 ATH_DBG_CONFIG = 0x00000200,
59 ATH_DBG_FATAL = 0x00000400,
60 ATH_DBG_PS = 0x00000800,
61 ATH_DBG_HWTIMER = 0x00001000,
62 ATH_DBG_BTCOEX = 0x00002000,
63 ATH_DBG_WMI = 0x00004000,
64 ATH_DBG_BSTUCK = 0x00008000,
65 ATH_DBG_ANY = 0xffffffff
66};
67
68#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
69
70#ifdef CONFIG_ATH_DEBUG
71void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
72 __attribute__ ((format (printf, 3, 4)));
73#else
74static inline void __attribute__ ((format (printf, 3, 4)))
75ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
76{
77}
78#endif /* CONFIG_ATH_DEBUG */
79
80/** Returns string describing opmode, or NULL if unknown mode. */
81#ifdef CONFIG_ATH_DEBUG
82const char *ath_opmode_to_string(enum nl80211_iftype opmode);
83#else
84static inline const char *ath_opmode_to_string(enum nl80211_iftype opmode)
85{
86 return "UNKNOWN";
87}
88#endif
89
90#endif /* ATH_DEBUG_H */
diff --git a/drivers/net/wireless/ath/key.c b/drivers/net/wireless/ath/key.c
index bd21a4d82085..5d465e5fcf24 100644
--- a/drivers/net/wireless/ath/key.c
+++ b/drivers/net/wireless/ath/key.c
@@ -20,7 +20,6 @@
20 20
21#include "ath.h" 21#include "ath.h"
22#include "reg.h" 22#include "reg.h"
23#include "debug.h"
24 23
25#define REG_READ (common->ops->read) 24#define REG_READ (common->ops->read)
26#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg) 25#define REG_WRITE(_ah, _reg, _val) (common->ops->write)(_ah, _val, _reg)
@@ -37,8 +36,7 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
37 void *ah = common->ah; 36 void *ah = common->ah;
38 37
39 if (entry >= common->keymax) { 38 if (entry >= common->keymax) {
40 ath_print(common, ATH_DBG_FATAL, 39 ath_err(common, "keycache entry %u out of range\n", entry);
41 "keychache entry %u out of range\n", entry);
42 return false; 40 return false;
43 } 41 }
44 42
@@ -60,6 +58,8 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
60 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0); 58 REG_WRITE(ah, AR_KEYTABLE_KEY1(micentry), 0);
61 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0); 59 REG_WRITE(ah, AR_KEYTABLE_KEY2(micentry), 0);
62 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0); 60 REG_WRITE(ah, AR_KEYTABLE_KEY3(micentry), 0);
61 if (common->crypt_caps & ATH_CRYPT_CAP_MIC_COMBINED)
62 REG_WRITE(ah, AR_KEYTABLE_KEY4(micentry), 0);
63 63
64 } 64 }
65 65
@@ -67,15 +67,15 @@ bool ath_hw_keyreset(struct ath_common *common, u16 entry)
67} 67}
68EXPORT_SYMBOL(ath_hw_keyreset); 68EXPORT_SYMBOL(ath_hw_keyreset);
69 69
70bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac) 70static bool ath_hw_keysetmac(struct ath_common *common,
71 u16 entry, const u8 *mac)
71{ 72{
72 u32 macHi, macLo; 73 u32 macHi, macLo;
73 u32 unicast_flag = AR_KEYTABLE_VALID; 74 u32 unicast_flag = AR_KEYTABLE_VALID;
74 void *ah = common->ah; 75 void *ah = common->ah;
75 76
76 if (entry >= common->keymax) { 77 if (entry >= common->keymax) {
77 ath_print(common, ATH_DBG_FATAL, 78 ath_err(common, "keycache entry %u out of range\n", entry);
78 "keychache entry %u out of range\n", entry);
79 return false; 79 return false;
80 } 80 }
81 81
@@ -107,17 +107,16 @@ bool ath_hw_keysetmac(struct ath_common *common, u16 entry, const u8 *mac)
107 return true; 107 return true;
108} 108}
109 109
110bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry, 110static bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
111 const struct ath_keyval *k, 111 const struct ath_keyval *k,
112 const u8 *mac) 112 const u8 *mac)
113{ 113{
114 void *ah = common->ah; 114 void *ah = common->ah;
115 u32 key0, key1, key2, key3, key4; 115 u32 key0, key1, key2, key3, key4;
116 u32 keyType; 116 u32 keyType;
117 117
118 if (entry >= common->keymax) { 118 if (entry >= common->keymax) {
119 ath_print(common, ATH_DBG_FATAL, 119 ath_err(common, "keycache entry %u out of range\n", entry);
120 "keycache entry %u out of range\n", entry);
121 return false; 120 return false;
122 } 121 }
123 122
@@ -127,8 +126,8 @@ bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
127 break; 126 break;
128 case ATH_CIPHER_AES_CCM: 127 case ATH_CIPHER_AES_CCM:
129 if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) { 128 if (!(common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)) {
130 ath_print(common, ATH_DBG_ANY, 129 ath_dbg(common, ATH_DBG_ANY,
131 "AES-CCM not supported by this mac rev\n"); 130 "AES-CCM not supported by this mac rev\n");
132 return false; 131 return false;
133 } 132 }
134 keyType = AR_KEYTABLE_TYPE_CCM; 133 keyType = AR_KEYTABLE_TYPE_CCM;
@@ -136,15 +135,15 @@ bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
136 case ATH_CIPHER_TKIP: 135 case ATH_CIPHER_TKIP:
137 keyType = AR_KEYTABLE_TYPE_TKIP; 136 keyType = AR_KEYTABLE_TYPE_TKIP;
138 if (entry + 64 >= common->keymax) { 137 if (entry + 64 >= common->keymax) {
139 ath_print(common, ATH_DBG_ANY, 138 ath_dbg(common, ATH_DBG_ANY,
140 "entry %u inappropriate for TKIP\n", entry); 139 "entry %u inappropriate for TKIP\n", entry);
141 return false; 140 return false;
142 } 141 }
143 break; 142 break;
144 case ATH_CIPHER_WEP: 143 case ATH_CIPHER_WEP:
145 if (k->kv_len < WLAN_KEY_LEN_WEP40) { 144 if (k->kv_len < WLAN_KEY_LEN_WEP40) {
146 ath_print(common, ATH_DBG_ANY, 145 ath_dbg(common, ATH_DBG_ANY,
147 "WEP key length %u too small\n", k->kv_len); 146 "WEP key length %u too small\n", k->kv_len);
148 return false; 147 return false;
149 } 148 }
150 if (k->kv_len <= WLAN_KEY_LEN_WEP40) 149 if (k->kv_len <= WLAN_KEY_LEN_WEP40)
@@ -158,8 +157,7 @@ bool ath_hw_set_keycache_entry(struct ath_common *common, u16 entry,
158 keyType = AR_KEYTABLE_TYPE_CLR; 157 keyType = AR_KEYTABLE_TYPE_CLR;
159 break; 158 break;
160 default: 159 default:
161 ath_print(common, ATH_DBG_FATAL, 160 ath_err(common, "cipher %u not supported\n", k->kv_type);
162 "cipher %u not supported\n", k->kv_type);
163 return false; 161 return false;
164 } 162 }
165 163
@@ -340,8 +338,7 @@ static int ath_setkey_tkip(struct ath_common *common, u16 keyix, const u8 *key,
340 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); 338 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
341 if (!ath_hw_set_keycache_entry(common, keyix, hk, NULL)) { 339 if (!ath_hw_set_keycache_entry(common, keyix, hk, NULL)) {
342 /* TX MIC entry failed. No need to proceed further */ 340 /* TX MIC entry failed. No need to proceed further */
343 ath_print(common, ATH_DBG_FATAL, 341 ath_err(common, "Setting TX MIC Key Failed\n");
344 "Setting TX MIC Key Failed\n");
345 return 0; 342 return 0;
346 } 343 }
347 344
diff --git a/drivers/net/wireless/ath/main.c b/drivers/net/wireless/ath/main.c
index 487193f1de1a..c325202fdc5f 100644
--- a/drivers/net/wireless/ath/main.c
+++ b/drivers/net/wireless/ath/main.c
@@ -56,3 +56,23 @@ struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
56 return skb; 56 return skb;
57} 57}
58EXPORT_SYMBOL(ath_rxbuf_alloc); 58EXPORT_SYMBOL(ath_rxbuf_alloc);
59
60int ath_printk(const char *level, struct ath_common *common,
61 const char *fmt, ...)
62{
63 struct va_format vaf;
64 va_list args;
65 int rtn;
66
67 va_start(args, fmt);
68
69 vaf.fmt = fmt;
70 vaf.va = &args;
71
72 rtn = printk("%sath: %pV", level, &vaf);
73
74 va_end(args);
75
76 return rtn;
77}
78EXPORT_SYMBOL(ath_printk);
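ath_printk() leans on printk's %pV / struct va_format extension so that a single exported worker serves every log level. The per-level helpers used elsewhere in this series (e.g. ath_err() in key.c) are not part of this hunk; presumably they are thin wrappers along these lines (a sketch, not the actual ath.h definitions):

/* Assumed convenience wrappers around ath_printk(); illustrative only. */
#define ath_emerg(common, fmt, ...) \
	ath_printk(KERN_EMERG, common, fmt, ##__VA_ARGS__)
#define ath_err(common, fmt, ...) \
	ath_printk(KERN_ERR, common, fmt, ##__VA_ARGS__)
#define ath_info(common, fmt, ...) \
	ath_printk(KERN_INFO, common, fmt, ##__VA_ARGS__)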
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 3f4244f56ce5..2b14775e6bc6 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -342,6 +342,14 @@ int ath_reg_notifier_apply(struct wiphy *wiphy,
342 /* We always apply this */ 342 /* We always apply this */
343 ath_reg_apply_radar_flags(wiphy); 343 ath_reg_apply_radar_flags(wiphy);
344 344
345 /*
346 * This would happen when we have sent a custom regulatory request
 347 * for a world regulatory domain and the scheduler hasn't yet processed
348 * any pending requests in the queue.
349 */
350 if (!request)
351 return 0;
352
345 switch (request->initiator) { 353 switch (request->initiator) {
346 case NL80211_REGDOM_SET_BY_DRIVER: 354 case NL80211_REGDOM_SET_BY_DRIVER:
347 case NL80211_REGDOM_SET_BY_CORE: 355 case NL80211_REGDOM_SET_BY_CORE:
diff --git a/drivers/net/wireless/atmel.c b/drivers/net/wireless/atmel.c
index c8f7090b27d3..46e382ed46aa 100644
--- a/drivers/net/wireless/atmel.c
+++ b/drivers/net/wireless/atmel.c
@@ -1161,7 +1161,7 @@ static irqreturn_t service_interrupt(int irq, void *dev_id)
1161 struct atmel_private *priv = netdev_priv(dev); 1161 struct atmel_private *priv = netdev_priv(dev);
1162 u8 isr; 1162 u8 isr;
1163 int i = -1; 1163 int i = -1;
1164 static u8 irq_order[] = { 1164 static const u8 irq_order[] = {
1165 ISR_OUT_OF_RANGE, 1165 ISR_OUT_OF_RANGE,
1166 ISR_RxCOMPLETE, 1166 ISR_RxCOMPLETE,
1167 ISR_TxCOMPLETE, 1167 ISR_TxCOMPLETE,
@@ -3771,7 +3771,9 @@ static int probe_atmel_card(struct net_device *dev)
3771 3771
3772 if (rc) { 3772 if (rc) {
3773 if (dev->dev_addr[0] == 0xFF) { 3773 if (dev->dev_addr[0] == 0xFF) {
3774 u8 default_mac[] = {0x00, 0x04, 0x25, 0x00, 0x00, 0x00}; 3774 static const u8 default_mac[] = {
3775 0x00, 0x04, 0x25, 0x00, 0x00, 0x00
3776 };
3775 printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name); 3777 printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
3776 memcpy(dev->dev_addr, default_mac, 6); 3778 memcpy(dev->dev_addr, default_mac, 6);
3777 } 3779 }
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 0a00d42642cd..47033f6a1c2b 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -86,15 +86,16 @@ config B43_PIO
86 select SSB_BLOCKIO 86 select SSB_BLOCKIO
87 default y 87 default y
88 88
89config B43_NPHY 89config B43_PHY_N
90 bool "Pre IEEE 802.11n support (BROKEN)" 90 bool "Support for 802.11n (N-PHY) devices (EXPERIMENTAL)"
91 depends on B43 && EXPERIMENTAL && BROKEN 91 depends on B43 && EXPERIMENTAL
92 ---help--- 92 ---help---
93 Support for the IEEE 802.11n draft. 93 Support for the N-PHY.
94 94
95 THIS IS BROKEN AND DOES NOT WORK YET. 95 This enables support for devices with N-PHY revision up to 2.
96 96
97 SAY N. 97 Say N if you expect high stability and performance. Saying Y will not
 98 affect support for other devices and may provide support for basic needs.
98 99
99config B43_PHY_LP 100config B43_PHY_LP
100 bool "Support for low-power (LP-PHY) devices (EXPERIMENTAL)" 101 bool "Support for low-power (LP-PHY) devices (EXPERIMENTAL)"
diff --git a/drivers/net/wireless/b43/Makefile b/drivers/net/wireless/b43/Makefile
index 69d4af09a6cb..cef334a8c669 100644
--- a/drivers/net/wireless/b43/Makefile
+++ b/drivers/net/wireless/b43/Makefile
@@ -1,12 +1,12 @@
1b43-y += main.o 1b43-y += main.o
2b43-y += tables.o 2b43-y += tables.o
3b43-$(CONFIG_B43_NPHY) += tables_nphy.o 3b43-$(CONFIG_B43_PHY_N) += tables_nphy.o
4b43-$(CONFIG_B43_NPHY) += radio_2055.o 4b43-$(CONFIG_B43_PHY_N) += radio_2055.o
5b43-$(CONFIG_B43_NPHY) += radio_2056.o 5b43-$(CONFIG_B43_PHY_N) += radio_2056.o
6b43-y += phy_common.o 6b43-y += phy_common.o
7b43-y += phy_g.o 7b43-y += phy_g.o
8b43-y += phy_a.o 8b43-y += phy_a.o
9b43-$(CONFIG_B43_NPHY) += phy_n.o 9b43-$(CONFIG_B43_PHY_N) += phy_n.o
10b43-$(CONFIG_B43_PHY_LP) += phy_lp.o 10b43-$(CONFIG_B43_PHY_LP) += phy_lp.o
11b43-$(CONFIG_B43_PHY_LP) += tables_lpphy.o 11b43-$(CONFIG_B43_PHY_LP) += tables_lpphy.o
12b43-y += sysfs.o 12b43-y += sysfs.o
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 72821c456b02..bd4cb75b6ca3 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -153,6 +153,19 @@
153#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna 153#define B43_BFH_FEM_BT 0x0040 /* has FEM and switch to share antenna
154 * with bluetooth */ 154 * with bluetooth */
155 155
156/* SPROM boardflags2_lo values */
157#define B43_BFL2_RXBB_INT_REG_DIS 0x0001 /* external RX BB regulator present */
158#define B43_BFL2_APLL_WAR 0x0002 /* alternative A-band PLL settings implemented */
159#define B43_BFL2_TXPWRCTRL_EN 0x0004 /* permits enabling TX Power Control */
160#define B43_BFL2_2X4_DIV 0x0008 /* 2x4 diversity switch */
161#define B43_BFL2_5G_PWRGAIN 0x0010 /* supports 5G band power gain */
162#define B43_BFL2_PCIEWAR_OVR 0x0020 /* overrides ASPM and Clkreq settings */
163#define B43_BFL2_CAESERS_BRD 0x0040 /* is Caesers board (unused) */
164#define B43_BFL2_BTC3WIRE 0x0080 /* used 3-wire bluetooth coexist */
165#define B43_BFL2_SKWRKFEM_BRD 0x0100 /* 4321mcm93 uses Skyworks FEM */
166#define B43_BFL2_SPUR_WAR 0x0200 /* has a workaround for clock-harmonic spurs */
 167#define B43_BFL2_GPLL_WAR 0x0400 /* alternative G-band PLL settings implemented */
168
156/* GPIO register offset, in both ChipCommon and PCI core. */ 169/* GPIO register offset, in both ChipCommon and PCI core. */
157#define B43_GPIO_CONTROL 0x6c 170#define B43_GPIO_CONTROL 0x6c
158 171
@@ -403,10 +416,10 @@ enum {
403 416
404/* 802.11 core specific TM State Low (SSB_TMSLOW) flags */ 417/* 802.11 core specific TM State Low (SSB_TMSLOW) flags */
405#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */ 418#define B43_TMSLOW_GMODE 0x20000000 /* G Mode Enable */
406#define B43_TMSLOW_PHYCLKSPEED 0x00C00000 /* PHY clock speed mask (N-PHY only) */ 419#define B43_TMSLOW_PHY_BANDWIDTH 0x00C00000 /* PHY band width and clock speed mask (N-PHY only) */
407#define B43_TMSLOW_PHYCLKSPEED_40MHZ 0x00000000 /* 40 MHz PHY */ 420#define B43_TMSLOW_PHY_BANDWIDTH_10MHZ 0x00000000 /* 10 MHz bandwidth, 40 MHz PHY */
408#define B43_TMSLOW_PHYCLKSPEED_80MHZ 0x00400000 /* 80 MHz PHY */ 421#define B43_TMSLOW_PHY_BANDWIDTH_20MHZ 0x00400000 /* 20 MHz bandwidth, 80 MHz PHY */
409#define B43_TMSLOW_PHYCLKSPEED_160MHZ 0x00800000 /* 160 MHz PHY */ 422#define B43_TMSLOW_PHY_BANDWIDTH_40MHZ 0x00800000 /* 40 MHz bandwidth, 160 MHz PHY */
410#define B43_TMSLOW_PLLREFSEL 0x00200000 /* PLL Frequency Reference Select (rev >= 5) */ 423#define B43_TMSLOW_PLLREFSEL 0x00200000 /* PLL Frequency Reference Select (rev >= 5) */
411#define B43_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Control Enable (rev >= 5) */ 424#define B43_TMSLOW_MACPHYCLKEN 0x00100000 /* MAC PHY Clock Control Enable (rev >= 5) */
412#define B43_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */ 425#define B43_TMSLOW_PHYRESET 0x00080000 /* PHY Reset */
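The new B43_BFL2_* constants mirror the SPROM boardflags2_lo bits, so optional hardware features can be gated with a simple mask test, as the radio_init2055_post() change later in this diff does with B43_BFL2_RXBB_INT_REG_DIS. A one-line usage sketch (the local variable is illustrative):

/* Sketch: does this board permit hardware TX power control? */
bool has_txpwrctrl = !!(dev->dev->bus->sprom.boardflags2_lo &
			B43_BFL2_TXPWRCTRL_EN);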
diff --git a/drivers/net/wireless/b43/dma.c b/drivers/net/wireless/b43/dma.c
index 10d0aaf754c5..3d5566e7af0a 100644
--- a/drivers/net/wireless/b43/dma.c
+++ b/drivers/net/wireless/b43/dma.c
@@ -415,11 +415,6 @@ static int alloc_ringmemory(struct b43_dmaring *ring)
415 415
416static void free_ringmemory(struct b43_dmaring *ring) 416static void free_ringmemory(struct b43_dmaring *ring)
417{ 417{
418 gfp_t flags = GFP_KERNEL;
419
420 if (ring->type == B43_DMA_64BIT)
421 flags |= GFP_DMA;
422
423 dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE, 418 dma_free_coherent(ring->dev->dev->dma_dev, B43_DMA_RINGMEMSIZE,
424 ring->descbase, ring->dmabase); 419 ring->descbase, ring->dmabase);
425} 420}
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index a1186525c70d..22bc9f17f634 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -322,59 +322,83 @@ static int b43_ratelimit(struct b43_wl *wl)
322 322
323void b43info(struct b43_wl *wl, const char *fmt, ...) 323void b43info(struct b43_wl *wl, const char *fmt, ...)
324{ 324{
325 struct va_format vaf;
325 va_list args; 326 va_list args;
326 327
327 if (b43_modparam_verbose < B43_VERBOSITY_INFO) 328 if (b43_modparam_verbose < B43_VERBOSITY_INFO)
328 return; 329 return;
329 if (!b43_ratelimit(wl)) 330 if (!b43_ratelimit(wl))
330 return; 331 return;
332
331 va_start(args, fmt); 333 va_start(args, fmt);
332 printk(KERN_INFO "b43-%s: ", 334
333 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 335 vaf.fmt = fmt;
334 vprintk(fmt, args); 336 vaf.va = &args;
337
338 printk(KERN_INFO "b43-%s: %pV",
339 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
340
335 va_end(args); 341 va_end(args);
336} 342}
337 343
338void b43err(struct b43_wl *wl, const char *fmt, ...) 344void b43err(struct b43_wl *wl, const char *fmt, ...)
339{ 345{
346 struct va_format vaf;
340 va_list args; 347 va_list args;
341 348
342 if (b43_modparam_verbose < B43_VERBOSITY_ERROR) 349 if (b43_modparam_verbose < B43_VERBOSITY_ERROR)
343 return; 350 return;
344 if (!b43_ratelimit(wl)) 351 if (!b43_ratelimit(wl))
345 return; 352 return;
353
346 va_start(args, fmt); 354 va_start(args, fmt);
347 printk(KERN_ERR "b43-%s ERROR: ", 355
348 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 356 vaf.fmt = fmt;
349 vprintk(fmt, args); 357 vaf.va = &args;
358
359 printk(KERN_ERR "b43-%s ERROR: %pV",
360 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
361
350 va_end(args); 362 va_end(args);
351} 363}
352 364
353void b43warn(struct b43_wl *wl, const char *fmt, ...) 365void b43warn(struct b43_wl *wl, const char *fmt, ...)
354{ 366{
367 struct va_format vaf;
355 va_list args; 368 va_list args;
356 369
357 if (b43_modparam_verbose < B43_VERBOSITY_WARN) 370 if (b43_modparam_verbose < B43_VERBOSITY_WARN)
358 return; 371 return;
359 if (!b43_ratelimit(wl)) 372 if (!b43_ratelimit(wl))
360 return; 373 return;
374
361 va_start(args, fmt); 375 va_start(args, fmt);
362 printk(KERN_WARNING "b43-%s warning: ", 376
363 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 377 vaf.fmt = fmt;
364 vprintk(fmt, args); 378 vaf.va = &args;
379
380 printk(KERN_WARNING "b43-%s warning: %pV",
381 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
382
365 va_end(args); 383 va_end(args);
366} 384}
367 385
368void b43dbg(struct b43_wl *wl, const char *fmt, ...) 386void b43dbg(struct b43_wl *wl, const char *fmt, ...)
369{ 387{
388 struct va_format vaf;
370 va_list args; 389 va_list args;
371 390
372 if (b43_modparam_verbose < B43_VERBOSITY_DEBUG) 391 if (b43_modparam_verbose < B43_VERBOSITY_DEBUG)
373 return; 392 return;
393
374 va_start(args, fmt); 394 va_start(args, fmt);
375 printk(KERN_DEBUG "b43-%s debug: ", 395
376 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 396 vaf.fmt = fmt;
377 vprintk(fmt, args); 397 vaf.va = &args;
398
399 printk(KERN_DEBUG "b43-%s debug: %pV",
400 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
401
378 va_end(args); 402 va_end(args);
379} 403}
380 404
@@ -1126,6 +1150,8 @@ void b43_wireless_core_reset(struct b43_wldev *dev, u32 flags)
1126 1150
1127 flags |= B43_TMSLOW_PHYCLKEN; 1151 flags |= B43_TMSLOW_PHYCLKEN;
1128 flags |= B43_TMSLOW_PHYRESET; 1152 flags |= B43_TMSLOW_PHYRESET;
1153 if (dev->phy.type == B43_PHYTYPE_N)
1154 flags |= B43_TMSLOW_PHY_BANDWIDTH_20MHZ; /* Make 20 MHz def */
1129 ssb_device_enable(dev->dev, flags); 1155 ssb_device_enable(dev->dev, flags);
1130 msleep(2); /* Wait for the PLL to turn on. */ 1156 msleep(2); /* Wait for the PLL to turn on. */
1131 1157
@@ -2095,8 +2121,10 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
2095 filename = "ucode13"; 2121 filename = "ucode13";
2096 else if (rev == 14) 2122 else if (rev == 14)
2097 filename = "ucode14"; 2123 filename = "ucode14";
2098 else if (rev >= 15) 2124 else if (rev == 15)
2099 filename = "ucode15"; 2125 filename = "ucode15";
2126 else if ((rev >= 16) && (rev <= 20))
2127 filename = "ucode16_mimo";
2100 else 2128 else
2101 goto err_no_ucode; 2129 goto err_no_ucode;
2102 err = b43_do_request_fw(ctx, filename, &fw->ucode); 2130 err = b43_do_request_fw(ctx, filename, &fw->ucode);
@@ -2139,7 +2167,9 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
2139 goto err_no_initvals; 2167 goto err_no_initvals;
2140 break; 2168 break;
2141 case B43_PHYTYPE_N: 2169 case B43_PHYTYPE_N:
2142 if ((rev >= 11) && (rev <= 12)) 2170 if (rev >= 16)
2171 filename = "n0initvals16";
2172 else if ((rev >= 11) && (rev <= 12))
2143 filename = "n0initvals11"; 2173 filename = "n0initvals11";
2144 else 2174 else
2145 goto err_no_initvals; 2175 goto err_no_initvals;
@@ -2183,7 +2213,9 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
2183 goto err_no_initvals; 2213 goto err_no_initvals;
2184 break; 2214 break;
2185 case B43_PHYTYPE_N: 2215 case B43_PHYTYPE_N:
2186 if ((rev >= 11) && (rev <= 12)) 2216 if (rev >= 16)
2217 filename = "n0bsinitvals16";
2218 else if ((rev >= 11) && (rev <= 12))
2187 filename = "n0bsinitvals11"; 2219 filename = "n0bsinitvals11";
2188 else 2220 else
2189 goto err_no_initvals; 2221 goto err_no_initvals;
@@ -4022,9 +4054,9 @@ static int b43_phy_versioning(struct b43_wldev *dev)
4022 if (phy_rev > 9) 4054 if (phy_rev > 9)
4023 unsupported = 1; 4055 unsupported = 1;
4024 break; 4056 break;
4025#ifdef CONFIG_B43_NPHY 4057#ifdef CONFIG_B43_PHY_N
4026 case B43_PHYTYPE_N: 4058 case B43_PHYTYPE_N:
4027 if (phy_rev > 4) 4059 if (phy_rev > 9)
4028 unsupported = 1; 4060 unsupported = 1;
4029 break; 4061 break;
4030#endif 4062#endif
@@ -5067,7 +5099,7 @@ static void b43_print_driverinfo(void)
5067#ifdef CONFIG_B43_PCMCIA 5099#ifdef CONFIG_B43_PCMCIA
5068 feat_pcmcia = "M"; 5100 feat_pcmcia = "M";
5069#endif 5101#endif
5070#ifdef CONFIG_B43_NPHY 5102#ifdef CONFIG_B43_PHY_N
5071 feat_nphy = "N"; 5103 feat_nphy = "N";
5072#endif 5104#endif
5073#ifdef CONFIG_B43_LEDS 5105#ifdef CONFIG_B43_LEDS
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 7b2ea6781457..b5c5ce94d3fd 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -50,7 +50,7 @@ int b43_phy_allocate(struct b43_wldev *dev)
50 phy->ops = &b43_phyops_g; 50 phy->ops = &b43_phyops_g;
51 break; 51 break;
52 case B43_PHYTYPE_N: 52 case B43_PHYTYPE_N:
53#ifdef CONFIG_B43_NPHY 53#ifdef CONFIG_B43_PHY_N
54 phy->ops = &b43_phyops_n; 54 phy->ops = &b43_phyops_n;
55#endif 55#endif
56 break; 56 break;
@@ -231,6 +231,7 @@ void b43_radio_maskset(struct b43_wldev *dev, u16 offset, u16 mask, u16 set)
231u16 b43_phy_read(struct b43_wldev *dev, u16 reg) 231u16 b43_phy_read(struct b43_wldev *dev, u16 reg)
232{ 232{
233 assert_mac_suspended(dev); 233 assert_mac_suspended(dev);
234 dev->phy.writes_counter = 0;
234 return dev->phy.ops->phy_read(dev, reg); 235 return dev->phy.ops->phy_read(dev, reg);
235} 236}
236 237
@@ -238,6 +239,10 @@ void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
238{ 239{
239 assert_mac_suspended(dev); 240 assert_mac_suspended(dev);
240 dev->phy.ops->phy_write(dev, reg, value); 241 dev->phy.ops->phy_write(dev, reg, value);
242 if (++dev->phy.writes_counter == B43_MAX_WRITES_IN_ROW) {
243 b43_read16(dev, B43_MMIO_PHY_VER);
244 dev->phy.writes_counter = 0;
245 }
241} 246}
242 247
243void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg) 248void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg)
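Together with B43_MAX_WRITES_IN_ROW from phy_common.h, the change above makes every b43_phy_write() count consecutive writes and issue a dummy read of B43_MMIO_PHY_VER after 24 of them so the posted writes get flushed; any b43_phy_read() resets the counter. Since the mechanism is split across two files, here it is restated in one illustrative helper (sketch only, the real hook lives inside b43_phy_write()):

/* Sketch of the flush pattern introduced above: after 24 back-to-back PHY
 * writes, read a harmless register to push the posted writes out. */
static void b43_phy_write_flushed(struct b43_wldev *dev, u16 reg, u16 value)
{
	dev->phy.ops->phy_write(dev, reg, value);
	if (++dev->phy.writes_counter == B43_MAX_WRITES_IN_ROW) {
		b43_read16(dev, B43_MMIO_PHY_VER);	/* flushing read */
		dev->phy.writes_counter = 0;
	}
}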
@@ -424,12 +429,21 @@ void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on)
424 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4); 429 b43_write16(dev, B43_MMIO_PHY0, on ? 0 : 0xF4);
425} 430}
426 431
432
433bool b43_channel_type_is_40mhz(enum nl80211_channel_type channel_type)
434{
435 return (channel_type == NL80211_CHAN_HT40MINUS ||
436 channel_type == NL80211_CHAN_HT40PLUS);
437}
438
427/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */ 439/* http://bcm-v4.sipsolutions.net/802.11/PHY/Cordic */
428struct b43_c32 b43_cordic(int theta) 440struct b43_c32 b43_cordic(int theta)
429{ 441{
430 u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304, 442 static const u32 arctg[] = {
431 58666, 29335, 14668, 7334, 3667, 1833, 917, 458, 443 2949120, 1740967, 919879, 466945, 234379, 117304,
432 229, 115, 57, 29, }; 444 58666, 29335, 14668, 7334, 3667, 1833,
445 917, 458, 229, 115, 57, 29,
446 };
433 u8 i; 447 u8 i;
434 s32 tmp; 448 s32 tmp;
435 s8 signx = 1; 449 s8 signx = 1;
diff --git a/drivers/net/wireless/b43/phy_common.h b/drivers/net/wireless/b43/phy_common.h
index 0e6194228845..2401bee8b081 100644
--- a/drivers/net/wireless/b43/phy_common.h
+++ b/drivers/net/wireless/b43/phy_common.h
@@ -39,6 +39,9 @@ struct b43_c32 { s32 i, q; };
39#define B43_PHYVER_TYPE_SHIFT 8 39#define B43_PHYVER_TYPE_SHIFT 8
40#define B43_PHYVER_VERSION 0x00FF 40#define B43_PHYVER_VERSION 0x00FF
41 41
42/* PHY writes need to be flushed if we reach limit */
43#define B43_MAX_WRITES_IN_ROW 24
44
42/** 45/**
43 * enum b43_interference_mitigation - Interference Mitigation mode 46 * enum b43_interference_mitigation - Interference Mitigation mode
44 * 47 *
@@ -232,6 +235,9 @@ struct b43_phy {
232 /* PHY revision number. */ 235 /* PHY revision number. */
233 u8 rev; 236 u8 rev;
234 237
238 /* Count writes since last read */
239 u8 writes_counter;
240
235 /* Radio versioning */ 241 /* Radio versioning */
236 u16 radio_manuf; /* Radio manufacturer */ 242 u16 radio_manuf; /* Radio manufacturer */
237 u16 radio_ver; /* Radio version */ 243 u16 radio_ver; /* Radio version */
@@ -430,6 +436,8 @@ int b43_phy_shm_tssi_read(struct b43_wldev *dev, u16 shm_offset);
430 */ 436 */
431void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on); 437void b43_phyop_switch_analog_generic(struct b43_wldev *dev, bool on);
432 438
439bool b43_channel_type_is_40mhz(enum nl80211_channel_type channel_type);
440
433struct b43_c32 b43_cordic(int theta); 441struct b43_c32 b43_cordic(int theta);
434 442
435#endif /* LINUX_B43_PHY_COMMON_H_ */ 443#endif /* LINUX_B43_PHY_COMMON_H_ */
diff --git a/drivers/net/wireless/b43/phy_n.c b/drivers/net/wireless/b43/phy_n.c
index e0f2d122e124..ab81ed8b19d7 100644
--- a/drivers/net/wireless/b43/phy_n.c
+++ b/drivers/net/wireless/b43/phy_n.c
@@ -67,6 +67,18 @@ enum b43_nphy_rf_sequence {
67 B43_RFSEQ_UPDATE_GAINU, 67 B43_RFSEQ_UPDATE_GAINU,
68}; 68};
69 69
70enum b43_nphy_rssi_type {
71 B43_NPHY_RSSI_X = 0,
72 B43_NPHY_RSSI_Y,
73 B43_NPHY_RSSI_Z,
74 B43_NPHY_RSSI_PWRDET,
75 B43_NPHY_RSSI_TSSI_I,
76 B43_NPHY_RSSI_TSSI_Q,
77 B43_NPHY_RSSI_TBD,
78};
79
80static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev,
81 bool enable);
70static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd, 82static void b43_nphy_set_rf_sequence(struct b43_wldev *dev, u8 cmd,
71 u8 *events, u8 *delays, u8 length); 83 u8 *events, u8 *delays, u8 length);
72static void b43_nphy_force_rf_sequence(struct b43_wldev *dev, 84static void b43_nphy_force_rf_sequence(struct b43_wldev *dev,
@@ -76,13 +88,6 @@ static void b43_nphy_rf_control_override(struct b43_wldev *dev, u16 field,
76static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field, 88static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
77 u16 value, u8 core); 89 u16 value, u8 core);
78 90
79static inline bool b43_channel_type_is_40mhz(
80 enum nl80211_channel_type channel_type)
81{
82 return (channel_type == NL80211_CHAN_HT40MINUS ||
83 channel_type == NL80211_CHAN_HT40PLUS);
84}
85
86void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna) 91void b43_nphy_set_rxantenna(struct b43_wldev *dev, int antenna)
87{//TODO 92{//TODO
88} 93}
@@ -134,6 +139,99 @@ static void b43_chantab_radio_upload(struct b43_wldev *dev,
134 b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim); 139 b43_radio_write(dev, B2055_C2_TX_MXBGTRIM, e->radio_c2_tx_mxbgtrim);
135} 140}
136 141
142static void b43_chantab_radio_2056_upload(struct b43_wldev *dev,
143 const struct b43_nphy_channeltab_entry_rev3 *e)
144{
145 b43_radio_write(dev, B2056_SYN_PLL_VCOCAL1, e->radio_syn_pll_vcocal1);
146 b43_radio_write(dev, B2056_SYN_PLL_VCOCAL2, e->radio_syn_pll_vcocal2);
147 b43_radio_write(dev, B2056_SYN_PLL_REFDIV, e->radio_syn_pll_refdiv);
148 b43_radio_write(dev, B2056_SYN_PLL_MMD2, e->radio_syn_pll_mmd2);
149 b43_radio_write(dev, B2056_SYN_PLL_MMD1, e->radio_syn_pll_mmd1);
150 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER1,
151 e->radio_syn_pll_loopfilter1);
152 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER2,
153 e->radio_syn_pll_loopfilter2);
154 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER3,
155 e->radio_syn_pll_loopfilter3);
156 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER4,
157 e->radio_syn_pll_loopfilter4);
158 b43_radio_write(dev, B2056_SYN_PLL_LOOPFILTER5,
159 e->radio_syn_pll_loopfilter5);
160 b43_radio_write(dev, B2056_SYN_RESERVED_ADDR27,
161 e->radio_syn_reserved_addr27);
162 b43_radio_write(dev, B2056_SYN_RESERVED_ADDR28,
163 e->radio_syn_reserved_addr28);
164 b43_radio_write(dev, B2056_SYN_RESERVED_ADDR29,
165 e->radio_syn_reserved_addr29);
166 b43_radio_write(dev, B2056_SYN_LOGEN_VCOBUF1,
167 e->radio_syn_logen_vcobuf1);
168 b43_radio_write(dev, B2056_SYN_LOGEN_MIXER2, e->radio_syn_logen_mixer2);
169 b43_radio_write(dev, B2056_SYN_LOGEN_BUF3, e->radio_syn_logen_buf3);
170 b43_radio_write(dev, B2056_SYN_LOGEN_BUF4, e->radio_syn_logen_buf4);
171
172 b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAA_TUNE,
173 e->radio_rx0_lnaa_tune);
174 b43_radio_write(dev, B2056_RX0 | B2056_RX_LNAG_TUNE,
175 e->radio_rx0_lnag_tune);
176
177 b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAA_BOOST_TUNE,
178 e->radio_tx0_intpaa_boost_tune);
179 b43_radio_write(dev, B2056_TX0 | B2056_TX_INTPAG_BOOST_TUNE,
180 e->radio_tx0_intpag_boost_tune);
181 b43_radio_write(dev, B2056_TX0 | B2056_TX_PADA_BOOST_TUNE,
182 e->radio_tx0_pada_boost_tune);
183 b43_radio_write(dev, B2056_TX0 | B2056_TX_PADG_BOOST_TUNE,
184 e->radio_tx0_padg_boost_tune);
185 b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAA_BOOST_TUNE,
186 e->radio_tx0_pgaa_boost_tune);
187 b43_radio_write(dev, B2056_TX0 | B2056_TX_PGAG_BOOST_TUNE,
188 e->radio_tx0_pgag_boost_tune);
189 b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXA_BOOST_TUNE,
190 e->radio_tx0_mixa_boost_tune);
191 b43_radio_write(dev, B2056_TX0 | B2056_TX_MIXG_BOOST_TUNE,
192 e->radio_tx0_mixg_boost_tune);
193
194 b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAA_TUNE,
195 e->radio_rx1_lnaa_tune);
196 b43_radio_write(dev, B2056_RX1 | B2056_RX_LNAG_TUNE,
197 e->radio_rx1_lnag_tune);
198
199 b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAA_BOOST_TUNE,
200 e->radio_tx1_intpaa_boost_tune);
201 b43_radio_write(dev, B2056_TX1 | B2056_TX_INTPAG_BOOST_TUNE,
202 e->radio_tx1_intpag_boost_tune);
203 b43_radio_write(dev, B2056_TX1 | B2056_TX_PADA_BOOST_TUNE,
204 e->radio_tx1_pada_boost_tune);
205 b43_radio_write(dev, B2056_TX1 | B2056_TX_PADG_BOOST_TUNE,
206 e->radio_tx1_padg_boost_tune);
207 b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAA_BOOST_TUNE,
208 e->radio_tx1_pgaa_boost_tune);
209 b43_radio_write(dev, B2056_TX1 | B2056_TX_PGAG_BOOST_TUNE,
210 e->radio_tx1_pgag_boost_tune);
211 b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXA_BOOST_TUNE,
212 e->radio_tx1_mixa_boost_tune);
213 b43_radio_write(dev, B2056_TX1 | B2056_TX_MIXG_BOOST_TUNE,
214 e->radio_tx1_mixg_boost_tune);
215}
216
217/* http://bcm-v4.sipsolutions.net/802.11/PHY/Radio/2056Setup */
218static void b43_radio_2056_setup(struct b43_wldev *dev,
219 const struct b43_nphy_channeltab_entry_rev3 *e)
220{
221 B43_WARN_ON(dev->phy.rev < 3);
222
223 b43_chantab_radio_2056_upload(dev, e);
224 /* TODO */
225 udelay(50);
226 /* VCO calibration */
227 b43_radio_write(dev, B2056_SYN_PLL_VCOCAL12, 0x00);
228 b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38);
229 b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x18);
230 b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x38);
231 b43_radio_write(dev, B2056_TX_INTPAA_PA_MISC, 0x39);
232 udelay(300);
233}
234
137static void b43_chantab_phy_upload(struct b43_wldev *dev, 235static void b43_chantab_phy_upload(struct b43_wldev *dev,
138 const struct b43_phy_n_sfo_cfg *e) 236 const struct b43_phy_n_sfo_cfg *e)
139{ 237{
@@ -145,9 +243,154 @@ static void b43_chantab_phy_upload(struct b43_wldev *dev,
145 b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6); 243 b43_phy_write(dev, B43_NPHY_BW6, e->phy_bw6);
146} 244}
147 245
246/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrCtrlEnable */
247static void b43_nphy_tx_power_ctrl(struct b43_wldev *dev, bool enable)
248{
249 struct b43_phy_n *nphy = dev->phy.n;
250 u8 i;
251 u16 tmp;
252
253 if (nphy->hang_avoid)
254 b43_nphy_stay_in_carrier_search(dev, 1);
255
256 nphy->txpwrctrl = enable;
257 if (!enable) {
258 if (dev->phy.rev >= 3)
259 ; /* TODO */
260
261 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6840);
262 for (i = 0; i < 84; i++)
263 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
264
265 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x6C40);
266 for (i = 0; i < 84; i++)
267 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0);
268
269 tmp = B43_NPHY_TXPCTL_CMD_COEFF | B43_NPHY_TXPCTL_CMD_HWPCTLEN;
270 if (dev->phy.rev >= 3)
271 tmp |= B43_NPHY_TXPCTL_CMD_PCTLEN;
272 b43_phy_mask(dev, B43_NPHY_TXPCTL_CMD, ~tmp);
273
274 if (dev->phy.rev >= 3) {
275 b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
276 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
277 } else {
278 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
279 }
280
281 if (dev->phy.rev == 2)
282 b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
283 ~B43_NPHY_BPHY_CTL3_SCALE, 0x53);
284 else if (dev->phy.rev < 2)
285 b43_phy_maskset(dev, B43_NPHY_BPHY_CTL3,
286 ~B43_NPHY_BPHY_CTL3_SCALE, 0x5A);
287
288 if (dev->phy.rev < 2 && 0)
289 ; /* TODO */
290 } else {
291 b43err(dev->wl, "enabling tx pwr ctrl not implemented yet\n");
292 }
293
294 if (nphy->hang_avoid)
295 b43_nphy_stay_in_carrier_search(dev, 0);
296}
297
298/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/TxPwrFix */
148static void b43_nphy_tx_power_fix(struct b43_wldev *dev) 299static void b43_nphy_tx_power_fix(struct b43_wldev *dev)
149{ 300{
150 //TODO 301 struct b43_phy_n *nphy = dev->phy.n;
302 struct ssb_sprom *sprom = &(dev->dev->bus->sprom);
303
304 u8 txpi[2], bbmult, i;
305 u16 tmp, radio_gain, dac_gain;
306 u16 freq = dev->phy.channel_freq;
307 u32 txgain;
308 /* u32 gaintbl; rev3+ */
309
310 if (nphy->hang_avoid)
311 b43_nphy_stay_in_carrier_search(dev, 1);
312
313 if (dev->phy.rev >= 3) {
314 txpi[0] = 40;
315 txpi[1] = 40;
316 } else if (sprom->revision < 4) {
317 txpi[0] = 72;
318 txpi[1] = 72;
319 } else {
320 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
321 txpi[0] = sprom->txpid2g[0];
322 txpi[1] = sprom->txpid2g[1];
323 } else if (freq >= 4900 && freq < 5100) {
324 txpi[0] = sprom->txpid5gl[0];
325 txpi[1] = sprom->txpid5gl[1];
326 } else if (freq >= 5100 && freq < 5500) {
327 txpi[0] = sprom->txpid5g[0];
328 txpi[1] = sprom->txpid5g[1];
329 } else if (freq >= 5500) {
330 txpi[0] = sprom->txpid5gh[0];
331 txpi[1] = sprom->txpid5gh[1];
332 } else {
333 txpi[0] = 91;
334 txpi[1] = 91;
335 }
336 }
337
338 /*
339 for (i = 0; i < 2; i++) {
340 nphy->txpwrindex[i].index_internal = txpi[i];
341 nphy->txpwrindex[i].index_internal_save = txpi[i];
342 }
343 */
344
345 for (i = 0; i < 2; i++) {
346 if (dev->phy.rev >= 3) {
347 /* FIXME: support 5GHz */
348 txgain = b43_ntab_tx_gain_rev3plus_2ghz[txpi[i]];
349 radio_gain = (txgain >> 16) & 0x1FFFF;
350 } else {
351 txgain = b43_ntab_tx_gain_rev0_1_2[txpi[i]];
352 radio_gain = (txgain >> 16) & 0x1FFF;
353 }
354
355 dac_gain = (txgain >> 8) & 0x3F;
356 bbmult = txgain & 0xFF;
357
358 if (dev->phy.rev >= 3) {
359 if (i == 0)
360 b43_phy_set(dev, B43_NPHY_AFECTL_OVER1, 0x0100);
361 else
362 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x0100);
363 } else {
364 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x4000);
365 }
366
367 if (i == 0)
368 b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN1, dac_gain);
369 else
370 b43_phy_write(dev, B43_NPHY_AFECTL_DACGAIN2, dac_gain);
371
372 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D10 + i);
373 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, radio_gain);
374
375 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C57);
376 tmp = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
377
378 if (i == 0)
379 tmp = (tmp & 0x00FF) | (bbmult << 8);
380 else
381 tmp = (tmp & 0xFF00) | bbmult;
382
383 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x3C57);
384 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, tmp);
385
386 if (0)
387 ; /* TODO */
388 }
389
390 b43_phy_mask(dev, B43_NPHY_BPHY_CTL2, ~B43_NPHY_BPHY_CTL2_LUT);
391
392 if (nphy->hang_avoid)
393 b43_nphy_stay_in_carrier_search(dev, 0);
151} 394}
152 395
153 396
@@ -191,7 +434,8 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
191 binfo->type != 0x46D || 434 binfo->type != 0x46D ||
192 binfo->rev < 0x41); 435 binfo->rev < 0x41);
193 else 436 else
194 workaround = ((sprom->boardflags_hi & B43_BFH_NOPA) == 0); 437 workaround =
438 !(sprom->boardflags2_lo & B43_BFL2_RXBB_INT_REG_DIS);
195 439
196 b43_radio_mask(dev, B2055_MASTER1, 0xFFF3); 440 b43_radio_mask(dev, B2055_MASTER1, 0xFFF3);
197 if (workaround) { 441 if (workaround) {
@@ -240,23 +484,55 @@ static void b43_radio_init2055_post(struct b43_wldev *dev)
240static void b43_radio_init2055(struct b43_wldev *dev) 484static void b43_radio_init2055(struct b43_wldev *dev)
241{ 485{
242 b43_radio_init2055_pre(dev); 486 b43_radio_init2055_pre(dev);
243 if (b43_status(dev) < B43_STAT_INITIALIZED) 487 if (b43_status(dev) < B43_STAT_INITIALIZED) {
244 b2055_upload_inittab(dev, 0, 1); 488 /* Follow wl, not specs. Do not force uploading all regs */
245 else 489 b2055_upload_inittab(dev, 0, 0);
246 b2055_upload_inittab(dev, 0/*FIXME on 5ghz band*/, 0); 490 } else {
491 bool ghz5 = b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ;
492 b2055_upload_inittab(dev, ghz5, 0);
493 }
247 b43_radio_init2055_post(dev); 494 b43_radio_init2055_post(dev);
248} 495}
249 496
497static void b43_radio_init2056_pre(struct b43_wldev *dev)
498{
499 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
500 ~B43_NPHY_RFCTL_CMD_CHIP0PU);
501 /* Maybe wl meant to reset and set (order?) RFCTL_CMD_OEPORFORCE? */
502 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
503 B43_NPHY_RFCTL_CMD_OEPORFORCE);
504 b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
505 ~B43_NPHY_RFCTL_CMD_OEPORFORCE);
506 b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
507 B43_NPHY_RFCTL_CMD_CHIP0PU);
508}
509
510static void b43_radio_init2056_post(struct b43_wldev *dev)
511{
512 b43_radio_set(dev, B2056_SYN_COM_CTRL, 0xB);
513 b43_radio_set(dev, B2056_SYN_COM_PU, 0x2);
514 b43_radio_set(dev, B2056_SYN_COM_RESET, 0x2);
515 msleep(1);
516 b43_radio_mask(dev, B2056_SYN_COM_RESET, ~0x2);
517 b43_radio_mask(dev, B2056_SYN_PLL_MAST2, ~0xFC);
518 b43_radio_mask(dev, B2056_SYN_RCCAL_CTRL0, ~0x1);
519 /*
520 if (nphy->init_por)
521 Call Radio 2056 Recalibrate
522 */
523}
524
250/* 525/*
251 * Initialize a Broadcom 2056 N-radio 526 * Initialize a Broadcom 2056 N-radio
252 * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init 527 * http://bcm-v4.sipsolutions.net/802.11/Radio/2056/Init
253 */ 528 */
254static void b43_radio_init2056(struct b43_wldev *dev) 529static void b43_radio_init2056(struct b43_wldev *dev)
255{ 530{
256 /* TODO */ 531 b43_radio_init2056_pre(dev);
532 b2056_upload_inittabs(dev, 0, 0);
533 b43_radio_init2056_post(dev);
257} 534}
258 535
259
260/* 536/*
261 * Upload the N-PHY tables. 537 * Upload the N-PHY tables.
262 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables 538 * http://bcm-v4.sipsolutions.net/802.11/PHY/N/InitTables
@@ -453,6 +729,8 @@ static void b43_nphy_rx_iq_coeffs(struct b43_wldev *dev, bool write,
453 } 729 }
454} 730}
455 731
732#if 0
733/* Ready but not used anywhere */
456/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */ 734/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RxCalPhyCleanup */
457static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core) 735static void b43_nphy_rx_cal_phy_cleanup(struct b43_wldev *dev, u8 core)
458{ 736{
@@ -534,6 +812,7 @@ static void b43_nphy_rx_cal_phy_setup(struct b43_wldev *dev, u8 core)
534 b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1)); 812 b43_nphy_rf_control_intc_override(dev, 1, rxval, (core + 1));
535 b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core)); 813 b43_nphy_rf_control_intc_override(dev, 1, txval, (2 - core));
536} 814}
815#endif
537 816
538/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */ 817/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/CalcRxIqComp */
539static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask) 818static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
@@ -569,7 +848,6 @@ static void b43_nphy_calc_rx_iq_comp(struct b43_wldev *dev, u8 mask)
 			ii = est.i1_pwr;
 			qq = est.q1_pwr;
 		} else {
-			B43_WARN_ON(1);
 			continue;
 		}
 
@@ -651,7 +929,8 @@ static void b43_nphy_tx_iq_workaround(struct b43_wldev *dev)
 }
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/clip-detection */
-static void b43_nphy_write_clip_detection(struct b43_wldev *dev, u16 *clip_st)
+static void b43_nphy_write_clip_detection(struct b43_wldev *dev,
+					  const u16 *clip_st)
 {
 	b43_phy_write(dev, B43_NPHY_C1_CLIP1THRES, clip_st[0]);
 	b43_phy_write(dev, B43_NPHY_C2_CLIP1THRES, clip_st[1]);
@@ -727,7 +1006,7 @@ static void b43_nphy_stay_in_carrier_search(struct b43_wldev *dev, bool enable)
 	struct b43_phy_n *nphy = phy->n;
 
 	if (enable) {
-		u16 clip[] = { 0xFFFF, 0xFFFF };
+		static const u16 clip[] = { 0xFFFF, 0xFFFF };
 		if (nphy->deaf_count++ == 0) {
 			nphy->classifier_state = b43_nphy_classifier(dev, 0, 0);
 			b43_nphy_classifier(dev, 0x7, 0);
@@ -839,7 +1118,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
 	u16 data[4];
 	s16 gain[2];
 	u16 minmax[2];
-	u16 lna_gain[4] = { -2, 10, 19, 25 };
+	static const u16 lna_gain[4] = { -2, 10, 19, 25 };
 
 	if (nphy->hang_avoid)
 		b43_nphy_stay_in_carrier_search(dev, 1);
@@ -871,7 +1150,7 @@ static void b43_nphy_adjust_lna_gain_table(struct b43_wldev *dev)
 			data[2] = lna_gain[2] + gain[i];
 			data[3] = lna_gain[3] + gain[i];
 		}
-		b43_ntab_write_bulk(dev, B43_NTAB16(10, 8), 4, data);
+		b43_ntab_write_bulk(dev, B43_NTAB16(i, 8), 4, data);
 
 		minmax[i] = 23 + gain[i];
 	}
@@ -891,6 +1170,7 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
891 struct b43_phy_n *nphy = dev->phy.n; 1170 struct b43_phy_n *nphy = dev->phy.n;
892 u8 i, j; 1171 u8 i, j;
893 u8 code; 1172 u8 code;
1173 u16 tmp;
894 1174
895 /* TODO: for PHY >= 3 1175 /* TODO: for PHY >= 3
896 s8 *lna1_gain, *lna2_gain; 1176 s8 *lna1_gain, *lna2_gain;
@@ -913,15 +1193,15 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
913 B43_NPHY_C2_CGAINI_CL2DETECT); 1193 B43_NPHY_C2_CGAINI_CL2DETECT);
914 1194
915 /* Set narrowband clip threshold */ 1195 /* Set narrowband clip threshold */
916 b43_phy_set(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84); 1196 b43_phy_write(dev, B43_NPHY_C1_NBCLIPTHRES, 0x84);
917 b43_phy_set(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84); 1197 b43_phy_write(dev, B43_NPHY_C2_NBCLIPTHRES, 0x84);
918 1198
919 if (!dev->phy.is_40mhz) { 1199 if (!dev->phy.is_40mhz) {
920 /* Set dwell lengths */ 1200 /* Set dwell lengths */
921 b43_phy_set(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B); 1201 b43_phy_write(dev, B43_NPHY_CLIP1_NBDWELL_LEN, 0x002B);
922 b43_phy_set(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B); 1202 b43_phy_write(dev, B43_NPHY_CLIP2_NBDWELL_LEN, 0x002B);
923 b43_phy_set(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009); 1203 b43_phy_write(dev, B43_NPHY_W1CLIP1_DWELL_LEN, 0x0009);
924 b43_phy_set(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009); 1204 b43_phy_write(dev, B43_NPHY_W1CLIP2_DWELL_LEN, 0x0009);
925 } 1205 }
926 1206
927 /* Set wideband clip 2 threshold */ 1207 /* Set wideband clip 2 threshold */
@@ -943,7 +1223,7 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
943 ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1); 1223 ~B43_NPHY_C2_CCK_CGAINI_GAINBKOFF, 0x1);
944 } 1224 }
945 1225
946 b43_phy_set(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C); 1226 b43_phy_write(dev, B43_NPHY_CCK_SHIFTB_REF, 0x809C);
947 1227
948 if (nphy->gain_boost) { 1228 if (nphy->gain_boost) {
949 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ && 1229 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ &&
@@ -964,10 +1244,10 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
964 code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT); 1244 code << B43_NPHY_C2_INITGAIN_HPVGA2_SHIFT);
965 1245
966 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); 1246 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
967 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 1247 /* specs say about 2 loops, but wl does 4 */
968 (code << 8 | 0x7C)); 1248 for (i = 0; i < 4; i++)
969 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 1249 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
970 (code << 8 | 0x7C)); 1250 (code << 8 | 0x7C));
971 1251
972 b43_nphy_adjust_lna_gain_table(dev); 1252 b43_nphy_adjust_lna_gain_table(dev);
973 1253
@@ -985,19 +1265,21 @@ static void b43_nphy_gain_ctrl_workarounds(struct b43_wldev *dev)
985 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1); 1265 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x1);
986 1266
987 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06); 1267 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x1D06);
988 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 1268 /* specs say about 2 loops, but wl does 4 */
989 (code << 8 | 0x74)); 1269 for (i = 0; i < 4; i++)
990 b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 1270 b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
991 (code << 8 | 0x74)); 1271 (code << 8 | 0x74));
992 } 1272 }
993 1273
994 if (dev->phy.rev == 2) { 1274 if (dev->phy.rev == 2) {
995 for (i = 0; i < 4; i++) { 1275 for (i = 0; i < 4; i++) {
996 b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 1276 b43_phy_write(dev, B43_NPHY_TABLE_ADDR,
997 (0x0400 * i) + 0x0020); 1277 (0x0400 * i) + 0x0020);
998 for (j = 0; j < 21; j++) 1278 for (j = 0; j < 21; j++) {
1279 tmp = j * (i < 2 ? 3 : 1);
999 b43_phy_write(dev, 1280 b43_phy_write(dev,
1000 B43_NPHY_TABLE_DATALO, 3 * j); 1281 B43_NPHY_TABLE_DATALO, tmp);
1282 }
1001 } 1283 }
1002 1284
1003 b43_nphy_set_rf_sequence(dev, 5, 1285 b43_nphy_set_rf_sequence(dev, 5,
@@ -1026,7 +1308,7 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
 	u8 events2[7] = { 0x0, 0x3, 0x5, 0x4, 0x2, 0x1, 0x8 };
 	u8 delays2[7] = { 0x8, 0x6, 0x2, 0x4, 0x4, 0x6, 0x1 };
 
-	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ)
 		b43_nphy_classifier(dev, 1, 0);
 	else
 		b43_nphy_classifier(dev, 1, 1);
@@ -1049,29 +1331,18 @@ static void b43_nphy_workarounds(struct b43_wldev *dev)
 		b43_radio_set(dev, B2055_C2_TX_RF_SPARE, 0x8);
 	}
 
-	/* TODO: convert to b43_ntab_write? */
-	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2000);
-	b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
-	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2010);
-	b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x000A);
-	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2002);
-	b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
-	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2012);
-	b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0xCDAA);
+	b43_ntab_write(dev, B43_NTAB16(8, 0x00), 0x000A);
+	b43_ntab_write(dev, B43_NTAB16(8, 0x10), 0x000A);
+	b43_ntab_write(dev, B43_NTAB16(8, 0x02), 0xCDAA);
+	b43_ntab_write(dev, B43_NTAB16(8, 0x12), 0xCDAA);
 
 	if (dev->phy.rev < 2) {
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2008);
-		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2018);
-		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0000);
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2007);
-		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2017);
-		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x7AAB);
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2006);
-		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
-		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, 0x2016);
-		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, 0x0800);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x08), 0x0000);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x18), 0x0000);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x07), 0x7AAB);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x17), 0x7AAB);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x06), 0x0800);
+		b43_ntab_write(dev, B43_NTAB16(8, 0x16), 0x0800);
 	}
 
 	b43_phy_write(dev, B43_NPHY_RFCTL_LUT_TRSW_LO1, 0x2D8);
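The removed open-coded sequence shows what the new b43_ntab_write() calls presumably boil down to for a 16-bit table entry, and makes clear that B43_NTAB16(8, 0x00) maps to table address 0x2000 (table 8, offset 0). A hedged sketch of that inferred write path (the real helper and the B43_NTAB16() encoding live in tables_nphy.[ch], so details may differ):

	/* Sketch inferred from the removed writes above, not copied from
	 * tables_nphy.c: program the table address, then the low data word. */
	static void ntab16_write_sketch(struct b43_wldev *dev, u16 addr, u16 value)
	{
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, addr);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
	}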
@@ -1565,19 +1836,20 @@ static void b43_nphy_rf_control_intc_override(struct b43_wldev *dev, u8 field,
 	}
 }
 
+/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/BPHYInit */
 static void b43_nphy_bphy_init(struct b43_wldev *dev)
 {
 	unsigned int i;
 	u16 val;
 
 	val = 0x1E1F;
-	for (i = 0; i < 14; i++) {
+	for (i = 0; i < 16; i++) {
 		b43_phy_write(dev, B43_PHY_N_BMODE(0x88 + i), val);
 		val -= 0x202;
 	}
 	val = 0x3E3F;
 	for (i = 0; i < 16; i++) {
-		b43_phy_write(dev, B43_PHY_N_BMODE(0x97 + i), val);
+		b43_phy_write(dev, B43_PHY_N_BMODE(0x98 + i), val);
 		val -= 0x202;
 	}
 	b43_phy_write(dev, B43_PHY_N_BMODE(0x38), 0x668);
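With the corrected bounds the two loops program two continuous ramps: the old code wrote only 14 values from 0x88 (leaving register 0x96 untouched) and started the second ramp one register early at 0x97. Now sixteen steps of 0x202 take the first ramp from 0x1E1F down to exactly 0x0001 at 0x97, and the second ramp covers 0x98..0xA7, ending at 0x2021. A small worked example that reproduces those values:

	/* Worked example only: dumps the register/value pairs the fixed
	 * loops now produce. 0x1E1F - 15 * 0x202 = 0x0001,
	 * 0x3E3F - 15 * 0x202 = 0x2021. */
	static void bphy_init_values_example(void)
	{
		unsigned int i;
		u16 val;

		for (i = 0, val = 0x1E1F; i < 16; i++, val -= 0x202)
			printk(KERN_DEBUG "BMODE 0x%02X <- 0x%04X\n", 0x88 + i, val);
		for (i = 0, val = 0x3E3F; i < 16; i++, val -= 0x202)
			printk(KERN_DEBUG "BMODE 0x%02X <- 0x%04X\n", 0x98 + i, val);
	}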
@@ -1585,7 +1857,8 @@ static void b43_nphy_bphy_init(struct b43_wldev *dev)
 
 /* http://bcm-v4.sipsolutions.net/802.11/PHY/N/ScaleOffsetRssi */
 static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
-				s8 offset, u8 core, u8 rail, u8 type)
+				s8 offset, u8 core, u8 rail,
+				enum b43_nphy_rssi_type type)
 {
 	u16 tmp;
 	bool core1or5 = (core == 1) || (core == 5);
@@ -1594,53 +1867,59 @@ static void b43_nphy_scale_offset_rssi(struct b43_wldev *dev, u16 scale,
1594 offset = clamp_val(offset, -32, 31); 1867 offset = clamp_val(offset, -32, 31);
1595 tmp = ((scale & 0x3F) << 8) | (offset & 0x3F); 1868 tmp = ((scale & 0x3F) << 8) | (offset & 0x3F);
1596 1869
1597 if (core1or5 && (rail == 0) && (type == 2)) 1870 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z))
1598 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp); 1871 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Z, tmp);
1599 if (core1or5 && (rail == 1) && (type == 2)) 1872 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z))
1600 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp); 1873 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Z, tmp);
1601 if (core2or5 && (rail == 0) && (type == 2)) 1874 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Z))
1602 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp); 1875 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Z, tmp);
1603 if (core2or5 && (rail == 1) && (type == 2)) 1876 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Z))
1604 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp); 1877 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Z, tmp);
1605 if (core1or5 && (rail == 0) && (type == 0)) 1878
1879 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_X))
1606 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp); 1880 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_X, tmp);
1607 if (core1or5 && (rail == 1) && (type == 0)) 1881 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_X))
1608 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp); 1882 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_X, tmp);
1609 if (core2or5 && (rail == 0) && (type == 0)) 1883 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_X))
1610 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp); 1884 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_X, tmp);
1611 if (core2or5 && (rail == 1) && (type == 0)) 1885 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_X))
1612 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp); 1886 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_X, tmp);
1613 if (core1or5 && (rail == 0) && (type == 1)) 1887
1888 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y))
1614 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp); 1889 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_RSSI_Y, tmp);
1615 if (core1or5 && (rail == 1) && (type == 1)) 1890 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y))
1616 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp); 1891 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_RSSI_Y, tmp);
1617 if (core2or5 && (rail == 0) && (type == 1)) 1892 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_Y))
1618 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp); 1893 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_RSSI_Y, tmp);
1619 if (core2or5 && (rail == 1) && (type == 1)) 1894 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_Y))
1620 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp); 1895 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_RSSI_Y, tmp);
1621 if (core1or5 && (rail == 0) && (type == 6)) 1896
1897 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD))
1622 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp); 1898 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TBD, tmp);
1623 if (core1or5 && (rail == 1) && (type == 6)) 1899 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD))
1624 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp); 1900 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TBD, tmp);
1625 if (core2or5 && (rail == 0) && (type == 6)) 1901 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_TBD))
1626 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp); 1902 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TBD, tmp);
1627 if (core2or5 && (rail == 1) && (type == 6)) 1903 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_TBD))
1628 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp); 1904 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TBD, tmp);
1629 if (core1or5 && (rail == 0) && (type == 3)) 1905
1906 if (core1or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET))
1630 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp); 1907 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_PWRDET, tmp);
1631 if (core1or5 && (rail == 1) && (type == 3)) 1908 if (core1or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET))
1632 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp); 1909 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_PWRDET, tmp);
1633 if (core2or5 && (rail == 0) && (type == 3)) 1910 if (core2or5 && (rail == 0) && (type == B43_NPHY_RSSI_PWRDET))
1634 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp); 1911 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_PWRDET, tmp);
1635 if (core2or5 && (rail == 1) && (type == 3)) 1912 if (core2or5 && (rail == 1) && (type == B43_NPHY_RSSI_PWRDET))
1636 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp); 1913 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_PWRDET, tmp);
1637 if (core1or5 && (type == 4)) 1914
1915 if (core1or5 && (type == B43_NPHY_RSSI_TSSI_I))
1638 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp); 1916 b43_phy_write(dev, B43_NPHY_RSSIMC_0I_TSSI, tmp);
1639 if (core2or5 && (type == 4)) 1917 if (core2or5 && (type == B43_NPHY_RSSI_TSSI_I))
1640 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp); 1918 b43_phy_write(dev, B43_NPHY_RSSIMC_1I_TSSI, tmp);
1641 if (core1or5 && (type == 5)) 1919
1920 if (core1or5 && (type == B43_NPHY_RSSI_TSSI_Q))
1642 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp); 1921 b43_phy_write(dev, B43_NPHY_RSSIMC_0Q_TSSI, tmp);
1643 if (core2or5 && (type == 5)) 1922 if (core2or5 && (type == B43_NPHY_RSSI_TSSI_Q))
1644 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp); 1923 b43_phy_write(dev, B43_NPHY_RSSIMC_1Q_TSSI, tmp);
1645} 1924}
1646 1925
@@ -1668,27 +1947,39 @@ static void b43_nphy_rev2_rssi_select(struct b43_wldev *dev, u8 code, u8 type)
1668 (type + 1) << 4); 1947 (type + 1) << 4);
1669 } 1948 }
1670 1949
1671 /* TODO use some definitions */
1672 if (code == 0) { 1950 if (code == 0) {
1673 b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 0); 1951 b43_phy_mask(dev, B43_NPHY_AFECTL_OVER, ~0x3000);
1674 if (type < 3) { 1952 if (type < 3) {
1675 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFEC7, 0); 1953 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
1676 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xEFDC, 0); 1954 ~(B43_NPHY_RFCTL_CMD_RXEN |
1677 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0); 1955 B43_NPHY_RFCTL_CMD_CORESEL));
1956 b43_phy_mask(dev, B43_NPHY_RFCTL_OVER,
1957 ~(0x1 << 12 |
1958 0x1 << 5 |
1959 0x1 << 1 |
1960 0x1));
1961 b43_phy_mask(dev, B43_NPHY_RFCTL_CMD,
1962 ~B43_NPHY_RFCTL_CMD_START);
1678 udelay(20); 1963 udelay(20);
1679 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0); 1964 b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
1680 } 1965 }
1681 } else { 1966 } else {
1682 b43_phy_maskset(dev, B43_NPHY_AFECTL_OVER, 0xCFFF, 1967 b43_phy_set(dev, B43_NPHY_AFECTL_OVER, 0x3000);
1683 0x3000);
1684 if (type < 3) { 1968 if (type < 3) {
1685 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 1969 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD,
1686 0xFEC7, 0x0180); 1970 ~(B43_NPHY_RFCTL_CMD_RXEN |
1687 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 1971 B43_NPHY_RFCTL_CMD_CORESEL),
1688 0xEFDC, (code << 1 | 0x1021)); 1972 (B43_NPHY_RFCTL_CMD_RXEN |
1689 b43_phy_maskset(dev, B43_NPHY_RFCTL_CMD, 0xFFFE, 0x1); 1973 code << B43_NPHY_RFCTL_CMD_CORESEL_SHIFT));
1974 b43_phy_set(dev, B43_NPHY_RFCTL_OVER,
1975 (0x1 << 12 |
1976 0x1 << 5 |
1977 0x1 << 1 |
1978 0x1));
1979 b43_phy_set(dev, B43_NPHY_RFCTL_CMD,
1980 B43_NPHY_RFCTL_CMD_START);
1690 udelay(20); 1981 udelay(20);
1691 b43_phy_maskset(dev, B43_NPHY_RFCTL_OVER, 0xFFFE, 0); 1982 b43_phy_mask(dev, B43_NPHY_RFCTL_OVER, ~0x1);
1692 } 1983 }
1693 } 1984 }
1694} 1985}
@@ -1837,6 +2128,14 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
1837 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER); 2128 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
1838 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0); 2129 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B1S0);
1839 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1); 2130 save_regs_phy[7] = b43_phy_read(dev, B43_NPHY_TXF_40CO_B32S1);
2131 } else if (dev->phy.rev == 2) {
2132 save_regs_phy[0] = b43_phy_read(dev, B43_NPHY_AFECTL_C1);
2133 save_regs_phy[1] = b43_phy_read(dev, B43_NPHY_AFECTL_C2);
2134 save_regs_phy[2] = b43_phy_read(dev, B43_NPHY_AFECTL_OVER);
2135 save_regs_phy[3] = b43_phy_read(dev, B43_NPHY_RFCTL_CMD);
2136 save_regs_phy[4] = b43_phy_read(dev, B43_NPHY_RFCTL_OVER);
2137 save_regs_phy[5] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO1);
2138 save_regs_phy[6] = b43_phy_read(dev, B43_NPHY_RFCTL_RSSIO2);
1840 } 2139 }
1841 2140
1842 b43_nphy_rssi_select(dev, 5, type); 2141 b43_nphy_rssi_select(dev, 5, type);
@@ -1880,6 +2179,14 @@ static int b43_nphy_poll_rssi(struct b43_wldev *dev, u8 type, s32 *buf,
1880 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]); 2179 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[5]);
1881 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]); 2180 b43_phy_write(dev, B43_NPHY_TXF_40CO_B1S0, save_regs_phy[6]);
1882 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]); 2181 b43_phy_write(dev, B43_NPHY_TXF_40CO_B32S1, save_regs_phy[7]);
2182 } else if (dev->phy.rev == 2) {
2183 b43_phy_write(dev, B43_NPHY_AFECTL_C1, save_regs_phy[0]);
2184 b43_phy_write(dev, B43_NPHY_AFECTL_C2, save_regs_phy[1]);
2185 b43_phy_write(dev, B43_NPHY_AFECTL_OVER, save_regs_phy[2]);
2186 b43_phy_write(dev, B43_NPHY_RFCTL_CMD, save_regs_phy[3]);
2187 b43_phy_write(dev, B43_NPHY_RFCTL_OVER, save_regs_phy[4]);
2188 b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO1, save_regs_phy[5]);
2189 b43_phy_write(dev, B43_NPHY_RFCTL_RSSIO2, save_regs_phy[6]);
1883 } 2190 }
1884 2191
1885 return out; 2192 return out;
@@ -1894,7 +2201,10 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1894 u16 class, override; 2201 u16 class, override;
1895 u8 regs_save_radio[2]; 2202 u8 regs_save_radio[2];
1896 u16 regs_save_phy[2]; 2203 u16 regs_save_phy[2];
2204
1897 s8 offset[4]; 2205 s8 offset[4];
2206 u8 core;
2207 u8 rail;
1898 2208
1899 u16 clip_state[2]; 2209 u16 clip_state[2];
1900 u16 clip_off[2] = { 0xFFFF, 0xFFFF }; 2210 u16 clip_off[2] = { 0xFFFF, 0xFFFF };
@@ -1995,16 +2305,15 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
1995 if (results_min[i] == 248) 2305 if (results_min[i] == 248)
1996 offset[i] = code - 32; 2306 offset[i] = code - 32;
1997 2307
1998 if (i % 2 == 0) 2308 core = (i / 2) ? 2 : 1;
1999 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 1, 0, 2309 rail = (i % 2) ? 1 : 0;
2000 type); 2310
2001 else 2311 b43_nphy_scale_offset_rssi(dev, 0, offset[i], core, rail,
2002 b43_nphy_scale_offset_rssi(dev, 0, offset[i], 2, 1, 2312 type);
2003 type);
2004 } 2313 }
2005 2314
2006 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]); 2315 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[0]);
2007 b43_radio_maskset(dev, B2055_C1_PD_RSSIMISC, 0xF8, state[1]); 2316 b43_radio_maskset(dev, B2055_C2_PD_RSSIMISC, 0xF8, state[1]);
2008 2317
2009 switch (state[2]) { 2318 switch (state[2]) {
2010 case 1: 2319 case 1:
@@ -2042,6 +2351,9 @@ static void b43_nphy_rev2_rssi_cal(struct b43_wldev *dev, u8 type)
2042 2351
2043 b43_nphy_classifier(dev, 7, class); 2352 b43_nphy_classifier(dev, 7, class);
2044 b43_nphy_write_clip_detection(dev, clip_state); 2353 b43_nphy_write_clip_detection(dev, clip_state);
2354 /* Specs don't say about reset here, but it makes wl and b43 dumps
2355 identical, it really seems wl performs this */
2356 b43_nphy_reset_cca(dev);
2045} 2357}
2046 2358
2047/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */ 2359/* http://bcm-v4.sipsolutions.net/802.11/PHY/N/RSSICalRev3 */
@@ -2059,9 +2371,9 @@ static void b43_nphy_rssi_cal(struct b43_wldev *dev)
 	if (dev->phy.rev >= 3) {
 		b43_nphy_rev3_rssi_cal(dev);
 	} else {
-		b43_nphy_rev2_rssi_cal(dev, 2);
-		b43_nphy_rev2_rssi_cal(dev, 0);
-		b43_nphy_rev2_rssi_cal(dev, 1);
+		b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Z);
+		b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_X);
+		b43_nphy_rev2_rssi_cal(dev, B43_NPHY_RSSI_Y);
 	}
 }
 
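The named RSSI types used here and in b43_nphy_scale_offset_rssi() replace the former magic numbers: Z was 2, X was 0, Y was 1, with PWRDET, TSSI_I, TSSI_Q and TBD at 3 through 6. The enum itself is not part of this diff; inferred from those replaced constants it presumably looks like the sketch below (the actual definition lives with the N-PHY code and its spelling there may differ):

	/* Sketch inferred from the numeric values this patch replaces;
	 * not copied from the driver headers. */
	enum b43_nphy_rssi_type {
		B43_NPHY_RSSI_X = 0,
		B43_NPHY_RSSI_Y,
		B43_NPHY_RSSI_Z,
		B43_NPHY_RSSI_PWRDET,
		B43_NPHY_RSSI_TSSI_I,
		B43_NPHY_RSSI_TSSI_Q,
		B43_NPHY_RSSI_TBD,
	};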
@@ -2295,7 +2607,7 @@ static void b43_nphy_int_pa_set_tx_dig_filters(struct b43_wldev *dev)
 {
 	int i, j;
 	/* B43_NPHY_TXF_20CO_S0A1, B43_NPHY_TXF_40CO_S0A1, unknown */
-	u16 offset[] = { 0x186, 0x195, 0x2C5 };
+	static const u16 offset[] = { 0x186, 0x195, 0x2C5 };
 
 	for (i = 0; i < 3; i++)
 		for (j = 0; j < 15; j++)
@@ -2327,7 +2639,7 @@ static struct nphy_txgains b43_nphy_get_tx_gains(struct b43_wldev *dev)
 	struct nphy_txgains target;
 	const u32 *table = NULL;
 
-	if (nphy->txpwrctrl == 0) {
+	if (!nphy->txpwrctrl) {
 		int i;
 
 		if (nphy->hang_avoid)
@@ -2884,7 +3196,7 @@ static int b43_nphy_rev2_cal_rx_iq(struct b43_wldev *dev,
 	u8 rfctl[2];
 	u8 afectl_core;
 	u16 tmp[6];
-	u16 cur_hpf1, cur_hpf2, cur_lna;
+	u16 uninitialized_var(cur_hpf1), uninitialized_var(cur_hpf2), cur_lna;
 	u32 real, imag;
 	enum ieee80211_band band;
 
@@ -3077,9 +3389,9 @@ static void b43_nphy_mac_phy_clock_set(struct b43_wldev *dev, bool on)
 {
 	u32 tmslow = ssb_read32(dev->dev, SSB_TMSLOW);
 	if (on)
-		tmslow |= SSB_TMSLOW_PHYCLK;
+		tmslow |= B43_TMSLOW_MACPHYCLKEN;
 	else
-		tmslow &= ~SSB_TMSLOW_PHYCLK;
+		tmslow &= ~B43_TMSLOW_MACPHYCLKEN;
 	ssb_write32(dev->dev, SSB_TMSLOW, tmslow);
 }
 
@@ -3088,7 +3400,7 @@ static void b43_nphy_set_rx_core_state(struct b43_wldev *dev, u8 mask)
 {
 	struct b43_phy *phy = &dev->phy;
 	struct b43_phy_n *nphy = phy->n;
-	u16 buf[16];
+	/* u16 buf[16]; it's rev3+ */
 
 	nphy->phyrxchain = mask;
 
@@ -3232,10 +3544,12 @@ int b43_phy_initn(struct b43_wldev *dev)
 
 	b43_nphy_classifier(dev, 0, 0);
 	b43_nphy_read_clip_detection(dev, clip);
+	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ)
+		b43_nphy_bphy_init(dev);
+
 	tx_pwr_state = nphy->txpwrctrl;
-	/* TODO N PHY TX power control with argument 0
-		(turning off power control) */
-	/* TODO Fix the TX Power Settings */
+	b43_nphy_tx_power_ctrl(dev, false);
+	b43_nphy_tx_power_fix(dev);
 	/* TODO N PHY TX Power Control Idle TSSI */
 	/* TODO N PHY TX Power Control Setup */
 
@@ -3292,21 +3606,18 @@ int b43_phy_initn(struct b43_wldev *dev)
3292 /* TODO N PHY Pre Calibrate TX Gain */ 3606 /* TODO N PHY Pre Calibrate TX Gain */
3293 target = b43_nphy_get_tx_gains(dev); 3607 target = b43_nphy_get_tx_gains(dev);
3294 } 3608 }
3295 } 3609 if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false))
3610 if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
3611 b43_nphy_save_cal(dev);
3612 } else if (nphy->mphase_cal_phase_id == 0)
3613 ;/* N PHY Periodic Calibration with arg 3 */
3614 } else {
3615 b43_nphy_restore_cal(dev);
3296 } 3616 }
3297 } 3617 }
3298 3618
3299 if (!b43_nphy_cal_tx_iq_lo(dev, target, true, false)) {
3300 if (b43_nphy_cal_rx_iq(dev, target, 2, 0) == 0)
3301 b43_nphy_save_cal(dev);
3302 else if (nphy->mphase_cal_phase_id == 0)
3303 ;/* N PHY Periodic Calibration with argument 3 */
3304 } else {
3305 b43_nphy_restore_cal(dev);
3306 }
3307
3308 b43_nphy_tx_pwr_ctrl_coef_setup(dev); 3619 b43_nphy_tx_pwr_ctrl_coef_setup(dev);
3309 /* TODO N PHY TX Power Control Enable with argument tx_pwr_state */ 3620 b43_nphy_tx_power_ctrl(dev, tx_pwr_state);
3310 b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015); 3621 b43_phy_write(dev, B43_NPHY_TXMACIF_HOLDOFF, 0x0015);
3311 b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320); 3622 b43_phy_write(dev, B43_NPHY_TXMACDELAY, 0x0320);
3312 if (phy->rev >= 3 && phy->rev <= 6) 3623 if (phy->rev >= 3 && phy->rev <= 6)
@@ -3315,7 +3626,6 @@ int b43_phy_initn(struct b43_wldev *dev)
 	if (phy->rev >= 3)
 		b43_nphy_spur_workaround(dev);
 
-	b43err(dev->wl, "IEEE 802.11n devices are not supported, yet.\n");
 	return 0;
 }
 
@@ -3357,7 +3667,7 @@ static void b43_nphy_channel_setup(struct b43_wldev *dev,
 		b43_phy_mask(dev, B43_PHY_B_TEST, ~0x840);
 	}
 
-	if (nphy->txpwrctrl)
+	if (!nphy->txpwrctrl)
 		b43_nphy_tx_power_fix(dev);
 
 	if (dev->phy.rev < 3)
@@ -3381,7 +3691,6 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
 				enum nl80211_channel_type channel_type)
 {
 	struct b43_phy *phy = &dev->phy;
-	struct b43_phy_n *nphy = dev->phy.n;
 
 	const struct b43_nphy_channeltab_entry_rev2 *tabent_r2;
 	const struct b43_nphy_channeltab_entry_rev3 *tabent_r3;
@@ -3391,7 +3700,6 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
 	if (dev->phy.rev >= 3) {
 		tabent_r3 = b43_nphy_get_chantabent_rev3(dev,
 					channel->center_freq);
-		tabent_r3 = NULL;
 		if (!tabent_r3)
 			return -ESRCH;
 	} else {
@@ -3420,7 +3728,7 @@ static int b43_nphy_set_channel(struct b43_wldev *dev,
 	if (dev->phy.rev >= 3) {
 		tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 4 : 0;
 		b43_radio_maskset(dev, 0x08, 0xFFFB, tmp);
-		/* TODO: PHY Radio2056 Setup (dev, tabent_r3); */
+		b43_radio_2056_setup(dev, tabent_r3);
 		b43_nphy_channel_setup(dev, &(tabent_r3->phy_regs), channel);
 	} else {
 		tmp = (channel->band == IEEE80211_BAND_5GHZ) ? 0x0020 : 0x0050;
@@ -3451,7 +3759,11 @@ static void b43_nphy_op_prepare_structs(struct b43_wldev *dev)
 
 	memset(nphy, 0, sizeof(*nphy));
 
-	//TODO init struct b43_phy_n
+	nphy->hang_avoid = (phy->rev == 3 || phy->rev == 4);
+	nphy->gain_boost = true; /* this way we follow wl, assume it is true */
+	nphy->txrx_chain = 2; /* sth different than 0 and 1 for now */
+	nphy->phyrxchain = 3; /* to avoid b43_nphy_set_rx_core_state like wl */
+	nphy->perical = 2; /* avoid additional rssi cal on init (like wl) */
 }
 
 static void b43_nphy_op_free(struct b43_wldev *dev)
@@ -3500,6 +3812,15 @@ static void b43_nphy_op_write(struct b43_wldev *dev, u16 reg, u16 value)
 	b43_write16(dev, B43_MMIO_PHY_DATA, value);
 }
 
+static void b43_nphy_op_maskset(struct b43_wldev *dev, u16 reg, u16 mask,
+				u16 set)
+{
+	check_phyreg(dev, reg);
+	b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
+	b43_write16(dev, B43_MMIO_PHY_DATA,
+		    (b43_read16(dev, B43_MMIO_PHY_DATA) & mask) | set);
+}
+
 static u16 b43_nphy_op_radio_read(struct b43_wldev *dev, u16 reg)
 {
@@ -3524,8 +3845,6 @@ static void b43_nphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
 static void b43_nphy_op_software_rfkill(struct b43_wldev *dev,
 					bool blocked)
 {
-	struct b43_phy_n *nphy = dev->phy.n;
-
 	if (b43_read32(dev, B43_MMIO_MACCTL) & B43_MACCTL_ENABLED)
 		b43err(dev->wl, "MAC not suspended\n");
 
@@ -3596,6 +3915,7 @@ const struct b43_phy_operations b43_phyops_n = {
 	.init = b43_nphy_op_init,
 	.phy_read = b43_nphy_op_read,
 	.phy_write = b43_nphy_op_write,
+	.phy_maskset = b43_nphy_op_maskset,
 	.radio_read = b43_nphy_op_radio_read,
 	.radio_write = b43_nphy_op_radio_write,
 	.software_rfkill = b43_nphy_op_software_rfkill,
diff --git a/drivers/net/wireless/b43/phy_n.h b/drivers/net/wireless/b43/phy_n.h
index c144e59a708b..001e841f118c 100644
--- a/drivers/net/wireless/b43/phy_n.h
+++ b/drivers/net/wireless/b43/phy_n.h
@@ -782,7 +782,7 @@ struct b43_phy_n {
 	u16 mphase_txcal_numcmds;
 	u16 mphase_txcal_bestcoeffs[11];
 
-	u8 txpwrctrl;
+	bool txpwrctrl;
 	u16 txcal_bbmult;
 	u16 txiqlocal_bestc[11];
 	bool txiqlocal_coeffsvalid;
diff --git a/drivers/net/wireless/b43/radio_2055.c b/drivers/net/wireless/b43/radio_2055.c
index 1b5316586cbf..44c6dea66882 100644
--- a/drivers/net/wireless/b43/radio_2055.c
+++ b/drivers/net/wireless/b43/radio_2055.c
@@ -244,7 +244,7 @@ static const struct b2055_inittab_entry b2055_inittab [] = {
 	[0xCB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[0xCC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2055_C1_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
-	[0xCE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+	[0xCE] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
 	[0xCF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[0xD0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[0xD1] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
@@ -256,7 +256,7 @@ static const struct b2055_inittab_entry b2055_inittab [] = {
 	[0xD7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[0xD8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[B2055_C2_LNA_GAINBST] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
-	[0xDA] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
+	[0xDA] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
 	[0xDB] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[0xDC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
 	[0xDD] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
@@ -304,178 +304,178 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
304 { .channel = 184, 304 { .channel = 184,
305 .freq = 4920, /* MHz */ 305 .freq = 4920, /* MHz */
306 .unk2 = 3280, 306 .unk2 = 3280,
307 RADIOREGS(0x71, 0x01, 0xEC, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 307 RADIOREGS(0x71, 0xEC, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
308 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F, 308 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
309 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 309 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
310 PHYREGS(0xB407, 0xB007, 0xAC07, 0x1402, 0x1502, 0x1602), 310 PHYREGS(0x07B4, 0x07B0, 0x07AC, 0x0214, 0x0215, 0x0216),
311 }, 311 },
312 { .channel = 186, 312 { .channel = 186,
313 .freq = 4930, /* MHz */ 313 .freq = 4930, /* MHz */
314 .unk2 = 3287, 314 .unk2 = 3287,
315 RADIOREGS(0x71, 0x01, 0xED, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 315 RADIOREGS(0x71, 0xED, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
316 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F, 316 0x00, 0x8F, 0xFF, 0xFF, 0xFF, 0x00, 0x0F, 0x0F,
317 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 317 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
318 PHYREGS(0xB807, 0xB407, 0xB007, 0x1302, 0x1402, 0x1502), 318 PHYREGS(0x07B8, 0x07B4, 0x07B0, 0x0213, 0x0214, 0x0215),
319 }, 319 },
320 { .channel = 188, 320 { .channel = 188,
321 .freq = 4940, /* MHz */ 321 .freq = 4940, /* MHz */
322 .unk2 = 3293, 322 .unk2 = 3293,
323 RADIOREGS(0x71, 0x01, 0xEE, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 323 RADIOREGS(0x71, 0xEE, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
324 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, 324 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
325 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 325 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
326 PHYREGS(0xBC07, 0xB807, 0xB407, 0x1202, 0x1302, 0x1402), 326 PHYREGS(0x07BC, 0x07B8, 0x07B4, 0x0212, 0x0213, 0x0214),
327 }, 327 },
328 { .channel = 190, 328 { .channel = 190,
329 .freq = 4950, /* MHz */ 329 .freq = 4950, /* MHz */
330 .unk2 = 3300, 330 .unk2 = 3300,
331 RADIOREGS(0x71, 0x01, 0xEF, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 331 RADIOREGS(0x71, 0xEF, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
332 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, 332 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
333 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 333 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
334 PHYREGS(0xC007, 0xBC07, 0xB807, 0x1102, 0x1202, 0x1302), 334 PHYREGS(0x07C0, 0x07BC, 0x07B8, 0x0211, 0x0212, 0x0213),
335 }, 335 },
336 { .channel = 192, 336 { .channel = 192,
337 .freq = 4960, /* MHz */ 337 .freq = 4960, /* MHz */
338 .unk2 = 3307, 338 .unk2 = 3307,
339 RADIOREGS(0x71, 0x01, 0xF0, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 339 RADIOREGS(0x71, 0xF0, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
340 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, 340 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
341 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 341 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
342 PHYREGS(0xC407, 0xC007, 0xBC07, 0x0F02, 0x1102, 0x1202), 342 PHYREGS(0x07C4, 0x07C0, 0x07BC, 0x020F, 0x0211, 0x0212),
343 }, 343 },
344 { .channel = 194, 344 { .channel = 194,
345 .freq = 4970, /* MHz */ 345 .freq = 4970, /* MHz */
346 .unk2 = 3313, 346 .unk2 = 3313,
347 RADIOREGS(0x71, 0x01, 0xF1, 0x0F, 0xFF, 0x01, 0x04, 0x0A, 347 RADIOREGS(0x71, 0xF1, 0x01, 0x0F, 0xFF, 0x01, 0x04, 0x0A,
348 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F, 348 0x00, 0x8F, 0xEE, 0xEE, 0xFF, 0x00, 0x0F, 0x0F,
349 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 349 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
350 PHYREGS(0xC807, 0xC407, 0xC007, 0x0E02, 0x0F02, 0x1102), 350 PHYREGS(0x07C8, 0x07C4, 0x07C0, 0x020E, 0x020F, 0x0211),
351 }, 351 },
352 { .channel = 196, 352 { .channel = 196,
353 .freq = 4980, /* MHz */ 353 .freq = 4980, /* MHz */
354 .unk2 = 3320, 354 .unk2 = 3320,
355 RADIOREGS(0x71, 0x01, 0xF2, 0x0E, 0xFF, 0x01, 0x04, 0x0A, 355 RADIOREGS(0x71, 0xF2, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
356 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, 356 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
357 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 357 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
358 PHYREGS(0xCC07, 0xC807, 0xC407, 0x0D02, 0x0E02, 0x0F02), 358 PHYREGS(0x07CC, 0x07C8, 0x07C4, 0x020D, 0x020E, 0x020F),
359 }, 359 },
360 { .channel = 198, 360 { .channel = 198,
361 .freq = 4990, /* MHz */ 361 .freq = 4990, /* MHz */
362 .unk2 = 3327, 362 .unk2 = 3327,
363 RADIOREGS(0x71, 0x01, 0xF3, 0x0E, 0xFF, 0x01, 0x04, 0x0A, 363 RADIOREGS(0x71, 0xF3, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
364 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, 364 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
365 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 365 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
366 PHYREGS(0xD007, 0xCC07, 0xC807, 0x0C02, 0x0D02, 0x0E02), 366 PHYREGS(0x07D0, 0x07CC, 0x07C8, 0x020C, 0x020D, 0x020E),
367 }, 367 },
368 { .channel = 200, 368 { .channel = 200,
369 .freq = 5000, /* MHz */ 369 .freq = 5000, /* MHz */
370 .unk2 = 3333, 370 .unk2 = 3333,
371 RADIOREGS(0x71, 0x01, 0xF4, 0x0E, 0xFF, 0x01, 0x04, 0x0A, 371 RADIOREGS(0x71, 0xF4, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
372 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, 372 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
373 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 373 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
374 PHYREGS(0xD407, 0xD007, 0xCC07, 0x0B02, 0x0C02, 0x0D02), 374 PHYREGS(0x07D4, 0x07D0, 0x07CC, 0x020B, 0x020C, 0x020D),
375 }, 375 },
376 { .channel = 202, 376 { .channel = 202,
377 .freq = 5010, /* MHz */ 377 .freq = 5010, /* MHz */
378 .unk2 = 3340, 378 .unk2 = 3340,
379 RADIOREGS(0x71, 0x01, 0xF5, 0x0E, 0xFF, 0x01, 0x04, 0x0A, 379 RADIOREGS(0x71, 0xF5, 0x01, 0x0E, 0xFF, 0x01, 0x04, 0x0A,
380 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F, 380 0x00, 0x8F, 0xDD, 0xDD, 0xFF, 0x00, 0x0F, 0x0F,
381 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 381 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
382 PHYREGS(0xD807, 0xD407, 0xD007, 0x0A02, 0x0B02, 0x0C02), 382 PHYREGS(0x07D8, 0x07D4, 0x07D0, 0x020A, 0x020B, 0x020C),
383 }, 383 },
384 { .channel = 204, 384 { .channel = 204,
385 .freq = 5020, /* MHz */ 385 .freq = 5020, /* MHz */
386 .unk2 = 3347, 386 .unk2 = 3347,
387 RADIOREGS(0x71, 0x01, 0xF6, 0x0E, 0xF7, 0x01, 0x04, 0x0A, 387 RADIOREGS(0x71, 0xF6, 0x01, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
388 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, 388 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
389 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 389 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
390 PHYREGS(0xDC07, 0xD807, 0xD407, 0x0902, 0x0A02, 0x0B02), 390 PHYREGS(0x07DC, 0x07D8, 0x07D4, 0x0209, 0x020A, 0x020B),
391 }, 391 },
392 { .channel = 206, 392 { .channel = 206,
393 .freq = 5030, /* MHz */ 393 .freq = 5030, /* MHz */
394 .unk2 = 3353, 394 .unk2 = 3353,
395 RADIOREGS(0x71, 0x01, 0xF7, 0x0E, 0xF7, 0x01, 0x04, 0x0A, 395 RADIOREGS(0x71, 0xF7, 0x01, 0x0E, 0xF7, 0x01, 0x04, 0x0A,
396 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, 396 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
397 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 397 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
398 PHYREGS(0xE007, 0xDC07, 0xD807, 0x0802, 0x0902, 0x0A02), 398 PHYREGS(0x07E0, 0x07DC, 0x07D8, 0x0208, 0x0209, 0x020A),
399 }, 399 },
400 { .channel = 208, 400 { .channel = 208,
401 .freq = 5040, /* MHz */ 401 .freq = 5040, /* MHz */
402 .unk2 = 3360, 402 .unk2 = 3360,
403 RADIOREGS(0x71, 0x01, 0xF8, 0x0D, 0xEF, 0x01, 0x04, 0x0A, 403 RADIOREGS(0x71, 0xF8, 0x01, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
404 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, 404 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
405 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 405 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
406 PHYREGS(0xE407, 0xE007, 0xDC07, 0x0702, 0x0802, 0x0902), 406 PHYREGS(0x07E4, 0x07E0, 0x07DC, 0x0207, 0x0208, 0x0209),
407 }, 407 },
408 { .channel = 210, 408 { .channel = 210,
409 .freq = 5050, /* MHz */ 409 .freq = 5050, /* MHz */
410 .unk2 = 3367, 410 .unk2 = 3367,
411 RADIOREGS(0x71, 0x01, 0xF9, 0x0D, 0xEF, 0x01, 0x04, 0x0A, 411 RADIOREGS(0x71, 0xF9, 0x01, 0x0D, 0xEF, 0x01, 0x04, 0x0A,
412 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F, 412 0x00, 0x8F, 0xCC, 0xCC, 0xFF, 0x00, 0x0F, 0x0F,
413 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F), 413 0x8F, 0xFF, 0x00, 0x0F, 0x0F, 0x8F),
414 PHYREGS(0xE807, 0xE407, 0xE007, 0x0602, 0x0702, 0x0802), 414 PHYREGS(0x07E8, 0x07E4, 0x07E0, 0x0206, 0x0207, 0x0208),
415 }, 415 },
416 { .channel = 212, 416 { .channel = 212,
417 .freq = 5060, /* MHz */ 417 .freq = 5060, /* MHz */
418 .unk2 = 3373, 418 .unk2 = 3373,
419 RADIOREGS(0x71, 0x01, 0xFA, 0x0D, 0xE6, 0x01, 0x04, 0x0A, 419 RADIOREGS(0x71, 0xFA, 0x01, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
420 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F, 420 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
421 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E), 421 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
422 PHYREGS(0xEC07, 0xE807, 0xE407, 0x0502, 0x0602, 0x0702), 422 PHYREGS(0x07EC, 0x07E8, 0x07E4, 0x0205, 0x0206, 0x0207),
423 }, 423 },
424 { .channel = 214, 424 { .channel = 214,
425 .freq = 5070, /* MHz */ 425 .freq = 5070, /* MHz */
426 .unk2 = 3380, 426 .unk2 = 3380,
427 RADIOREGS(0x71, 0x01, 0xFB, 0x0D, 0xE6, 0x01, 0x04, 0x0A, 427 RADIOREGS(0x71, 0xFB, 0x01, 0x0D, 0xE6, 0x01, 0x04, 0x0A,
428 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F, 428 0x00, 0x8F, 0xBB, 0xBB, 0xFF, 0x00, 0x0E, 0x0F,
429 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E), 429 0x8E, 0xFF, 0x00, 0x0E, 0x0F, 0x8E),
430 PHYREGS(0xF007, 0xEC07, 0xE807, 0x0402, 0x0502, 0x0602), 430 PHYREGS(0x07F0, 0x07EC, 0x07E8, 0x0204, 0x0205, 0x0206),
431 }, 431 },
432 { .channel = 216, 432 { .channel = 216,
433 .freq = 5080, /* MHz */ 433 .freq = 5080, /* MHz */
434 .unk2 = 3387, 434 .unk2 = 3387,
435 RADIOREGS(0x71, 0x01, 0xFC, 0x0D, 0xDE, 0x01, 0x04, 0x0A, 435 RADIOREGS(0x71, 0xFC, 0x01, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
436 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F, 436 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
437 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D), 437 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
438 PHYREGS(0xF407, 0xF007, 0xEC07, 0x0302, 0x0402, 0x0502), 438 PHYREGS(0x07F4, 0x07F0, 0x07EC, 0x0203, 0x0204, 0x0205),
439 }, 439 },
440 { .channel = 218, 440 { .channel = 218,
441 .freq = 5090, /* MHz */ 441 .freq = 5090, /* MHz */
442 .unk2 = 3393, 442 .unk2 = 3393,
443 RADIOREGS(0x71, 0x01, 0xFD, 0x0D, 0xDE, 0x01, 0x04, 0x0A, 443 RADIOREGS(0x71, 0xFD, 0x01, 0x0D, 0xDE, 0x01, 0x04, 0x0A,
444 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F, 444 0x00, 0x8E, 0xBB, 0xBB, 0xEE, 0x00, 0x0E, 0x0F,
445 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D), 445 0x8D, 0xEE, 0x00, 0x0E, 0x0F, 0x8D),
446 PHYREGS(0xF807, 0xF407, 0xF007, 0x0202, 0x0302, 0x0402), 446 PHYREGS(0x07F8, 0x07F4, 0x07F0, 0x0202, 0x0203, 0x0204),
447 }, 447 },
448 { .channel = 220, 448 { .channel = 220,
449 .freq = 5100, /* MHz */ 449 .freq = 5100, /* MHz */
450 .unk2 = 3400, 450 .unk2 = 3400,
451 RADIOREGS(0x71, 0x01, 0xFE, 0x0C, 0xD6, 0x01, 0x04, 0x0A, 451 RADIOREGS(0x71, 0xFE, 0x01, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
452 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F, 452 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
453 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D), 453 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
454 PHYREGS(0xFC07, 0xF807, 0xF407, 0x0102, 0x0202, 0x0302), 454 PHYREGS(0x07FC, 0x07F8, 0x07F4, 0x0201, 0x0202, 0x0203),
455 }, 455 },
456 { .channel = 222, 456 { .channel = 222,
457 .freq = 5110, /* MHz */ 457 .freq = 5110, /* MHz */
458 .unk2 = 3407, 458 .unk2 = 3407,
459 RADIOREGS(0x71, 0x01, 0xFF, 0x0C, 0xD6, 0x01, 0x04, 0x0A, 459 RADIOREGS(0x71, 0xFF, 0x01, 0x0C, 0xD6, 0x01, 0x04, 0x0A,
460 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F, 460 0x00, 0x8E, 0xAA, 0xAA, 0xEE, 0x00, 0x0D, 0x0F,
461 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D), 461 0x8D, 0xEE, 0x00, 0x0D, 0x0F, 0x8D),
462 PHYREGS(0x0008, 0xFC07, 0xF807, 0x0002, 0x0102, 0x0202), 462 PHYREGS(0x0800, 0x07FC, 0x07F8, 0x0200, 0x0201, 0x0202),
463 }, 463 },
464 { .channel = 224, 464 { .channel = 224,
465 .freq = 5120, /* MHz */ 465 .freq = 5120, /* MHz */
466 .unk2 = 3413, 466 .unk2 = 3413,
467 RADIOREGS(0x71, 0x02, 0x00, 0x0C, 0xCE, 0x01, 0x04, 0x0A, 467 RADIOREGS(0x71, 0x00, 0x02, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
468 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F, 468 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
469 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C), 469 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
470 PHYREGS(0x0408, 0x0008, 0xFC07, 0xFF01, 0x0002, 0x0102), 470 PHYREGS(0x0804, 0x0800, 0x07FC, 0x01FF, 0x0200, 0x0201),
471 }, 471 },
472 { .channel = 226, 472 { .channel = 226,
473 .freq = 5130, /* MHz */ 473 .freq = 5130, /* MHz */
474 .unk2 = 3420, 474 .unk2 = 3420,
475 RADIOREGS(0x71, 0x02, 0x01, 0x0C, 0xCE, 0x01, 0x04, 0x0A, 475 RADIOREGS(0x71, 0x01, 0x02, 0x0C, 0xCE, 0x01, 0x04, 0x0A,
476 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F, 476 0x00, 0x8D, 0xAA, 0xAA, 0xDD, 0x00, 0x0D, 0x0F,
477 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C), 477 0x8C, 0xDD, 0x00, 0x0D, 0x0F, 0x8C),
478 PHYREGS(0x0808, 0x0408, 0x0008, 0xFE01, 0xFF01, 0x0002), 478 PHYREGS(0x0808, 0x0804, 0x0800, 0x01FE, 0x01FF, 0x0200),
479 }, 479 },
480 { .channel = 228, 480 { .channel = 228,
481 .freq = 5140, /* MHz */ 481 .freq = 5140, /* MHz */
@@ -483,815 +483,815 @@ static const struct b43_nphy_channeltab_entry_rev2 b43_nphy_channeltab_rev2[] =
483 RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A, 483 RADIOREGS(0x71, 0x02, 0x02, 0x0C, 0xC6, 0x01, 0x04, 0x0A,
484 0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E, 484 0x00, 0x8D, 0x99, 0x99, 0xDD, 0x00, 0x0C, 0x0E,
485 0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B), 485 0x8B, 0xDD, 0x00, 0x0C, 0x0E, 0x8B),
486 PHYREGS(0x0C08, 0x0808, 0x0408, 0xFD01, 0xFE01, 0xFF01), 486 PHYREGS(0x080C, 0x0808, 0x0804, 0x01FD, 0x01FE, 0x01FF),
487 }, 487 },
488 { .channel = 32, 488 { .channel = 32,
489 .freq = 5160, /* MHz */ 489 .freq = 5160, /* MHz */
490 .unk2 = 3440, 490 .unk2 = 3440,
491 RADIOREGS(0x71, 0x02, 0x04, 0x0B, 0xBE, 0x01, 0x04, 0x0A, 491 RADIOREGS(0x71, 0x04, 0x02, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
492 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D, 492 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
493 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A), 493 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
494 PHYREGS(0x1408, 0x1008, 0x0C08, 0xFB01, 0xFC01, 0xFD01), 494 PHYREGS(0x0814, 0x0810, 0x080C, 0x01FB, 0x01FC, 0x01FD),
495 }, 495 },
496 { .channel = 34, 496 { .channel = 34,
497 .freq = 5170, /* MHz */ 497 .freq = 5170, /* MHz */
498 .unk2 = 3447, 498 .unk2 = 3447,
499 RADIOREGS(0x71, 0x02, 0x05, 0x0B, 0xBE, 0x01, 0x04, 0x0A, 499 RADIOREGS(0x71, 0x05, 0x02, 0x0B, 0xBE, 0x01, 0x04, 0x0A,
500 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D, 500 0x00, 0x8C, 0x99, 0x99, 0xCC, 0x00, 0x0B, 0x0D,
501 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A), 501 0x8A, 0xCC, 0x00, 0x0B, 0x0D, 0x8A),
502 PHYREGS(0x1808, 0x1408, 0x1008, 0xFA01, 0xFB01, 0xFC01), 502 PHYREGS(0x0818, 0x0814, 0x0810, 0x01FA, 0x01FB, 0x01FC),
503 }, 503 },
504 { .channel = 36, 504 { .channel = 36,
505 .freq = 5180, /* MHz */ 505 .freq = 5180, /* MHz */
506 .unk2 = 3453, 506 .unk2 = 3453,
507 RADIOREGS(0x71, 0x02, 0x06, 0x0B, 0xB6, 0x01, 0x04, 0x0A, 507 RADIOREGS(0x71, 0x06, 0x02, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
508 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C, 508 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
509 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89), 509 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
510 PHYREGS(0x1C08, 0x1808, 0x1408, 0xF901, 0xFA01, 0xFB01), 510 PHYREGS(0x081C, 0x0818, 0x0814, 0x01F9, 0x01FA, 0x01FB),
511 }, 511 },
512 { .channel = 38, 512 { .channel = 38,
513 .freq = 5190, /* MHz */ 513 .freq = 5190, /* MHz */
514 .unk2 = 3460, 514 .unk2 = 3460,
515 RADIOREGS(0x71, 0x02, 0x07, 0x0B, 0xB6, 0x01, 0x04, 0x0A, 515 RADIOREGS(0x71, 0x07, 0x02, 0x0B, 0xB6, 0x01, 0x04, 0x0A,
516 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C, 516 0x00, 0x8C, 0x88, 0x88, 0xCC, 0x00, 0x0B, 0x0C,
517 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89), 517 0x89, 0xCC, 0x00, 0x0B, 0x0C, 0x89),
518 PHYREGS(0x2008, 0x1C08, 0x1808, 0xF801, 0xF901, 0xFA01), 518 PHYREGS(0x0820, 0x081C, 0x0818, 0x01F8, 0x01F9, 0x01FA),
519 }, 519 },
520 { .channel = 40, 520 { .channel = 40,
521 .freq = 5200, /* MHz */ 521 .freq = 5200, /* MHz */
522 .unk2 = 3467, 522 .unk2 = 3467,
523 RADIOREGS(0x71, 0x02, 0x08, 0x0B, 0xAF, 0x01, 0x04, 0x0A, 523 RADIOREGS(0x71, 0x08, 0x02, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
524 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B, 524 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
525 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89), 525 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
526 PHYREGS(0x2408, 0x2008, 0x1C08, 0xF701, 0xF801, 0xF901), 526 PHYREGS(0x0824, 0x0820, 0x081C, 0x01F7, 0x01F8, 0x01F9),
527 }, 527 },
528 { .channel = 42, 528 { .channel = 42,
529 .freq = 5210, /* MHz */ 529 .freq = 5210, /* MHz */
530 .unk2 = 3473, 530 .unk2 = 3473,
531 RADIOREGS(0x71, 0x02, 0x09, 0x0B, 0xAF, 0x01, 0x04, 0x0A, 531 RADIOREGS(0x71, 0x09, 0x02, 0x0B, 0xAF, 0x01, 0x04, 0x0A,
532 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B, 532 0x00, 0x8B, 0x88, 0x88, 0xBB, 0x00, 0x0A, 0x0B,
533 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89), 533 0x89, 0xBB, 0x00, 0x0A, 0x0B, 0x89),
534 PHYREGS(0x2808, 0x2408, 0x2008, 0xF601, 0xF701, 0xF801), 534 PHYREGS(0x0828, 0x0824, 0x0820, 0x01F6, 0x01F7, 0x01F8),
535 }, 535 },
536 { .channel = 44, 536 { .channel = 44,
537 .freq = 5220, /* MHz */ 537 .freq = 5220, /* MHz */
538 .unk2 = 3480, 538 .unk2 = 3480,
539 RADIOREGS(0x71, 0x02, 0x0A, 0x0A, 0xA7, 0x01, 0x04, 0x0A, 539 RADIOREGS(0x71, 0x0A, 0x02, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
540 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A, 540 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
541 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88), 541 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
542 PHYREGS(0x2C08, 0x2808, 0x2408, 0xF501, 0xF601, 0xF701), 542 PHYREGS(0x082C, 0x0828, 0x0824, 0x01F5, 0x01F6, 0x01F7),
543 }, 543 },
544 { .channel = 46, 544 { .channel = 46,
545 .freq = 5230, /* MHz */ 545 .freq = 5230, /* MHz */
546 .unk2 = 3487, 546 .unk2 = 3487,
547 RADIOREGS(0x71, 0x02, 0x0B, 0x0A, 0xA7, 0x01, 0x04, 0x0A, 547 RADIOREGS(0x71, 0x0B, 0x02, 0x0A, 0xA7, 0x01, 0x04, 0x0A,
548 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A, 548 0x00, 0x8B, 0x77, 0x77, 0xBB, 0x00, 0x09, 0x0A,
549 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88), 549 0x88, 0xBB, 0x00, 0x09, 0x0A, 0x88),
550 PHYREGS(0x3008, 0x2C08, 0x2808, 0xF401, 0xF501, 0xF601), 550 PHYREGS(0x0830, 0x082C, 0x0828, 0x01F4, 0x01F5, 0x01F6),
551 }, 551 },
552 { .channel = 48, 552 { .channel = 48,
553 .freq = 5240, /* MHz */ 553 .freq = 5240, /* MHz */
554 .unk2 = 3493, 554 .unk2 = 3493,
555 RADIOREGS(0x71, 0x02, 0x0C, 0x0A, 0xA0, 0x01, 0x04, 0x0A, 555 RADIOREGS(0x71, 0x0C, 0x02, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
556 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A, 556 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
557 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87), 557 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
558 PHYREGS(0x3408, 0x3008, 0x2C08, 0xF301, 0xF401, 0xF501), 558 PHYREGS(0x0834, 0x0830, 0x082C, 0x01F3, 0x01F4, 0x01F5),
559 }, 559 },
560 { .channel = 50, 560 { .channel = 50,
561 .freq = 5250, /* MHz */ 561 .freq = 5250, /* MHz */
562 .unk2 = 3500, 562 .unk2 = 3500,
563 RADIOREGS(0x71, 0x02, 0x0D, 0x0A, 0xA0, 0x01, 0x04, 0x0A, 563 RADIOREGS(0x71, 0x0D, 0x02, 0x0A, 0xA0, 0x01, 0x04, 0x0A,
564 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A, 564 0x00, 0x8A, 0x77, 0x77, 0xAA, 0x00, 0x09, 0x0A,
565 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87), 565 0x87, 0xAA, 0x00, 0x09, 0x0A, 0x87),
566 PHYREGS(0x3808, 0x3408, 0x3008, 0xF201, 0xF301, 0xF401), 566 PHYREGS(0x0838, 0x0834, 0x0830, 0x01F2, 0x01F3, 0x01F4),
567 }, 567 },
568 { .channel = 52, 568 { .channel = 52,
569 .freq = 5260, /* MHz */ 569 .freq = 5260, /* MHz */
570 .unk2 = 3507, 570 .unk2 = 3507,
571 RADIOREGS(0x71, 0x02, 0x0E, 0x0A, 0x98, 0x01, 0x04, 0x0A, 571 RADIOREGS(0x71, 0x0E, 0x02, 0x0A, 0x98, 0x01, 0x04, 0x0A,
572 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09, 572 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
573 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87), 573 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
574 PHYREGS(0x3C08, 0x3808, 0x3408, 0xF101, 0xF201, 0xF301), 574 PHYREGS(0x083C, 0x0838, 0x0834, 0x01F1, 0x01F2, 0x01F3),
575 }, 575 },
576 { .channel = 54, 576 { .channel = 54,
577 .freq = 5270, /* MHz */ 577 .freq = 5270, /* MHz */
578 .unk2 = 3513, 578 .unk2 = 3513,
579 RADIOREGS(0x71, 0x02, 0x0F, 0x0A, 0x98, 0x01, 0x04, 0x0A, 579 RADIOREGS(0x71, 0x0F, 0x02, 0x0A, 0x98, 0x01, 0x04, 0x0A,
580 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09, 580 0x00, 0x8A, 0x66, 0x66, 0xAA, 0x00, 0x08, 0x09,
581 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87), 581 0x87, 0xAA, 0x00, 0x08, 0x09, 0x87),
582 PHYREGS(0x4008, 0x3C08, 0x3808, 0xF001, 0xF101, 0xF201), 582 PHYREGS(0x0840, 0x083C, 0x0838, 0x01F0, 0x01F1, 0x01F2),
583 }, 583 },
584 { .channel = 56, 584 { .channel = 56,
585 .freq = 5280, /* MHz */ 585 .freq = 5280, /* MHz */
586 .unk2 = 3520, 586 .unk2 = 3520,
587 RADIOREGS(0x71, 0x02, 0x10, 0x09, 0x91, 0x01, 0x04, 0x0A, 587 RADIOREGS(0x71, 0x10, 0x02, 0x09, 0x91, 0x01, 0x04, 0x0A,
588 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08, 588 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
589 0x86, 0x99, 0x00, 0x08, 0x08, 0x86), 589 0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
590 PHYREGS(0x4408, 0x4008, 0x3C08, 0xF001, 0xF001, 0xF101), 590 PHYREGS(0x0844, 0x0840, 0x083C, 0x01F0, 0x01F0, 0x01F1),
591 }, 591 },
592 { .channel = 58, 592 { .channel = 58,
593 .freq = 5290, /* MHz */ 593 .freq = 5290, /* MHz */
594 .unk2 = 3527, 594 .unk2 = 3527,
595 RADIOREGS(0x71, 0x02, 0x11, 0x09, 0x91, 0x01, 0x04, 0x0A, 595 RADIOREGS(0x71, 0x11, 0x02, 0x09, 0x91, 0x01, 0x04, 0x0A,
596 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08, 596 0x00, 0x89, 0x66, 0x66, 0x99, 0x00, 0x08, 0x08,
597 0x86, 0x99, 0x00, 0x08, 0x08, 0x86), 597 0x86, 0x99, 0x00, 0x08, 0x08, 0x86),
598 PHYREGS(0x4808, 0x4408, 0x4008, 0xEF01, 0xF001, 0xF001), 598 PHYREGS(0x0848, 0x0844, 0x0840, 0x01EF, 0x01F0, 0x01F0),
599 }, 599 },
600 { .channel = 60, 600 { .channel = 60,
601 .freq = 5300, /* MHz */ 601 .freq = 5300, /* MHz */
602 .unk2 = 3533, 602 .unk2 = 3533,
603 RADIOREGS(0x71, 0x02, 0x12, 0x09, 0x8A, 0x01, 0x04, 0x0A, 603 RADIOREGS(0x71, 0x12, 0x02, 0x09, 0x8A, 0x01, 0x04, 0x0A,
604 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07, 604 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
605 0x85, 0x99, 0x00, 0x08, 0x07, 0x85), 605 0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
606 PHYREGS(0x4C08, 0x4808, 0x4408, 0xEE01, 0xEF01, 0xF001), 606 PHYREGS(0x084C, 0x0848, 0x0844, 0x01EE, 0x01EF, 0x01F0),
607 }, 607 },
608 { .channel = 62, 608 { .channel = 62,
609 .freq = 5310, /* MHz */ 609 .freq = 5310, /* MHz */
610 .unk2 = 3540, 610 .unk2 = 3540,
611 RADIOREGS(0x71, 0x02, 0x13, 0x09, 0x8A, 0x01, 0x04, 0x0A, 611 RADIOREGS(0x71, 0x13, 0x02, 0x09, 0x8A, 0x01, 0x04, 0x0A,
612 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07, 612 0x00, 0x89, 0x55, 0x55, 0x99, 0x00, 0x08, 0x07,
613 0x85, 0x99, 0x00, 0x08, 0x07, 0x85), 613 0x85, 0x99, 0x00, 0x08, 0x07, 0x85),
614 PHYREGS(0x5008, 0x4C08, 0x4808, 0xED01, 0xEE01, 0xEF01), 614 PHYREGS(0x0850, 0x084C, 0x0848, 0x01ED, 0x01EE, 0x01EF),
615 }, 615 },
616 { .channel = 64, 616 { .channel = 64,
617 .freq = 5320, /* MHz */ 617 .freq = 5320, /* MHz */
618 .unk2 = 3547, 618 .unk2 = 3547,
619 RADIOREGS(0x71, 0x02, 0x14, 0x09, 0x83, 0x01, 0x04, 0x0A, 619 RADIOREGS(0x71, 0x14, 0x02, 0x09, 0x83, 0x01, 0x04, 0x0A,
620 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07, 620 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
621 0x84, 0x88, 0x00, 0x07, 0x07, 0x84), 621 0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
622 PHYREGS(0x5408, 0x5008, 0x4C08, 0xEC01, 0xED01, 0xEE01), 622 PHYREGS(0x0854, 0x0850, 0x084C, 0x01EC, 0x01ED, 0x01EE),
623 }, 623 },
624 { .channel = 66, 624 { .channel = 66,
625 .freq = 5330, /* MHz */ 625 .freq = 5330, /* MHz */
626 .unk2 = 3553, 626 .unk2 = 3553,
627 RADIOREGS(0x71, 0x02, 0x15, 0x09, 0x83, 0x01, 0x04, 0x0A, 627 RADIOREGS(0x71, 0x15, 0x02, 0x09, 0x83, 0x01, 0x04, 0x0A,
628 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07, 628 0x00, 0x88, 0x55, 0x55, 0x88, 0x00, 0x07, 0x07,
629 0x84, 0x88, 0x00, 0x07, 0x07, 0x84), 629 0x84, 0x88, 0x00, 0x07, 0x07, 0x84),
630 PHYREGS(0x5808, 0x5408, 0x5008, 0xEB01, 0xEC01, 0xED01), 630 PHYREGS(0x0858, 0x0854, 0x0850, 0x01EB, 0x01EC, 0x01ED),
631 }, 631 },
632 { .channel = 68, 632 { .channel = 68,
633 .freq = 5340, /* MHz */ 633 .freq = 5340, /* MHz */
634 .unk2 = 3560, 634 .unk2 = 3560,
635 RADIOREGS(0x71, 0x02, 0x16, 0x08, 0x7C, 0x01, 0x04, 0x0A, 635 RADIOREGS(0x71, 0x16, 0x02, 0x08, 0x7C, 0x01, 0x04, 0x0A,
636 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06, 636 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
637 0x84, 0x88, 0x00, 0x07, 0x06, 0x84), 637 0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
638 PHYREGS(0x5C08, 0x5808, 0x5408, 0xEA01, 0xEB01, 0xEC01), 638 PHYREGS(0x085C, 0x0858, 0x0854, 0x01EA, 0x01EB, 0x01EC),
639 }, 639 },
640 { .channel = 70, 640 { .channel = 70,
641 .freq = 5350, /* MHz */ 641 .freq = 5350, /* MHz */
642 .unk2 = 3567, 642 .unk2 = 3567,
643 RADIOREGS(0x71, 0x02, 0x17, 0x08, 0x7C, 0x01, 0x04, 0x0A, 643 RADIOREGS(0x71, 0x17, 0x02, 0x08, 0x7C, 0x01, 0x04, 0x0A,
644 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06, 644 0x00, 0x88, 0x44, 0x44, 0x88, 0x00, 0x07, 0x06,
645 0x84, 0x88, 0x00, 0x07, 0x06, 0x84), 645 0x84, 0x88, 0x00, 0x07, 0x06, 0x84),
646 PHYREGS(0x6008, 0x5C08, 0x5808, 0xE901, 0xEA01, 0xEB01), 646 PHYREGS(0x0860, 0x085C, 0x0858, 0x01E9, 0x01EA, 0x01EB),
647 }, 647 },
648 { .channel = 72, 648 { .channel = 72,
649 .freq = 5360, /* MHz */ 649 .freq = 5360, /* MHz */
650 .unk2 = 3573, 650 .unk2 = 3573,
651 RADIOREGS(0x71, 0x02, 0x18, 0x08, 0x75, 0x01, 0x04, 0x0A, 651 RADIOREGS(0x71, 0x18, 0x02, 0x08, 0x75, 0x01, 0x04, 0x0A,
652 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05, 652 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
653 0x83, 0x77, 0x00, 0x06, 0x05, 0x83), 653 0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
654 PHYREGS(0x6408, 0x6008, 0x5C08, 0xE801, 0xE901, 0xEA01), 654 PHYREGS(0x0864, 0x0860, 0x085C, 0x01E8, 0x01E9, 0x01EA),
655 }, 655 },
656 { .channel = 74, 656 { .channel = 74,
657 .freq = 5370, /* MHz */ 657 .freq = 5370, /* MHz */
658 .unk2 = 3580, 658 .unk2 = 3580,
659 RADIOREGS(0x71, 0x02, 0x19, 0x08, 0x75, 0x01, 0x04, 0x0A, 659 RADIOREGS(0x71, 0x19, 0x02, 0x08, 0x75, 0x01, 0x04, 0x0A,
660 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05, 660 0x00, 0x87, 0x44, 0x44, 0x77, 0x00, 0x06, 0x05,
661 0x83, 0x77, 0x00, 0x06, 0x05, 0x83), 661 0x83, 0x77, 0x00, 0x06, 0x05, 0x83),
662 PHYREGS(0x6808, 0x6408, 0x6008, 0xE701, 0xE801, 0xE901), 662 PHYREGS(0x0868, 0x0864, 0x0860, 0x01E7, 0x01E8, 0x01E9),
663 }, 663 },
664 { .channel = 76, 664 { .channel = 76,
665 .freq = 5380, /* MHz */ 665 .freq = 5380, /* MHz */
666 .unk2 = 3587, 666 .unk2 = 3587,
667 RADIOREGS(0x71, 0x02, 0x1A, 0x08, 0x6E, 0x01, 0x04, 0x0A, 667 RADIOREGS(0x71, 0x1A, 0x02, 0x08, 0x6E, 0x01, 0x04, 0x0A,
668 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04, 668 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
669 0x82, 0x77, 0x00, 0x06, 0x04, 0x82), 669 0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
670 PHYREGS(0x6C08, 0x6808, 0x6408, 0xE601, 0xE701, 0xE801), 670 PHYREGS(0x086C, 0x0868, 0x0864, 0x01E6, 0x01E7, 0x01E8),
671 }, 671 },
672 { .channel = 78, 672 { .channel = 78,
673 .freq = 5390, /* MHz */ 673 .freq = 5390, /* MHz */
674 .unk2 = 3593, 674 .unk2 = 3593,
675 RADIOREGS(0x71, 0x02, 0x1B, 0x08, 0x6E, 0x01, 0x04, 0x0A, 675 RADIOREGS(0x71, 0x1B, 0x02, 0x08, 0x6E, 0x01, 0x04, 0x0A,
676 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04, 676 0x00, 0x87, 0x33, 0x33, 0x77, 0x00, 0x06, 0x04,
677 0x82, 0x77, 0x00, 0x06, 0x04, 0x82), 677 0x82, 0x77, 0x00, 0x06, 0x04, 0x82),
678 PHYREGS(0x7008, 0x6C08, 0x6808, 0xE501, 0xE601, 0xE701), 678 PHYREGS(0x0870, 0x086C, 0x0868, 0x01E5, 0x01E6, 0x01E7),
679 }, 679 },
680 { .channel = 80, 680 { .channel = 80,
681 .freq = 5400, /* MHz */ 681 .freq = 5400, /* MHz */
682 .unk2 = 3600, 682 .unk2 = 3600,
683 RADIOREGS(0x71, 0x02, 0x1C, 0x07, 0x67, 0x01, 0x04, 0x0A, 683 RADIOREGS(0x71, 0x1C, 0x02, 0x07, 0x67, 0x01, 0x04, 0x0A,
684 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04, 684 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
685 0x81, 0x66, 0x00, 0x05, 0x04, 0x81), 685 0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
686 PHYREGS(0x7408, 0x7008, 0x6C08, 0xE501, 0xE501, 0xE601), 686 PHYREGS(0x0874, 0x0870, 0x086C, 0x01E5, 0x01E5, 0x01E6),
687 }, 687 },
688 { .channel = 82, 688 { .channel = 82,
689 .freq = 5410, /* MHz */ 689 .freq = 5410, /* MHz */
690 .unk2 = 3607, 690 .unk2 = 3607,
691 RADIOREGS(0x71, 0x02, 0x1D, 0x07, 0x67, 0x01, 0x04, 0x0A, 691 RADIOREGS(0x71, 0x1D, 0x02, 0x07, 0x67, 0x01, 0x04, 0x0A,
692 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04, 692 0x00, 0x86, 0x33, 0x33, 0x66, 0x00, 0x05, 0x04,
693 0x81, 0x66, 0x00, 0x05, 0x04, 0x81), 693 0x81, 0x66, 0x00, 0x05, 0x04, 0x81),
694 PHYREGS(0x7808, 0x7408, 0x7008, 0xE401, 0xE501, 0xE501), 694 PHYREGS(0x0878, 0x0874, 0x0870, 0x01E4, 0x01E5, 0x01E5),
695 }, 695 },
696 { .channel = 84, 696 { .channel = 84,
697 .freq = 5420, /* MHz */ 697 .freq = 5420, /* MHz */
698 .unk2 = 3613, 698 .unk2 = 3613,
699 RADIOREGS(0x71, 0x02, 0x1E, 0x07, 0x61, 0x01, 0x04, 0x0A, 699 RADIOREGS(0x71, 0x1E, 0x02, 0x07, 0x61, 0x01, 0x04, 0x0A,
700 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03, 700 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
701 0x80, 0x66, 0x00, 0x05, 0x03, 0x80), 701 0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
702 PHYREGS(0x7C08, 0x7808, 0x7408, 0xE301, 0xE401, 0xE501), 702 PHYREGS(0x087C, 0x0878, 0x0874, 0x01E3, 0x01E4, 0x01E5),
703 }, 703 },
704 { .channel = 86, 704 { .channel = 86,
705 .freq = 5430, /* MHz */ 705 .freq = 5430, /* MHz */
706 .unk2 = 3620, 706 .unk2 = 3620,
707 RADIOREGS(0x71, 0x02, 0x1F, 0x07, 0x61, 0x01, 0x04, 0x0A, 707 RADIOREGS(0x71, 0x1F, 0x02, 0x07, 0x61, 0x01, 0x04, 0x0A,
708 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03, 708 0x00, 0x86, 0x22, 0x22, 0x66, 0x00, 0x05, 0x03,
709 0x80, 0x66, 0x00, 0x05, 0x03, 0x80), 709 0x80, 0x66, 0x00, 0x05, 0x03, 0x80),
710 PHYREGS(0x8008, 0x7C08, 0x7808, 0xE201, 0xE301, 0xE401), 710 PHYREGS(0x0880, 0x087C, 0x0878, 0x01E2, 0x01E3, 0x01E4),
711 }, 711 },
712 { .channel = 88, 712 { .channel = 88,
713 .freq = 5440, /* MHz */ 713 .freq = 5440, /* MHz */
714 .unk2 = 3627, 714 .unk2 = 3627,
715 RADIOREGS(0x71, 0x02, 0x20, 0x07, 0x5A, 0x01, 0x04, 0x0A, 715 RADIOREGS(0x71, 0x20, 0x02, 0x07, 0x5A, 0x01, 0x04, 0x0A,
716 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02, 716 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
717 0x80, 0x55, 0x00, 0x04, 0x02, 0x80), 717 0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
718 PHYREGS(0x8408, 0x8008, 0x7C08, 0xE101, 0xE201, 0xE301), 718 PHYREGS(0x0884, 0x0880, 0x087C, 0x01E1, 0x01E2, 0x01E3),
719 }, 719 },
720 { .channel = 90, 720 { .channel = 90,
721 .freq = 5450, /* MHz */ 721 .freq = 5450, /* MHz */
722 .unk2 = 3633, 722 .unk2 = 3633,
723 RADIOREGS(0x71, 0x02, 0x21, 0x07, 0x5A, 0x01, 0x04, 0x0A, 723 RADIOREGS(0x71, 0x21, 0x02, 0x07, 0x5A, 0x01, 0x04, 0x0A,
724 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02, 724 0x00, 0x85, 0x22, 0x22, 0x55, 0x00, 0x04, 0x02,
725 0x80, 0x55, 0x00, 0x04, 0x02, 0x80), 725 0x80, 0x55, 0x00, 0x04, 0x02, 0x80),
726 PHYREGS(0x8808, 0x8408, 0x8008, 0xE001, 0xE101, 0xE201), 726 PHYREGS(0x0888, 0x0884, 0x0880, 0x01E0, 0x01E1, 0x01E2),
727 }, 727 },
728 { .channel = 92, 728 { .channel = 92,
729 .freq = 5460, /* MHz */ 729 .freq = 5460, /* MHz */
730 .unk2 = 3640, 730 .unk2 = 3640,
731 RADIOREGS(0x71, 0x02, 0x22, 0x06, 0x53, 0x01, 0x04, 0x0A, 731 RADIOREGS(0x71, 0x22, 0x02, 0x06, 0x53, 0x01, 0x04, 0x0A,
732 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01, 732 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
733 0x80, 0x55, 0x00, 0x04, 0x01, 0x80), 733 0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
734 PHYREGS(0x8C08, 0x8808, 0x8408, 0xDF01, 0xE001, 0xE101), 734 PHYREGS(0x088C, 0x0888, 0x0884, 0x01DF, 0x01E0, 0x01E1),
735 }, 735 },
736 { .channel = 94, 736 { .channel = 94,
737 .freq = 5470, /* MHz */ 737 .freq = 5470, /* MHz */
738 .unk2 = 3647, 738 .unk2 = 3647,
739 RADIOREGS(0x71, 0x02, 0x23, 0x06, 0x53, 0x01, 0x04, 0x0A, 739 RADIOREGS(0x71, 0x23, 0x02, 0x06, 0x53, 0x01, 0x04, 0x0A,
740 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01, 740 0x00, 0x85, 0x11, 0x11, 0x55, 0x00, 0x04, 0x01,
741 0x80, 0x55, 0x00, 0x04, 0x01, 0x80), 741 0x80, 0x55, 0x00, 0x04, 0x01, 0x80),
742 PHYREGS(0x9008, 0x8C08, 0x8808, 0xDE01, 0xDF01, 0xE001), 742 PHYREGS(0x0890, 0x088C, 0x0888, 0x01DE, 0x01DF, 0x01E0),
743 }, 743 },
744 { .channel = 96, 744 { .channel = 96,
745 .freq = 5480, /* MHz */ 745 .freq = 5480, /* MHz */
746 .unk2 = 3653, 746 .unk2 = 3653,
747 RADIOREGS(0x71, 0x02, 0x24, 0x06, 0x4D, 0x01, 0x04, 0x0A, 747 RADIOREGS(0x71, 0x24, 0x02, 0x06, 0x4D, 0x01, 0x04, 0x0A,
748 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00, 748 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
749 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), 749 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
750 PHYREGS(0x9408, 0x9008, 0x8C08, 0xDD01, 0xDE01, 0xDF01), 750 PHYREGS(0x0894, 0x0890, 0x088C, 0x01DD, 0x01DE, 0x01DF),
751 }, 751 },
752 { .channel = 98, 752 { .channel = 98,
753 .freq = 5490, /* MHz */ 753 .freq = 5490, /* MHz */
754 .unk2 = 3660, 754 .unk2 = 3660,
755 RADIOREGS(0x71, 0x02, 0x25, 0x06, 0x4D, 0x01, 0x04, 0x0A, 755 RADIOREGS(0x71, 0x25, 0x02, 0x06, 0x4D, 0x01, 0x04, 0x0A,
756 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00, 756 0x00, 0x84, 0x11, 0x11, 0x44, 0x00, 0x03, 0x00,
757 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), 757 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
758 PHYREGS(0x9808, 0x9408, 0x9008, 0xDD01, 0xDD01, 0xDE01), 758 PHYREGS(0x0898, 0x0894, 0x0890, 0x01DD, 0x01DD, 0x01DE),
759 }, 759 },
760 { .channel = 100, 760 { .channel = 100,
761 .freq = 5500, /* MHz */ 761 .freq = 5500, /* MHz */
762 .unk2 = 3667, 762 .unk2 = 3667,
763 RADIOREGS(0x71, 0x02, 0x26, 0x06, 0x47, 0x01, 0x04, 0x0A, 763 RADIOREGS(0x71, 0x26, 0x02, 0x06, 0x47, 0x01, 0x04, 0x0A,
764 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00, 764 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
765 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), 765 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
766 PHYREGS(0x9C08, 0x9808, 0x9408, 0xDC01, 0xDD01, 0xDD01), 766 PHYREGS(0x089C, 0x0898, 0x0894, 0x01DC, 0x01DD, 0x01DD),
767 }, 767 },
768 { .channel = 102, 768 { .channel = 102,
769 .freq = 5510, /* MHz */ 769 .freq = 5510, /* MHz */
770 .unk2 = 3673, 770 .unk2 = 3673,
771 RADIOREGS(0x71, 0x02, 0x27, 0x06, 0x47, 0x01, 0x04, 0x0A, 771 RADIOREGS(0x71, 0x27, 0x02, 0x06, 0x47, 0x01, 0x04, 0x0A,
772 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00, 772 0x00, 0x84, 0x00, 0x00, 0x44, 0x00, 0x03, 0x00,
773 0x80, 0x44, 0x00, 0x03, 0x00, 0x80), 773 0x80, 0x44, 0x00, 0x03, 0x00, 0x80),
774 PHYREGS(0xA008, 0x9C08, 0x9808, 0xDB01, 0xDC01, 0xDD01), 774 PHYREGS(0x08A0, 0x089C, 0x0898, 0x01DB, 0x01DC, 0x01DD),
775 }, 775 },
776 { .channel = 104, 776 { .channel = 104,
777 .freq = 5520, /* MHz */ 777 .freq = 5520, /* MHz */
778 .unk2 = 3680, 778 .unk2 = 3680,
779 RADIOREGS(0x71, 0x02, 0x28, 0x05, 0x40, 0x01, 0x04, 0x0A, 779 RADIOREGS(0x71, 0x28, 0x02, 0x05, 0x40, 0x01, 0x04, 0x0A,
780 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 780 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
781 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), 781 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
782 PHYREGS(0xA408, 0xA008, 0x9C08, 0xDA01, 0xDB01, 0xDC01), 782 PHYREGS(0x08A4, 0x08A0, 0x089C, 0x01DA, 0x01DB, 0x01DC),
783 }, 783 },
784 { .channel = 106, 784 { .channel = 106,
785 .freq = 5530, /* MHz */ 785 .freq = 5530, /* MHz */
786 .unk2 = 3687, 786 .unk2 = 3687,
787 RADIOREGS(0x71, 0x02, 0x29, 0x05, 0x40, 0x01, 0x04, 0x0A, 787 RADIOREGS(0x71, 0x29, 0x02, 0x05, 0x40, 0x01, 0x04, 0x0A,
788 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 788 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
789 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), 789 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
790 PHYREGS(0xA808, 0xA408, 0xA008, 0xD901, 0xDA01, 0xDB01), 790 PHYREGS(0x08A8, 0x08A4, 0x08A0, 0x01D9, 0x01DA, 0x01DB),
791 }, 791 },
792 { .channel = 108, 792 { .channel = 108,
793 .freq = 5540, /* MHz */ 793 .freq = 5540, /* MHz */
794 .unk2 = 3693, 794 .unk2 = 3693,
795 RADIOREGS(0x71, 0x02, 0x2A, 0x05, 0x3A, 0x01, 0x04, 0x0A, 795 RADIOREGS(0x71, 0x2A, 0x02, 0x05, 0x3A, 0x01, 0x04, 0x0A,
796 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 796 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
797 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), 797 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
798 PHYREGS(0xAC08, 0xA808, 0xA408, 0xD801, 0xD901, 0xDA01), 798 PHYREGS(0x08AC, 0x08A8, 0x08A4, 0x01D8, 0x01D9, 0x01DA),
799 }, 799 },
800 { .channel = 110, 800 { .channel = 110,
801 .freq = 5550, /* MHz */ 801 .freq = 5550, /* MHz */
802 .unk2 = 3700, 802 .unk2 = 3700,
803 RADIOREGS(0x71, 0x02, 0x2B, 0x05, 0x3A, 0x01, 0x04, 0x0A, 803 RADIOREGS(0x71, 0x2B, 0x02, 0x05, 0x3A, 0x01, 0x04, 0x0A,
804 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 804 0x00, 0x83, 0x00, 0x00, 0x33, 0x00, 0x02, 0x00,
805 0x80, 0x33, 0x00, 0x02, 0x00, 0x80), 805 0x80, 0x33, 0x00, 0x02, 0x00, 0x80),
806 PHYREGS(0xB008, 0xAC08, 0xA808, 0xD701, 0xD801, 0xD901), 806 PHYREGS(0x08B0, 0x08AC, 0x08A8, 0x01D7, 0x01D8, 0x01D9),
807 }, 807 },
808 { .channel = 112, 808 { .channel = 112,
809 .freq = 5560, /* MHz */ 809 .freq = 5560, /* MHz */
810 .unk2 = 3707, 810 .unk2 = 3707,
811 RADIOREGS(0x71, 0x02, 0x2C, 0x05, 0x34, 0x01, 0x04, 0x0A, 811 RADIOREGS(0x71, 0x2C, 0x02, 0x05, 0x34, 0x01, 0x04, 0x0A,
812 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, 812 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
813 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), 813 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
814 PHYREGS(0xB408, 0xB008, 0xAC08, 0xD701, 0xD701, 0xD801), 814 PHYREGS(0x08B4, 0x08B0, 0x08AC, 0x01D7, 0x01D7, 0x01D8),
815 }, 815 },
816 { .channel = 114, 816 { .channel = 114,
817 .freq = 5570, /* MHz */ 817 .freq = 5570, /* MHz */
818 .unk2 = 3713, 818 .unk2 = 3713,
819 RADIOREGS(0x71, 0x02, 0x2D, 0x05, 0x34, 0x01, 0x04, 0x0A, 819 RADIOREGS(0x71, 0x2D, 0x02, 0x05, 0x34, 0x01, 0x04, 0x0A,
820 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, 820 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
821 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), 821 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
822 PHYREGS(0xB808, 0xB408, 0xB008, 0xD601, 0xD701, 0xD701), 822 PHYREGS(0x08B8, 0x08B4, 0x08B0, 0x01D6, 0x01D7, 0x01D7),
823 }, 823 },
824 { .channel = 116, 824 { .channel = 116,
825 .freq = 5580, /* MHz */ 825 .freq = 5580, /* MHz */
826 .unk2 = 3720, 826 .unk2 = 3720,
827 RADIOREGS(0x71, 0x02, 0x2E, 0x04, 0x2E, 0x01, 0x04, 0x0A, 827 RADIOREGS(0x71, 0x2E, 0x02, 0x04, 0x2E, 0x01, 0x04, 0x0A,
828 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, 828 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
829 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), 829 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
830 PHYREGS(0xBC08, 0xB808, 0xB408, 0xD501, 0xD601, 0xD701), 830 PHYREGS(0x08BC, 0x08B8, 0x08B4, 0x01D5, 0x01D6, 0x01D7),
831 }, 831 },
832 { .channel = 118, 832 { .channel = 118,
833 .freq = 5590, /* MHz */ 833 .freq = 5590, /* MHz */
834 .unk2 = 3727, 834 .unk2 = 3727,
835 RADIOREGS(0x71, 0x02, 0x2F, 0x04, 0x2E, 0x01, 0x04, 0x0A, 835 RADIOREGS(0x71, 0x2F, 0x02, 0x04, 0x2E, 0x01, 0x04, 0x0A,
836 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00, 836 0x00, 0x82, 0x00, 0x00, 0x22, 0x00, 0x01, 0x00,
837 0x80, 0x22, 0x00, 0x01, 0x00, 0x80), 837 0x80, 0x22, 0x00, 0x01, 0x00, 0x80),
838 PHYREGS(0xC008, 0xBC08, 0xB808, 0xD401, 0xD501, 0xD601), 838 PHYREGS(0x08C0, 0x08BC, 0x08B8, 0x01D4, 0x01D5, 0x01D6),
839 }, 839 },
840 { .channel = 120, 840 { .channel = 120,
841 .freq = 5600, /* MHz */ 841 .freq = 5600, /* MHz */
842 .unk2 = 3733, 842 .unk2 = 3733,
843 RADIOREGS(0x71, 0x02, 0x30, 0x04, 0x28, 0x01, 0x04, 0x0A, 843 RADIOREGS(0x71, 0x30, 0x02, 0x04, 0x28, 0x01, 0x04, 0x0A,
844 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00, 844 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
845 0x80, 0x11, 0x00, 0x01, 0x00, 0x80), 845 0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
846 PHYREGS(0xC408, 0xC008, 0xBC08, 0xD301, 0xD401, 0xD501), 846 PHYREGS(0x08C4, 0x08C0, 0x08BC, 0x01D3, 0x01D4, 0x01D5),
847 }, 847 },
848 { .channel = 122, 848 { .channel = 122,
849 .freq = 5610, /* MHz */ 849 .freq = 5610, /* MHz */
850 .unk2 = 3740, 850 .unk2 = 3740,
851 RADIOREGS(0x71, 0x02, 0x31, 0x04, 0x28, 0x01, 0x04, 0x0A, 851 RADIOREGS(0x71, 0x31, 0x02, 0x04, 0x28, 0x01, 0x04, 0x0A,
852 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00, 852 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x01, 0x00,
853 0x80, 0x11, 0x00, 0x01, 0x00, 0x80), 853 0x80, 0x11, 0x00, 0x01, 0x00, 0x80),
854 PHYREGS(0xC808, 0xC408, 0xC008, 0xD201, 0xD301, 0xD401), 854 PHYREGS(0x08C8, 0x08C4, 0x08C0, 0x01D2, 0x01D3, 0x01D4),
855 }, 855 },
856 { .channel = 124, 856 { .channel = 124,
857 .freq = 5620, /* MHz */ 857 .freq = 5620, /* MHz */
858 .unk2 = 3747, 858 .unk2 = 3747,
859 RADIOREGS(0x71, 0x02, 0x32, 0x04, 0x21, 0x01, 0x04, 0x0A, 859 RADIOREGS(0x71, 0x32, 0x02, 0x04, 0x21, 0x01, 0x04, 0x0A,
860 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 860 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
861 0x80, 0x11, 0x00, 0x00, 0x00, 0x80), 861 0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
862 PHYREGS(0xCC08, 0xC808, 0xC408, 0xD201, 0xD201, 0xD301), 862 PHYREGS(0x08CC, 0x08C8, 0x08C4, 0x01D2, 0x01D2, 0x01D3),
863 }, 863 },
864 { .channel = 126, 864 { .channel = 126,
865 .freq = 5630, /* MHz */ 865 .freq = 5630, /* MHz */
866 .unk2 = 3753, 866 .unk2 = 3753,
867 RADIOREGS(0x71, 0x02, 0x33, 0x04, 0x21, 0x01, 0x04, 0x0A, 867 RADIOREGS(0x71, 0x33, 0x02, 0x04, 0x21, 0x01, 0x04, 0x0A,
868 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00, 868 0x00, 0x81, 0x00, 0x00, 0x11, 0x00, 0x00, 0x00,
869 0x80, 0x11, 0x00, 0x00, 0x00, 0x80), 869 0x80, 0x11, 0x00, 0x00, 0x00, 0x80),
870 PHYREGS(0xD008, 0xCC08, 0xC808, 0xD101, 0xD201, 0xD201), 870 PHYREGS(0x08D0, 0x08CC, 0x08C8, 0x01D1, 0x01D2, 0x01D2),
871 }, 871 },
872 { .channel = 128, 872 { .channel = 128,
873 .freq = 5640, /* MHz */ 873 .freq = 5640, /* MHz */
874 .unk2 = 3760, 874 .unk2 = 3760,
875 RADIOREGS(0x71, 0x02, 0x34, 0x03, 0x1C, 0x01, 0x04, 0x0A, 875 RADIOREGS(0x71, 0x34, 0x02, 0x03, 0x1C, 0x01, 0x04, 0x0A,
876 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 876 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
877 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 877 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
878 PHYREGS(0xD408, 0xD008, 0xCC08, 0xD001, 0xD101, 0xD201), 878 PHYREGS(0x08D4, 0x08D0, 0x08CC, 0x01D0, 0x01D1, 0x01D2),
879 }, 879 },
880 { .channel = 130, 880 { .channel = 130,
881 .freq = 5650, /* MHz */ 881 .freq = 5650, /* MHz */
882 .unk2 = 3767, 882 .unk2 = 3767,
883 RADIOREGS(0x71, 0x02, 0x35, 0x03, 0x1C, 0x01, 0x04, 0x0A, 883 RADIOREGS(0x71, 0x35, 0x02, 0x03, 0x1C, 0x01, 0x04, 0x0A,
884 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 884 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
885 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 885 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
886 PHYREGS(0xD808, 0xD408, 0xD008, 0xCF01, 0xD001, 0xD101), 886 PHYREGS(0x08D8, 0x08D4, 0x08D0, 0x01CF, 0x01D0, 0x01D1),
887 }, 887 },
888 { .channel = 132, 888 { .channel = 132,
889 .freq = 5660, /* MHz */ 889 .freq = 5660, /* MHz */
890 .unk2 = 3773, 890 .unk2 = 3773,
891 RADIOREGS(0x71, 0x02, 0x36, 0x03, 0x16, 0x01, 0x04, 0x0A, 891 RADIOREGS(0x71, 0x36, 0x02, 0x03, 0x16, 0x01, 0x04, 0x0A,
892 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 892 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
893 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 893 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
894 PHYREGS(0xDC08, 0xD808, 0xD408, 0xCE01, 0xCF01, 0xD001), 894 PHYREGS(0x08DC, 0x08D8, 0x08D4, 0x01CE, 0x01CF, 0x01D0),
895 }, 895 },
896 { .channel = 134, 896 { .channel = 134,
897 .freq = 5670, /* MHz */ 897 .freq = 5670, /* MHz */
898 .unk2 = 3780, 898 .unk2 = 3780,
899 RADIOREGS(0x71, 0x02, 0x37, 0x03, 0x16, 0x01, 0x04, 0x0A, 899 RADIOREGS(0x71, 0x37, 0x02, 0x03, 0x16, 0x01, 0x04, 0x0A,
900 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 900 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
901 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 901 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
902 PHYREGS(0xE008, 0xDC08, 0xD808, 0xCE01, 0xCE01, 0xCF01), 902 PHYREGS(0x08E0, 0x08DC, 0x08D8, 0x01CE, 0x01CE, 0x01CF),
903 }, 903 },
904 { .channel = 136, 904 { .channel = 136,
905 .freq = 5680, /* MHz */ 905 .freq = 5680, /* MHz */
906 .unk2 = 3787, 906 .unk2 = 3787,
907 RADIOREGS(0x71, 0x02, 0x38, 0x03, 0x10, 0x01, 0x04, 0x0A, 907 RADIOREGS(0x71, 0x38, 0x02, 0x03, 0x10, 0x01, 0x04, 0x0A,
908 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 908 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
909 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 909 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
910 PHYREGS(0xE408, 0xE008, 0xDC08, 0xCD01, 0xCE01, 0xCE01), 910 PHYREGS(0x08E4, 0x08E0, 0x08DC, 0x01CD, 0x01CE, 0x01CE),
911 }, 911 },
912 { .channel = 138, 912 { .channel = 138,
913 .freq = 5690, /* MHz */ 913 .freq = 5690, /* MHz */
914 .unk2 = 3793, 914 .unk2 = 3793,
915 RADIOREGS(0x71, 0x02, 0x39, 0x03, 0x10, 0x01, 0x04, 0x0A, 915 RADIOREGS(0x71, 0x39, 0x02, 0x03, 0x10, 0x01, 0x04, 0x0A,
916 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 916 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
917 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 917 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
918 PHYREGS(0xE808, 0xE408, 0xE008, 0xCC01, 0xCD01, 0xCE01), 918 PHYREGS(0x08E8, 0x08E4, 0x08E0, 0x01CC, 0x01CD, 0x01CE),
919 }, 919 },
920 { .channel = 140, 920 { .channel = 140,
921 .freq = 5700, /* MHz */ 921 .freq = 5700, /* MHz */
922 .unk2 = 3800, 922 .unk2 = 3800,
923 RADIOREGS(0x71, 0x02, 0x3A, 0x02, 0x0A, 0x01, 0x04, 0x0A, 923 RADIOREGS(0x71, 0x3A, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
924 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 924 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
925 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 925 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
926 PHYREGS(0xEC08, 0xE808, 0xE408, 0xCB01, 0xCC01, 0xCD01), 926 PHYREGS(0x08EC, 0x08E8, 0x08E4, 0x01CB, 0x01CC, 0x01CD),
927 }, 927 },
928 { .channel = 142, 928 { .channel = 142,
929 .freq = 5710, /* MHz */ 929 .freq = 5710, /* MHz */
930 .unk2 = 3807, 930 .unk2 = 3807,
931 RADIOREGS(0x71, 0x02, 0x3B, 0x02, 0x0A, 0x01, 0x04, 0x0A, 931 RADIOREGS(0x71, 0x3B, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
932 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 932 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
933 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 933 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
934 PHYREGS(0xF008, 0xEC08, 0xE808, 0xCA01, 0xCB01, 0xCC01), 934 PHYREGS(0x08F0, 0x08EC, 0x08E8, 0x01CA, 0x01CB, 0x01CC),
935 }, 935 },
936 { .channel = 144, 936 { .channel = 144,
937 .freq = 5720, /* MHz */ 937 .freq = 5720, /* MHz */
938 .unk2 = 3813, 938 .unk2 = 3813,
939 RADIOREGS(0x71, 0x02, 0x3C, 0x02, 0x0A, 0x01, 0x04, 0x0A, 939 RADIOREGS(0x71, 0x3C, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
940 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 940 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
941 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 941 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
942 PHYREGS(0xF408, 0xF008, 0xEC08, 0xC901, 0xCA01, 0xCB01), 942 PHYREGS(0x08F4, 0x08F0, 0x08EC, 0x01C9, 0x01CA, 0x01CB),
943 }, 943 },
944 { .channel = 145, 944 { .channel = 145,
945 .freq = 5725, /* MHz */ 945 .freq = 5725, /* MHz */
946 .unk2 = 3817, 946 .unk2 = 3817,
947 RADIOREGS(0x72, 0x04, 0x79, 0x02, 0x03, 0x01, 0x03, 0x14, 947 RADIOREGS(0x72, 0x79, 0x04, 0x02, 0x03, 0x01, 0x03, 0x14,
948 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 948 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
949 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 949 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
950 PHYREGS(0xF608, 0xF208, 0xEE08, 0xC901, 0xCA01, 0xCB01), 950 PHYREGS(0x08F6, 0x08F2, 0x08EE, 0x01C9, 0x01CA, 0x01CB),
951 }, 951 },
952 { .channel = 146, 952 { .channel = 146,
953 .freq = 5730, /* MHz */ 953 .freq = 5730, /* MHz */
954 .unk2 = 3820, 954 .unk2 = 3820,
955 RADIOREGS(0x71, 0x02, 0x3D, 0x02, 0x0A, 0x01, 0x04, 0x0A, 955 RADIOREGS(0x71, 0x3D, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
956 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 956 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
957 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 957 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
958 PHYREGS(0xF808, 0xF408, 0xF008, 0xC901, 0xC901, 0xCA01), 958 PHYREGS(0x08F8, 0x08F4, 0x08F0, 0x01C9, 0x01C9, 0x01CA),
959 }, 959 },
960 { .channel = 147, 960 { .channel = 147,
961 .freq = 5735, /* MHz */ 961 .freq = 5735, /* MHz */
962 .unk2 = 3823, 962 .unk2 = 3823,
963 RADIOREGS(0x72, 0x04, 0x7B, 0x02, 0x03, 0x01, 0x03, 0x14, 963 RADIOREGS(0x72, 0x7B, 0x04, 0x02, 0x03, 0x01, 0x03, 0x14,
964 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 964 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
965 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 965 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
966 PHYREGS(0xFA08, 0xF608, 0xF208, 0xC801, 0xC901, 0xCA01), 966 PHYREGS(0x08FA, 0x08F6, 0x08F2, 0x01C8, 0x01C9, 0x01CA),
967 }, 967 },
968 { .channel = 148, 968 { .channel = 148,
969 .freq = 5740, /* MHz */ 969 .freq = 5740, /* MHz */
970 .unk2 = 3827, 970 .unk2 = 3827,
971 RADIOREGS(0x71, 0x02, 0x3E, 0x02, 0x0A, 0x01, 0x04, 0x0A, 971 RADIOREGS(0x71, 0x3E, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
972 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 972 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
973 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 973 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
974 PHYREGS(0xFC08, 0xF808, 0xF408, 0xC801, 0xC901, 0xC901), 974 PHYREGS(0x08FC, 0x08F8, 0x08F4, 0x01C8, 0x01C9, 0x01C9),
975 }, 975 },
976 { .channel = 149, 976 { .channel = 149,
977 .freq = 5745, /* MHz */ 977 .freq = 5745, /* MHz */
978 .unk2 = 3830, 978 .unk2 = 3830,
979 RADIOREGS(0x72, 0x04, 0x7D, 0x02, 0xFE, 0x00, 0x03, 0x14, 979 RADIOREGS(0x72, 0x7D, 0x04, 0x02, 0xFE, 0x00, 0x03, 0x14,
980 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 980 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
981 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 981 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
982 PHYREGS(0xFE08, 0xFA08, 0xF608, 0xC801, 0xC801, 0xC901), 982 PHYREGS(0x08FE, 0x08FA, 0x08F6, 0x01C8, 0x01C8, 0x01C9),
983 }, 983 },
984 { .channel = 150, 984 { .channel = 150,
985 .freq = 5750, /* MHz */ 985 .freq = 5750, /* MHz */
986 .unk2 = 3833, 986 .unk2 = 3833,
987 RADIOREGS(0x71, 0x02, 0x3F, 0x02, 0x0A, 0x01, 0x04, 0x0A, 987 RADIOREGS(0x71, 0x3F, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
988 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 988 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
989 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 989 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
990 PHYREGS(0x0009, 0xFC08, 0xF808, 0xC701, 0xC801, 0xC901), 990 PHYREGS(0x0900, 0x08FC, 0x08F8, 0x01C7, 0x01C8, 0x01C9),
991 }, 991 },
992 { .channel = 151, 992 { .channel = 151,
993 .freq = 5755, /* MHz */ 993 .freq = 5755, /* MHz */
994 .unk2 = 3837, 994 .unk2 = 3837,
995 RADIOREGS(0x72, 0x04, 0x7F, 0x02, 0xFE, 0x00, 0x03, 0x14, 995 RADIOREGS(0x72, 0x7F, 0x04, 0x02, 0xFE, 0x00, 0x03, 0x14,
996 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 996 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
997 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 997 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
998 PHYREGS(0x0209, 0xFE08, 0xFA08, 0xC701, 0xC801, 0xC801), 998 PHYREGS(0x0902, 0x08FE, 0x08FA, 0x01C7, 0x01C8, 0x01C8),
999 }, 999 },
1000 { .channel = 152, 1000 { .channel = 152,
1001 .freq = 5760, /* MHz */ 1001 .freq = 5760, /* MHz */
1002 .unk2 = 3840, 1002 .unk2 = 3840,
1003 RADIOREGS(0x71, 0x02, 0x40, 0x02, 0x0A, 0x01, 0x04, 0x0A, 1003 RADIOREGS(0x71, 0x40, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1004 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1004 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1005 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1005 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1006 PHYREGS(0x0409, 0x0009, 0xFC08, 0xC601, 0xC701, 0xC801), 1006 PHYREGS(0x0904, 0x0900, 0x08FC, 0x01C6, 0x01C7, 0x01C8),
1007 }, 1007 },
1008 { .channel = 153, 1008 { .channel = 153,
1009 .freq = 5765, /* MHz */ 1009 .freq = 5765, /* MHz */
1010 .unk2 = 3843, 1010 .unk2 = 3843,
1011 RADIOREGS(0x72, 0x04, 0x81, 0x02, 0xF8, 0x00, 0x03, 0x14, 1011 RADIOREGS(0x72, 0x81, 0x04, 0x02, 0xF8, 0x00, 0x03, 0x14,
1012 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1012 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1013 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1013 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1014 PHYREGS(0x0609, 0x0209, 0xFE08, 0xC601, 0xC701, 0xC801), 1014 PHYREGS(0x0906, 0x0902, 0x08FE, 0x01C6, 0x01C7, 0x01C8),
1015 }, 1015 },
1016 { .channel = 154, 1016 { .channel = 154,
1017 .freq = 5770, /* MHz */ 1017 .freq = 5770, /* MHz */
1018 .unk2 = 3847, 1018 .unk2 = 3847,
1019 RADIOREGS(0x71, 0x02, 0x41, 0x02, 0x0A, 0x01, 0x04, 0x0A, 1019 RADIOREGS(0x71, 0x41, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1020 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1020 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1021 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1021 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1022 PHYREGS(0x0809, 0x0409, 0x0009, 0xC601, 0xC601, 0xC701), 1022 PHYREGS(0x0908, 0x0904, 0x0900, 0x01C6, 0x01C6, 0x01C7),
1023 }, 1023 },
1024 { .channel = 155, 1024 { .channel = 155,
1025 .freq = 5775, /* MHz */ 1025 .freq = 5775, /* MHz */
1026 .unk2 = 3850, 1026 .unk2 = 3850,
1027 RADIOREGS(0x72, 0x04, 0x83, 0x02, 0xF8, 0x00, 0x03, 0x14, 1027 RADIOREGS(0x72, 0x83, 0x04, 0x02, 0xF8, 0x00, 0x03, 0x14,
1028 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1028 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1029 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1029 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1030 PHYREGS(0x0A09, 0x0609, 0x0209, 0xC501, 0xC601, 0xC701), 1030 PHYREGS(0x090A, 0x0906, 0x0902, 0x01C5, 0x01C6, 0x01C7),
1031 }, 1031 },
1032 { .channel = 156, 1032 { .channel = 156,
1033 .freq = 5780, /* MHz */ 1033 .freq = 5780, /* MHz */
1034 .unk2 = 3853, 1034 .unk2 = 3853,
1035 RADIOREGS(0x71, 0x02, 0x42, 0x02, 0x0A, 0x01, 0x04, 0x0A, 1035 RADIOREGS(0x71, 0x42, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1036 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1036 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1037 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1037 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1038 PHYREGS(0x0C09, 0x0809, 0x0409, 0xC501, 0xC601, 0xC601), 1038 PHYREGS(0x090C, 0x0908, 0x0904, 0x01C5, 0x01C6, 0x01C6),
1039 }, 1039 },
1040 { .channel = 157, 1040 { .channel = 157,
1041 .freq = 5785, /* MHz */ 1041 .freq = 5785, /* MHz */
1042 .unk2 = 3857, 1042 .unk2 = 3857,
1043 RADIOREGS(0x72, 0x04, 0x85, 0x02, 0xF2, 0x00, 0x03, 0x14, 1043 RADIOREGS(0x72, 0x85, 0x04, 0x02, 0xF2, 0x00, 0x03, 0x14,
1044 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1044 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1045 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1045 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1046 PHYREGS(0x0E09, 0x0A09, 0x0609, 0xC401, 0xC501, 0xC601), 1046 PHYREGS(0x090E, 0x090A, 0x0906, 0x01C4, 0x01C5, 0x01C6),
1047 }, 1047 },
1048 { .channel = 158, 1048 { .channel = 158,
1049 .freq = 5790, /* MHz */ 1049 .freq = 5790, /* MHz */
1050 .unk2 = 3860, 1050 .unk2 = 3860,
1051 RADIOREGS(0x71, 0x02, 0x43, 0x02, 0x0A, 0x01, 0x04, 0x0A, 1051 RADIOREGS(0x71, 0x43, 0x02, 0x02, 0x0A, 0x01, 0x04, 0x0A,
1052 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1052 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1053 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1053 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1054 PHYREGS(0x1009, 0x0C09, 0x0809, 0xC401, 0xC501, 0xC601), 1054 PHYREGS(0x0910, 0x090C, 0x0908, 0x01C4, 0x01C5, 0x01C6),
1055 }, 1055 },
1056 { .channel = 159, 1056 { .channel = 159,
1057 .freq = 5795, /* MHz */ 1057 .freq = 5795, /* MHz */
1058 .unk2 = 3863, 1058 .unk2 = 3863,
1059 RADIOREGS(0x72, 0x04, 0x87, 0x02, 0xF2, 0x00, 0x03, 0x14, 1059 RADIOREGS(0x72, 0x87, 0x04, 0x02, 0xF2, 0x00, 0x03, 0x14,
1060 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1060 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1061 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1061 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1062 PHYREGS(0x1209, 0x0E09, 0x0A09, 0xC401, 0xC401, 0xC501), 1062 PHYREGS(0x0912, 0x090E, 0x090A, 0x01C4, 0x01C4, 0x01C5),
1063 }, 1063 },
1064 { .channel = 160, 1064 { .channel = 160,
1065 .freq = 5800, /* MHz */ 1065 .freq = 5800, /* MHz */
1066 .unk2 = 3867, 1066 .unk2 = 3867,
1067 RADIOREGS(0x71, 0x02, 0x44, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1067 RADIOREGS(0x71, 0x44, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1068 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1068 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1069 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1069 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1070 PHYREGS(0x1409, 0x1009, 0x0C09, 0xC301, 0xC401, 0xC501), 1070 PHYREGS(0x0914, 0x0910, 0x090C, 0x01C3, 0x01C4, 0x01C5),
1071 }, 1071 },
1072 { .channel = 161, 1072 { .channel = 161,
1073 .freq = 5805, /* MHz */ 1073 .freq = 5805, /* MHz */
1074 .unk2 = 3870, 1074 .unk2 = 3870,
1075 RADIOREGS(0x72, 0x04, 0x89, 0x01, 0xED, 0x00, 0x03, 0x14, 1075 RADIOREGS(0x72, 0x89, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
1076 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1076 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1077 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1077 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1078 PHYREGS(0x1609, 0x1209, 0x0E09, 0xC301, 0xC401, 0xC401), 1078 PHYREGS(0x0916, 0x0912, 0x090E, 0x01C3, 0x01C4, 0x01C4),
1079 }, 1079 },
1080 { .channel = 162, 1080 { .channel = 162,
1081 .freq = 5810, /* MHz */ 1081 .freq = 5810, /* MHz */
1082 .unk2 = 3873, 1082 .unk2 = 3873,
1083 RADIOREGS(0x71, 0x02, 0x45, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1083 RADIOREGS(0x71, 0x45, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1084 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1084 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1085 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1085 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1086 PHYREGS(0x1809, 0x1409, 0x1009, 0xC201, 0xC301, 0xC401), 1086 PHYREGS(0x0918, 0x0914, 0x0910, 0x01C2, 0x01C3, 0x01C4),
1087 }, 1087 },
1088 { .channel = 163, 1088 { .channel = 163,
1089 .freq = 5815, /* MHz */ 1089 .freq = 5815, /* MHz */
1090 .unk2 = 3877, 1090 .unk2 = 3877,
1091 RADIOREGS(0x72, 0x04, 0x8B, 0x01, 0xED, 0x00, 0x03, 0x14, 1091 RADIOREGS(0x72, 0x8B, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
1092 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1092 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1093 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1093 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1094 PHYREGS(0x1A09, 0x1609, 0x1209, 0xC201, 0xC301, 0xC401), 1094 PHYREGS(0x091A, 0x0916, 0x0912, 0x01C2, 0x01C3, 0x01C4),
1095 }, 1095 },
1096 { .channel = 164, 1096 { .channel = 164,
1097 .freq = 5820, /* MHz */ 1097 .freq = 5820, /* MHz */
1098 .unk2 = 3880, 1098 .unk2 = 3880,
1099 RADIOREGS(0x71, 0x02, 0x46, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1099 RADIOREGS(0x71, 0x46, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1100 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1100 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1101 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1101 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1102 PHYREGS(0x1C09, 0x1809, 0x1409, 0xC201, 0xC201, 0xC301), 1102 PHYREGS(0x091C, 0x0918, 0x0914, 0x01C2, 0x01C2, 0x01C3),
1103 }, 1103 },
1104 { .channel = 165, 1104 { .channel = 165,
1105 .freq = 5825, /* MHz */ 1105 .freq = 5825, /* MHz */
1106 .unk2 = 3883, 1106 .unk2 = 3883,
1107 RADIOREGS(0x72, 0x04, 0x8D, 0x01, 0xED, 0x00, 0x03, 0x14, 1107 RADIOREGS(0x72, 0x8D, 0x04, 0x01, 0xED, 0x00, 0x03, 0x14,
1108 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1108 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1109 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1109 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1110 PHYREGS(0x1E09, 0x1A09, 0x1609, 0xC101, 0xC201, 0xC301), 1110 PHYREGS(0x091E, 0x091A, 0x0916, 0x01C1, 0x01C2, 0x01C3),
1111 }, 1111 },
1112 { .channel = 166, 1112 { .channel = 166,
1113 .freq = 5830, /* MHz */ 1113 .freq = 5830, /* MHz */
1114 .unk2 = 3887, 1114 .unk2 = 3887,
1115 RADIOREGS(0x71, 0x02, 0x47, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1115 RADIOREGS(0x71, 0x47, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1116 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1116 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1117 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1117 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1118 PHYREGS(0x2009, 0x1C09, 0x1809, 0xC101, 0xC201, 0xC201), 1118 PHYREGS(0x0920, 0x091C, 0x0918, 0x01C1, 0x01C2, 0x01C2),
1119 }, 1119 },
1120 { .channel = 168, 1120 { .channel = 168,
1121 .freq = 5840, /* MHz */ 1121 .freq = 5840, /* MHz */
1122 .unk2 = 3893, 1122 .unk2 = 3893,
1123 RADIOREGS(0x71, 0x02, 0x48, 0x01, 0x0A, 0x01, 0x04, 0x0A, 1123 RADIOREGS(0x71, 0x48, 0x02, 0x01, 0x0A, 0x01, 0x04, 0x0A,
1124 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1124 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1125 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1125 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1126 PHYREGS(0x2409, 0x2009, 0x1C09, 0xC001, 0xC101, 0xC201), 1126 PHYREGS(0x0924, 0x0920, 0x091C, 0x01C0, 0x01C1, 0x01C2),
1127 }, 1127 },
1128 { .channel = 170, 1128 { .channel = 170,
1129 .freq = 5850, /* MHz */ 1129 .freq = 5850, /* MHz */
1130 .unk2 = 3900, 1130 .unk2 = 3900,
1131 RADIOREGS(0x71, 0x02, 0x49, 0x01, 0xE0, 0x00, 0x04, 0x0A, 1131 RADIOREGS(0x71, 0x49, 0x02, 0x01, 0xE0, 0x00, 0x04, 0x0A,
1132 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1132 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1133 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1133 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1134 PHYREGS(0x2809, 0x2409, 0x2009, 0xBF01, 0xC001, 0xC101), 1134 PHYREGS(0x0928, 0x0924, 0x0920, 0x01BF, 0x01C0, 0x01C1),
1135 }, 1135 },
1136 { .channel = 172, 1136 { .channel = 172,
1137 .freq = 5860, /* MHz */ 1137 .freq = 5860, /* MHz */
1138 .unk2 = 3907, 1138 .unk2 = 3907,
1139 RADIOREGS(0x71, 0x02, 0x4A, 0x01, 0xDE, 0x00, 0x04, 0x0A, 1139 RADIOREGS(0x71, 0x4A, 0x02, 0x01, 0xDE, 0x00, 0x04, 0x0A,
1140 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1140 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1141 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1141 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1142 PHYREGS(0x2C09, 0x2809, 0x2409, 0xBF01, 0xBF01, 0xC001), 1142 PHYREGS(0x092C, 0x0928, 0x0924, 0x01BF, 0x01BF, 0x01C0),
1143 }, 1143 },
1144 { .channel = 174, 1144 { .channel = 174,
1145 .freq = 5870, /* MHz */ 1145 .freq = 5870, /* MHz */
1146 .unk2 = 3913, 1146 .unk2 = 3913,
1147 RADIOREGS(0x71, 0x02, 0x4B, 0x00, 0xDB, 0x00, 0x04, 0x0A, 1147 RADIOREGS(0x71, 0x4B, 0x02, 0x00, 0xDB, 0x00, 0x04, 0x0A,
1148 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1148 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1149 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1149 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1150 PHYREGS(0x3009, 0x2C09, 0x2809, 0xBE01, 0xBF01, 0xBF01), 1150 PHYREGS(0x0930, 0x092C, 0x0928, 0x01BE, 0x01BF, 0x01BF),
1151 }, 1151 },
1152 { .channel = 176, 1152 { .channel = 176,
1153 .freq = 5880, /* MHz */ 1153 .freq = 5880, /* MHz */
1154 .unk2 = 3920, 1154 .unk2 = 3920,
1155 RADIOREGS(0x71, 0x02, 0x4C, 0x00, 0xD8, 0x00, 0x04, 0x0A, 1155 RADIOREGS(0x71, 0x4C, 0x02, 0x00, 0xD8, 0x00, 0x04, 0x0A,
1156 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1156 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1157 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1157 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1158 PHYREGS(0x3409, 0x3009, 0x2C09, 0xBD01, 0xBE01, 0xBF01), 1158 PHYREGS(0x0934, 0x0930, 0x092C, 0x01BD, 0x01BE, 0x01BF),
1159 }, 1159 },
1160 { .channel = 178, 1160 { .channel = 178,
1161 .freq = 5890, /* MHz */ 1161 .freq = 5890, /* MHz */
1162 .unk2 = 3927, 1162 .unk2 = 3927,
1163 RADIOREGS(0x71, 0x02, 0x4D, 0x00, 0xD6, 0x00, 0x04, 0x0A, 1163 RADIOREGS(0x71, 0x4D, 0x02, 0x00, 0xD6, 0x00, 0x04, 0x0A,
1164 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1164 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1165 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1165 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1166 PHYREGS(0x3809, 0x3409, 0x3009, 0xBC01, 0xBD01, 0xBE01), 1166 PHYREGS(0x0938, 0x0934, 0x0930, 0x01BC, 0x01BD, 0x01BE),
1167 }, 1167 },
1168 { .channel = 180, 1168 { .channel = 180,
1169 .freq = 5900, /* MHz */ 1169 .freq = 5900, /* MHz */
1170 .unk2 = 3933, 1170 .unk2 = 3933,
1171 RADIOREGS(0x71, 0x02, 0x4E, 0x00, 0xD3, 0x00, 0x04, 0x0A, 1171 RADIOREGS(0x71, 0x4E, 0x02, 0x00, 0xD3, 0x00, 0x04, 0x0A,
1172 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1172 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1173 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1173 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1174 PHYREGS(0x3C09, 0x3809, 0x3409, 0xBC01, 0xBC01, 0xBD01), 1174 PHYREGS(0x093C, 0x0938, 0x0934, 0x01BC, 0x01BC, 0x01BD),
1175 }, 1175 },
1176 { .channel = 182, 1176 { .channel = 182,
1177 .freq = 5910, /* MHz */ 1177 .freq = 5910, /* MHz */
1178 .unk2 = 3940, 1178 .unk2 = 3940,
1179 RADIOREGS(0x71, 0x02, 0x4F, 0x00, 0xD6, 0x00, 0x04, 0x0A, 1179 RADIOREGS(0x71, 0x4F, 0x02, 0x00, 0xD6, 0x00, 0x04, 0x0A,
1180 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 1180 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1181 0x80, 0x00, 0x00, 0x00, 0x00, 0x80), 1181 0x80, 0x00, 0x00, 0x00, 0x00, 0x80),
1182 PHYREGS(0x4009, 0x3C09, 0x3809, 0xBB01, 0xBC01, 0xBC01), 1182 PHYREGS(0x0940, 0x093C, 0x0938, 0x01BB, 0x01BC, 0x01BC),
1183 }, 1183 },
1184 { .channel = 1, 1184 { .channel = 1,
1185 .freq = 2412, /* MHz */ 1185 .freq = 2412, /* MHz */
1186 .unk2 = 3216, 1186 .unk2 = 3216,
1187 RADIOREGS(0x73, 0x09, 0x6C, 0x0F, 0x00, 0x01, 0x07, 0x15, 1187 RADIOREGS(0x73, 0x6C, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1188 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C, 1188 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0D, 0x0C,
1189 0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80), 1189 0x80, 0xFF, 0x88, 0x0D, 0x0C, 0x80),
1190 PHYREGS(0xC903, 0xC503, 0xC103, 0x3A04, 0x3F04, 0x4304), 1190 PHYREGS(0x03C9, 0x03C5, 0x03C1, 0x043A, 0x043F, 0x0443),
1191 }, 1191 },
1192 { .channel = 2, 1192 { .channel = 2,
1193 .freq = 2417, /* MHz */ 1193 .freq = 2417, /* MHz */
1194 .unk2 = 3223, 1194 .unk2 = 3223,
1195 RADIOREGS(0x73, 0x09, 0x71, 0x0F, 0x00, 0x01, 0x07, 0x15, 1195 RADIOREGS(0x73, 0x71, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1196 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B, 1196 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0B,
1197 0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80), 1197 0x80, 0xFF, 0x88, 0x0C, 0x0B, 0x80),
1198 PHYREGS(0xCB03, 0xC703, 0xC303, 0x3804, 0x3D04, 0x4104), 1198 PHYREGS(0x03CB, 0x03C7, 0x03C3, 0x0438, 0x043D, 0x0441),
1199 }, 1199 },
1200 { .channel = 3, 1200 { .channel = 3,
1201 .freq = 2422, /* MHz */ 1201 .freq = 2422, /* MHz */
1202 .unk2 = 3229, 1202 .unk2 = 3229,
1203 RADIOREGS(0x73, 0x09, 0x76, 0x0F, 0x00, 0x01, 0x07, 0x15, 1203 RADIOREGS(0x73, 0x76, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1204 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A, 1204 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
1205 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80), 1205 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
1206 PHYREGS(0xCD03, 0xC903, 0xC503, 0x3604, 0x3A04, 0x3F04), 1206 PHYREGS(0x03CD, 0x03C9, 0x03C5, 0x0436, 0x043A, 0x043F),
1207 }, 1207 },
1208 { .channel = 4, 1208 { .channel = 4,
1209 .freq = 2427, /* MHz */ 1209 .freq = 2427, /* MHz */
1210 .unk2 = 3236, 1210 .unk2 = 3236,
1211 RADIOREGS(0x73, 0x09, 0x7B, 0x0F, 0x00, 0x01, 0x07, 0x15, 1211 RADIOREGS(0x73, 0x7B, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1212 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A, 1212 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x0A,
1213 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80), 1213 0x80, 0xFF, 0x88, 0x0C, 0x0A, 0x80),
1214 PHYREGS(0xCF03, 0xCB03, 0xC703, 0x3404, 0x3804, 0x3D04), 1214 PHYREGS(0x03CF, 0x03CB, 0x03C7, 0x0434, 0x0438, 0x043D),
1215 }, 1215 },
1216 { .channel = 5, 1216 { .channel = 5,
1217 .freq = 2432, /* MHz */ 1217 .freq = 2432, /* MHz */
1218 .unk2 = 3243, 1218 .unk2 = 3243,
1219 RADIOREGS(0x73, 0x09, 0x80, 0x0F, 0x00, 0x01, 0x07, 0x15, 1219 RADIOREGS(0x73, 0x80, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1220 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09, 1220 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0C, 0x09,
1221 0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80), 1221 0x80, 0xFF, 0x88, 0x0C, 0x09, 0x80),
1222 PHYREGS(0xD103, 0xCD03, 0xC903, 0x3104, 0x3604, 0x3A04), 1222 PHYREGS(0x03D1, 0x03CD, 0x03C9, 0x0431, 0x0436, 0x043A),
1223 }, 1223 },
1224 { .channel = 6, 1224 { .channel = 6,
1225 .freq = 2437, /* MHz */ 1225 .freq = 2437, /* MHz */
1226 .unk2 = 3249, 1226 .unk2 = 3249,
1227 RADIOREGS(0x73, 0x09, 0x85, 0x0F, 0x00, 0x01, 0x07, 0x15, 1227 RADIOREGS(0x73, 0x85, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1228 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08, 1228 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0B, 0x08,
1229 0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80), 1229 0x80, 0xFF, 0x88, 0x0B, 0x08, 0x80),
1230 PHYREGS(0xD303, 0xCF03, 0xCB03, 0x2F04, 0x3404, 0x3804), 1230 PHYREGS(0x03D3, 0x03CF, 0x03CB, 0x042F, 0x0434, 0x0438),
1231 }, 1231 },
1232 { .channel = 7, 1232 { .channel = 7,
1233 .freq = 2442, /* MHz */ 1233 .freq = 2442, /* MHz */
1234 .unk2 = 3256, 1234 .unk2 = 3256,
1235 RADIOREGS(0x73, 0x09, 0x8A, 0x0F, 0x00, 0x01, 0x07, 0x15, 1235 RADIOREGS(0x73, 0x8A, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1236 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07, 1236 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x07,
1237 0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80), 1237 0x80, 0xFF, 0x88, 0x0A, 0x07, 0x80),
1238 PHYREGS(0xD503, 0xD103, 0xCD03, 0x2D04, 0x3104, 0x3604), 1238 PHYREGS(0x03D5, 0x03D1, 0x03CD, 0x042D, 0x0431, 0x0436),
1239 }, 1239 },
1240 { .channel = 8, 1240 { .channel = 8,
1241 .freq = 2447, /* MHz */ 1241 .freq = 2447, /* MHz */
1242 .unk2 = 3263, 1242 .unk2 = 3263,
1243 RADIOREGS(0x73, 0x09, 0x8F, 0x0F, 0x00, 0x01, 0x07, 0x15, 1243 RADIOREGS(0x73, 0x8F, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1244 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06, 1244 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x0A, 0x06,
1245 0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80), 1245 0x80, 0xFF, 0x88, 0x0A, 0x06, 0x80),
1246 PHYREGS(0xD703, 0xD303, 0xCF03, 0x2B04, 0x2F04, 0x3404), 1246 PHYREGS(0x03D7, 0x03D3, 0x03CF, 0x042B, 0x042F, 0x0434),
1247 }, 1247 },
1248 { .channel = 9, 1248 { .channel = 9,
1249 .freq = 2452, /* MHz */ 1249 .freq = 2452, /* MHz */
1250 .unk2 = 3269, 1250 .unk2 = 3269,
1251 RADIOREGS(0x73, 0x09, 0x94, 0x0F, 0x00, 0x01, 0x07, 0x15, 1251 RADIOREGS(0x73, 0x94, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1252 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06, 1252 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x09, 0x06,
1253 0x80, 0xFF, 0x88, 0x09, 0x06, 0x80), 1253 0x80, 0xFF, 0x88, 0x09, 0x06, 0x80),
1254 PHYREGS(0xD903, 0xD503, 0xD103, 0x2904, 0x2D04, 0x3104), 1254 PHYREGS(0x03D9, 0x03D5, 0x03D1, 0x0429, 0x042D, 0x0431),
1255 }, 1255 },
1256 { .channel = 10, 1256 { .channel = 10,
1257 .freq = 2457, /* MHz */ 1257 .freq = 2457, /* MHz */
1258 .unk2 = 3276, 1258 .unk2 = 3276,
1259 RADIOREGS(0x73, 0x09, 0x99, 0x0F, 0x00, 0x01, 0x07, 0x15, 1259 RADIOREGS(0x73, 0x99, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1260 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05, 1260 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x05,
1261 0x80, 0xFF, 0x88, 0x08, 0x05, 0x80), 1261 0x80, 0xFF, 0x88, 0x08, 0x05, 0x80),
1262 PHYREGS(0xDB03, 0xD703, 0xD303, 0x2704, 0x2B04, 0x2F04), 1262 PHYREGS(0x03DB, 0x03D7, 0x03D3, 0x0427, 0x042B, 0x042F),
1263 }, 1263 },
1264 { .channel = 11, 1264 { .channel = 11,
1265 .freq = 2462, /* MHz */ 1265 .freq = 2462, /* MHz */
1266 .unk2 = 3283, 1266 .unk2 = 3283,
1267 RADIOREGS(0x73, 0x09, 0x9E, 0x0F, 0x00, 0x01, 0x07, 0x15, 1267 RADIOREGS(0x73, 0x9E, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1268 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04, 1268 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x04,
1269 0x80, 0xFF, 0x88, 0x08, 0x04, 0x80), 1269 0x80, 0xFF, 0x88, 0x08, 0x04, 0x80),
1270 PHYREGS(0xDD03, 0xD903, 0xD503, 0x2404, 0x2904, 0x2D04), 1270 PHYREGS(0x03DD, 0x03D9, 0x03D5, 0x0424, 0x0429, 0x042D),
1271 }, 1271 },
1272 { .channel = 12, 1272 { .channel = 12,
1273 .freq = 2467, /* MHz */ 1273 .freq = 2467, /* MHz */
1274 .unk2 = 3289, 1274 .unk2 = 3289,
1275 RADIOREGS(0x73, 0x09, 0xA3, 0x0F, 0x00, 0x01, 0x07, 0x15, 1275 RADIOREGS(0x73, 0xA3, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1276 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03, 1276 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x08, 0x03,
1277 0x80, 0xFF, 0x88, 0x08, 0x03, 0x80), 1277 0x80, 0xFF, 0x88, 0x08, 0x03, 0x80),
1278 PHYREGS(0xDF03, 0xDB03, 0xD703, 0x2204, 0x2704, 0x2B04), 1278 PHYREGS(0x03DF, 0x03DB, 0x03D7, 0x0422, 0x0427, 0x042B),
1279 }, 1279 },
1280 { .channel = 13, 1280 { .channel = 13,
1281 .freq = 2472, /* MHz */ 1281 .freq = 2472, /* MHz */
1282 .unk2 = 3296, 1282 .unk2 = 3296,
1283 RADIOREGS(0x73, 0x09, 0xA8, 0x0F, 0x00, 0x01, 0x07, 0x15, 1283 RADIOREGS(0x73, 0xA8, 0x09, 0x0F, 0x00, 0x01, 0x07, 0x15,
1284 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03, 1284 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x03,
1285 0x80, 0xFF, 0x88, 0x07, 0x03, 0x80), 1285 0x80, 0xFF, 0x88, 0x07, 0x03, 0x80),
1286 PHYREGS(0xE103, 0xDD03, 0xD903, 0x2004, 0x2404, 0x2904), 1286 PHYREGS(0x03E1, 0x03DD, 0x03D9, 0x0420, 0x0424, 0x0429),
1287 }, 1287 },
1288 { .channel = 14, 1288 { .channel = 14,
1289 .freq = 2484, /* MHz */ 1289 .freq = 2484, /* MHz */
1290 .unk2 = 3312, 1290 .unk2 = 3312,
1291 RADIOREGS(0x73, 0x09, 0xB4, 0x0F, 0xFF, 0x01, 0x07, 0x15, 1291 RADIOREGS(0x73, 0xB4, 0x09, 0x0F, 0xFF, 0x01, 0x07, 0x15,
1292 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01, 1292 0x01, 0x8F, 0xFF, 0xFF, 0xFF, 0x88, 0x07, 0x01,
1293 0x80, 0xFF, 0x88, 0x07, 0x01, 0x80), 1293 0x80, 0xFF, 0x88, 0x07, 0x01, 0x80),
1294 PHYREGS(0xE603, 0xE203, 0xDE03, 0x1B04, 0x1F04, 0x2404), 1294 PHYREGS(0x03E6, 0x03E2, 0x03DE, 0x041B, 0x041F, 0x0424),
1295 }, 1295 },
1296}; 1296};
1297 1297
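The channel table above ends here; each entry ties a channel number and centre frequency to the RADIOREGS and PHYREGS values programmed when the radio tunes to that channel. As a rough illustration of how such a table is typically consumed — a sketch only, with invented names, not the driver's actual API — a tuning path can scan the array for a matching frequency and hand the per-channel register blocks to the radio/PHY write routines:

	/*
	 * Sketch of a frequency lookup over a channel table like the one
	 * above.  All names here are invented for illustration; the real
	 * driver has its own entry type carrying the RADIOREGS/PHYREGS
	 * values for each channel.
	 */
	#include <stddef.h>
	#include <stdint.h>

	struct chantab_entry_sketch {
		uint8_t channel;
		uint16_t freq;		/* MHz, as in the .freq fields above */
		/* radio and PHY register values would follow here */
	};

	static const struct chantab_entry_sketch *
	chantab_find_by_freq(const struct chantab_entry_sketch *tab, size_t len,
			     uint16_t freq)
	{
		size_t i;

		for (i = 0; i < len; i++) {
			if (tab[i].freq == freq)
				return &tab[i];
		}
		return NULL;	/* frequency not covered by this radio revision */
	}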
@@ -1299,7 +1299,7 @@ void b2055_upload_inittab(struct b43_wldev *dev,
1299 bool ghz5, bool ignore_uploadflag) 1299 bool ghz5, bool ignore_uploadflag)
1300{ 1300{
1301 const struct b2055_inittab_entry *e; 1301 const struct b2055_inittab_entry *e;
1302 unsigned int i; 1302 unsigned int i, writes = 0;
1303 u16 value; 1303 u16 value;
1304 1304
1305 for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) { 1305 for (i = 0; i < ARRAY_SIZE(b2055_inittab); i++) {
@@ -1312,6 +1312,8 @@ void b2055_upload_inittab(struct b43_wldev *dev,
1312 else 1312 else
1313 value = e->ghz2; 1313 value = e->ghz2;
1314 b43_radio_write16(dev, i, value); 1314 b43_radio_write16(dev, i, value);
1315 if (++writes % 4 == 0)
1316 b43_read32(dev, B43_MMIO_MACCTL); /* flush */
1315 } 1317 }
1316 } 1318 }
1317} 1319}
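The two hunks above add a write counter to b2055_upload_inittab() and read B43_MMIO_MACCTL back after every fourth radio register write. Reading any register on the device is a common way to flush posted MMIO writes, so a long burst of register writes is not left buffered on the bus. A minimal sketch of the same pattern outside the driver (hypothetical helper, generic MMIO accessors) looks like this:

	#include <linux/io.h>

	/*
	 * Sketch only: burst-write a block of 16-bit registers and flush the
	 * posted writes with a read after every fourth write, mirroring the
	 * pattern added to b2055_upload_inittab() above.  The register
	 * offsets and the flush register are placeholders.
	 */
	static void burst_write_with_flush(void __iomem *base, const u16 *vals,
					   unsigned int n)
	{
		unsigned int i;

		for (i = 0; i < n; i++) {
			iowrite16(vals[i], base + 2 * i);
			if ((i + 1) % 4 == 0)
				(void)ioread32(base);	/* flush posted writes */
		}
	}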
diff --git a/drivers/net/wireless/b43/radio_2056.c b/drivers/net/wireless/b43/radio_2056.c
index d8563192ce56..8890df067029 100644
--- a/drivers/net/wireless/b43/radio_2056.c
+++ b/drivers/net/wireless/b43/radio_2056.c
@@ -24,17 +24,9073 @@
24#include "radio_2056.h" 24#include "radio_2056.h"
25#include "phy_common.h" 25#include "phy_common.h"
26 26
27struct b2056_inittab_entry {
28 /* Value to write if we use the 5GHz band. */
29 u16 ghz5;
30 /* Value to write if we use the 2.4GHz band. */
31 u16 ghz2;
32 /* Flags */
33 u8 flags;
34};
35#define B2056_INITTAB_ENTRY_OK 0x01
36#define B2056_INITTAB_UPLOAD 0x02
37#define UPLOAD .flags = B2056_INITTAB_ENTRY_OK | B2056_INITTAB_UPLOAD
38#define NOUPLOAD .flags = B2056_INITTAB_ENTRY_OK
39
40struct b2056_inittabs_pts {
41 const struct b2056_inittab_entry *syn;
42 unsigned int syn_length;
43 const struct b2056_inittab_entry *tx;
44 unsigned int tx_length;
45 const struct b2056_inittab_entry *rx;
46 unsigned int rx_length;
47};
48
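(Aside, before the tables themselves: the flag bits above mark which of the following entries are valid (B2056_INITTAB_ENTRY_OK) and which must actually be written to the radio during init (B2056_INITTAB_UPLOAD); the syn/tx/rx pointers in b2056_inittabs_pts group the three per-core tables for one radio revision. By analogy with the b2055_upload_inittab() hunk earlier in this diff, an upload routine would walk a table and write the 5 GHz or 2.4 GHz value for each flagged entry — sketched below with placeholder names, not the driver's actual function.)

	/*
	 * Sketch, not the driver's function: iterate one of the tables below
	 * and write only the entries whose flags ask for it.  The write
	 * callback stands in for the driver's radio register accessor.
	 */
	static void upload_inittab_sketch(const struct b2056_inittab_entry *tab,
					  unsigned int length, bool ghz5,
					  bool ignore_uploadflag,
					  void (*radio_write16)(u16 offset, u16 value))
	{
		unsigned int i;

		for (i = 0; i < length; i++) {
			if (!(tab[i].flags & B2056_INITTAB_ENTRY_OK))
				continue;
			if (!ignore_uploadflag && !(tab[i].flags & B2056_INITTAB_UPLOAD))
				continue;
			radio_write16(i, ghz5 ? tab[i].ghz5 : tab[i].ghz2);
		}
	}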
49static const struct b2056_inittab_entry b2056_inittab_rev3_syn[] = {
50 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
51 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
52 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
53 [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
54 [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
55 [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
56 [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
57 [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
58 [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
59 [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
60 [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
61 [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
62 [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
63 [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
64 [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
65 [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
66 [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
67 [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
68 [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
69 [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
70 [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
71 [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
72 [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
73 [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
74 [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
75 [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
76 [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
77 [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
78 [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
79 [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
80 [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
81 [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
82 [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
83 [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
84 [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
85 [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
86 [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
87 [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
88 [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
89 [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
90 [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
91 [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
92 [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
93 [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
94 [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
95 [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
96 [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
97 [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
98 [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
99 [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
100 [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
101 [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
102 [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
103 [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
104 [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
105 [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
106 [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
107 [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
108 [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
109 [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
110 [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
111 [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
112 [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
113 [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
114 [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
115 [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
116 [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
117 [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
118 [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
119 [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
120 [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
121 [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
122 [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
123 [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
124 [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
125 [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
126 [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
127 [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
128 [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
129 [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
130 [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
131 [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
132 [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
133 [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
134 [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
135 [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
136 [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
137 [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
138 [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
139 [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
140 [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
141 [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
142 [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
143 [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
144 [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
145 [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
146 [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
147 [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
148 [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
149 [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
150 [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
151 [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
152 [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
153 [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
154 [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
155 [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
156 [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
157 [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
158 [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
159 [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
160 [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
161 [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
162 [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
163 [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
164 [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
165 [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
166 [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
167 [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
168 [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
169 [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
170 [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
171 [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
172 [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
173 [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
174 [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
175 [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
176 [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
177 [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
178 [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
179 [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
180 [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
181 [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
182 [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
183 [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
184 [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
185 [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
186 [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
187 [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
188 [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
189 [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
190 [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
191 [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
192 [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
193 [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
194 [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
195 [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
196 [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
197 [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
198 [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
199 [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
200 [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
201 [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
202 [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
203 [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
204 [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
205 [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
206 [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
207 [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
208 [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
209 [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
210 [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
211 [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
212 [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
213 [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
214 [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
215 [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
216 [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
217 [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
218 [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
219 [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
220 [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
221 [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
222 [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
223 [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
224 [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
225 [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
226 [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
227 [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
228 [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
229 [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
230 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
231};
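/*
 * Illustrative sketch only -- not part of the driver.  It shows how a
 * band-indexed init table of the shape used above can be walked: every
 * entry flagged UPLOAD is written to the radio, using the .ghz5 value on
 * a 5 GHz channel and the .ghz2 value on 2.4 GHz.  The entry layout, the
 * "upload" member and the write16 callback are assumptions made for this
 * example; they do not mirror the driver's real helpers.
 */
struct inittab_entry_sketch {
	u16 ghz5;	/* default value when tuned to a 5 GHz channel */
	u16 ghz2;	/* default value when tuned to a 2.4 GHz channel */
	bool upload;	/* write this register during radio init? */
};

static void upload_inittab_sketch(bool ghz5, u16 base,
				  const struct inittab_entry_sketch *tab,
				  size_t size,
				  void (*write16)(u16 reg, u16 value))
{
	size_t i;

	/* Registers are addressed by table index relative to a core base. */
	for (i = 0; i < size; i++) {
		if (!tab[i].upload)
			continue;
		write16(base + i, ghz5 ? tab[i].ghz5 : tab[i].ghz2);
	}
}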
232
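/*
 * Default register values (5 GHz / 2.4 GHz) for the 2056 radio TX core,
 * radio revision 3.  Entries flagged UPLOAD are written to the hardware
 * during init; NOUPLOAD entries are kept for reference only.
 */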
233static const struct b2056_inittab_entry b2056_inittab_rev3_tx[] = {
234 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
235 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
236 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
237 [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
238 [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
239 [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
240 [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
241 [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
242 [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
243 [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
244 [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
245 [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
246 [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
247 [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
248 [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
249 [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
250 [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
251 [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
252 [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
253 [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
254 [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
255 [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
256 [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
257 [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
258 [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
259 [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
260 [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
261 [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
262 [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
263 [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
264 [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
265 [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
266 [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
267 [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
268 [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
269 [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
270 [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
271 [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
272 [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
273 [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
274 [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
275 [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
276 [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
277 [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
278 [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
279 [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
280 [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
281 [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
282 [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
283 [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
284 [B2056_TX_PA_SPARE2] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
285 [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
286 [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
287 [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
288 [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
289 [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
290 [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x002d, .ghz2 = 0x002d, NOUPLOAD, },
291 [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
292 [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
293 [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
294 [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
295 [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
296 [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
297 [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
298 [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
299 [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
300 [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
301 [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
302 [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
303 [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
304 [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
305 [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
306 [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
307 [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
308 [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
309 [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
310 [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
311 [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
312 [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
313 [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
314 [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
315 [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
316 [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
317 [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
318 [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
319 [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
320 [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
321 [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
322 [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
323 [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
324 [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
325 [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
326 [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
327 [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
328 [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
329 [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
330 [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
331 [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
332 [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
333 [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
334 [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
335 [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0074, .ghz2 = 0x0074, UPLOAD, },
336 [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
337 [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
338 [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
339 [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
340 [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
341 [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
342 [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
343 [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
344 [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
345 [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
346 [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
347 [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
348 [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
349 [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
350 [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
351 [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
352 [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
353 [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
354 [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
355 [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
356 [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
357 [B2056_TX_TXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
358 [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
359 [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
360 [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
361 [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
362 [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
363 [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
364 [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
365 [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
366 [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
367 [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
368 [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
369 [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
370 [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
371 [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
372 [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
373 [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
374 [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
375 [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
376 [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
377 [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
378 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
379};
380
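/*
 * Default register values (5 GHz / 2.4 GHz) for the 2056 radio RX core,
 * radio revision 3.
 */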
381static const struct b2056_inittab_entry b2056_inittab_rev3_rx[] = {
382 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
383 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
384 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
385 [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
386 [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
387 [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
388 [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
389 [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
390 [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
391 [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
392 [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
393 [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
394 [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
395 [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
396 [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
397 [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
398 [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
399 [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
400 [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
401 [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
402 [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
403 [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
404 [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
405 [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
406 [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
407 [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
408 [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
409 [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
410 [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
411 [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
412 [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
413 [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
414 [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
415 [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
416 [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
417 [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
418 [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
419 [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
420 [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
421 [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
422 [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
423 [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
424 [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
425 [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
426 [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
427 [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
428 [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
429 [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
430 [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
431 [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
432 [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
433 [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
434 [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
435 [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
436 [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
437 [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
438 [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
439 [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0099, .ghz2 = 0x0099, NOUPLOAD, },
440 [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
441 [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0044, .ghz2 = 0x0044, UPLOAD, },
442 [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
443 [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
444 [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
445 [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
446 [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
447 [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
448 [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
449 [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0099, .ghz2 = 0x0099, NOUPLOAD, },
450 [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
451 [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
452 [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
453 [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
454 [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
455 [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
456 [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
457 [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
458 [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
459 [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
460 [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
461 [B2056_RX_TIA_IMISC] = { .ghz5 = 0x0057, .ghz2 = 0x0057, NOUPLOAD, },
462 [B2056_RX_TIA_QMISC] = { .ghz5 = 0x0057, .ghz2 = 0x0057, NOUPLOAD, },
463 [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
464 [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
465 [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
466 [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
467 [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
468 [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
469 [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
470 [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
471 [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
472 [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
473 [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
474 [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
475 [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
476 [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
477 [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
478 [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
479 [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
480 [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
481 [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
482 [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
483 [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
484 [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
485 [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
486 [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
487 [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
488 [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
489 [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
490 [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
491 [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
492 [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
493 [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
494 [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
495 [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
496 [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
497 [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
498 [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
499 [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
500 [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
501 [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
502 [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
503 [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
504 [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
505 [B2056_RX_RXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
506 [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
507 [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
508 [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
509 [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
510 [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
511 [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
512 [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
513 [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
514 [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
515 [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
516 [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
517 [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
518 [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
519 [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
520 [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
521 [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
522 [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
523 [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
524 [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
525 [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
526 [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
527 [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
528 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
529};
530
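/*
 * Default register values (5 GHz / 2.4 GHz) for the 2056 radio synthesizer
 * core, radio revision 4.
 */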
531static const struct b2056_inittab_entry b2056_inittab_rev4_syn[] = {
532 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
533 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
534 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
535 [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
536 [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
537 [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
538 [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
539 [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
540 [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
541 [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
542 [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
543 [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
544 [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
545 [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
546 [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
547 [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
548 [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
549 [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
550 [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
551 [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
552 [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
553 [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
554 [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
555 [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
556 [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
557 [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
558 [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
559 [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
560 [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
561 [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
562 [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
563 [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
564 [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
565 [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
566 [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
567 [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
568 [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
569 [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
570 [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
571 [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
572 [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
573 [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
574 [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
575 [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
576 [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
577 [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
578 [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
579 [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
580 [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
581 [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
582 [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
583 [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
584 [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
585 [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
586 [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
587 [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
588 [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
589 [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
590 [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
591 [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
592 [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
593 [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
594 [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
595 [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
596 [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
597 [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
598 [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
599 [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
600 [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
601 [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
602 [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
603 [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
604 [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
605 [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
606 [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
607 [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
608 [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
609 [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
610 [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
611 [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
612 [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
613 [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
614 [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
615 [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
616 [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
617 [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
618 [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
619 [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
620 [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
621 [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
622 [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
623 [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
624 [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
625 [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
626 [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
627 [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
628 [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
629 [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
630 [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
631 [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
632 [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
633 [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
634 [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
635 [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
636 [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
637 [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
638 [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
639 [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
640 [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
641 [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
642 [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
643 [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
644 [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
645 [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
646 [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
647 [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
648 [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
649 [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
650 [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
651 [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
652 [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
653 [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
654 [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
655 [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
656 [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
657 [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
658 [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
659 [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
660 [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
661 [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
662 [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
663 [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
664 [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
665 [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
666 [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
667 [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
668 [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
669 [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
670 [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
671 [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
672 [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
673 [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
674 [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
675 [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
676 [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
677 [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
678 [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
679 [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
680 [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
681 [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
682 [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
683 [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
684 [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
685 [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
686 [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
687 [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
688 [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
689 [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
690 [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
691 [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
692 [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
693 [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
694 [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
695 [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
696 [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
697 [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
698 [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
699 [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
700 [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
701 [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
702 [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
703 [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
704 [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
705 [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
706 [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
707 [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
708 [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
709 [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
710 [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
711 [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
712 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
713};
714
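/*
 * Default register values (5 GHz / 2.4 GHz) for the 2056 radio TX core,
 * radio revision 4.
 */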
715static const struct b2056_inittab_entry b2056_inittab_rev4_tx[] = {
716 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
717 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
718 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
719 [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
720 [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
721 [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
722 [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
723 [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
724 [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
725 [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
726 [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
727 [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
728 [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
729 [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
730 [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
731 [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
732 [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
733 [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
734 [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
735 [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
736 [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
737 [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
738 [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
739 [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
740 [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
741 [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
742 [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
743 [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
744 [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
745 [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
746 [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
747 [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
748 [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
749 [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
750 [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
751 [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
752 [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
753 [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
754 [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
755 [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
756 [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
757 [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
758 [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
759 [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
760 [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
761 [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
762 [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
763 [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
764 [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
765 [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
766 [B2056_TX_PA_SPARE2] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
767 [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
768 [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
769 [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
770 [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
771 [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
772 [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x002d, .ghz2 = 0x002d, NOUPLOAD, },
773 [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
774 [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
775 [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
776 [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
777 [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
778 [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
779 [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
780 [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
781 [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
782 [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
783 [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
784 [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
785 [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
786 [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
787 [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
788 [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
789 [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
790 [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
791 [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
792 [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
793 [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
794 [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
795 [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
796 [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
797 [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
798 [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
799 [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
800 [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
801 [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
802 [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
803 [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
804 [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
805 [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
806 [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
807 [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
808 [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
809 [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
810 [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
811 [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
812 [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
813 [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
814 [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
815 [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
816 [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
817 [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0072, .ghz2 = 0x0072, UPLOAD, },
818 [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
819 [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
820 [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
821 [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
822 [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
823 [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
824 [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
825 [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
826 [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
827 [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
828 [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
829 [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
830 [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
831 [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
832 [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
833 [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
834 [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
835 [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
836 [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
837 [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
838 [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
839 [B2056_TX_TXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
840 [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
841 [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
842 [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
843 [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
844 [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
845 [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
846 [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
847 [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
848 [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
849 [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
850 [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
851 [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
852 [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
853 [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
854 [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
855 [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
856 [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
857 [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
858 [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
859 [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
860 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
861};
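
/*
 * Illustrative sketch only (not part of the original file): one plausible
 * way tables like the ones above and below are consumed.  The assumption is
 * that entries flagged UPLOAD must be written to the radio during init,
 * picking .ghz5 or .ghz2 for the current band, while NOUPLOAD entries are
 * taken to already match the hardware defaults.  The helper name, the
 * "routing" parameter, the B2056_INITTAB_UPLOAD flag test and the
 * b43_radio_write() call are assumptions based on the surrounding driver,
 * not verified against it.
 */
#if 0	/* sketch, not compiled */
static void b2056_upload_inittab_sketch(struct b43_wldev *dev, bool ghz5,
					u16 routing,
					const struct b2056_inittab_entry *e,
					unsigned int length)
{
	unsigned int i;

	for (i = 0; i < length; i++, e++) {
		if (!(e->flags & B2056_INITTAB_UPLOAD))
			continue;	/* hardware default assumed OK */
		b43_radio_write(dev, routing | i,
				ghz5 ? e->ghz5 : e->ghz2);
	}
}
#endif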
862
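/*
 * Default register values for the RX core of the 2056 radio, revision 4.
 * The 5 GHz and 2.4 GHz values are identical throughout this table;
 * presumably only the entries marked UPLOAD are written back by the driver.
 */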
863static const struct b2056_inittab_entry b2056_inittab_rev4_rx[] = {
864 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
865 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
866 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
867 [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
868 [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
869 [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
870 [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
871 [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
872 [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
873 [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
874 [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
875 [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
876 [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
877 [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
878 [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
879 [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
880 [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
881 [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
882 [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
883 [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
884 [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
885 [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
886 [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
887 [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
888 [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
889 [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
890 [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
891 [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
892 [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
893 [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
894 [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
895 [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
896 [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
897 [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
898 [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
899 [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
900 [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
901 [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
902 [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
903 [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
904 [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
905 [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
906 [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
907 [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
908 [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
909 [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
910 [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
911 [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
912 [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
913 [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
914 [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
915 [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
916 [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
917 [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
918 [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
919 [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
920 [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
921 [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
922 [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
923 [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0044, .ghz2 = 0x0044, UPLOAD, },
924 [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
925 [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
926 [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
927 [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
928 [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
929 [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
930 [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
931 [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
932 [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
933 [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
934 [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
935 [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
936 [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
937 [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
938 [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
939 [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
940 [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
941 [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
942 [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
943 [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
944 [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
945 [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
946 [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
947 [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
948 [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
949 [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
950 [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
951 [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
952 [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
953 [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
954 [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x002f, .ghz2 = 0x002f, UPLOAD, },
955 [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
956 [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
957 [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
958 [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
959 [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
960 [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
961 [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
962 [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
963 [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
964 [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
965 [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
966 [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
967 [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
968 [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
969 [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
970 [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
971 [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
972 [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
973 [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
974 [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
975 [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
976 [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
977 [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
978 [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
979 [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
980 [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
981 [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
982 [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
983 [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
984 [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
985 [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
986 [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
987 [B2056_RX_RXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
988 [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
989 [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
990 [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
991 [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
992 [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
993 [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
994 [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
995 [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
996 [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
997 [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
998 [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
999 [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1000 [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1001 [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1002 [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1003 [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1004 [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1005 [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1006 [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1007 [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1008 [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1009 [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1010 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1011};
1012
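/*
 * Synthesizer-core defaults for revision 5 of the 2056 radio.  The layout
 * follows the preceding tables; only a handful of PLL and LOGEN registers
 * carry the UPLOAD flag.
 */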
1013static const struct b2056_inittab_entry b2056_inittab_rev5_syn[] = {
1014 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1015 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1016 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1017 [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1018 [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1019 [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1020 [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1021 [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1022 [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1023 [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1024 [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1025 [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1026 [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1027 [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1028 [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1029 [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1030 [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1031 [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1032 [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1033 [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1034 [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1035 [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1036 [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1037 [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1038 [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1039 [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1040 [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1041 [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1042 [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1043 [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1044 [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1045 [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1046 [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
1047 [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1048 [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1049 [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1050 [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1051 [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1052 [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1053 [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1054 [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1055 [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1056 [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1057 [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1058 [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1059 [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
1060 [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
1061 [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
1062 [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1063 [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1064 [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1065 [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1066 [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1067 [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1068 [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1069 [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1070 [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1071 [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1072 [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
1073 [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
1074 [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
1075 [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1076 [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1077 [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1078 [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1079 [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1080 [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
1081 [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1082 [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1083 [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1084 [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
1085 [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
1086 [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
1087 [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
1088 [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
1089 [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1090 [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1091 [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1092 [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
1093 [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1094 [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1095 [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
1096 [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
1097 [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
1098 [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1099 [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1100 [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1101 [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
1102 [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
1103 [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
1104 [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
1105 [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1106 [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1107 [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1108 [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
1109 [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1110 [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1111 [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1112 [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1113 [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1114 [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1115 [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1116 [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
1117 [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1118 [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1119 [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1120 [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1121 [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1122 [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1123 [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1124 [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
1125 [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1126 [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1127 [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1128 [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
1129 [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1130 [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
1131 [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1132 [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1133 [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1134 [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1135 [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1136 [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1137 [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1138 [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1139 [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1140 [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1141 [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1142 [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1143 [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1144 [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1145 [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
1146 [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1147 [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1148 [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1149 [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1150 [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1151 [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1152 [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1153 [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1154 [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1155 [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1156 [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1157 [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1158 [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1159 [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1160 [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1161 [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1162 [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1163 [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1164 [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1165 [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1166 [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1167 [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1168 [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1169 [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1170 [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1171 [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1172 [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1173 [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1174 [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1175 [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1176 [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1177 [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1178 [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1179 [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1180 [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1181 [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1182 [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1183 [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1184 [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1185 [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1186 [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1187 [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1188 [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1189 [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1190 [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1191 [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
1192 [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1193 [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1194 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1195};
1196
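/*
 * TX-core defaults for revision 5 of the 2056 radio.  Compared with the
 * rev4 TX table above, this revision adds the per-step
 * B2056_TX_GMBB_IDAC0..7 entries at the end.
 */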
1197static const struct b2056_inittab_entry b2056_inittab_rev5_tx[] = {
1198 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1199 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1200 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1201 [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1202 [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1203 [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1204 [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1205 [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1206 [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1207 [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1208 [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1209 [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1210 [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1211 [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1212 [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1213 [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1214 [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1215 [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1216 [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1217 [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1218 [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1219 [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1220 [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1221 [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1222 [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1223 [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1224 [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1225 [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1226 [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1227 [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1228 [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1229 [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1230 [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1231 [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1232 [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1233 [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1234 [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1235 [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1236 [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1237 [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1238 [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
1239 [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1240 [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1241 [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1242 [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1243 [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1244 [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1245 [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1246 [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1247 [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
1248 [B2056_TX_PA_SPARE2] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
1249 [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1250 [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1251 [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1252 [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
1253 [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1254 [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x002d, .ghz2 = 0x002d, NOUPLOAD, },
1255 [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1256 [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
1257 [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
1258 [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1259 [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1260 [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1261 [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1262 [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1263 [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1264 [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
1265 [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1266 [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
1267 [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
1268 [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1269 [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1270 [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
1271 [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1272 [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1273 [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
1274 [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
1275 [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1276 [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1277 [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1278 [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1279 [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1280 [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
1281 [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1282 [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
1283 [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1284 [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
1285 [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
1286 [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1287 [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1288 [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1289 [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1290 [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1291 [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
1292 [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1293 [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1294 [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1295 [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1296 [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1297 [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1298 [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1299 [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
1300 [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1301 [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
1302 [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1303 [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1304 [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1305 [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1306 [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1307 [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1308 [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1309 [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1310 [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1311 [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1312 [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
1313 [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
1314 [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
1315 [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
1316 [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
1317 [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
1318 [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
1319 [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
1320 [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
1321 [B2056_TX_TXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1322 [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1323 [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1324 [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1325 [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1326 [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1327 [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1328 [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1329 [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1330 [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1331 [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1332 [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1333 [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1334 [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1335 [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1336 [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1337 [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1338 [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1339 [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1340 [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1341 [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1342 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1343 [B2056_TX_GMBB_IDAC0] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1344 [B2056_TX_GMBB_IDAC1] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1345 [B2056_TX_GMBB_IDAC2] = { .ghz5 = 0x0071, .ghz2 = 0x0071, UPLOAD, },
1346 [B2056_TX_GMBB_IDAC3] = { .ghz5 = 0x0071, .ghz2 = 0x0071, UPLOAD, },
1347 [B2056_TX_GMBB_IDAC4] = { .ghz5 = 0x0072, .ghz2 = 0x0072, UPLOAD, },
1348 [B2056_TX_GMBB_IDAC5] = { .ghz5 = 0x0073, .ghz2 = 0x0073, UPLOAD, },
1349 [B2056_TX_GMBB_IDAC6] = { .ghz5 = 0x0074, .ghz2 = 0x0074, UPLOAD, },
1350 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
1351};
1352
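/*
 * RX-core defaults for revision 5 of the 2056 radio.  Same register set as
 * the rev4 RX table above, with a few different bias/VCM values and UPLOAD
 * choices (e.g. the MIXA/MIXG CMFB IDACs and the RXLPF output VCM).
 */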
1353static const struct b2056_inittab_entry b2056_inittab_rev5_rx[] = {
1354 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1355 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1356 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1357 [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1358 [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1359 [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1360 [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1361 [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1362 [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1363 [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1364 [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1365 [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1366 [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1367 [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1368 [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1369 [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1370 [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1371 [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1372 [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1373 [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1374 [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1375 [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1376 [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1377 [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1378 [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1379 [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1380 [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1381 [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1382 [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1383 [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1384 [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1385 [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1386 [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1387 [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
1388 [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
1389 [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
1390 [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
1391 [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
1392 [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
1393 [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1394 [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
1395 [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
1396 [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1397 [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1398 [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
1399 [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
1400 [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
1401 [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
1402 [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1403 [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1404 [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1405 [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
1406 [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
1407 [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
1408 [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
1409 [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1410 [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1411 [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
1412 [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1413 [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
1414 [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1415 [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
1416 [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
1417 [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1418 [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1419 [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1420 [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1421 [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
1422 [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1423 [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
1424 [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1425 [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
1426 [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1427 [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1428 [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1429 [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1430 [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1431 [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
1432 [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
1433 [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
1434 [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
1435 [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
1436 [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1437 [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1438 [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1439 [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1440 [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1441 [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1442 [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1443 [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1444 [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
1445 [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1446 [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
1447 [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
1448 [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
1449 [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1450 [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
1451 [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1452 [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1453 [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1454 [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1455 [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1456 [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1457 [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1458 [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1459 [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1460 [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1461 [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1462 [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1463 [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1464 [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1465 [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1466 [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1467 [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1468 [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
1469 [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
1470 [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1471 [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1472 [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
1473 [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1474 [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1475 [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1476 [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1477 [B2056_RX_RXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1478 [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1479 [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1480 [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1481 [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1482 [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1483 [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1484 [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1485 [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1486 [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1487 [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1488 [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1489 [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1490 [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1491 [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1492 [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1493 [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1494 [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1495 [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1496 [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1497 [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1498 [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1499 [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1500 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1501};
1502
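/*
 * Synthesizer-core defaults for revision 6 of the 2056 radio.  The early
 * entries repeat the rev5 synthesizer values above.
 */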
1503static const struct b2056_inittab_entry b2056_inittab_rev6_syn[] = {
1504 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1505 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1506 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1507 [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1508 [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1509 [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1510 [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1511 [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1512 [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1513 [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1514 [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1515 [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1516 [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1517 [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1518 [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1519 [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1520 [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1521 [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1522 [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1523 [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1524 [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1525 [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1526 [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1527 [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1528 [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1529 [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1530 [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1531 [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1532 [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1533 [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1534 [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1535 [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1536 [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
1537 [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1538 [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1539 [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1540 [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1541 [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1542 [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1543 [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1544 [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1545 [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1546 [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1547 [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1548 [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1549 [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
1550 [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
1551 [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
1552 [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1553 [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1554 [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1555 [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1556 [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1557 [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1558 [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1559 [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1560 [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1561 [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1562 [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
1563 [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
1564 [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
1565 [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1566 [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1567 [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1568 [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1569 [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1570 [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
1571 [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1572 [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1573 [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1574 [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
1575 [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
1576 [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
1577 [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
1578 [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
1579 [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1580 [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1581 [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1582 [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
1583 [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1584 [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1585 [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
1586 [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
1587 [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
1588 [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1589 [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1590 [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1591 [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
1592 [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
1593 [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
1594 [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
1595 [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1596 [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1597 [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1598 [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
1599 [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1600 [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1601 [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1602 [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1603 [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1604 [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1605 [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1606 [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
1607 [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1608 [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1609 [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1610 [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1611 [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1612 [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1613 [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1614 [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
1615 [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1616 [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1617 [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1618 [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
1619 [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1620 [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
1621 [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1622 [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1623 [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1624 [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1625 [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1626 [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1627 [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1628 [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1629 [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1630 [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1631 [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1632 [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1633 [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1634 [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1635 [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
1636 [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1637 [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1638 [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1639 [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1640 [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1641 [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1642 [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1643 [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1644 [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1645 [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1646 [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1647 [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1648 [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1649 [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1650 [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1651 [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1652 [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1653 [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1654 [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1655 [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1656 [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1657 [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1658 [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1659 [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1660 [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1661 [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1662 [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1663 [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1664 [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1665 [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1666 [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1667 [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1668 [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1669 [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1670 [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1671 [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1672 [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1673 [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1674 [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1675 [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1676 [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1677 [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1678 [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1679 [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1680 [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
1681 [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
1682 [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1683 [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1684 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1685};
1686
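/*
 * The per-revision tables in this block hold default values for the 2056
 * radio registers, indexed by register offset: .ghz5 and .ghz2 are the
 * per-band defaults, and entries flagged UPLOAD are the ones the init path
 * is expected to write out to the radio (with the band-appropriate value),
 * while NOUPLOAD entries only record the hardware defaults. This is a
 * hedged summary of how b2056_inittab_entry tables are normally consumed
 * by the driver, not a statement taken from this file itself.
 */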
1687static const struct b2056_inittab_entry b2056_inittab_rev6_tx[] = {
1688 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1689 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1690 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1691 [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1692 [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1693 [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1694 [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1695 [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1696 [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1697 [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1698 [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1699 [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1700 [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1701 [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1702 [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1703 [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1704 [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1705 [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1706 [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1707 [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1708 [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1709 [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1710 [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1711 [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1712 [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1713 [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1714 [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1715 [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1716 [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1717 [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1718 [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1719 [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1720 [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1721 [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1722 [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1723 [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1724 [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1725 [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1726 [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1727 [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1728 [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
1729 [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1730 [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1731 [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1732 [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1733 [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1734 [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1735 [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1736 [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1737 [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
1738 [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
1739 [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1740 [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1741 [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1742 [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
1743 [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1744 [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
1745 [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1746 [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
1747 [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
1748 [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1749 [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1750 [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1751 [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1752 [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1753 [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1754 [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
1755 [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1756 [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
1757 [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
1758 [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1759 [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1760 [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
1761 [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1762 [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1763 [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
1764 [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
1765 [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1766 [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1767 [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1768 [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1769 [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1770 [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
1771 [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1772 [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
1773 [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1774 [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
1775 [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
1776 [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1777 [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1778 [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1779 [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1780 [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1781 [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
1782 [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1783 [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1784 [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1785 [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1786 [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1787 [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1788 [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1789 [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
1790 [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1791 [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
1792 [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1793 [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1794 [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1795 [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1796 [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1797 [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1798 [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1799 [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1800 [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1801 [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1802 [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
1803 [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
1804 [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
1805 [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
1806 [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
1807 [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
1808 [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
1809 [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
1810 [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
1811 [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
1812 [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1813 [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1814 [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1815 [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1816 [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1817 [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1818 [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1819 [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1820 [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1821 [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1822 [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1823 [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1824 [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1825 [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1826 [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1827 [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1828 [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1829 [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1830 [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1831 [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1832 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1833 [B2056_TX_GMBB_IDAC0] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1834 [B2056_TX_GMBB_IDAC1] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1835 [B2056_TX_GMBB_IDAC2] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1836 [B2056_TX_GMBB_IDAC3] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1837 [B2056_TX_GMBB_IDAC4] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1838 [B2056_TX_GMBB_IDAC5] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1839 [B2056_TX_GMBB_IDAC6] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1840 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
1841};
1842
1843static const struct b2056_inittab_entry b2056_inittab_rev6_rx[] = {
1844 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1845 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1846 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1847 [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1848 [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1849 [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1850 [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1851 [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1852 [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1853 [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1854 [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1855 [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1856 [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1857 [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1858 [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1859 [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1860 [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1861 [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1862 [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1863 [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1864 [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1865 [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1866 [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1867 [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1868 [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1869 [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1870 [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1871 [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1872 [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1873 [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1874 [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
1875 [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1876 [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1877 [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
1878 [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
1879 [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
1880 [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
1881 [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
1882 [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
1883 [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1884 [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
1885 [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
1886 [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1887 [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1888 [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
1889 [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
1890 [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
1891 [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
1892 [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1893 [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1894 [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
1895 [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
1896 [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
1897 [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
1898 [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
1899 [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
1900 [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1901 [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
1902 [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1903 [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
1904 [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1905 [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
1906 [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
1907 [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1908 [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1909 [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1910 [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1911 [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
1912 [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1913 [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
1914 [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1915 [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
1916 [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1917 [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
1918 [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
1919 [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1920 [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1921 [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
1922 [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
1923 [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
1924 [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
1925 [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
1926 [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1927 [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1928 [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1929 [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
1930 [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1931 [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1932 [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1933 [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
1934 [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
1935 [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
1936 [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
1937 [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
1938 [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
1939 [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1940 [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
1941 [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1942 [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1943 [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1944 [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1945 [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1946 [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1947 [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1948 [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1949 [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
1950 [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1951 [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1952 [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1953 [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1954 [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1955 [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1956 [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1957 [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1958 [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
1959 [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
1960 [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
1961 [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
1962 [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
1963 [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1964 [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1965 [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1966 [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1967 [B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
1968 [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1969 [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1970 [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1971 [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1972 [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1973 [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1974 [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1975 [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1976 [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1977 [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1978 [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1979 [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1980 [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1981 [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1982 [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1983 [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1984 [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1985 [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1986 [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1987 [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1988 [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1989 [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1990 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1991};
1992
1993static const struct b2056_inittab_entry b2056_inittab_rev7_syn[] = {
1994 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1995 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1996 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1997 [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1998 [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
1999 [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2000 [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2001 [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2002 [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2003 [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2004 [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2005 [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2006 [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2007 [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2008 [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2009 [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2010 [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2011 [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2012 [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2013 [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2014 [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2015 [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2016 [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2017 [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2018 [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2019 [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2020 [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2021 [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2022 [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2023 [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2024 [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2025 [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2026 [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
2027 [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2028 [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2029 [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2030 [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2031 [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2032 [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2033 [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2034 [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2035 [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2036 [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2037 [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2038 [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2039 [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
2040 [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
2041 [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
2042 [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2043 [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2044 [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2045 [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2046 [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2047 [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2048 [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2049 [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2050 [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2051 [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2052 [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
2053 [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
2054 [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
2055 [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2056 [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2057 [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2058 [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2059 [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2060 [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
2061 [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2062 [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2063 [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2064 [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
2065 [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
2066 [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
2067 [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
2068 [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
2069 [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2070 [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2071 [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2072 [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
2073 [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2074 [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2075 [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
2076 [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
2077 [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
2078 [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2079 [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2080 [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2081 [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
2082 [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
2083 [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
2084 [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
2085 [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2086 [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2087 [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2088 [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
2089 [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2090 [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2091 [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2092 [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2093 [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2094 [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2095 [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2096 [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
2097 [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2098 [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2099 [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2100 [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2101 [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2102 [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2103 [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2104 [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
2105 [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2106 [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2107 [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2108 [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
2109 [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2110 [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
2111 [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2112 [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2113 [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2114 [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2115 [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2116 [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2117 [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2118 [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2119 [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2120 [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2121 [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2122 [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2123 [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2124 [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2125 [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
2126 [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2127 [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2128 [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2129 [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2130 [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2131 [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2132 [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2133 [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2134 [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2135 [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2136 [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2137 [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2138 [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2139 [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2140 [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2141 [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2142 [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2143 [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2144 [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2145 [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2146 [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2147 [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2148 [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2149 [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2150 [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2151 [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2152 [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2153 [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2154 [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2155 [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2156 [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2157 [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2158 [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2159 [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2160 [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2161 [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2162 [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2163 [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2164 [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2165 [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2166 [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2167 [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2168 [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2169 [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2170 [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2171 [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
2172 [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2173 [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2174 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2175};
2176
2177static const struct b2056_inittab_entry b2056_inittab_rev7_tx[] = {
2178 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2179 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2180 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2181 [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2182 [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2183 [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2184 [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2185 [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2186 [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2187 [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2188 [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2189 [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2190 [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2191 [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2192 [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2193 [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2194 [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2195 [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2196 [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2197 [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2198 [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2199 [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2200 [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2201 [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2202 [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2203 [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2204 [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2205 [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2206 [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2207 [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2208 [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2209 [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2210 [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2211 [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2212 [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2213 [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2214 [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2215 [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2216 [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2217 [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2218 [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
2219 [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2220 [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2221 [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2222 [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2223 [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2224 [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2225 [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2226 [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2227 [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
2228 [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
2229 [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2230 [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2231 [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2232 [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
2233 [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2234 [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
2235 [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2236 [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
2237 [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
2238 [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2239 [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2240 [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2241 [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2242 [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2243 [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2244 [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
2245 [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2246 [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
2247 [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
2248 [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2249 [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2250 [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
2251 [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2252 [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2253 [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
2254 [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
2255 [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2256 [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2257 [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2258 [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2259 [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2260 [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
2261 [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2262 [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
2263 [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2264 [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
2265 [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
2266 [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2267 [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2268 [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2269 [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2270 [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2271 [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
2272 [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2273 [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2274 [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2275 [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2276 [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2277 [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2278 [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2279 [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
2280 [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2281 [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
2282 [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2283 [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2284 [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2285 [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2286 [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2287 [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2288 [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2289 [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2290 [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2291 [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2292 [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
2293 [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
2294 [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
2295 [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
2296 [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
2297 [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
2298 [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
2299 [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
2300 [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
2301 [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
2302 [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2303 [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2304 [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2305 [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2306 [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2307 [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2308 [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2309 [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2310 [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2311 [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2312 [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2313 [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2314 [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2315 [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2316 [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2317 [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2318 [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2319 [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2320 [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2321 [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2322 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2323 [B2056_TX_GMBB_IDAC0] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2324 [B2056_TX_GMBB_IDAC1] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2325 [B2056_TX_GMBB_IDAC2] = { .ghz5 = 0x0071, .ghz2 = 0x0071, UPLOAD, },
2326 [B2056_TX_GMBB_IDAC3] = { .ghz5 = 0x0071, .ghz2 = 0x0071, UPLOAD, },
2327 [B2056_TX_GMBB_IDAC4] = { .ghz5 = 0x0072, .ghz2 = 0x0072, UPLOAD, },
2328 [B2056_TX_GMBB_IDAC5] = { .ghz5 = 0x0073, .ghz2 = 0x0073, UPLOAD, },
2329 [B2056_TX_GMBB_IDAC6] = { .ghz5 = 0x0074, .ghz2 = 0x0074, UPLOAD, },
2330 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0075, .ghz2 = 0x0075, UPLOAD, },
2331};
2332
2333static const struct b2056_inittab_entry b2056_inittab_rev7_rx[] = {
2334 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2335 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2336 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2337 [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2338 [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2339 [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2340 [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2341 [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2342 [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2343 [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2344 [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2345 [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2346 [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2347 [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2348 [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2349 [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2350 [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2351 [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2352 [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2353 [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2354 [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2355 [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2356 [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2357 [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2358 [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2359 [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2360 [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2361 [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2362 [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2363 [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2364 [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2365 [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2366 [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2367 [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
2368 [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
2369 [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
2370 [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
2371 [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
2372 [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
2373 [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2374 [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
2375 [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
2376 [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2377 [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2378 [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
2379 [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
2380 [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
2381 [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
2382 [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2383 [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2384 [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2385 [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
2386 [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
2387 [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
2388 [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
2389 [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2390 [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2391 [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
2392 [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2393 [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
2394 [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2395 [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
2396 [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
2397 [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2398 [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2399 [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2400 [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2401 [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
2402 [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2403 [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
2404 [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2405 [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
2406 [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2407 [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2408 [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2409 [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2410 [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2411 [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
2412 [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
2413 [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
2414 [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
2415 [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
2416 [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2417 [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2418 [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2419 [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2420 [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2421 [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2422 [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2423 [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2424 [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
2425 [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2426 [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
2427 [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
2428 [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
2429 [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2430 [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
2431 [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2432 [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2433 [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2434 [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2435 [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2436 [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2437 [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2438 [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2439 [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2440 [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2441 [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2442 [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2443 [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2444 [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2445 [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2446 [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2447 [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2448 [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
2449 [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
2450 [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2451 [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2452 [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
2453 [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2454 [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2455 [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2456 [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2457 [B2056_RX_RXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2458 [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2459 [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2460 [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2461 [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2462 [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2463 [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2464 [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2465 [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2466 [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2467 [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2468 [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2469 [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2470 [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2471 [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2472 [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2473 [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2474 [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2475 [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2476 [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2477 [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2478 [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2479 [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2480 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2481};
2482
2483static const struct b2056_inittab_entry b2056_inittab_rev8_syn[] = {
2484 [B2056_SYN_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2485 [B2056_SYN_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2486 [B2056_SYN_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2487 [B2056_SYN_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2488 [B2056_SYN_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2489 [B2056_SYN_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2490 [B2056_SYN_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2491 [B2056_SYN_COM_PU] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2492 [B2056_SYN_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2493 [B2056_SYN_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2494 [B2056_SYN_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2495 [B2056_SYN_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2496 [B2056_SYN_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2497 [B2056_SYN_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2498 [B2056_SYN_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2499 [B2056_SYN_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2500 [B2056_SYN_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2501 [B2056_SYN_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2502 [B2056_SYN_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2503 [B2056_SYN_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2504 [B2056_SYN_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2505 [B2056_SYN_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2506 [B2056_SYN_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2507 [B2056_SYN_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2508 [B2056_SYN_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2509 [B2056_SYN_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2510 [B2056_SYN_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2511 [B2056_SYN_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2512 [B2056_SYN_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2513 [B2056_SYN_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2514 [B2056_SYN_GPIO_MASTER1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2515 [B2056_SYN_GPIO_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2516 [B2056_SYN_TOPBIAS_MASTER] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
2517 [B2056_SYN_TOPBIAS_RCAL] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2518 [B2056_SYN_AFEREG] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2519 [B2056_SYN_TEMPPROCSENSE] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2520 [B2056_SYN_TEMPPROCSENSEIDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2521 [B2056_SYN_TEMPPROCSENSERCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2522 [B2056_SYN_LPO] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2523 [B2056_SYN_VDDCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2524 [B2056_SYN_VDDCAL_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2525 [B2056_SYN_VDDCAL_STATUS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2526 [B2056_SYN_RCAL_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2527 [B2056_SYN_RCAL_CODE_OUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2528 [B2056_SYN_RCCAL_CTRL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2529 [B2056_SYN_RCCAL_CTRL1] = { .ghz5 = 0x001f, .ghz2 = 0x001f, NOUPLOAD, },
2530 [B2056_SYN_RCCAL_CTRL2] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
2531 [B2056_SYN_RCCAL_CTRL3] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
2532 [B2056_SYN_RCCAL_CTRL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2533 [B2056_SYN_RCCAL_CTRL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2534 [B2056_SYN_RCCAL_CTRL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2535 [B2056_SYN_RCCAL_CTRL7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2536 [B2056_SYN_RCCAL_CTRL8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2537 [B2056_SYN_RCCAL_CTRL9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2538 [B2056_SYN_RCCAL_CTRL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2539 [B2056_SYN_RCCAL_CTRL11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2540 [B2056_SYN_ZCAL_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2541 [B2056_SYN_ZCAL_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2542 [B2056_SYN_PLL_MAST1] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
2543 [B2056_SYN_PLL_MAST2] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
2544 [B2056_SYN_PLL_MAST3] = { .ghz5 = 0x0018, .ghz2 = 0x0018, NOUPLOAD, },
2545 [B2056_SYN_PLL_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2546 [B2056_SYN_PLL_XTAL0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2547 [B2056_SYN_PLL_XTAL1] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2548 [B2056_SYN_PLL_XTAL3] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2549 [B2056_SYN_PLL_XTAL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2550 [B2056_SYN_PLL_XTAL5] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
2551 [B2056_SYN_PLL_XTAL6] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2552 [B2056_SYN_PLL_REFDIV] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2553 [B2056_SYN_PLL_PFD] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2554 [B2056_SYN_PLL_CP1] = { .ghz5 = 0x000f, .ghz2 = 0x000f, NOUPLOAD, },
2555 [B2056_SYN_PLL_CP2] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
2556 [B2056_SYN_PLL_CP3] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
2557 [B2056_SYN_PLL_LOOPFILTER1] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
2558 [B2056_SYN_PLL_LOOPFILTER2] = { .ghz5 = 0x000d, .ghz2 = 0x000d, NOUPLOAD, },
2559 [B2056_SYN_PLL_LOOPFILTER3] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2560 [B2056_SYN_PLL_LOOPFILTER4] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2561 [B2056_SYN_PLL_LOOPFILTER5] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2562 [B2056_SYN_PLL_MMD1] = { .ghz5 = 0x001c, .ghz2 = 0x001c, NOUPLOAD, },
2563 [B2056_SYN_PLL_MMD2] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2564 [B2056_SYN_PLL_VCO1] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2565 [B2056_SYN_PLL_VCO2] = { .ghz5 = 0x00f7, .ghz2 = 0x00f7, UPLOAD, },
2566 [B2056_SYN_PLL_MONITOR1] = { .ghz5 = 0x00b4, .ghz2 = 0x00b4, NOUPLOAD, },
2567 [B2056_SYN_PLL_MONITOR2] = { .ghz5 = 0x00d2, .ghz2 = 0x00d2, NOUPLOAD, },
2568 [B2056_SYN_PLL_VCOCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2569 [B2056_SYN_PLL_VCOCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2570 [B2056_SYN_PLL_VCOCAL4] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2571 [B2056_SYN_PLL_VCOCAL5] = { .ghz5 = 0x0096, .ghz2 = 0x0096, NOUPLOAD, },
2572 [B2056_SYN_PLL_VCOCAL6] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
2573 [B2056_SYN_PLL_VCOCAL7] = { .ghz5 = 0x003e, .ghz2 = 0x003e, NOUPLOAD, },
2574 [B2056_SYN_PLL_VCOCAL8] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
2575 [B2056_SYN_PLL_VCOCAL9] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2576 [B2056_SYN_PLL_VCOCAL10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2577 [B2056_SYN_PLL_VCOCAL11] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2578 [B2056_SYN_PLL_VCOCAL12] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
2579 [B2056_SYN_PLL_VCOCAL13] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2580 [B2056_SYN_PLL_VREG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2581 [B2056_SYN_PLL_STATUS1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2582 [B2056_SYN_PLL_STATUS2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2583 [B2056_SYN_PLL_STATUS3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2584 [B2056_SYN_LOGEN_PU0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2585 [B2056_SYN_LOGEN_PU1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2586 [B2056_SYN_LOGEN_PU2] = { .ghz5 = 0x0040, .ghz2 = 0x0040, NOUPLOAD, },
2587 [B2056_SYN_LOGEN_PU3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2588 [B2056_SYN_LOGEN_PU5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2589 [B2056_SYN_LOGEN_PU6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2590 [B2056_SYN_LOGEN_PU7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2591 [B2056_SYN_LOGEN_PU8] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2592 [B2056_SYN_LOGEN_BIAS_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2593 [B2056_SYN_LOGEN_RCCR1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2594 [B2056_SYN_LOGEN_VCOBUF1] = { .ghz5 = 0x0060, .ghz2 = 0x0060, NOUPLOAD, },
2595 [B2056_SYN_LOGEN_MIXER1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2596 [B2056_SYN_LOGEN_MIXER2] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2597 [B2056_SYN_LOGEN_BUF1] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2598 [B2056_SYN_LOGENBUF2] = { .ghz5 = 0x008f, .ghz2 = 0x008f, UPLOAD, },
2599 [B2056_SYN_LOGEN_BUF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2600 [B2056_SYN_LOGEN_BUF4] = { .ghz5 = 0x00cc, .ghz2 = 0x00cc, NOUPLOAD, },
2601 [B2056_SYN_LOGEN_DIV1] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2602 [B2056_SYN_LOGEN_DIV2] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2603 [B2056_SYN_LOGEN_DIV3] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2604 [B2056_SYN_LOGEN_ACL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2605 [B2056_SYN_LOGEN_ACL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2606 [B2056_SYN_LOGEN_ACL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2607 [B2056_SYN_LOGEN_ACL4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2608 [B2056_SYN_LOGEN_ACL5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2609 [B2056_SYN_LOGEN_ACL6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2610 [B2056_SYN_LOGEN_ACLOUT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2611 [B2056_SYN_LOGEN_ACLCAL1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2612 [B2056_SYN_LOGEN_ACLCAL2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2613 [B2056_SYN_LOGEN_ACLCAL3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2614 [B2056_SYN_CALEN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2615 [B2056_SYN_LOGEN_PEAKDET1] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, NOUPLOAD, },
2616 [B2056_SYN_LOGEN_CORE_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2617 [B2056_SYN_LOGEN_RX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2618 [B2056_SYN_LOGEN_TX_DIFF_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2619 [B2056_SYN_LOGEN_RX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2620 [B2056_SYN_LOGEN_TX_CMOS_ACL_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2621 [B2056_SYN_LOGEN_VCOBUF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2622 [B2056_SYN_LOGEN_MIXER3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2623 [B2056_SYN_LOGEN_BUF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2624 [B2056_SYN_LOGEN_BUF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2625 [B2056_SYN_LOGEN_CBUFRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2626 [B2056_SYN_LOGEN_CBUFRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2627 [B2056_SYN_LOGEN_CBUFRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2628 [B2056_SYN_LOGEN_CBUFRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2629 [B2056_SYN_LOGEN_CBUFTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2630 [B2056_SYN_LOGEN_CBUFTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2631 [B2056_SYN_LOGEN_CBUFTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2632 [B2056_SYN_LOGEN_CBUFTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2633 [B2056_SYN_LOGEN_CMOSRX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2634 [B2056_SYN_LOGEN_CMOSRX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2635 [B2056_SYN_LOGEN_CMOSRX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2636 [B2056_SYN_LOGEN_CMOSRX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2637 [B2056_SYN_LOGEN_CMOSTX1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2638 [B2056_SYN_LOGEN_CMOSTX2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2639 [B2056_SYN_LOGEN_CMOSTX3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2640 [B2056_SYN_LOGEN_CMOSTX4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2641 [B2056_SYN_LOGEN_VCOBUF2_OVRVAL]= { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2642 [B2056_SYN_LOGEN_MIXER3_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2643 [B2056_SYN_LOGEN_BUF5_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2644 [B2056_SYN_LOGEN_BUF6_OVRVAL] = { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2645 [B2056_SYN_LOGEN_CBUFRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2646 [B2056_SYN_LOGEN_CBUFRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2647 [B2056_SYN_LOGEN_CBUFRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2648 [B2056_SYN_LOGEN_CBUFRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2649 [B2056_SYN_LOGEN_CBUFTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2650 [B2056_SYN_LOGEN_CBUFTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2651 [B2056_SYN_LOGEN_CBUFTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2652 [B2056_SYN_LOGEN_CBUFTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2653 [B2056_SYN_LOGEN_CMOSRX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2654 [B2056_SYN_LOGEN_CMOSRX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2655 [B2056_SYN_LOGEN_CMOSRX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2656 [B2056_SYN_LOGEN_CMOSRX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2657 [B2056_SYN_LOGEN_CMOSTX1_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2658 [B2056_SYN_LOGEN_CMOSTX2_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2659 [B2056_SYN_LOGEN_CMOSTX3_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2660 [B2056_SYN_LOGEN_CMOSTX4_OVRVAL]= { .ghz5 = 0x0066, .ghz2 = 0x0066, NOUPLOAD, },
2661 [B2056_SYN_LOGEN_ACL_WAITCNT] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
2662 [B2056_SYN_LOGEN_CORE_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2663 [B2056_SYN_LOGEN_RX_CMOS_CALVALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2664 [B2056_SYN_LOGEN_TX_CMOS_VALID] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2665};
2666
2667static const struct b2056_inittab_entry b2056_inittab_rev8_tx[] = {
2668 [B2056_TX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2669 [B2056_TX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2670 [B2056_TX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2671 [B2056_TX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2672 [B2056_TX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2673 [B2056_TX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2674 [B2056_TX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2675 [B2056_TX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2676 [B2056_TX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2677 [B2056_TX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2678 [B2056_TX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2679 [B2056_TX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2680 [B2056_TX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2681 [B2056_TX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2682 [B2056_TX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2683 [B2056_TX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2684 [B2056_TX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2685 [B2056_TX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2686 [B2056_TX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2687 [B2056_TX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2688 [B2056_TX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2689 [B2056_TX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2690 [B2056_TX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2691 [B2056_TX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2692 [B2056_TX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2693 [B2056_TX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2694 [B2056_TX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2695 [B2056_TX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2696 [B2056_TX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2697 [B2056_TX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2698 [B2056_TX_IQCAL_GAIN_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2699 [B2056_TX_LOFT_FINE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2700 [B2056_TX_LOFT_FINE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2701 [B2056_TX_LOFT_COARSE_I] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2702 [B2056_TX_LOFT_COARSE_Q] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2703 [B2056_TX_TX_COM_MASTER1] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2704 [B2056_TX_TX_COM_MASTER2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2705 [B2056_TX_RXIQCAL_TXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2706 [B2056_TX_TX_SSI_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2707 [B2056_TX_IQCAL_VCM_HG] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2708 [B2056_TX_IQCAL_IDAC] = { .ghz5 = 0x0037, .ghz2 = 0x0037, NOUPLOAD, },
2709 [B2056_TX_TSSI_VCM] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2710 [B2056_TX_TX_AMP_DET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2711 [B2056_TX_TX_SSI_MUX] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2712 [B2056_TX_TSSIA] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2713 [B2056_TX_TSSIG] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2714 [B2056_TX_TSSI_MISC1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2715 [B2056_TX_TSSI_MISC2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2716 [B2056_TX_TSSI_MISC3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2717 [B2056_TX_PA_SPARE1] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
2718 [B2056_TX_PA_SPARE2] = { .ghz5 = 0x00ee, .ghz2 = 0x00ee, UPLOAD, },
2719 [B2056_TX_INTPAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2720 [B2056_TX_INTPAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2721 [B2056_TX_INTPAA_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2722 [B2056_TX_INTPAA_IAUX_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
2723 [B2056_TX_INTPAA_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2724 [B2056_TX_INTPAA_IMAIN_STAT] = { .ghz5 = 0x0050, .ghz2 = 0x0050, UPLOAD, },
2725 [B2056_TX_INTPAA_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2726 [B2056_TX_INTPAA_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
2727 [B2056_TX_INTPAA_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
2728 [B2056_TX_INTPAA_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2729 [B2056_TX_INTPAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2730 [B2056_TX_INTPAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2731 [B2056_TX_INTPAG_BOOST_TUNE] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2732 [B2056_TX_INTPAG_IAUX_STAT] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2733 [B2056_TX_INTPAG_IAUX_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2734 [B2056_TX_INTPAG_IMAIN_STAT] = { .ghz5 = 0x001e, .ghz2 = 0x001e, NOUPLOAD, },
2735 [B2056_TX_INTPAG_IMAIN_DYN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2736 [B2056_TX_INTPAG_CASCBIAS] = { .ghz5 = 0x006e, .ghz2 = 0x006e, NOUPLOAD, },
2737 [B2056_TX_INTPAG_PASLOPE] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
2738 [B2056_TX_INTPAG_PA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2739 [B2056_TX_PADA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2740 [B2056_TX_PADA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
2741 [B2056_TX_PADA_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2742 [B2056_TX_PADA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2743 [B2056_TX_PADA_BOOST_TUNE] = { .ghz5 = 0x0038, .ghz2 = 0x0038, NOUPLOAD, },
2744 [B2056_TX_PADA_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
2745 [B2056_TX_PADG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2746 [B2056_TX_PADG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2747 [B2056_TX_PADG_CASCBIAS] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2748 [B2056_TX_PADG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2749 [B2056_TX_PADG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2750 [B2056_TX_PADG_SLOPE] = { .ghz5 = 0x0070, .ghz2 = 0x0070, UPLOAD, },
2751 [B2056_TX_PGAA_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2752 [B2056_TX_PGAA_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
2753 [B2056_TX_PGAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2754 [B2056_TX_PGAA_BOOST_TUNE] = { .ghz5 = 0x0083, .ghz2 = 0x0083, NOUPLOAD, },
2755 [B2056_TX_PGAA_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
2756 [B2056_TX_PGAA_MISC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2757 [B2056_TX_PGAG_MASTER] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2758 [B2056_TX_PGAG_IDAC] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2759 [B2056_TX_PGAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2760 [B2056_TX_PGAG_BOOST_TUNE] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2761 [B2056_TX_PGAG_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, UPLOAD, },
2762 [B2056_TX_PGAG_MISC] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2763 [B2056_TX_MIXA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2764 [B2056_TX_MIXA_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2765 [B2056_TX_MIXG] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2766 [B2056_TX_MIXG_BOOST_TUNE] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2767 [B2056_TX_BB_GM_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2768 [B2056_TX_GMBB_GM] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2769 [B2056_TX_GMBB_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
2770 [B2056_TX_TXLPF_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2771 [B2056_TX_TXLPF_RCCAL] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
2772 [B2056_TX_TXLPF_RCCAL_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2773 [B2056_TX_TXLPF_RCCAL_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2774 [B2056_TX_TXLPF_RCCAL_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2775 [B2056_TX_TXLPF_RCCAL_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2776 [B2056_TX_TXLPF_RCCAL_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2777 [B2056_TX_TXLPF_RCCAL_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2778 [B2056_TX_TXLPF_RCCAL_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2779 [B2056_TX_TXLPF_BW] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2780 [B2056_TX_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2781 [B2056_TX_TXLPF_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2782 [B2056_TX_TXLPF_IDAC_0] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
2783 [B2056_TX_TXLPF_IDAC_1] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
2784 [B2056_TX_TXLPF_IDAC_2] = { .ghz5 = 0x000e, .ghz2 = 0x000e, NOUPLOAD, },
2785 [B2056_TX_TXLPF_IDAC_3] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
2786 [B2056_TX_TXLPF_IDAC_4] = { .ghz5 = 0x0013, .ghz2 = 0x0013, NOUPLOAD, },
2787 [B2056_TX_TXLPF_IDAC_5] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
2788 [B2056_TX_TXLPF_IDAC_6] = { .ghz5 = 0x001b, .ghz2 = 0x001b, NOUPLOAD, },
2789 [B2056_TX_TXLPF_OPAMP_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
2790 [B2056_TX_TXLPF_MISC] = { .ghz5 = 0x005b, .ghz2 = 0x005b, NOUPLOAD, },
2791 [B2056_TX_TXSPARE1] = { .ghz5 = 0x0030, .ghz2 = 0x0030, UPLOAD, },
2792 [B2056_TX_TXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2793 [B2056_TX_TXSPARE3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2794 [B2056_TX_TXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2795 [B2056_TX_TXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2796 [B2056_TX_TXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2797 [B2056_TX_TXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2798 [B2056_TX_TXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2799 [B2056_TX_TXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2800 [B2056_TX_TXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2801 [B2056_TX_TXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2802 [B2056_TX_TXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2803 [B2056_TX_TXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2804 [B2056_TX_TXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2805 [B2056_TX_TXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2806 [B2056_TX_TXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2807 [B2056_TX_STATUS_INTPA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2808 [B2056_TX_STATUS_PAD_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2809 [B2056_TX_STATUS_PGA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2810 [B2056_TX_STATUS_GM_TXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2811 [B2056_TX_STATUS_TXLPF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2812 [B2056_TX_STATUS_TXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2813 [B2056_TX_GMBB_IDAC0] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2814 [B2056_TX_GMBB_IDAC1] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2815 [B2056_TX_GMBB_IDAC2] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2816 [B2056_TX_GMBB_IDAC3] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2817 [B2056_TX_GMBB_IDAC4] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2818 [B2056_TX_GMBB_IDAC5] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2819 [B2056_TX_GMBB_IDAC6] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2820 [B2056_TX_GMBB_IDAC7] = { .ghz5 = 0x0070, .ghz2 = 0x0070, NOUPLOAD, },
2821};
2822
2823static const struct b2056_inittab_entry b2056_inittab_rev8_rx[] = {
2824 [B2056_RX_RESERVED_ADDR2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2825 [B2056_RX_RESERVED_ADDR3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2826 [B2056_RX_RESERVED_ADDR4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2827 [B2056_RX_RESERVED_ADDR5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2828 [B2056_RX_RESERVED_ADDR6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2829 [B2056_RX_RESERVED_ADDR7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2830 [B2056_RX_COM_CTRL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2831 [B2056_RX_COM_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2832 [B2056_RX_COM_OVR] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2833 [B2056_RX_COM_RESET] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2834 [B2056_RX_COM_RCAL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2835 [B2056_RX_COM_RC_RXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2836 [B2056_RX_COM_RC_TXLPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2837 [B2056_RX_COM_RC_RXHPF] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2838 [B2056_RX_RESERVED_ADDR16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2839 [B2056_RX_RESERVED_ADDR17] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2840 [B2056_RX_RESERVED_ADDR18] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2841 [B2056_RX_RESERVED_ADDR19] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2842 [B2056_RX_RESERVED_ADDR20] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2843 [B2056_RX_RESERVED_ADDR21] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2844 [B2056_RX_RESERVED_ADDR22] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2845 [B2056_RX_RESERVED_ADDR23] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2846 [B2056_RX_RESERVED_ADDR24] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2847 [B2056_RX_RESERVED_ADDR25] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2848 [B2056_RX_RESERVED_ADDR26] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2849 [B2056_RX_RESERVED_ADDR27] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2850 [B2056_RX_RESERVED_ADDR28] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2851 [B2056_RX_RESERVED_ADDR29] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2852 [B2056_RX_RESERVED_ADDR30] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2853 [B2056_RX_RESERVED_ADDR31] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2854 [B2056_RX_RXIQCAL_RXMUX] = { .ghz5 = 0x0003, .ghz2 = 0x0003, NOUPLOAD, },
2855 [B2056_RX_RSSI_PU] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2856 [B2056_RX_RSSI_SEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2857 [B2056_RX_RSSI_GAIN] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
2858 [B2056_RX_RSSI_NB_IDAC] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
2859 [B2056_RX_RSSI_WB2I_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
2860 [B2056_RX_RSSI_WB2I_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
2861 [B2056_RX_RSSI_WB2Q_IDAC_1] = { .ghz5 = 0x0015, .ghz2 = 0x0015, NOUPLOAD, },
2862 [B2056_RX_RSSI_WB2Q_IDAC_2] = { .ghz5 = 0x0005, .ghz2 = 0x0005, NOUPLOAD, },
2863 [B2056_RX_RSSI_POLE] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2864 [B2056_RX_RSSI_WB1_IDAC] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
2865 [B2056_RX_RSSI_MISC] = { .ghz5 = 0x0090, .ghz2 = 0x0090, NOUPLOAD, },
2866 [B2056_RX_LNAA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2867 [B2056_RX_LNAA_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2868 [B2056_RX_LNAA_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
2869 [B2056_RX_LNA_A_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
2870 [B2056_RX_BIASPOLE_LNAA1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
2871 [B2056_RX_LNAA2_IDAC] = { .ghz5 = 0x00ff, .ghz2 = 0x00ff, UPLOAD, },
2872 [B2056_RX_LNA1A_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2873 [B2056_RX_LNAG_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2874 [B2056_RX_LNAG_TUNE] = { .ghz5 = 0x0088, .ghz2 = 0x0088, NOUPLOAD, },
2875 [B2056_RX_LNAG_GAIN] = { .ghz5 = 0x0032, .ghz2 = 0x0032, NOUPLOAD, },
2876 [B2056_RX_LNA_G_SLOPE] = { .ghz5 = 0x0077, .ghz2 = 0x0077, NOUPLOAD, },
2877 [B2056_RX_BIASPOLE_LNAG1_IDAC] = { .ghz5 = 0x0017, .ghz2 = 0x0017, UPLOAD, },
2878 [B2056_RX_LNAG2_IDAC] = { .ghz5 = 0x00f0, .ghz2 = 0x00f0, UPLOAD, },
2879 [B2056_RX_LNA1G_MISC] = { .ghz5 = 0x0020, .ghz2 = 0x0020, NOUPLOAD, },
2880 [B2056_RX_MIXA_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2881 [B2056_RX_MIXA_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
2882 [B2056_RX_MIXA_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2883 [B2056_RX_MIXA_LOB_BIAS] = { .ghz5 = 0x0088, .ghz2 = 0x0088, UPLOAD, },
2884 [B2056_RX_MIXA_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2885 [B2056_RX_MIXA_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
2886 [B2056_RX_MIXA_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, UPLOAD, },
2887 [B2056_RX_MIXA_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2888 [B2056_RX_MIXA_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2889 [B2056_RX_MIXA_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2890 [B2056_RX_MIXG_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2891 [B2056_RX_MIXG_VCM] = { .ghz5 = 0x0055, .ghz2 = 0x0055, UPLOAD, },
2892 [B2056_RX_MIXG_CTRLPTAT] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2893 [B2056_RX_MIXG_LOB_BIAS] = { .ghz5 = 0x0011, .ghz2 = 0x0011, NOUPLOAD, },
2894 [B2056_RX_MIXG_CORE_IDAC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2895 [B2056_RX_MIXG_CMFB_IDAC] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
2896 [B2056_RX_MIXG_BIAS_AUX] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2897 [B2056_RX_MIXG_BIAS_MAIN] = { .ghz5 = 0x0006, .ghz2 = 0x0006, NOUPLOAD, },
2898 [B2056_RX_MIXG_BIAS_MISC] = { .ghz5 = 0x0004, .ghz2 = 0x0004, NOUPLOAD, },
2899 [B2056_RX_MIXG_MAST_BIAS] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2900 [B2056_RX_TIA_MASTER] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2901 [B2056_RX_TIA_IOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
2902 [B2056_RX_TIA_QOPAMP] = { .ghz5 = 0x0026, .ghz2 = 0x0026, UPLOAD, },
2903 [B2056_RX_TIA_IMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
2904 [B2056_RX_TIA_QMISC] = { .ghz5 = 0x000f, .ghz2 = 0x000f, UPLOAD, },
2905 [B2056_RX_TIA_GAIN] = { .ghz5 = 0x0044, .ghz2 = 0x0044, NOUPLOAD, },
2906 [B2056_RX_TIA_SPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2907 [B2056_RX_TIA_SPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2908 [B2056_RX_BB_LPF_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2909 [B2056_RX_AACI_MASTER] = { .ghz5 = 0x0008, .ghz2 = 0x0008, NOUPLOAD, },
2910 [B2056_RX_RXLPF_IDAC] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2911 [B2056_RX_RXLPF_OPAMPBIAS_LOWQ] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2912 [B2056_RX_RXLPF_OPAMPBIAS_HIGHQ]= { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2913 [B2056_RX_RXLPF_BIAS_DCCANCEL] = { .ghz5 = 0x0002, .ghz2 = 0x0002, NOUPLOAD, },
2914 [B2056_RX_RXLPF_OUTVCM] = { .ghz5 = 0x0004, .ghz2 = 0x0004, UPLOAD, },
2915 [B2056_RX_RXLPF_INVCM_BODY] = { .ghz5 = 0x0007, .ghz2 = 0x0007, NOUPLOAD, },
2916 [B2056_RX_RXLPF_CC_OP] = { .ghz5 = 0x0055, .ghz2 = 0x0055, NOUPLOAD, },
2917 [B2056_RX_RXLPF_GAIN] = { .ghz5 = 0x0023, .ghz2 = 0x0023, NOUPLOAD, },
2918 [B2056_RX_RXLPF_Q_BW] = { .ghz5 = 0x0041, .ghz2 = 0x0041, NOUPLOAD, },
2919 [B2056_RX_RXLPF_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2920 [B2056_RX_RXLPF_RCCAL_HPC] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
2921 [B2056_RX_RXHPF_OFF0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2922 [B2056_RX_RXHPF_OFF1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2923 [B2056_RX_RXHPF_OFF2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2924 [B2056_RX_RXHPF_OFF3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2925 [B2056_RX_RXHPF_OFF4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2926 [B2056_RX_RXHPF_OFF5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2927 [B2056_RX_RXHPF_OFF6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2928 [B2056_RX_RXHPF_OFF7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2929 [B2056_RX_RXLPF_RCCAL_LPC] = { .ghz5 = 0x000c, .ghz2 = 0x000c, NOUPLOAD, },
2930 [B2056_RX_RXLPF_OFF_0] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2931 [B2056_RX_RXLPF_OFF_1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2932 [B2056_RX_RXLPF_OFF_2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2933 [B2056_RX_RXLPF_OFF_3] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2934 [B2056_RX_RXLPF_OFF_4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2935 [B2056_RX_UNUSED] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2936 [B2056_RX_VGA_MASTER] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2937 [B2056_RX_VGA_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2938 [B2056_RX_VGA_BIAS_DCCANCEL] = { .ghz5 = 0x0000, .ghz2 = 0x0000, UPLOAD, },
2939 [B2056_RX_VGA_GAIN] = { .ghz5 = 0x000a, .ghz2 = 0x000a, NOUPLOAD, },
2940 [B2056_RX_VGA_HP_CORNER_BW] = { .ghz5 = 0x0001, .ghz2 = 0x0001, NOUPLOAD, },
2941 [B2056_RX_VGABUF_BIAS] = { .ghz5 = 0x0022, .ghz2 = 0x0022, NOUPLOAD, },
2942 [B2056_RX_VGABUF_GAIN_BW] = { .ghz5 = 0x0030, .ghz2 = 0x0030, NOUPLOAD, },
2943 [B2056_RX_TXFBMIX_A] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2944 [B2056_RX_TXFBMIX_G] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2945 [B2056_RX_RXSPARE1] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2946 [B2056_RX_RXSPARE2] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2947 [B2056_RX_RXSPARE3] = { .ghz5 = 0x0005, .ghz2 = 0x0005, UPLOAD, },
2948 [B2056_RX_RXSPARE4] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2949 [B2056_RX_RXSPARE5] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2950 [B2056_RX_RXSPARE6] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2951 [B2056_RX_RXSPARE7] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2952 [B2056_RX_RXSPARE8] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2953 [B2056_RX_RXSPARE9] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2954 [B2056_RX_RXSPARE10] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2955 [B2056_RX_RXSPARE11] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2956 [B2056_RX_RXSPARE12] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2957 [B2056_RX_RXSPARE13] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2958 [B2056_RX_RXSPARE14] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2959 [B2056_RX_RXSPARE15] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2960 [B2056_RX_RXSPARE16] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2961 [B2056_RX_STATUS_LNAA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2962 [B2056_RX_STATUS_LNAG_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2963 [B2056_RX_STATUS_MIXTIA_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2964 [B2056_RX_STATUS_RXLPF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2965 [B2056_RX_STATUS_VGA_BUF_GAIN] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2966 [B2056_RX_STATUS_RXLPF_Q] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2967 [B2056_RX_STATUS_RXLPF_BUF_BW] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2968 [B2056_RX_STATUS_RXLPF_VGA_HPC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2969 [B2056_RX_STATUS_RXLPF_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2970 [B2056_RX_STATUS_HPC_RC] = { .ghz5 = 0x0000, .ghz2 = 0x0000, NOUPLOAD, },
2971};
2972
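/*
 * Reader's note (assumption, not part of the original file): the UPLOAD /
 * NOUPLOAD flag on each inittab entry appears to decide whether the value
 * is actually written to the 2056 radio when a table is applied; NOUPLOAD
 * entries seem to only record the chip's power-on default for reference.
 */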
2973#define INITTABSPTS(prefix) \
2974 .syn = prefix##_syn, \
2975 .syn_length = ARRAY_SIZE(prefix##_syn), \
2976 .tx = prefix##_tx, \
2977 .tx_length = ARRAY_SIZE(prefix##_tx), \
2978 .rx = prefix##_rx, \
2979 .rx_length = ARRAY_SIZE(prefix##_rx)
2980
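/*
 * Sketch (inference from the macro above, hedged): INITTABSPTS(prefix)
 * expands to the six initializers of a struct b2056_inittabs_pts, pointing
 * .syn, .tx and .rx at the prefix##_syn, prefix##_tx and prefix##_rx arrays
 * and recording their lengths, so the array below can presumably be indexed
 * directly by the 2056 radio revision number.
 */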
2981struct b2056_inittabs_pts b2056_inittabs[] = {
2982 [3] = { INITTABSPTS(b2056_inittab_rev3) },
2983 [4] = { INITTABSPTS(b2056_inittab_rev4) },
2984 [5] = { INITTABSPTS(b2056_inittab_rev5) },
2985 [6] = { INITTABSPTS(b2056_inittab_rev6) },
2986 [7] = { INITTABSPTS(b2056_inittab_rev7) },
2987 [8] = { INITTABSPTS(b2056_inittab_rev8) },
2988 [9] = { INITTABSPTS(b2056_inittab_rev7) },
2989};
2990
2991#define RADIOREGS3(r00, r01, r02, r03, r04, r05, r06, r07, r08, r09, \
2992 r10, r11, r12, r13, r14, r15, r16, r17, r18, r19, \
2993 r20, r21, r22, r23, r24, r25, r26, r27, r28, r29, \
2994 r30, r31, r32, r33, r34, r35, r36) \
2995 .radio_syn_pll_vcocal1 = r00, \
2996 .radio_syn_pll_vcocal2 = r01, \
2997 .radio_syn_pll_refdiv = r02, \
2998 .radio_syn_pll_mmd2 = r03, \
2999 .radio_syn_pll_mmd1 = r04, \
3000 .radio_syn_pll_loopfilter1 = r05, \
3001 .radio_syn_pll_loopfilter2 = r06, \
3002 .radio_syn_pll_loopfilter3 = r07, \
3003 .radio_syn_pll_loopfilter4 = r08, \
3004 .radio_syn_pll_loopfilter5 = r09, \
3005 .radio_syn_reserved_addr27 = r10, \
3006 .radio_syn_reserved_addr28 = r11, \
3007 .radio_syn_reserved_addr29 = r12, \
3008 .radio_syn_logen_vcobuf1 = r13, \
3009 .radio_syn_logen_mixer2 = r14, \
3010 .radio_syn_logen_buf3 = r15, \
3011 .radio_syn_logen_buf4 = r16, \
3012 .radio_rx0_lnaa_tune = r17, \
3013 .radio_rx0_lnag_tune = r18, \
3014 .radio_tx0_intpaa_boost_tune = r19, \
3015 .radio_tx0_intpag_boost_tune = r20, \
3016 .radio_tx0_pada_boost_tune = r21, \
3017 .radio_tx0_padg_boost_tune = r22, \
3018 .radio_tx0_pgaa_boost_tune = r23, \
3019 .radio_tx0_pgag_boost_tune = r24, \
3020 .radio_tx0_mixa_boost_tune = r25, \
3021 .radio_tx0_mixg_boost_tune = r26, \
3022 .radio_rx1_lnaa_tune = r27, \
3023 .radio_rx1_lnag_tune = r28, \
3024 .radio_tx1_intpaa_boost_tune = r29, \
3025 .radio_tx1_intpag_boost_tune = r30, \
3026 .radio_tx1_pada_boost_tune = r31, \
3027 .radio_tx1_padg_boost_tune = r32, \
3028 .radio_tx1_pgaa_boost_tune = r33, \
3029 .radio_tx1_pgag_boost_tune = r34, \
3030 .radio_tx1_mixa_boost_tune = r35, \
3031 .radio_tx1_mixg_boost_tune = r36
3032
3033#define PHYREGS(r0, r1, r2, r3, r4, r5) \
3034 .phy_regs.phy_bw1a = r0, \
3035 .phy_regs.phy_bw2 = r1, \
3036 .phy_regs.phy_bw3 = r2, \
3037 .phy_regs.phy_bw4 = r3, \
3038 .phy_regs.phy_bw5 = r4, \
3039 .phy_regs.phy_bw6 = r5
3040
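/*
 * How a channel entry is assembled (editorial assumption, for readability):
 * RADIOREGS3() assigns its 37 positional byte values to the named
 * radio_syn_..., radio_rx..., and radio_tx... fields in the order given in
 * the macro above, and PHYREGS() fills the six phy_regs.phy_bw1a through
 * phy_bw6 words; each row of the table below therefore pairs one .freq
 * value (apparently in MHz) with the per-channel radio and PHY register
 * settings for that frequency.
 */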
3041/* http://bcm-v4.sipsolutions.net/802.11/Radio/2056/ChannelTable */
3042 static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev3[] = {
3043 { .freq = 4920,
3044 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
3045 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
3046 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
3047 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
3048 0x00, 0x0b, 0x00, 0xff, 0x00),
3049 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
3050 },
3051 { .freq = 4930,
3052 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
3053 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
3054 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
3055 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
3056 0x00, 0x0b, 0x00, 0xff, 0x00),
3057 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
3058 },
3059 { .freq = 4940,
3060 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
3061 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
3062 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
3063 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
3064 0x00, 0x0b, 0x00, 0xff, 0x00),
3065 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
3066 },
3067 { .freq = 4950,
3068 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
3069 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
3070 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
3071 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
3072 0x00, 0x0b, 0x00, 0xff, 0x00),
3073 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
3074 },
3075 { .freq = 4960,
3076 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
3077 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3078 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
3079 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
3080 0x00, 0x0b, 0x00, 0xff, 0x00),
3081 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
3082 },
3083 { .freq = 4970,
3084 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
3085 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3086 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
3087 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
3088 0x00, 0x0b, 0x00, 0xff, 0x00),
3089 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
3090 },
3091 { .freq = 4980,
3092 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
3093 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3094 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
3095 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
3096 0x00, 0x0b, 0x00, 0xff, 0x00),
3097 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
3098 },
3099 { .freq = 4990,
3100 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
3101 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3102 0xff, 0xff, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0b,
3103 0x00, 0xff, 0x00, 0xff, 0x00, 0x08, 0x00, 0x7f,
3104 0x00, 0x0b, 0x00, 0xff, 0x00),
3105 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
3106 },
3107 { .freq = 5000,
3108 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
3109 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3110 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3111 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3112 0x00, 0x0b, 0x00, 0xff, 0x00),
3113 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
3114 },
3115 { .freq = 5010,
3116 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
3117 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3118 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3119 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3120 0x00, 0x0b, 0x00, 0xff, 0x00),
3121 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
3122 },
3123 { .freq = 5020,
3124 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
3125 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3126 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3127 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3128 0x00, 0x0b, 0x00, 0xff, 0x00),
3129 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
3130 },
3131 { .freq = 5030,
3132 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
3133 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3134 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3135 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3136 0x00, 0x0b, 0x00, 0xff, 0x00),
3137 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
3138 },
3139 { .freq = 5040,
3140 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
3141 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3142 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3143 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3144 0x00, 0x0b, 0x00, 0xff, 0x00),
3145 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
3146 },
3147 { .freq = 5050,
3148 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
3149 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3150 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3151 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3152 0x00, 0x0b, 0x00, 0xff, 0x00),
3153 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
3154 },
3155 { .freq = 5060,
3156 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
3157 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3158 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3159 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3160 0x00, 0x0b, 0x00, 0xff, 0x00),
3161 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
3162 },
3163 { .freq = 5070,
3164 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
3165 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3166 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3167 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3168 0x00, 0x0b, 0x00, 0xff, 0x00),
3169 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
3170 },
3171 { .freq = 5080,
3172 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
3173 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3174 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3175 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3176 0x00, 0x0b, 0x00, 0xff, 0x00),
3177 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
3178 },
3179 { .freq = 5090,
3180 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
3181 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
3182 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3183 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3184 0x00, 0x0b, 0x00, 0xff, 0x00),
3185 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
3186 },
3187 { .freq = 5100,
3188 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
3189 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3190 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3191 0x00, 0xff, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3192 0x00, 0x0b, 0x00, 0xff, 0x00),
3193 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
3194 },
3195 { .freq = 5110,
3196 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
3197 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3198 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3199 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3200 0x00, 0x0b, 0x00, 0xfc, 0x00),
3201 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
3202 },
3203 { .freq = 5120,
3204 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
3205 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3206 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3207 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3208 0x00, 0x0b, 0x00, 0xfc, 0x00),
3209 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
3210 },
3211 { .freq = 5130,
3212 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
3213 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3214 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3215 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3216 0x00, 0x0b, 0x00, 0xfc, 0x00),
3217 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
3218 },
3219 { .freq = 5140,
3220 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
3221 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3222 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3223 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3224 0x00, 0x0b, 0x00, 0xfc, 0x00),
3225 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
3226 },
3227 { .freq = 5160,
3228 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
3229 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3230 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3231 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3232 0x00, 0x0b, 0x00, 0xfc, 0x00),
3233 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
3234 },
3235 { .freq = 5170,
3236 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
3237 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3238 0xff, 0xff, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3239 0x00, 0xfc, 0x00, 0xff, 0x00, 0x07, 0x00, 0x7f,
3240 0x00, 0x0b, 0x00, 0xfc, 0x00),
3241 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
3242 },
3243 { .freq = 5180,
3244 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
3245 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3246 0xff, 0xef, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3247 0x00, 0xfc, 0x00, 0xef, 0x00, 0x07, 0x00, 0x7f,
3248 0x00, 0x0b, 0x00, 0xfc, 0x00),
3249 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
3250 },
3251 { .freq = 5190,
3252 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
3253 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3254 0xff, 0xef, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0b,
3255 0x00, 0xfc, 0x00, 0xef, 0x00, 0x07, 0x00, 0x7f,
3256 0x00, 0x0b, 0x00, 0xfc, 0x00),
3257 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
3258 },
3259 { .freq = 5200,
3260 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
3261 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3262 0xff, 0xef, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3263 0x00, 0xfc, 0x00, 0xef, 0x00, 0x06, 0x00, 0x7f,
3264 0x00, 0x0a, 0x00, 0xfc, 0x00),
3265 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
3266 },
3267 { .freq = 5210,
3268 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
3269 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3270 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3271 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
3272 0x00, 0x0a, 0x00, 0xfc, 0x00),
3273 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
3274 },
3275 { .freq = 5220,
3276 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
3277 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3278 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3279 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
3280 0x00, 0x0a, 0x00, 0xfc, 0x00),
3281 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
3282 },
3283 { .freq = 5230,
3284 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
3285 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3286 0xff, 0xdf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3287 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x06, 0x00, 0x7f,
3288 0x00, 0x0a, 0x00, 0xfc, 0x00),
3289 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
3290 },
3291 { .freq = 5240,
3292 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
3293 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3294 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3295 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
3296 0x00, 0x0a, 0x00, 0xfc, 0x00),
3297 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
3298 },
3299 { .freq = 5250,
3300 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
3301 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3302 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3303 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
3304 0x00, 0x0a, 0x00, 0xfc, 0x00),
3305 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
3306 },
3307 { .freq = 5260,
3308 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
3309 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
3310 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3311 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
3312 0x00, 0x0a, 0x00, 0xfc, 0x00),
3313 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
3314 },
3315 { .freq = 5270,
3316 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
3317 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
3318 0xff, 0xcf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3319 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x06, 0x00, 0x7f,
3320 0x00, 0x0a, 0x00, 0xfc, 0x00),
3321 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
3322 },
3323 { .freq = 5280,
3324 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
3325 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
3326 0xff, 0xbf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3327 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x06, 0x00, 0x7f,
3328 0x00, 0x0a, 0x00, 0xfc, 0x00),
3329 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
3330 },
3331 { .freq = 5290,
3332 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
3333 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
3334 0xff, 0xbf, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0a,
3335 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x06, 0x00, 0x7f,
3336 0x00, 0x0a, 0x00, 0xfc, 0x00),
3337 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
3338 },
3339 { .freq = 5300,
3340 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
3341 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3342 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3343 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
3344 0x00, 0x09, 0x00, 0xfc, 0x00),
3345 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
3346 },
3347 { .freq = 5310,
3348 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
3349 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3350 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3351 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
3352 0x00, 0x09, 0x00, 0xfa, 0x00),
3353 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
3354 },
3355 { .freq = 5320,
3356 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
3357 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3358 0xff, 0xbf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3359 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x05, 0x00, 0x7f,
3360 0x00, 0x09, 0x00, 0xfa, 0x00),
3361 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
3362 },
3363 { .freq = 5330,
3364 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
3365 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3366 0xff, 0xaf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3367 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x05, 0x00, 0x7f,
3368 0x00, 0x09, 0x00, 0xfa, 0x00),
3369 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
3370 },
3371 { .freq = 5340,
3372 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
3373 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3374 0xff, 0xaf, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3375 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x05, 0x00, 0x7f,
3376 0x00, 0x09, 0x00, 0xfa, 0x00),
3377 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
3378 },
3379 { .freq = 5350,
3380 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
3381 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3382 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3383 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
3384 0x00, 0x09, 0x00, 0xfa, 0x00),
3385 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
3386 },
3387 { .freq = 5360,
3388 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
3389 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3390 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3391 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
3392 0x00, 0x09, 0x00, 0xfa, 0x00),
3393 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
3394 },
3395 { .freq = 5370,
3396 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
3397 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3398 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3399 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
3400 0x00, 0x09, 0x00, 0xfa, 0x00),
3401 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
3402 },
3403 { .freq = 5380,
3404 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
3405 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3406 0xff, 0x9f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3407 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x05, 0x00, 0x7f,
3408 0x00, 0x09, 0x00, 0xfa, 0x00),
3409 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
3410 },
3411 { .freq = 5390,
3412 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
3413 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
3414 0xff, 0x8f, 0x00, 0x05, 0x00, 0x7f, 0x00, 0x09,
3415 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x05, 0x00, 0x7f,
3416 0x00, 0x09, 0x00, 0xfa, 0x00),
3417 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
3418 },
3419 { .freq = 5400,
3420 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
3421 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
3422 0xc8, 0x8f, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3423 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x04, 0x00, 0x7f,
3424 0x00, 0x08, 0x00, 0xfa, 0x00),
3425 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
3426 },
3427 { .freq = 5410,
3428 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
3429 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
3430 0xc8, 0x8f, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3431 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x04, 0x00, 0x7f,
3432 0x00, 0x08, 0x00, 0xfa, 0x00),
3433 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
3434 },
3435 { .freq = 5420,
3436 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
3437 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
3438 0xc8, 0x8e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3439 0x00, 0xfa, 0x00, 0x8e, 0x00, 0x04, 0x00, 0x7f,
3440 0x00, 0x08, 0x00, 0xfa, 0x00),
3441 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
3442 },
3443 { .freq = 5430,
3444 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
3445 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
3446 0xc8, 0x8e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3447 0x00, 0xfa, 0x00, 0x8e, 0x00, 0x04, 0x00, 0x7f,
3448 0x00, 0x08, 0x00, 0xfa, 0x00),
3449 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
3450 },
3451 { .freq = 5440,
3452 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
3453 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
3454 0xc8, 0x7e, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3455 0x00, 0xfa, 0x00, 0x7e, 0x00, 0x04, 0x00, 0x7f,
3456 0x00, 0x08, 0x00, 0xfa, 0x00),
3457 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
3458 },
3459 { .freq = 5450,
3460 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
3461 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
3462 0xc8, 0x7d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3463 0x00, 0xfa, 0x00, 0x7d, 0x00, 0x04, 0x00, 0x7f,
3464 0x00, 0x08, 0x00, 0xfa, 0x00),
3465 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
3466 },
3467 { .freq = 5460,
3468 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
3469 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
3470 0xc8, 0x6d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3471 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x04, 0x00, 0x7f,
3472 0x00, 0x08, 0x00, 0xf8, 0x00),
3473 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
3474 },
3475 { .freq = 5470,
3476 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
3477 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
3478 0xc8, 0x6d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3479 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x04, 0x00, 0x7f,
3480 0x00, 0x08, 0x00, 0xf8, 0x00),
3481 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
3482 },
3483 { .freq = 5480,
3484 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
3485 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
3486 0xc8, 0x5d, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3487 0x00, 0xf8, 0x00, 0x5d, 0x00, 0x04, 0x00, 0x7f,
3488 0x00, 0x08, 0x00, 0xf8, 0x00),
3489 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
3490 },
3491 { .freq = 5490,
3492 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
3493 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
3494 0xc8, 0x5c, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x08,
3495 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x04, 0x00, 0x7f,
3496 0x00, 0x08, 0x00, 0xf8, 0x00),
3497 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
3498 },
3499 { .freq = 5500,
3500 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
3501 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3502 0x84, 0x5c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3503 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x03, 0x00, 0x7f,
3504 0x00, 0x07, 0x00, 0xf8, 0x00),
3505 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
3506 },
3507 { .freq = 5510,
3508 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
3509 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3510 0x84, 0x4c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3511 0x00, 0xf8, 0x00, 0x4c, 0x00, 0x03, 0x00, 0x7f,
3512 0x00, 0x07, 0x00, 0xf8, 0x00),
3513 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
3514 },
3515 { .freq = 5520,
3516 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
3517 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3518 0x84, 0x4c, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3519 0x00, 0xf8, 0x00, 0x4c, 0x00, 0x03, 0x00, 0x7f,
3520 0x00, 0x07, 0x00, 0xf8, 0x00),
3521 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
3522 },
3523 { .freq = 5530,
3524 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
3525 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3526 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3527 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
3528 0x00, 0x07, 0x00, 0xf8, 0x00),
3529 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
3530 },
3531 { .freq = 5540,
3532 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
3533 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3534 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3535 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
3536 0x00, 0x07, 0x00, 0xf8, 0x00),
3537 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
3538 },
3539 { .freq = 5550,
3540 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
3541 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3542 0x84, 0x3b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3543 0x00, 0xf8, 0x00, 0x3b, 0x00, 0x03, 0x00, 0x7f,
3544 0x00, 0x07, 0x00, 0xf8, 0x00),
3545 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
3546 },
3547 { .freq = 5560,
3548 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
3549 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3550 0x84, 0x2b, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3551 0x00, 0xf8, 0x00, 0x2b, 0x00, 0x03, 0x00, 0x7f,
3552 0x00, 0x07, 0x00, 0xf8, 0x00),
3553 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
3554 },
3555 { .freq = 5570,
3556 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
3557 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3558 0x84, 0x2a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3559 0x00, 0xf8, 0x00, 0x2a, 0x00, 0x03, 0x00, 0x7f,
3560 0x00, 0x07, 0x00, 0xf8, 0x00),
3561 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
3562 },
3563 { .freq = 5580,
3564 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
3565 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3566 0x84, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3567 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
3568 0x00, 0x07, 0x00, 0xf8, 0x00),
3569 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
3570 },
3571 { .freq = 5590,
3572 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
3573 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
3574 0x84, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3575 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
3576 0x00, 0x07, 0x00, 0xf8, 0x00),
3577 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
3578 },
3579 { .freq = 5600,
3580 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
3581 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3582 0x70, 0x1a, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3583 0x00, 0xf8, 0x00, 0x1a, 0x00, 0x03, 0x00, 0x7f,
3584 0x00, 0x07, 0x00, 0xf8, 0x00),
3585 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
3586 },
3587 { .freq = 5610,
3588 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
3589 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3590 0x70, 0x19, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3591 0x00, 0xf8, 0x00, 0x19, 0x00, 0x03, 0x00, 0x7f,
3592 0x00, 0x07, 0x00, 0xf8, 0x00),
3593 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
3594 },
3595 { .freq = 5620,
3596 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
3597 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3598 0x70, 0x19, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3599 0x00, 0xf8, 0x00, 0x19, 0x00, 0x03, 0x00, 0x7f,
3600 0x00, 0x07, 0x00, 0xf8, 0x00),
3601 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
3602 },
3603 { .freq = 5630,
3604 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
3605 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3606 0x70, 0x09, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3607 0x00, 0xf8, 0x00, 0x09, 0x00, 0x03, 0x00, 0x7f,
3608 0x00, 0x07, 0x00, 0xf8, 0x00),
3609 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
3610 },
3611 { .freq = 5640,
3612 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
3613 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3614 0x70, 0x09, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3615 0x00, 0xf8, 0x00, 0x09, 0x00, 0x03, 0x00, 0x7f,
3616 0x00, 0x07, 0x00, 0xf8, 0x00),
3617 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
3618 },
3619 { .freq = 5650,
3620 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
3621 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3622 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3623 0x00, 0xf8, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
3624 0x00, 0x07, 0x00, 0xf8, 0x00),
3625 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
3626 },
3627 { .freq = 5660,
3628 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
3629 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3630 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3631 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
3632 0x00, 0x07, 0x00, 0xf6, 0x00),
3633 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
3634 },
3635 { .freq = 5670,
3636 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
3637 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3638 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3639 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
3640 0x00, 0x07, 0x00, 0xf6, 0x00),
3641 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
3642 },
3643 { .freq = 5680,
3644 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
3645 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3646 0x70, 0x08, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3647 0x00, 0xf6, 0x00, 0x08, 0x00, 0x03, 0x00, 0x7f,
3648 0x00, 0x07, 0x00, 0xf6, 0x00),
3649 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
3650 },
3651 { .freq = 5690,
3652 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
3653 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
3654 0x70, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x07,
3655 0x00, 0xf6, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
3656 0x00, 0x07, 0x00, 0xf6, 0x00),
3657 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
3658 },
3659 { .freq = 5700,
3660 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
3661 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3662 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3663 0x00, 0xf6, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
3664 0x00, 0x06, 0x00, 0xf6, 0x00),
3665 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
3666 },
3667 { .freq = 5710,
3668 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
3669 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3670 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3671 0x00, 0xf4, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
3672 0x00, 0x06, 0x00, 0xf4, 0x00),
3673 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
3674 },
3675 { .freq = 5720,
3676 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
3677 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3678 0x40, 0x07, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3679 0x00, 0xf4, 0x00, 0x07, 0x00, 0x02, 0x00, 0x7f,
3680 0x00, 0x06, 0x00, 0xf4, 0x00),
3681 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
3682 },
3683 { .freq = 5725,
3684 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
3685 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3686 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3687 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
3688 0x00, 0x06, 0x00, 0xf4, 0x00),
3689 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
3690 },
3691 { .freq = 5730,
3692 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
3693 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3694 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3695 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
3696 0x00, 0x06, 0x00, 0xf4, 0x00),
3697 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
3698 },
3699 { .freq = 5735,
3700 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
3701 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3702 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3703 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
3704 0x00, 0x06, 0x00, 0xf4, 0x00),
3705 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
3706 },
3707 { .freq = 5740,
3708 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
3709 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3710 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3711 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
3712 0x00, 0x06, 0x00, 0xf4, 0x00),
3713 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
3714 },
3715 { .freq = 5745,
3716 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
3717 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3718 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3719 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
3720 0x00, 0x06, 0x00, 0xf4, 0x00),
3721 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
3722 },
3723 { .freq = 5750,
3724 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
3725 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3726 0x40, 0x06, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3727 0x00, 0xf4, 0x00, 0x06, 0x00, 0x02, 0x00, 0x7f,
3728 0x00, 0x06, 0x00, 0xf4, 0x00),
3729 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
3730 },
3731 { .freq = 5755,
3732 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
3733 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3734 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3735 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
3736 0x00, 0x06, 0x00, 0xf4, 0x00),
3737 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
3738 },
3739 { .freq = 5760,
3740 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
3741 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3742 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3743 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
3744 0x00, 0x06, 0x00, 0xf4, 0x00),
3745 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
3746 },
3747 { .freq = 5765,
3748 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
3749 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3750 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3751 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
3752 0x00, 0x06, 0x00, 0xf4, 0x00),
3753 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
3754 },
3755 { .freq = 5770,
3756 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
3757 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3758 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3759 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
3760 0x00, 0x06, 0x00, 0xf4, 0x00),
3761 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
3762 },
3763 { .freq = 5775,
3764 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
3765 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3766 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3767 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
3768 0x00, 0x06, 0x00, 0xf4, 0x00),
3769 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
3770 },
3771 { .freq = 5780,
3772 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
3773 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
3774 0x40, 0x05, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3775 0x00, 0xf4, 0x00, 0x05, 0x00, 0x02, 0x00, 0x7f,
3776 0x00, 0x06, 0x00, 0xf4, 0x00),
3777 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
3778 },
3779 { .freq = 5785,
3780 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
3781 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
3782 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3783 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
3784 0x00, 0x06, 0x00, 0xf4, 0x00),
3785 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
3786 },
3787 { .freq = 5790,
3788 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
3789 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
3790 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3791 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
3792 0x00, 0x06, 0x00, 0xf4, 0x00),
3793 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
3794 },
3795 { .freq = 5795,
3796 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
3797 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
3798 0x40, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x06,
3799 0x00, 0xf4, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
3800 0x00, 0x06, 0x00, 0xf4, 0x00),
3801 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
3802 },
3803 { .freq = 5800,
3804 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
3805 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3806 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3807 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
3808 0x00, 0x06, 0x00, 0xf4, 0x00),
3809 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
3810 },
3811 { .freq = 5805,
3812 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
3813 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3814 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3815 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
3816 0x00, 0x06, 0x00, 0xf4, 0x00),
3817 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
3818 },
3819 { .freq = 5810,
3820 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
3821 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3822 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3823 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
3824 0x00, 0x06, 0x00, 0xf4, 0x00),
3825 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
3826 },
3827 { .freq = 5815,
3828 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
3829 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3830 0x20, 0x04, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3831 0x00, 0xf4, 0x00, 0x04, 0x00, 0x00, 0x00, 0x7f,
3832 0x00, 0x06, 0x00, 0xf4, 0x00),
3833 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
3834 },
3835 { .freq = 5820,
3836 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
3837 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3838 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3839 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
3840 0x00, 0x06, 0x00, 0xf4, 0x00),
3841 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
3842 },
3843 { .freq = 5825,
3844 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
3845 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3846 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3847 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
3848 0x00, 0x06, 0x00, 0xf4, 0x00),
3849 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
3850 },
3851 { .freq = 5830,
3852 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
3853 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3854 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3855 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
3856 0x00, 0x06, 0x00, 0xf4, 0x00),
3857 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
3858 },
3859 { .freq = 5840,
3860 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
3861 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3862 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3863 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
3864 0x00, 0x06, 0x00, 0xf4, 0x00),
3865 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
3866 },
3867 { .freq = 5850,
3868 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
3869 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3870 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3871 0x00, 0xf4, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
3872 0x00, 0x06, 0x00, 0xf4, 0x00),
3873 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
3874 },
3875 { .freq = 5860,
3876 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
3877 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3878 0x20, 0x03, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3879 0x00, 0xf2, 0x00, 0x03, 0x00, 0x00, 0x00, 0x7f,
3880 0x00, 0x06, 0x00, 0xf2, 0x00),
3881 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
3882 },
3883 { .freq = 5870,
3884 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
3885 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3886 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3887 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
3888 0x00, 0x06, 0x00, 0xf2, 0x00),
3889 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
3890 },
3891 { .freq = 5880,
3892 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
3893 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3894 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3895 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
3896 0x00, 0x06, 0x00, 0xf2, 0x00),
3897 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
3898 },
3899 { .freq = 5890,
3900 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
3901 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
3902 0x20, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x06,
3903 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
3904 0x00, 0x06, 0x00, 0xf2, 0x00),
3905 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
3906 },
3907 { .freq = 5900,
3908 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
3909 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
3910 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x05,
3911 0x00, 0xf2, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
3912 0x00, 0x05, 0x00, 0xf2, 0x00),
3913 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
3914 },
3915 { .freq = 5910,
3916 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
3917 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
3918 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x05,
3919 0x00, 0xf2, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f,
3920 0x00, 0x05, 0x00, 0xf2, 0x00),
3921 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
3922 },
3923 { .freq = 2412,
3924 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
3925 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
3926 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
3927 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
3928 0x70, 0x00, 0x0f, 0x00, 0x0f),
3929 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
3930 },
3931 { .freq = 2417,
3932 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
3933 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3934 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
3935 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
3936 0x70, 0x00, 0x0f, 0x00, 0x0f),
3937 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
3938 },
3939 { .freq = 2422,
3940 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
3941 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3942 0x00, 0x00, 0xff, 0x00, 0x05, 0x00, 0x70, 0x00,
3943 0x0f, 0x00, 0x0f, 0x00, 0xff, 0x00, 0x05, 0x00,
3944 0x70, 0x00, 0x0f, 0x00, 0x0f),
3945 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
3946 },
3947 { .freq = 2427,
3948 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
3949 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3950 0x00, 0x00, 0xfd, 0x00, 0x05, 0x00, 0x70, 0x00,
3951 0x0f, 0x00, 0x0f, 0x00, 0xfd, 0x00, 0x05, 0x00,
3952 0x70, 0x00, 0x0f, 0x00, 0x0f),
3953 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
3954 },
3955 { .freq = 2432,
3956 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
3957 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3958 0x00, 0x00, 0xfb, 0x00, 0x05, 0x00, 0x70, 0x00,
3959 0x0f, 0x00, 0x0f, 0x00, 0xfb, 0x00, 0x05, 0x00,
3960 0x70, 0x00, 0x0f, 0x00, 0x0f),
3961 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
3962 },
3963 { .freq = 2437,
3964 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
3965 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3966 0x00, 0x00, 0xfa, 0x00, 0x05, 0x00, 0x70, 0x00,
3967 0x0f, 0x00, 0x0f, 0x00, 0xfa, 0x00, 0x05, 0x00,
3968 0x70, 0x00, 0x0f, 0x00, 0x0f),
3969 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
3970 },
3971 { .freq = 2442,
3972 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
3973 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
3974 0x00, 0x00, 0xf8, 0x00, 0x05, 0x00, 0x70, 0x00,
3975 0x0f, 0x00, 0x0f, 0x00, 0xf8, 0x00, 0x05, 0x00,
3976 0x70, 0x00, 0x0f, 0x00, 0x0f),
3977 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
3978 },
3979 { .freq = 2447,
3980 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
3981 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
3982 0x00, 0x00, 0xf7, 0x00, 0x05, 0x00, 0x70, 0x00,
3983 0x0f, 0x00, 0x0f, 0x00, 0xf7, 0x00, 0x05, 0x00,
3984 0x70, 0x00, 0x0f, 0x00, 0x0f),
3985 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
3986 },
3987 { .freq = 2452,
3988 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
3989 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
3990 0x00, 0x00, 0xf6, 0x00, 0x05, 0x00, 0x70, 0x00,
3991 0x0f, 0x00, 0x0f, 0x00, 0xf6, 0x00, 0x05, 0x00,
3992 0x70, 0x00, 0x0f, 0x00, 0x0f),
3993 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
3994 },
3995 { .freq = 2457,
3996 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
3997 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
3998 0x00, 0x00, 0xf5, 0x00, 0x05, 0x00, 0x70, 0x00,
3999 0x0f, 0x00, 0x0d, 0x00, 0xf5, 0x00, 0x05, 0x00,
4000 0x70, 0x00, 0x0f, 0x00, 0x0d),
4001 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
4002 },
4003 { .freq = 2462,
4004 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
4005 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4006 0x00, 0x00, 0xf4, 0x00, 0x05, 0x00, 0x70, 0x00,
4007 0x0f, 0x00, 0x0d, 0x00, 0xf4, 0x00, 0x05, 0x00,
4008 0x70, 0x00, 0x0f, 0x00, 0x0d),
4009 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
4010 },
4011 { .freq = 2467,
4012 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
4013 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4014 0x00, 0x00, 0xf3, 0x00, 0x05, 0x00, 0x70, 0x00,
4015 0x0f, 0x00, 0x0d, 0x00, 0xf3, 0x00, 0x05, 0x00,
4016 0x70, 0x00, 0x0f, 0x00, 0x0d),
4017 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
4018 },
4019 { .freq = 2472,
4020 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
4021 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
4022 0x00, 0x00, 0xf2, 0x00, 0x05, 0x00, 0x70, 0x00,
4023 0x0f, 0x00, 0x0d, 0x00, 0xf2, 0x00, 0x05, 0x00,
4024 0x70, 0x00, 0x0f, 0x00, 0x0d),
4025 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
4026 },
4027 { .freq = 2484,
4028 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
4029 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
4030 0x00, 0x00, 0xf0, 0x00, 0x05, 0x00, 0x70, 0x00,
4031 0x0f, 0x00, 0x0d, 0x00, 0xf0, 0x00, 0x05, 0x00,
4032 0x70, 0x00, 0x0f, 0x00, 0x0d),
4033 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
4034 },
4035};
4036
4037static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev4[] = {
4038 { .freq = 4920,
4039 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
4040 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
4041 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
4042 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
4043 0x00, 0x0f, 0x00, 0xff, 0x00),
4044 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
4045 },
4046 { .freq = 4930,
4047 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
4048 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
4049 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
4050 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
4051 0x00, 0x0f, 0x00, 0xff, 0x00),
4052 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
4053 },
4054 { .freq = 4940,
4055 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
4056 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
4057 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
4058 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
4059 0x00, 0x0f, 0x00, 0xff, 0x00),
4060 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
4061 },
4062 { .freq = 4950,
4063 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
4064 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
4065 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
4066 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
4067 0x00, 0x0f, 0x00, 0xff, 0x00),
4068 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
4069 },
4070 { .freq = 4960,
4071 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
4072 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4073 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
4074 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
4075 0x00, 0x0f, 0x00, 0xff, 0x00),
4076 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
4077 },
4078 { .freq = 4970,
4079 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
4080 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4081 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
4082 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
4083 0x00, 0x0f, 0x00, 0xff, 0x00),
4084 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
4085 },
4086 { .freq = 4980,
4087 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
4088 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4089 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
4090 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
4091 0x00, 0x0f, 0x00, 0xff, 0x00),
4092 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
4093 },
4094 { .freq = 4990,
4095 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
4096 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4097 0xff, 0xff, 0x00, 0x0e, 0x00, 0x7f, 0x00, 0x0f,
4098 0x00, 0xff, 0x00, 0xff, 0x00, 0x0e, 0x00, 0x7f,
4099 0x00, 0x0f, 0x00, 0xff, 0x00),
4100 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
4101 },
4102 { .freq = 5000,
4103 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
4104 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4105 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4106 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4107 0x00, 0x0f, 0x00, 0xff, 0x00),
4108 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
4109 },
4110 { .freq = 5010,
4111 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
4112 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4113 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4114 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4115 0x00, 0x0f, 0x00, 0xff, 0x00),
4116 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
4117 },
4118 { .freq = 5020,
4119 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
4120 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4121 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4122 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4123 0x00, 0x0f, 0x00, 0xff, 0x00),
4124 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
4125 },
4126 { .freq = 5030,
4127 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
4128 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4129 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4130 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4131 0x00, 0x0f, 0x00, 0xff, 0x00),
4132 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
4133 },
4134 { .freq = 5040,
4135 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
4136 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4137 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4138 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4139 0x00, 0x0f, 0x00, 0xff, 0x00),
4140 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
4141 },
4142 { .freq = 5050,
4143 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
4144 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4145 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4146 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4147 0x00, 0x0f, 0x00, 0xff, 0x00),
4148 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
4149 },
4150 { .freq = 5060,
4151 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
4152 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4153 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4154 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4155 0x00, 0x0f, 0x00, 0xff, 0x00),
4156 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
4157 },
4158 { .freq = 5070,
4159 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
4160 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4161 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4162 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4163 0x00, 0x0f, 0x00, 0xff, 0x00),
4164 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
4165 },
4166 { .freq = 5080,
4167 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
4168 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4169 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4170 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4171 0x00, 0x0f, 0x00, 0xff, 0x00),
4172 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
4173 },
4174 { .freq = 5090,
4175 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
4176 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
4177 0xff, 0xff, 0x00, 0x0d, 0x00, 0x7f, 0x00, 0x0f,
4178 0x00, 0xff, 0x00, 0xff, 0x00, 0x0d, 0x00, 0x7f,
4179 0x00, 0x0f, 0x00, 0xff, 0x00),
4180 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
4181 },
4182 { .freq = 5100,
4183 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
4184 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4185 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4186 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
4187 0x00, 0x0f, 0x00, 0xfe, 0x00),
4188 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
4189 },
4190 { .freq = 5110,
4191 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
4192 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4193 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4194 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
4195 0x00, 0x0f, 0x00, 0xfe, 0x00),
4196 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
4197 },
4198 { .freq = 5120,
4199 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
4200 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4201 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4202 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
4203 0x00, 0x0f, 0x00, 0xfe, 0x00),
4204 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
4205 },
4206 { .freq = 5130,
4207 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
4208 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4209 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4210 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
4211 0x00, 0x0f, 0x00, 0xfe, 0x00),
4212 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
4213 },
4214 { .freq = 5140,
4215 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
4216 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4217 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4218 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
4219 0x00, 0x0f, 0x00, 0xfe, 0x00),
4220 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
4221 },
4222 { .freq = 5160,
4223 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
4224 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4225 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4226 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
4227 0x00, 0x0f, 0x00, 0xfe, 0x00),
4228 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
4229 },
4230 { .freq = 5170,
4231 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
4232 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4233 0xff, 0xff, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4234 0x00, 0xfe, 0x00, 0xff, 0x00, 0x0c, 0x00, 0x7f,
4235 0x00, 0x0f, 0x00, 0xfe, 0x00),
4236 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
4237 },
4238 { .freq = 5180,
4239 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
4240 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4241 0xff, 0xef, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4242 0x00, 0xfe, 0x00, 0xef, 0x00, 0x0c, 0x00, 0x7f,
4243 0x00, 0x0f, 0x00, 0xfe, 0x00),
4244 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
4245 },
4246 { .freq = 5190,
4247 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
4248 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4249 0xff, 0xef, 0x00, 0x0c, 0x00, 0x7f, 0x00, 0x0f,
4250 0x00, 0xfe, 0x00, 0xef, 0x00, 0x0c, 0x00, 0x7f,
4251 0x00, 0x0f, 0x00, 0xfe, 0x00),
4252 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
4253 },
4254 { .freq = 5200,
4255 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
4256 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4257 0xff, 0xef, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4258 0x00, 0xfc, 0x00, 0xef, 0x00, 0x0a, 0x00, 0x7f,
4259 0x00, 0x0f, 0x00, 0xfc, 0x00),
4260 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
4261 },
4262 { .freq = 5210,
4263 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
4264 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4265 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4266 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
4267 0x00, 0x0f, 0x00, 0xfc, 0x00),
4268 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
4269 },
4270 { .freq = 5220,
4271 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
4272 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4273 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4274 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
4275 0x00, 0x0f, 0x00, 0xfc, 0x00),
4276 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
4277 },
4278 { .freq = 5230,
4279 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
4280 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4281 0xff, 0xdf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4282 0x00, 0xfc, 0x00, 0xdf, 0x00, 0x0a, 0x00, 0x7f,
4283 0x00, 0x0f, 0x00, 0xfc, 0x00),
4284 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
4285 },
4286 { .freq = 5240,
4287 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
4288 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4289 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4290 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
4291 0x00, 0x0f, 0x00, 0xfc, 0x00),
4292 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
4293 },
4294 { .freq = 5250,
4295 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
4296 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4297 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4298 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
4299 0x00, 0x0f, 0x00, 0xfc, 0x00),
4300 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
4301 },
4302 { .freq = 5260,
4303 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
4304 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
4305 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4306 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
4307 0x00, 0x0f, 0x00, 0xfc, 0x00),
4308 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
4309 },
4310 { .freq = 5270,
4311 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
4312 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
4313 0xff, 0xcf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4314 0x00, 0xfc, 0x00, 0xcf, 0x00, 0x0a, 0x00, 0x7f,
4315 0x00, 0x0f, 0x00, 0xfc, 0x00),
4316 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
4317 },
4318 { .freq = 5280,
4319 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
4320 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
4321 0xff, 0xbf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4322 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x0a, 0x00, 0x7f,
4323 0x00, 0x0f, 0x00, 0xfc, 0x00),
4324 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
4325 },
4326 { .freq = 5290,
4327 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
4328 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
4329 0xff, 0xbf, 0x00, 0x0a, 0x00, 0x7f, 0x00, 0x0f,
4330 0x00, 0xfc, 0x00, 0xbf, 0x00, 0x0a, 0x00, 0x7f,
4331 0x00, 0x0f, 0x00, 0xfc, 0x00),
4332 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
4333 },
4334 { .freq = 5300,
4335 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
4336 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4337 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4338 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
4339 0x00, 0x0f, 0x00, 0xfa, 0x00),
4340 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
4341 },
4342 { .freq = 5310,
4343 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
4344 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4345 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4346 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
4347 0x00, 0x0f, 0x00, 0xfa, 0x00),
4348 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
4349 },
4350 { .freq = 5320,
4351 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
4352 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4353 0xff, 0xbf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4354 0x00, 0xfa, 0x00, 0xbf, 0x00, 0x08, 0x00, 0x7f,
4355 0x00, 0x0f, 0x00, 0xfa, 0x00),
4356 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
4357 },
4358 { .freq = 5330,
4359 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
4360 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4361 0xff, 0xaf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4362 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x08, 0x00, 0x7f,
4363 0x00, 0x0f, 0x00, 0xfa, 0x00),
4364 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
4365 },
4366 { .freq = 5340,
4367 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
4368 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4369 0xff, 0xaf, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4370 0x00, 0xfa, 0x00, 0xaf, 0x00, 0x08, 0x00, 0x7f,
4371 0x00, 0x0f, 0x00, 0xfa, 0x00),
4372 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
4373 },
4374 { .freq = 5350,
4375 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
4376 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4377 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4378 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
4379 0x00, 0x0f, 0x00, 0xfa, 0x00),
4380 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
4381 },
4382 { .freq = 5360,
4383 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
4384 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4385 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4386 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
4387 0x00, 0x0f, 0x00, 0xfa, 0x00),
4388 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
4389 },
4390 { .freq = 5370,
4391 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
4392 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4393 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4394 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
4395 0x00, 0x0f, 0x00, 0xfa, 0x00),
4396 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
4397 },
4398 { .freq = 5380,
4399 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
4400 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4401 0xff, 0x9f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4402 0x00, 0xfa, 0x00, 0x9f, 0x00, 0x08, 0x00, 0x7f,
4403 0x00, 0x0f, 0x00, 0xfa, 0x00),
4404 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
4405 },
4406 { .freq = 5390,
4407 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
4408 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
4409 0xff, 0x8f, 0x00, 0x08, 0x00, 0x7f, 0x00, 0x0f,
4410 0x00, 0xfa, 0x00, 0x8f, 0x00, 0x08, 0x00, 0x7f,
4411 0x00, 0x0f, 0x00, 0xfa, 0x00),
4412 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
4413 },
4414 { .freq = 5400,
4415 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
4416 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
4417 0xc8, 0x8f, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4418 0x00, 0xf8, 0x00, 0x8f, 0x00, 0x07, 0x00, 0x7f,
4419 0x00, 0x0f, 0x00, 0xf8, 0x00),
4420 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
4421 },
4422 { .freq = 5410,
4423 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
4424 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
4425 0xc8, 0x8f, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4426 0x00, 0xf8, 0x00, 0x8f, 0x00, 0x07, 0x00, 0x7f,
4427 0x00, 0x0f, 0x00, 0xf8, 0x00),
4428 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
4429 },
4430 { .freq = 5420,
4431 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
4432 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
4433 0xc8, 0x8e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4434 0x00, 0xf8, 0x00, 0x8e, 0x00, 0x07, 0x00, 0x7f,
4435 0x00, 0x0f, 0x00, 0xf8, 0x00),
4436 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
4437 },
4438 { .freq = 5430,
4439 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
4440 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
4441 0xc8, 0x8e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4442 0x00, 0xf8, 0x00, 0x8e, 0x00, 0x07, 0x00, 0x7f,
4443 0x00, 0x0f, 0x00, 0xf8, 0x00),
4444 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
4445 },
4446 { .freq = 5440,
4447 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
4448 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
4449 0xc8, 0x7e, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4450 0x00, 0xf8, 0x00, 0x7e, 0x00, 0x07, 0x00, 0x7f,
4451 0x00, 0x0f, 0x00, 0xf8, 0x00),
4452 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
4453 },
4454 { .freq = 5450,
4455 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
4456 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
4457 0xc8, 0x7d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4458 0x00, 0xf8, 0x00, 0x7d, 0x00, 0x07, 0x00, 0x7f,
4459 0x00, 0x0f, 0x00, 0xf8, 0x00),
4460 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
4461 },
4462 { .freq = 5460,
4463 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
4464 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
4465 0xc8, 0x6d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4466 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x07, 0x00, 0x7f,
4467 0x00, 0x0f, 0x00, 0xf8, 0x00),
4468 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
4469 },
4470 { .freq = 5470,
4471 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
4472 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
4473 0xc8, 0x6d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4474 0x00, 0xf8, 0x00, 0x6d, 0x00, 0x07, 0x00, 0x7f,
4475 0x00, 0x0f, 0x00, 0xf8, 0x00),
4476 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
4477 },
4478 { .freq = 5480,
4479 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
4480 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
4481 0xc8, 0x5d, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4482 0x00, 0xf8, 0x00, 0x5d, 0x00, 0x07, 0x00, 0x7f,
4483 0x00, 0x0f, 0x00, 0xf8, 0x00),
4484 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
4485 },
4486 { .freq = 5490,
4487 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
4488 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
4489 0xc8, 0x5c, 0x00, 0x07, 0x00, 0x7f, 0x00, 0x0f,
4490 0x00, 0xf8, 0x00, 0x5c, 0x00, 0x07, 0x00, 0x7f,
4491 0x00, 0x0f, 0x00, 0xf8, 0x00),
4492 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
4493 },
4494 { .freq = 5500,
4495 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
4496 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4497 0x84, 0x5c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4498 0x00, 0xf6, 0x00, 0x5c, 0x00, 0x06, 0x00, 0x7f,
4499 0x00, 0x0d, 0x00, 0xf6, 0x00),
4500 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
4501 },
4502 { .freq = 5510,
4503 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
4504 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4505 0x84, 0x4c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4506 0x00, 0xf6, 0x00, 0x4c, 0x00, 0x06, 0x00, 0x7f,
4507 0x00, 0x0d, 0x00, 0xf6, 0x00),
4508 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
4509 },
4510 { .freq = 5520,
4511 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
4512 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4513 0x84, 0x4c, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4514 0x00, 0xf6, 0x00, 0x4c, 0x00, 0x06, 0x00, 0x7f,
4515 0x00, 0x0d, 0x00, 0xf6, 0x00),
4516 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
4517 },
4518 { .freq = 5530,
4519 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
4520 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4521 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4522 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
4523 0x00, 0x0d, 0x00, 0xf6, 0x00),
4524 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
4525 },
4526 { .freq = 5540,
4527 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
4528 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4529 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4530 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
4531 0x00, 0x0d, 0x00, 0xf6, 0x00),
4532 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
4533 },
4534 { .freq = 5550,
4535 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
4536 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4537 0x84, 0x3b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4538 0x00, 0xf6, 0x00, 0x3b, 0x00, 0x06, 0x00, 0x7f,
4539 0x00, 0x0d, 0x00, 0xf6, 0x00),
4540 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
4541 },
4542 { .freq = 5560,
4543 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
4544 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4545 0x84, 0x2b, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4546 0x00, 0xf6, 0x00, 0x2b, 0x00, 0x06, 0x00, 0x7f,
4547 0x00, 0x0d, 0x00, 0xf6, 0x00),
4548 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
4549 },
4550 { .freq = 5570,
4551 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
4552 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4553 0x84, 0x2a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4554 0x00, 0xf6, 0x00, 0x2a, 0x00, 0x06, 0x00, 0x7f,
4555 0x00, 0x0d, 0x00, 0xf6, 0x00),
4556 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
4557 },
4558 { .freq = 5580,
4559 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
4560 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4561 0x84, 0x1a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4562 0x00, 0xf6, 0x00, 0x1a, 0x00, 0x06, 0x00, 0x7f,
4563 0x00, 0x0d, 0x00, 0xf6, 0x00),
4564 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
4565 },
4566 { .freq = 5590,
4567 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
4568 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
4569 0x84, 0x1a, 0x00, 0x06, 0x00, 0x7f, 0x00, 0x0d,
4570 0x00, 0xf6, 0x00, 0x1a, 0x00, 0x06, 0x00, 0x7f,
4571 0x00, 0x0d, 0x00, 0xf6, 0x00),
4572 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
4573 },
4574 { .freq = 5600,
4575 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
4576 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4577 0x70, 0x1a, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4578 0x00, 0xf4, 0x00, 0x1a, 0x00, 0x04, 0x00, 0x7f,
4579 0x00, 0x0b, 0x00, 0xf4, 0x00),
4580 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
4581 },
4582 { .freq = 5610,
4583 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
4584 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4585 0x70, 0x19, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4586 0x00, 0xf4, 0x00, 0x19, 0x00, 0x04, 0x00, 0x7f,
4587 0x00, 0x0b, 0x00, 0xf4, 0x00),
4588 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
4589 },
4590 { .freq = 5620,
4591 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
4592 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4593 0x70, 0x19, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4594 0x00, 0xf4, 0x00, 0x19, 0x00, 0x04, 0x00, 0x7f,
4595 0x00, 0x0b, 0x00, 0xf4, 0x00),
4596 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
4597 },
4598 { .freq = 5630,
4599 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
4600 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4601 0x70, 0x09, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4602 0x00, 0xf4, 0x00, 0x09, 0x00, 0x04, 0x00, 0x7f,
4603 0x00, 0x0b, 0x00, 0xf4, 0x00),
4604 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
4605 },
4606 { .freq = 5640,
4607 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
4608 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4609 0x70, 0x09, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4610 0x00, 0xf4, 0x00, 0x09, 0x00, 0x04, 0x00, 0x7f,
4611 0x00, 0x0b, 0x00, 0xf4, 0x00),
4612 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
4613 },
4614 { .freq = 5650,
4615 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
4616 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4617 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4618 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
4619 0x00, 0x0b, 0x00, 0xf4, 0x00),
4620 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
4621 },
4622 { .freq = 5660,
4623 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
4624 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4625 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4626 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
4627 0x00, 0x0b, 0x00, 0xf4, 0x00),
4628 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
4629 },
4630 { .freq = 5670,
4631 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
4632 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4633 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4634 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
4635 0x00, 0x0b, 0x00, 0xf4, 0x00),
4636 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
4637 },
4638 { .freq = 5680,
4639 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
4640 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4641 0x70, 0x08, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4642 0x00, 0xf4, 0x00, 0x08, 0x00, 0x04, 0x00, 0x7f,
4643 0x00, 0x0b, 0x00, 0xf4, 0x00),
4644 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
4645 },
4646 { .freq = 5690,
4647 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
4648 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
4649 0x70, 0x07, 0x00, 0x04, 0x00, 0x7f, 0x00, 0x0b,
4650 0x00, 0xf4, 0x00, 0x07, 0x00, 0x04, 0x00, 0x7f,
4651 0x00, 0x0b, 0x00, 0xf4, 0x00),
4652 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
4653 },
4654 { .freq = 5700,
4655 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
4656 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4657 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4658 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
4659 0x00, 0x0a, 0x00, 0xf2, 0x00),
4660 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
4661 },
4662 { .freq = 5710,
4663 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
4664 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4665 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4666 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
4667 0x00, 0x0a, 0x00, 0xf2, 0x00),
4668 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
4669 },
4670 { .freq = 5720,
4671 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
4672 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4673 0x40, 0x07, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4674 0x00, 0xf2, 0x00, 0x07, 0x00, 0x03, 0x00, 0x7f,
4675 0x00, 0x0a, 0x00, 0xf2, 0x00),
4676 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
4677 },
4678 { .freq = 5725,
4679 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
4680 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4681 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4682 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
4683 0x00, 0x0a, 0x00, 0xf2, 0x00),
4684 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
4685 },
4686 { .freq = 5730,
4687 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
4688 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4689 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4690 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
4691 0x00, 0x0a, 0x00, 0xf2, 0x00),
4692 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
4693 },
4694 { .freq = 5735,
4695 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
4696 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4697 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4698 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
4699 0x00, 0x0a, 0x00, 0xf2, 0x00),
4700 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
4701 },
4702 { .freq = 5740,
4703 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
4704 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4705 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4706 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
4707 0x00, 0x0a, 0x00, 0xf2, 0x00),
4708 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
4709 },
4710 { .freq = 5745,
4711 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
4712 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4713 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4714 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
4715 0x00, 0x0a, 0x00, 0xf2, 0x00),
4716 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
4717 },
4718 { .freq = 5750,
4719 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
4720 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4721 0x40, 0x06, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4722 0x00, 0xf2, 0x00, 0x06, 0x00, 0x03, 0x00, 0x7f,
4723 0x00, 0x0a, 0x00, 0xf2, 0x00),
4724 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
4725 },
4726 { .freq = 5755,
4727 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
4728 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4729 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4730 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
4731 0x00, 0x0a, 0x00, 0xf2, 0x00),
4732 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
4733 },
4734 { .freq = 5760,
4735 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
4736 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4737 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4738 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
4739 0x00, 0x0a, 0x00, 0xf2, 0x00),
4740 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
4741 },
4742 { .freq = 5765,
4743 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
4744 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4745 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4746 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
4747 0x00, 0x0a, 0x00, 0xf2, 0x00),
4748 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
4749 },
4750 { .freq = 5770,
4751 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
4752 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4753 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4754 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
4755 0x00, 0x0a, 0x00, 0xf2, 0x00),
4756 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
4757 },
4758 { .freq = 5775,
4759 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
4760 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4761 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4762 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
4763 0x00, 0x0a, 0x00, 0xf2, 0x00),
4764 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
4765 },
4766 { .freq = 5780,
4767 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
4768 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
4769 0x40, 0x05, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4770 0x00, 0xf2, 0x00, 0x05, 0x00, 0x03, 0x00, 0x7f,
4771 0x00, 0x0a, 0x00, 0xf2, 0x00),
4772 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
4773 },
4774 { .freq = 5785,
4775 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
4776 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
4777 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4778 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
4779 0x00, 0x0a, 0x00, 0xf2, 0x00),
4780 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
4781 },
4782 { .freq = 5790,
4783 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
4784 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
4785 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4786 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
4787 0x00, 0x0a, 0x00, 0xf2, 0x00),
4788 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
4789 },
4790 { .freq = 5795,
4791 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
4792 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
4793 0x40, 0x04, 0x00, 0x03, 0x00, 0x7f, 0x00, 0x0a,
4794 0x00, 0xf2, 0x00, 0x04, 0x00, 0x03, 0x00, 0x7f,
4795 0x00, 0x0a, 0x00, 0xf2, 0x00),
4796 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
4797 },
4798 { .freq = 5800,
4799 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
4800 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4801 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4802 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
4803 0x00, 0x09, 0x00, 0xf0, 0x00),
4804 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
4805 },
4806 { .freq = 5805,
4807 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
4808 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4809 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4810 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
4811 0x00, 0x09, 0x00, 0xf0, 0x00),
4812 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
4813 },
4814 { .freq = 5810,
4815 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
4816 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4817 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4818 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
4819 0x00, 0x09, 0x00, 0xf0, 0x00),
4820 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
4821 },
4822 { .freq = 5815,
4823 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
4824 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4825 0x20, 0x04, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4826 0x00, 0xf0, 0x00, 0x04, 0x00, 0x02, 0x00, 0x7f,
4827 0x00, 0x09, 0x00, 0xf0, 0x00),
4828 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
4829 },
4830 { .freq = 5820,
4831 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
4832 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4833 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4834 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
4835 0x00, 0x09, 0x00, 0xf0, 0x00),
4836 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
4837 },
4838 { .freq = 5825,
4839 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
4840 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4841 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4842 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
4843 0x00, 0x09, 0x00, 0xf0, 0x00),
4844 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
4845 },
4846 { .freq = 5830,
4847 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
4848 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4849 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4850 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
4851 0x00, 0x09, 0x00, 0xf0, 0x00),
4852 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
4853 },
4854 { .freq = 5840,
4855 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
4856 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4857 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4858 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
4859 0x00, 0x09, 0x00, 0xf0, 0x00),
4860 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
4861 },
4862 { .freq = 5850,
4863 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
4864 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4865 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4866 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
4867 0x00, 0x09, 0x00, 0xf0, 0x00),
4868 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
4869 },
4870 { .freq = 5860,
4871 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
4872 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4873 0x20, 0x03, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4874 0x00, 0xf0, 0x00, 0x03, 0x00, 0x02, 0x00, 0x7f,
4875 0x00, 0x09, 0x00, 0xf0, 0x00),
4876 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
4877 },
4878 { .freq = 5870,
4879 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
4880 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4881 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4882 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
4883 0x00, 0x09, 0x00, 0xf0, 0x00),
4884 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
4885 },
4886 { .freq = 5880,
4887 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
4888 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4889 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4890 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
4891 0x00, 0x09, 0x00, 0xf0, 0x00),
4892 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
4893 },
4894 { .freq = 5890,
4895 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
4896 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
4897 0x20, 0x02, 0x00, 0x02, 0x00, 0x7f, 0x00, 0x09,
4898 0x00, 0xf0, 0x00, 0x02, 0x00, 0x02, 0x00, 0x7f,
4899 0x00, 0x09, 0x00, 0xf0, 0x00),
4900 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
4901 },
4902 { .freq = 5900,
4903 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
4904 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
4905 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x07,
4906 0x00, 0xf0, 0x00, 0x02, 0x00, 0x00, 0x00, 0x7f,
4907 0x00, 0x07, 0x00, 0xf0, 0x00),
4908 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
4909 },
4910 { .freq = 5910,
4911 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
4912 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
4913 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f, 0x00, 0x07,
4914 0x00, 0xf0, 0x00, 0x01, 0x00, 0x00, 0x00, 0x7f,
4915 0x00, 0x07, 0x00, 0xf0, 0x00),
4916 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
4917 },
4918 { .freq = 2412,
4919 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
4920 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
4921 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
4922 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
4923 0x70, 0x00, 0x0f, 0x00, 0x0e),
4924 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
4925 },
4926 { .freq = 2417,
4927 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
4928 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4929 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
4930 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
4931 0x70, 0x00, 0x0f, 0x00, 0x0e),
4932 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
4933 },
4934 { .freq = 2422,
4935 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
4936 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4937 0x00, 0x00, 0xff, 0x00, 0x04, 0x00, 0x70, 0x00,
4938 0x0f, 0x00, 0x0e, 0x00, 0xff, 0x00, 0x04, 0x00,
4939 0x70, 0x00, 0x0f, 0x00, 0x0e),
4940 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
4941 },
4942 { .freq = 2427,
4943 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
4944 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4945 0x00, 0x00, 0xfd, 0x00, 0x04, 0x00, 0x70, 0x00,
4946 0x0f, 0x00, 0x0e, 0x00, 0xfd, 0x00, 0x04, 0x00,
4947 0x70, 0x00, 0x0f, 0x00, 0x0e),
4948 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
4949 },
4950 { .freq = 2432,
4951 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
4952 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4953 0x00, 0x00, 0xfb, 0x00, 0x04, 0x00, 0x70, 0x00,
4954 0x0f, 0x00, 0x0e, 0x00, 0xfb, 0x00, 0x04, 0x00,
4955 0x70, 0x00, 0x0f, 0x00, 0x0e),
4956 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
4957 },
4958 { .freq = 2437,
4959 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
4960 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4961 0x00, 0x00, 0xfa, 0x00, 0x04, 0x00, 0x70, 0x00,
4962 0x0f, 0x00, 0x0e, 0x00, 0xfa, 0x00, 0x04, 0x00,
4963 0x70, 0x00, 0x0f, 0x00, 0x0e),
4964 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
4965 },
4966 { .freq = 2442,
4967 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
4968 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
4969 0x00, 0x00, 0xf8, 0x00, 0x04, 0x00, 0x70, 0x00,
4970 0x0f, 0x00, 0x0e, 0x00, 0xf8, 0x00, 0x04, 0x00,
4971 0x70, 0x00, 0x0f, 0x00, 0x0e),
4972 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
4973 },
4974 { .freq = 2447,
4975 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
4976 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4977 0x00, 0x00, 0xf7, 0x00, 0x04, 0x00, 0x70, 0x00,
4978 0x0f, 0x00, 0x0e, 0x00, 0xf7, 0x00, 0x04, 0x00,
4979 0x70, 0x00, 0x0f, 0x00, 0x0e),
4980 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
4981 },
4982 { .freq = 2452,
4983 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
4984 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4985 0x00, 0x00, 0xf6, 0x00, 0x04, 0x00, 0x70, 0x00,
4986 0x0f, 0x00, 0x0e, 0x00, 0xf6, 0x00, 0x04, 0x00,
4987 0x70, 0x00, 0x0f, 0x00, 0x0e),
4988 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
4989 },
4990 { .freq = 2457,
4991 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
4992 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
4993 0x00, 0x00, 0xf5, 0x00, 0x04, 0x00, 0x70, 0x00,
4994 0x0f, 0x00, 0x0e, 0x00, 0xf5, 0x00, 0x04, 0x00,
4995 0x70, 0x00, 0x0f, 0x00, 0x0e),
4996 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
4997 },
4998 { .freq = 2462,
4999 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
5000 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5001 0x00, 0x00, 0xf4, 0x00, 0x04, 0x00, 0x70, 0x00,
5002 0x0f, 0x00, 0x0e, 0x00, 0xf4, 0x00, 0x04, 0x00,
5003 0x70, 0x00, 0x0f, 0x00, 0x0e),
5004 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
5005 },
5006 { .freq = 2467,
5007 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
5008 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5009 0x00, 0x00, 0xf3, 0x00, 0x04, 0x00, 0x70, 0x00,
5010 0x0f, 0x00, 0x0e, 0x00, 0xf3, 0x00, 0x04, 0x00,
5011 0x70, 0x00, 0x0f, 0x00, 0x0e),
5012 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
5013 },
5014 { .freq = 2472,
5015 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
5016 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
5017 0x00, 0x00, 0xf2, 0x00, 0x04, 0x00, 0x70, 0x00,
5018 0x0f, 0x00, 0x0e, 0x00, 0xf2, 0x00, 0x04, 0x00,
5019 0x70, 0x00, 0x0f, 0x00, 0x0e),
5020 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
5021 },
5022 { .freq = 2484,
5023 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
5024 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
5025 0x00, 0x00, 0xf0, 0x00, 0x04, 0x00, 0x70, 0x00,
5026 0x0f, 0x00, 0x0e, 0x00, 0xf0, 0x00, 0x04, 0x00,
5027 0x70, 0x00, 0x0f, 0x00, 0x0e),
5028 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
5029 },
5030};
5031
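/*
 * Illustrative sketch, not part of the original table file: each row in
 * these channel tables appears to pair a centre frequency in MHz (.freq)
 * with the radio register values (RADIOREGS3) and the PHY register values
 * (PHYREGS) that get programmed when the N-PHY is tuned to that channel.
 * A hypothetical lookup helper (the name and signature below are
 * assumptions, not taken from the driver) could select a row by frequency
 * like this, reusing the entry type declared earlier in this file:
 */
static const struct b43_nphy_channeltab_entry_rev3 *
example_find_chantab_entry(const struct b43_nphy_channeltab_entry_rev3 *tab,
                           size_t n, u16 freq_mhz)
{
        size_t i;

        /* Linear scan; the tables are small enough that this is cheap. */
        for (i = 0; i < n; i++)
                if (tab[i].freq == freq_mhz)
                        return &tab[i];

        return NULL;    /* frequency not present in this table */
}
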
5032static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev5[] = {
5033 { .freq = 4920,
5034 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
5035 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
5036 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0f,
5037 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
5038 0x00, 0x0f, 0x00, 0x6f, 0x00),
5039 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
5040 },
5041 { .freq = 4930,
5042 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
5043 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
5044 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
5045 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
5046 0x00, 0x0e, 0x00, 0x6f, 0x00),
5047 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
5048 },
5049 { .freq = 4940,
5050 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
5051 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
5052 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
5053 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
5054 0x00, 0x0e, 0x00, 0x6f, 0x00),
5055 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
5056 },
5057 { .freq = 4950,
5058 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
5059 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
5060 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
5061 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
5062 0x00, 0x0e, 0x00, 0x6f, 0x00),
5063 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
5064 },
5065 { .freq = 4960,
5066 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
5067 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5068 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0e,
5069 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
5070 0x00, 0x0e, 0x00, 0x6f, 0x00),
5071 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
5072 },
5073 { .freq = 4970,
5074 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
5075 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5076 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
5077 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
5078 0x00, 0x0d, 0x00, 0x6f, 0x00),
5079 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
5080 },
5081 { .freq = 4980,
5082 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
5083 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5084 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
5085 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
5086 0x00, 0x0d, 0x00, 0x6f, 0x00),
5087 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
5088 },
5089 { .freq = 4990,
5090 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
5091 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5092 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
5093 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
5094 0x00, 0x0d, 0x00, 0x6f, 0x00),
5095 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
5096 },
5097 { .freq = 5000,
5098 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
5099 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5100 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
5101 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
5102 0x00, 0x0d, 0x00, 0x6f, 0x00),
5103 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
5104 },
5105 { .freq = 5010,
5106 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
5107 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5108 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
5109 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
5110 0x00, 0x0d, 0x00, 0x6f, 0x00),
5111 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
5112 },
5113 { .freq = 5020,
5114 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
5115 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5116 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0d,
5117 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
5118 0x00, 0x0d, 0x00, 0x6f, 0x00),
5119 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
5120 },
5121 { .freq = 5030,
5122 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
5123 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5124 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
5125 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
5126 0x00, 0x0c, 0x00, 0x6f, 0x00),
5127 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
5128 },
5129 { .freq = 5040,
5130 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
5131 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5132 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
5133 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
5134 0x00, 0x0c, 0x00, 0x6f, 0x00),
5135 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
5136 },
5137 { .freq = 5050,
5138 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
5139 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5140 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
5141 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
5142 0x00, 0x0c, 0x00, 0x6f, 0x00),
5143 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
5144 },
5145 { .freq = 5060,
5146 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
5147 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5148 0xff, 0xfd, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
5149 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x70,
5150 0x00, 0x0c, 0x00, 0x6f, 0x00),
5151 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
5152 },
5153 { .freq = 5070,
5154 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
5155 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5156 0xff, 0xfd, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
5157 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x70,
5158 0x00, 0x0b, 0x00, 0x6f, 0x00),
5159 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
5160 },
5161 { .freq = 5080,
5162 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
5163 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5164 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
5165 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
5166 0x00, 0x0b, 0x00, 0x6f, 0x00),
5167 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
5168 },
5169 { .freq = 5090,
5170 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
5171 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
5172 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
5173 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
5174 0x00, 0x0b, 0x00, 0x6f, 0x00),
5175 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
5176 },
5177 { .freq = 5100,
5178 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
5179 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5180 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
5181 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
5182 0x00, 0x0b, 0x00, 0x6f, 0x00),
5183 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
5184 },
5185 { .freq = 5110,
5186 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
5187 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5188 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
5189 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
5190 0x00, 0x0b, 0x00, 0x6f, 0x00),
5191 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
5192 },
5193 { .freq = 5120,
5194 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
5195 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5196 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
5197 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
5198 0x00, 0x0b, 0x00, 0x6f, 0x00),
5199 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
5200 },
5201 { .freq = 5130,
5202 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
5203 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5204 0xff, 0xfb, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0a,
5205 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x70,
5206 0x00, 0x0a, 0x00, 0x6f, 0x00),
5207 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
5208 },
5209 { .freq = 5140,
5210 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
5211 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5212 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x0a,
5213 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
5214 0x00, 0x0a, 0x00, 0x6f, 0x00),
5215 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
5216 },
5217 { .freq = 5160,
5218 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
5219 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5220 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x09,
5221 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
5222 0x00, 0x09, 0x00, 0x6e, 0x00),
5223 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
5224 },
5225 { .freq = 5170,
5226 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
5227 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5228 0xff, 0xfb, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
5229 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x06, 0x00, 0x70,
5230 0x00, 0x09, 0x00, 0x6e, 0x00),
5231 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
5232 },
5233 { .freq = 5180,
5234 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
5235 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5236 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
5237 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
5238 0x00, 0x09, 0x00, 0x6e, 0x00),
5239 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
5240 },
5241 { .freq = 5190,
5242 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
5243 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5244 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
5245 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
5246 0x00, 0x09, 0x00, 0x6e, 0x00),
5247 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
5248 },
5249 { .freq = 5200,
5250 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
5251 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5252 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
5253 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
5254 0x00, 0x09, 0x00, 0x6e, 0x00),
5255 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
5256 },
5257 { .freq = 5210,
5258 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
5259 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5260 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
5261 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
5262 0x00, 0x09, 0x00, 0x6e, 0x00),
5263 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
5264 },
5265 { .freq = 5220,
5266 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
5267 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5268 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
5269 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
5270 0x00, 0x09, 0x00, 0x6e, 0x00),
5271 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
5272 },
5273 { .freq = 5230,
5274 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
5275 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5276 0xff, 0xea, 0x00, 0x06, 0x00, 0x70, 0x00, 0x08,
5277 0x00, 0x9e, 0x00, 0xea, 0x00, 0x06, 0x00, 0x70,
5278 0x00, 0x08, 0x00, 0x6e, 0x00),
5279 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
5280 },
5281 { .freq = 5240,
5282 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
5283 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5284 0xff, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
5285 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
5286 0x00, 0x08, 0x00, 0x6d, 0x00),
5287 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
5288 },
5289 { .freq = 5250,
5290 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
5291 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5292 0xff, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
5293 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
5294 0x00, 0x08, 0x00, 0x6d, 0x00),
5295 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
5296 },
5297 { .freq = 5260,
5298 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
5299 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
5300 0xff, 0xd9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
5301 0x00, 0x9d, 0x00, 0xd9, 0x00, 0x05, 0x00, 0x70,
5302 0x00, 0x08, 0x00, 0x6d, 0x00),
5303 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
5304 },
5305 { .freq = 5270,
5306 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
5307 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
5308 0xff, 0xd8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
5309 0x00, 0x9c, 0x00, 0xd8, 0x00, 0x04, 0x00, 0x70,
5310 0x00, 0x07, 0x00, 0x6c, 0x00),
5311 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
5312 },
5313 { .freq = 5280,
5314 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
5315 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
5316 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
5317 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
5318 0x00, 0x07, 0x00, 0x6c, 0x00),
5319 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
5320 },
5321 { .freq = 5290,
5322 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
5323 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0f, 0x00,
5324 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
5325 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
5326 0x00, 0x07, 0x00, 0x6c, 0x00),
5327 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
5328 },
5329 { .freq = 5300,
5330 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
5331 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5332 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
5333 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
5334 0x00, 0x07, 0x00, 0x6c, 0x00),
5335 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
5336 },
5337 { .freq = 5310,
5338 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
5339 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5340 0xff, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
5341 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
5342 0x00, 0x07, 0x00, 0x6c, 0x00),
5343 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
5344 },
5345 { .freq = 5320,
5346 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
5347 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5348 0xff, 0xb8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
5349 0x00, 0x9c, 0x00, 0xb8, 0x00, 0x04, 0x00, 0x70,
5350 0x00, 0x07, 0x00, 0x6c, 0x00),
5351 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
5352 },
5353 { .freq = 5330,
5354 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
5355 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5356 0xff, 0xb7, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
5357 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x04, 0x00, 0x70,
5358 0x00, 0x07, 0x00, 0x6b, 0x00),
5359 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
5360 },
5361 { .freq = 5340,
5362 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
5363 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5364 0xff, 0xb7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x07,
5365 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x70,
5366 0x00, 0x07, 0x00, 0x6b, 0x00),
5367 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
5368 },
5369 { .freq = 5350,
5370 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
5371 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5372 0xff, 0xa7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
5373 0x00, 0x9b, 0x00, 0xa7, 0x00, 0x03, 0x00, 0x70,
5374 0x00, 0x06, 0x00, 0x6b, 0x00),
5375 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
5376 },
5377 { .freq = 5360,
5378 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
5379 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5380 0xff, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
5381 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
5382 0x00, 0x06, 0x00, 0x6b, 0x00),
5383 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
5384 },
5385 { .freq = 5370,
5386 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
5387 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5388 0xff, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
5389 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
5390 0x00, 0x06, 0x00, 0x5b, 0x00),
5391 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
5392 },
5393 { .freq = 5380,
5394 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
5395 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5396 0xff, 0x96, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
5397 0x00, 0x9a, 0x00, 0x96, 0x00, 0x03, 0x00, 0x70,
5398 0x00, 0x06, 0x00, 0x5a, 0x00),
5399 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
5400 },
5401 { .freq = 5390,
5402 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
5403 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8f, 0x0e, 0x00,
5404 0xff, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
5405 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
5406 0x00, 0x06, 0x00, 0x5a, 0x00),
5407 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
5408 },
5409 { .freq = 5400,
5410 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
5411 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
5412 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
5413 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
5414 0x00, 0x06, 0x00, 0x5a, 0x00),
5415 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
5416 },
5417 { .freq = 5410,
5418 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
5419 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
5420 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
5421 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
5422 0x00, 0x05, 0x00, 0x5a, 0x00),
5423 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
5424 },
5425 { .freq = 5420,
5426 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
5427 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
5428 0xc8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
5429 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
5430 0x00, 0x05, 0x00, 0x5a, 0x00),
5431 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
5432 },
5433 { .freq = 5430,
5434 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
5435 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
5436 0xc8, 0x85, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
5437 0x00, 0x99, 0x00, 0x85, 0x00, 0x02, 0x00, 0x70,
5438 0x00, 0x05, 0x00, 0x59, 0x00),
5439 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
5440 },
5441 { .freq = 5440,
5442 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
5443 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
5444 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
5445 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
5446 0x00, 0x05, 0x00, 0x59, 0x00),
5447 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
5448 },
5449 { .freq = 5450,
5450 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
5451 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
5452 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
5453 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
5454 0x00, 0x05, 0x00, 0x59, 0x00),
5455 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
5456 },
5457 { .freq = 5460,
5458 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
5459 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
5460 0xc8, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x04,
5461 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
5462 0x00, 0x04, 0x00, 0x69, 0x00),
5463 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
5464 },
5465 { .freq = 5470,
5466 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
5467 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
5468 0xc8, 0x74, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
5469 0x00, 0x99, 0x00, 0x74, 0x00, 0x01, 0x00, 0x70,
5470 0x00, 0x04, 0x00, 0x69, 0x00),
5471 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
5472 },
5473 { .freq = 5480,
5474 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
5475 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
5476 0xc8, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
5477 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
5478 0x00, 0x04, 0x00, 0x68, 0x00),
5479 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
5480 },
5481 { .freq = 5490,
5482 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
5483 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0d, 0x00,
5484 0xc8, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
5485 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
5486 0x00, 0x04, 0x00, 0x68, 0x00),
5487 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
5488 },
5489 { .freq = 5500,
5490 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
5491 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5492 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
5493 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
5494 0x00, 0x04, 0x00, 0x78, 0x00),
5495 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
5496 },
5497 { .freq = 5510,
5498 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
5499 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5500 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
5501 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
5502 0x00, 0x04, 0x00, 0x78, 0x00),
5503 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
5504 },
5505 { .freq = 5520,
5506 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
5507 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5508 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
5509 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
5510 0x00, 0x04, 0x00, 0x78, 0x00),
5511 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
5512 },
5513 { .freq = 5530,
5514 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
5515 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5516 0x84, 0x63, 0x00, 0x01, 0x00, 0x70, 0x00, 0x03,
5517 0x00, 0x98, 0x00, 0x63, 0x00, 0x01, 0x00, 0x70,
5518 0x00, 0x03, 0x00, 0x78, 0x00),
5519 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
5520 },
5521 { .freq = 5540,
5522 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
5523 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5524 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
5525 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
5526 0x00, 0x03, 0x00, 0x77, 0x00),
5527 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
5528 },
5529 { .freq = 5550,
5530 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
5531 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5532 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
5533 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
5534 0x00, 0x03, 0x00, 0x77, 0x00),
5535 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
5536 },
5537 { .freq = 5560,
5538 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
5539 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5540 0x84, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
5541 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
5542 0x00, 0x03, 0x00, 0x77, 0x00),
5543 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
5544 },
5545 { .freq = 5570,
5546 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
5547 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5548 0x84, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
5549 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
5550 0x00, 0x02, 0x00, 0x76, 0x00),
5551 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
5552 },
5553 { .freq = 5580,
5554 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
5555 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5556 0x84, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
5557 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
5558 0x00, 0x02, 0x00, 0x76, 0x00),
5559 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
5560 },
5561 { .freq = 5590,
5562 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
5563 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8d, 0x0b, 0x00,
5564 0x84, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
5565 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
5566 0x00, 0x02, 0x00, 0x76, 0x00),
5567 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
5568 },
5569 { .freq = 5600,
5570 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
5571 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5572 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
5573 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
5574 0x00, 0x02, 0x00, 0x76, 0x00),
5575 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
5576 },
5577 { .freq = 5610,
5578 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
5579 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5580 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
5581 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
5582 0x00, 0x02, 0x00, 0x76, 0x00),
5583 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
5584 },
5585 { .freq = 5620,
5586 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
5587 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5588 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
5589 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
5590 0x00, 0x02, 0x00, 0x76, 0x00),
5591 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
5592 },
5593 { .freq = 5630,
5594 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
5595 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5596 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
5597 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
5598 0x00, 0x02, 0x00, 0x76, 0x00),
5599 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
5600 },
5601 { .freq = 5640,
5602 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
5603 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5604 0x70, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
5605 0x00, 0x95, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
5606 0x00, 0x02, 0x00, 0x75, 0x00),
5607 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
5608 },
5609 { .freq = 5650,
5610 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
5611 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5612 0x70, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5613 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
5614 0x00, 0x01, 0x00, 0x75, 0x00),
5615 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
5616 },
5617 { .freq = 5660,
5618 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
5619 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5620 0x70, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5621 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
5622 0x00, 0x01, 0x00, 0x75, 0x00),
5623 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
5624 },
5625 { .freq = 5670,
5626 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
5627 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5628 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5629 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
5630 0x00, 0x01, 0x00, 0x74, 0x00),
5631 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
5632 },
5633 { .freq = 5680,
5634 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
5635 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5636 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5637 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
5638 0x00, 0x01, 0x00, 0x74, 0x00),
5639 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
5640 },
5641 { .freq = 5690,
5642 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
5643 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8b, 0x09, 0x00,
5644 0x70, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5645 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
5646 0x00, 0x01, 0x00, 0x74, 0x00),
5647 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
5648 },
5649 { .freq = 5700,
5650 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
5651 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5652 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5653 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
5654 0x00, 0x01, 0x00, 0x74, 0x00),
5655 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
5656 },
5657 { .freq = 5710,
5658 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
5659 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5660 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5661 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
5662 0x00, 0x01, 0x00, 0x74, 0x00),
5663 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
5664 },
5665 { .freq = 5720,
5666 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
5667 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5668 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5669 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
5670 0x00, 0x01, 0x00, 0x74, 0x00),
5671 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
5672 },
5673 { .freq = 5725,
5674 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
5675 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5676 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5677 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
5678 0x00, 0x01, 0x00, 0x74, 0x00),
5679 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
5680 },
5681 { .freq = 5730,
5682 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
5683 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5684 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
5685 0x00, 0x94, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
5686 0x00, 0x01, 0x00, 0x84, 0x00),
5687 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
5688 },
5689 { .freq = 5735,
5690 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
5691 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5692 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5693 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
5694 0x00, 0x00, 0x00, 0x83, 0x00),
5695 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
5696 },
5697 { .freq = 5740,
5698 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
5699 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5700 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5701 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
5702 0x00, 0x00, 0x00, 0x83, 0x00),
5703 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
5704 },
5705 { .freq = 5745,
5706 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
5707 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5708 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5709 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
5710 0x00, 0x00, 0x00, 0x83, 0x00),
5711 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
5712 },
5713 { .freq = 5750,
5714 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
5715 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5716 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5717 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
5718 0x00, 0x00, 0x00, 0x83, 0x00),
5719 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
5720 },
5721 { .freq = 5755,
5722 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
5723 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5724 0x40, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5725 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
5726 0x00, 0x00, 0x00, 0x83, 0x00),
5727 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
5728 },
5729 { .freq = 5760,
5730 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
5731 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5732 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5733 0x00, 0x93, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
5734 0x00, 0x00, 0x00, 0x83, 0x00),
5735 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
5736 },
5737 { .freq = 5765,
5738 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
5739 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5740 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5741 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
5742 0x00, 0x00, 0x00, 0x82, 0x00),
5743 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
5744 },
5745 { .freq = 5770,
5746 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
5747 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5748 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5749 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
5750 0x00, 0x00, 0x00, 0x82, 0x00),
5751 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
5752 },
5753 { .freq = 5775,
5754 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
5755 0x10, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5756 0x40, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5757 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
5758 0x00, 0x00, 0x00, 0x82, 0x00),
5759 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
5760 },
5761 { .freq = 5780,
5762 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
5763 0x0c, 0x01, 0x05, 0x05, 0x05, 0x8a, 0x06, 0x00,
5764 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5765 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5766 0x00, 0x00, 0x00, 0x82, 0x00),
5767 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
5768 },
5769 { .freq = 5785,
5770 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
5771 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
5772 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5773 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5774 0x00, 0x00, 0x00, 0x82, 0x00),
5775 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
5776 },
5777 { .freq = 5790,
5778 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
5779 0x0c, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
5780 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5781 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5782 0x00, 0x00, 0x00, 0x82, 0x00),
5783 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
5784 },
5785 { .freq = 5795,
5786 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
5787 0x10, 0x01, 0x06, 0x06, 0x06, 0x8a, 0x06, 0x00,
5788 0x40, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5789 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5790 0x00, 0x00, 0x00, 0x82, 0x00),
5791 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
5792 },
5793 { .freq = 5800,
5794 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
5795 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5796 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5797 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5798 0x00, 0x00, 0x00, 0x82, 0x00),
5799 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
5800 },
5801 { .freq = 5805,
5802 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
5803 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5804 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5805 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5806 0x00, 0x00, 0x00, 0x82, 0x00),
5807 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
5808 },
5809 { .freq = 5810,
5810 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
5811 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5812 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5813 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5814 0x00, 0x00, 0x00, 0x82, 0x00),
5815 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
5816 },
5817 { .freq = 5815,
5818 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
5819 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5820 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5821 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5822 0x00, 0x00, 0x00, 0x82, 0x00),
5823 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
5824 },
5825 { .freq = 5820,
5826 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
5827 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5828 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5829 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5830 0x00, 0x00, 0x00, 0x82, 0x00),
5831 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
5832 },
5833 { .freq = 5825,
5834 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
5835 0x10, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5836 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5837 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5838 0x00, 0x00, 0x00, 0x82, 0x00),
5839 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
5840 },
5841 { .freq = 5830,
5842 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
5843 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5844 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5845 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5846 0x00, 0x00, 0x00, 0x72, 0x00),
5847 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
5848 },
5849 { .freq = 5840,
5850 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
5851 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5852 0x20, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5853 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
5854 0x00, 0x00, 0x00, 0x72, 0x00),
5855 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
5856 },
5857 { .freq = 5850,
5858 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
5859 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5860 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5861 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
5862 0x00, 0x00, 0x00, 0x72, 0x00),
5863 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
5864 },
5865 { .freq = 5860,
5866 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
5867 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5868 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5869 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
5870 0x00, 0x00, 0x00, 0x72, 0x00),
5871 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
5872 },
5873 { .freq = 5870,
5874 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
5875 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5876 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5877 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
5878 0x00, 0x00, 0x00, 0x71, 0x00),
5879 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
5880 },
5881 { .freq = 5880,
5882 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
5883 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5884 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5885 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
5886 0x00, 0x00, 0x00, 0x71, 0x00),
5887 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
5888 },
5889 { .freq = 5890,
5890 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
5891 0x0c, 0x01, 0x06, 0x06, 0x06, 0x88, 0x04, 0x00,
5892 0x20, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5893 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
5894 0x00, 0x00, 0x00, 0x71, 0x00),
5895 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
5896 },
5897 { .freq = 5900,
5898 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
5899 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
5900 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5901 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
5902 0x00, 0x00, 0x00, 0x71, 0x00),
5903 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
5904 },
5905 { .freq = 5910,
5906 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
5907 0x0c, 0x01, 0x06, 0x06, 0x06, 0x87, 0x03, 0x00,
5908 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
5909 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
5910 0x00, 0x00, 0x00, 0x71, 0x00),
5911 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
5912 },
5913 { .freq = 2412,
5914 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
5915 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
5916 0x00, 0x00, 0x1f, 0x00, 0x03, 0x00, 0x70, 0x00,
5917 0x0f, 0x00, 0x0b, 0x00, 0x1f, 0x00, 0x03, 0x00,
5918 0x70, 0x00, 0x0f, 0x00, 0x0b),
5919 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
5920 },
5921 { .freq = 2417,
5922 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
5923 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5924 0x00, 0x00, 0x1f, 0x00, 0x03, 0x00, 0x70, 0x00,
5925 0x0f, 0x00, 0x0a, 0x00, 0x1f, 0x00, 0x03, 0x00,
5926 0x70, 0x00, 0x0f, 0x00, 0x0a),
5927 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
5928 },
5929 { .freq = 2422,
5930 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
5931 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5932 0x00, 0x00, 0x0e, 0x00, 0x03, 0x00, 0x70, 0x00,
5933 0x0f, 0x00, 0x0a, 0x00, 0x0e, 0x00, 0x03, 0x00,
5934 0x70, 0x00, 0x0f, 0x00, 0x0a),
5935 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
5936 },
5937 { .freq = 2427,
5938 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
5939 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5940 0x00, 0x00, 0x0d, 0x00, 0x03, 0x00, 0x70, 0x00,
5941 0x0e, 0x00, 0x0a, 0x00, 0x0d, 0x00, 0x03, 0x00,
5942 0x70, 0x00, 0x0e, 0x00, 0x0a),
5943 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
5944 },
5945 { .freq = 2432,
5946 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
5947 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5948 0x00, 0x00, 0x0c, 0x00, 0x03, 0x00, 0x70, 0x00,
5949 0x0e, 0x00, 0x0a, 0x00, 0x0c, 0x00, 0x03, 0x00,
5950 0x70, 0x00, 0x0e, 0x00, 0x0a),
5951 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
5952 },
5953 { .freq = 2437,
5954 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
5955 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5956 0x00, 0x00, 0x0b, 0x00, 0x03, 0x00, 0x70, 0x00,
5957 0x0e, 0x00, 0x0a, 0x00, 0x0b, 0x00, 0x03, 0x00,
5958 0x70, 0x00, 0x0e, 0x00, 0x0a),
5959 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
5960 },
5961 { .freq = 2442,
5962 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
5963 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
5964 0x00, 0x00, 0x09, 0x00, 0x03, 0x00, 0x70, 0x00,
5965 0x0e, 0x00, 0x0a, 0x00, 0x09, 0x00, 0x03, 0x00,
5966 0x70, 0x00, 0x0e, 0x00, 0x0a),
5967 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
5968 },
5969 { .freq = 2447,
5970 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
5971 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5972 0x00, 0x00, 0x08, 0x00, 0x02, 0x00, 0x70, 0x00,
5973 0x0e, 0x00, 0x09, 0x00, 0x08, 0x00, 0x02, 0x00,
5974 0x70, 0x00, 0x0e, 0x00, 0x09),
5975 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
5976 },
5977 { .freq = 2452,
5978 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
5979 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5980 0x00, 0x00, 0x07, 0x00, 0x02, 0x00, 0x70, 0x00,
5981 0x0e, 0x00, 0x09, 0x00, 0x07, 0x00, 0x02, 0x00,
5982 0x70, 0x00, 0x0e, 0x00, 0x09),
5983 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
5984 },
5985 { .freq = 2457,
5986 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
5987 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5988 0x00, 0x00, 0x06, 0x00, 0x02, 0x00, 0x70, 0x00,
5989 0x0d, 0x00, 0x09, 0x00, 0x06, 0x00, 0x02, 0x00,
5990 0x70, 0x00, 0x0d, 0x00, 0x09),
5991 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
5992 },
5993 { .freq = 2462,
5994 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
5995 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
5996 0x00, 0x00, 0x05, 0x00, 0x02, 0x00, 0x70, 0x00,
5997 0x0d, 0x00, 0x09, 0x00, 0x05, 0x00, 0x02, 0x00,
5998 0x70, 0x00, 0x0d, 0x00, 0x09),
5999 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
6000 },
6001 { .freq = 2467,
6002 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
6003 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6004 0x00, 0x00, 0x04, 0x00, 0x02, 0x00, 0x70, 0x00,
6005 0x0d, 0x00, 0x08, 0x00, 0x04, 0x00, 0x02, 0x00,
6006 0x70, 0x00, 0x0d, 0x00, 0x08),
6007 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
6008 },
6009 { .freq = 2472,
6010 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
6011 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
6012 0x00, 0x00, 0x03, 0x00, 0x02, 0x00, 0x70, 0x00,
6013 0x0d, 0x00, 0x08, 0x00, 0x03, 0x00, 0x02, 0x00,
6014 0x70, 0x00, 0x0d, 0x00, 0x08),
6015 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
6016 },
6017 { .freq = 2484,
6018 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
6019 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
6020 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
6021 0x0d, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00,
6022 0x70, 0x00, 0x0d, 0x00, 0x08),
6023 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
6024 },
6025};
6026
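/*
 * Channel table for revision 6 of the radio (entry layout shared with
 * revision 3). Each entry maps a channel center frequency in MHz to the
 * radio register values (RADIOREGS3) and the N-PHY register values
 * (PHYREGS) used when switching to that channel.
 */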
6027static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev6[] = {
6028 { .freq = 4920,
6029 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
6030 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
6031 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6032 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6033 0x00, 0x0f, 0x00, 0x6f, 0x00),
6034 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
6035 },
6036 { .freq = 4930,
6037 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
6038 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
6039 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6040 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6041 0x00, 0x0f, 0x00, 0x6f, 0x00),
6042 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
6043 },
6044 { .freq = 4940,
6045 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
6046 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
6047 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6048 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6049 0x00, 0x0f, 0x00, 0x6f, 0x00),
6050 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
6051 },
6052 { .freq = 4950,
6053 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
6054 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
6055 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6056 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6057 0x00, 0x0f, 0x00, 0x6f, 0x00),
6058 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
6059 },
6060 { .freq = 4960,
6061 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
6062 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6063 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6064 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6065 0x00, 0x0f, 0x00, 0x6f, 0x00),
6066 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
6067 },
6068 { .freq = 4970,
6069 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
6070 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6071 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6072 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6073 0x00, 0x0f, 0x00, 0x6f, 0x00),
6074 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
6075 },
6076 { .freq = 4980,
6077 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
6078 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6079 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6080 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6081 0x00, 0x0f, 0x00, 0x6f, 0x00),
6082 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
6083 },
6084 { .freq = 4990,
6085 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
6086 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6087 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6088 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6089 0x00, 0x0f, 0x00, 0x6f, 0x00),
6090 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
6091 },
6092 { .freq = 5000,
6093 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
6094 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6095 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6096 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6097 0x00, 0x0f, 0x00, 0x6f, 0x00),
6098 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
6099 },
6100 { .freq = 5010,
6101 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
6102 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6103 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6104 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6105 0x00, 0x0f, 0x00, 0x6f, 0x00),
6106 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
6107 },
6108 { .freq = 5020,
6109 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
6110 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6111 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6112 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6113 0x00, 0x0f, 0x00, 0x6f, 0x00),
6114 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
6115 },
6116 { .freq = 5030,
6117 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
6118 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6119 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6120 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6121 0x00, 0x0f, 0x00, 0x6f, 0x00),
6122 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
6123 },
6124 { .freq = 5040,
6125 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
6126 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6127 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6128 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6129 0x00, 0x0f, 0x00, 0x6f, 0x00),
6130 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
6131 },
6132 { .freq = 5050,
6133 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
6134 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6135 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6136 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6137 0x00, 0x0f, 0x00, 0x6f, 0x00),
6138 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
6139 },
6140 { .freq = 5060,
6141 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
6142 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6143 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6144 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
6145 0x00, 0x0f, 0x00, 0x6f, 0x00),
6146 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
6147 },
6148 { .freq = 5070,
6149 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
6150 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6151 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6152 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
6153 0x00, 0x0f, 0x00, 0x6f, 0x00),
6154 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
6155 },
6156 { .freq = 5080,
6157 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
6158 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6159 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6160 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
6161 0x00, 0x0f, 0x00, 0x6f, 0x00),
6162 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
6163 },
6164 { .freq = 5090,
6165 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
6166 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
6167 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
6168 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
6169 0x00, 0x0f, 0x00, 0x6f, 0x00),
6170 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
6171 },
6172 { .freq = 5100,
6173 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
6174 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6175 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
6176 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
6177 0x00, 0x0f, 0x00, 0x6f, 0x00),
6178 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
6179 },
6180 { .freq = 5110,
6181 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
6182 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6183 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
6184 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
6185 0x00, 0x0f, 0x00, 0x6f, 0x00),
6186 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
6187 },
6188 { .freq = 5120,
6189 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
6190 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6191 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
6192 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
6193 0x00, 0x0f, 0x00, 0x6f, 0x00),
6194 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
6195 },
6196 { .freq = 5130,
6197 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
6198 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6199 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
6200 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
6201 0x00, 0x0f, 0x00, 0x6f, 0x00),
6202 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
6203 },
6204 { .freq = 5140,
6205 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
6206 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6207 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
6208 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
6209 0x00, 0x0f, 0x00, 0x6f, 0x00),
6210 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
6211 },
6212 { .freq = 5160,
6213 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
6214 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6215 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
6216 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
6217 0x00, 0x0e, 0x00, 0x6f, 0x00),
6218 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
6219 },
6220 { .freq = 5170,
6221 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
6222 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6223 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
6224 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
6225 0x00, 0x0e, 0x00, 0x6f, 0x00),
6226 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
6227 },
6228 { .freq = 5180,
6229 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
6230 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6231 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
6232 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
6233 0x00, 0x0e, 0x00, 0x6f, 0x00),
6234 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
6235 },
6236 { .freq = 5190,
6237 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
6238 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6239 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
6240 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
6241 0x00, 0x0d, 0x00, 0x6f, 0x00),
6242 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
6243 },
6244 { .freq = 5200,
6245 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
6246 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6247 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
6248 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
6249 0x00, 0x0d, 0x00, 0x6f, 0x00),
6250 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
6251 },
6252 { .freq = 5210,
6253 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
6254 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
6255 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
6256 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
6257 0x00, 0x0d, 0x00, 0x6f, 0x00),
6258 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
6259 },
6260 { .freq = 5220,
6261 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
6262 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
6263 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
6264 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
6265 0x00, 0x0d, 0x00, 0x6f, 0x00),
6266 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
6267 },
6268 { .freq = 5230,
6269 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
6270 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
6271 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
6272 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
6273 0x00, 0x0d, 0x00, 0x6f, 0x00),
6274 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
6275 },
6276 { .freq = 5240,
6277 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
6278 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
6279 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
6280 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
6281 0x00, 0x0d, 0x00, 0x6f, 0x00),
6282 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
6283 },
6284 { .freq = 5250,
6285 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
6286 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
6287 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
6288 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
6289 0x00, 0x0d, 0x00, 0x6f, 0x00),
6290 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
6291 },
6292 { .freq = 5260,
6293 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
6294 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
6295 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
6296 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
6297 0x00, 0x0d, 0x00, 0x6f, 0x00),
6298 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
6299 },
6300 { .freq = 5270,
6301 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
6302 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
6303 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
6304 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
6305 0x00, 0x0c, 0x00, 0x6f, 0x00),
6306 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
6307 },
6308 { .freq = 5280,
6309 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
6310 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
6311 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
6312 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
6313 0x00, 0x0c, 0x00, 0x6f, 0x00),
6314 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
6315 },
6316 { .freq = 5290,
6317 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
6318 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
6319 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
6320 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
6321 0x00, 0x0c, 0x00, 0x6f, 0x00),
6322 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
6323 },
6324 { .freq = 5300,
6325 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
6326 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
6327 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
6328 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
6329 0x00, 0x0c, 0x00, 0x6f, 0x00),
6330 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
6331 },
6332 { .freq = 5310,
6333 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
6334 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
6335 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
6336 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
6337 0x00, 0x0c, 0x00, 0x6f, 0x00),
6338 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
6339 },
6340 { .freq = 5320,
6341 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
6342 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
6343 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
6344 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
6345 0x00, 0x0c, 0x00, 0x6f, 0x00),
6346 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
6347 },
6348 { .freq = 5330,
6349 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
6350 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
6351 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
6352 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
6353 0x00, 0x0b, 0x00, 0x6f, 0x00),
6354 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
6355 },
6356 { .freq = 5340,
6357 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
6358 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
6359 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
6360 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
6361 0x00, 0x0b, 0x00, 0x6f, 0x00),
6362 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
6363 },
6364 { .freq = 5350,
6365 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
6366 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
6367 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
6368 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
6369 0x00, 0x0b, 0x00, 0x6f, 0x00),
6370 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
6371 },
6372 { .freq = 5360,
6373 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
6374 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
6375 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
6376 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
6377 0x00, 0x0a, 0x00, 0x6f, 0x00),
6378 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
6379 },
6380 { .freq = 5370,
6381 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
6382 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
6383 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
6384 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
6385 0x00, 0x0a, 0x00, 0x6f, 0x00),
6386 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
6387 },
6388 { .freq = 5380,
6389 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
6390 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
6391 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
6392 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
6393 0x00, 0x0a, 0x00, 0x6f, 0x00),
6394 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
6395 },
6396 { .freq = 5390,
6397 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
6398 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
6399 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
6400 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
6401 0x00, 0x0a, 0x00, 0x6f, 0x00),
6402 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
6403 },
6404 { .freq = 5400,
6405 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
6406 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
6407 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
6408 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
6409 0x00, 0x0a, 0x00, 0x6f, 0x00),
6410 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
6411 },
6412 { .freq = 5410,
6413 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
6414 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
6415 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
6416 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
6417 0x00, 0x0a, 0x00, 0x6f, 0x00),
6418 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
6419 },
6420 { .freq = 5420,
6421 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
6422 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
6423 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
6424 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
6425 0x00, 0x0a, 0x00, 0x6f, 0x00),
6426 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
6427 },
6428 { .freq = 5430,
6429 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
6430 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
6431 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
6432 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
6433 0x00, 0x0a, 0x00, 0x6f, 0x00),
6434 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
6435 },
6436 { .freq = 5440,
6437 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
6438 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
6439 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
6440 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
6441 0x00, 0x09, 0x00, 0x6f, 0x00),
6442 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
6443 },
6444 { .freq = 5450,
6445 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
6446 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
6447 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
6448 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
6449 0x00, 0x09, 0x00, 0x6f, 0x00),
6450 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
6451 },
6452 { .freq = 5460,
6453 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
6454 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
6455 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
6456 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
6457 0x00, 0x09, 0x00, 0x6f, 0x00),
6458 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
6459 },
6460 { .freq = 5470,
6461 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
6462 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
6463 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
6464 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
6465 0x00, 0x09, 0x00, 0x6f, 0x00),
6466 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
6467 },
6468 { .freq = 5480,
6469 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
6470 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
6471 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6472 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6473 0x00, 0x09, 0x00, 0x6f, 0x00),
6474 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
6475 },
6476 { .freq = 5490,
6477 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
6478 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
6479 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6480 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6481 0x00, 0x09, 0x00, 0x6f, 0x00),
6482 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
6483 },
6484 { .freq = 5500,
6485 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
6486 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
6487 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6488 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6489 0x00, 0x09, 0x00, 0x6f, 0x00),
6490 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
6491 },
6492 { .freq = 5510,
6493 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
6494 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
6495 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6496 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6497 0x00, 0x09, 0x00, 0x6f, 0x00),
6498 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
6499 },
6500 { .freq = 5520,
6501 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
6502 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
6503 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6504 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6505 0x00, 0x09, 0x00, 0x6f, 0x00),
6506 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
6507 },
6508 { .freq = 5530,
6509 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
6510 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
6511 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6512 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6513 0x00, 0x09, 0x00, 0x6f, 0x00),
6514 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
6515 },
6516 { .freq = 5540,
6517 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
6518 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
6519 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6520 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6521 0x00, 0x09, 0x00, 0x6f, 0x00),
6522 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
6523 },
6524 { .freq = 5550,
6525 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
6526 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
6527 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6528 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6529 0x00, 0x09, 0x00, 0x6f, 0x00),
6530 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
6531 },
6532 { .freq = 5560,
6533 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
6534 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
6535 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6536 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
6537 0x00, 0x09, 0x00, 0x6f, 0x00),
6538 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
6539 },
6540 { .freq = 5570,
6541 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
6542 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
6543 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
6544 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
6545 0x00, 0x09, 0x00, 0x6f, 0x00),
6546 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
6547 },
6548 { .freq = 5580,
6549 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
6550 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
6551 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
6552 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
6553 0x00, 0x08, 0x00, 0x6f, 0x00),
6554 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
6555 },
6556 { .freq = 5590,
6557 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
6558 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
6559 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
6560 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
6561 0x00, 0x08, 0x00, 0x6f, 0x00),
6562 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
6563 },
6564 { .freq = 5600,
6565 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
6566 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
6567 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
6568 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
6569 0x00, 0x08, 0x00, 0x6f, 0x00),
6570 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
6571 },
6572 { .freq = 5610,
6573 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
6574 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
6575 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
6576 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
6577 0x00, 0x08, 0x00, 0x6f, 0x00),
6578 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
6579 },
6580 { .freq = 5620,
6581 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
6582 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
6583 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
6584 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
6585 0x00, 0x07, 0x00, 0x6f, 0x00),
6586 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
6587 },
6588 { .freq = 5630,
6589 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
6590 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
6591 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
6592 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
6593 0x00, 0x07, 0x00, 0x6f, 0x00),
6594 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
6595 },
6596 { .freq = 5640,
6597 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
6598 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
6599 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
6600 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
6601 0x00, 0x07, 0x00, 0x6f, 0x00),
6602 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
6603 },
6604 { .freq = 5650,
6605 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
6606 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
6607 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
6608 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
6609 0x00, 0x07, 0x00, 0x6f, 0x00),
6610 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
6611 },
6612 { .freq = 5660,
6613 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
6614 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
6615 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6616 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
6617 0x00, 0x06, 0x00, 0x6f, 0x00),
6618 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
6619 },
6620 { .freq = 5670,
6621 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
6622 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
6623 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6624 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6625 0x00, 0x06, 0x00, 0x6f, 0x00),
6626 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
6627 },
6628 { .freq = 5680,
6629 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
6630 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
6631 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6632 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6633 0x00, 0x06, 0x00, 0x6f, 0x00),
6634 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
6635 },
6636 { .freq = 5690,
6637 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
6638 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
6639 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6640 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6641 0x00, 0x06, 0x00, 0x6f, 0x00),
6642 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
6643 },
6644 { .freq = 5700,
6645 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
6646 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
6647 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6648 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6649 0x00, 0x06, 0x00, 0x6e, 0x00),
6650 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
6651 },
6652 { .freq = 5710,
6653 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
6654 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
6655 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6656 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6657 0x00, 0x06, 0x00, 0x6e, 0x00),
6658 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
6659 },
6660 { .freq = 5720,
6661 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
6662 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
6663 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6664 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6665 0x00, 0x06, 0x00, 0x6e, 0x00),
6666 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
6667 },
6668 { .freq = 5725,
6669 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
6670 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
6671 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6672 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6673 0x00, 0x06, 0x00, 0x6e, 0x00),
6674 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
6675 },
6676 { .freq = 5730,
6677 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
6678 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
6679 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6680 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6681 0x00, 0x06, 0x00, 0x6e, 0x00),
6682 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
6683 },
6684 { .freq = 5735,
6685 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
6686 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
6687 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6688 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6689 0x00, 0x06, 0x00, 0x6d, 0x00),
6690 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
6691 },
6692 { .freq = 5740,
6693 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
6694 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
6695 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6696 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6697 0x00, 0x06, 0x00, 0x6d, 0x00),
6698 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
6699 },
6700 { .freq = 5745,
6701 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
6702 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
6703 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
6704 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
6705 0x00, 0x06, 0x00, 0x6d, 0x00),
6706 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
6707 },
6708 { .freq = 5750,
6709 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
6710 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
6711 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6712 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
6713 0x00, 0x05, 0x00, 0x6d, 0x00),
6714 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
6715 },
6716 { .freq = 5755,
6717 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
6718 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
6719 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6720 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
6721 0x00, 0x05, 0x00, 0x6c, 0x00),
6722 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
6723 },
6724 { .freq = 5760,
6725 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
6726 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
6727 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6728 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
6729 0x00, 0x05, 0x00, 0x6c, 0x00),
6730 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
6731 },
6732 { .freq = 5765,
6733 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
6734 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
6735 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6736 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
6737 0x00, 0x05, 0x00, 0x6c, 0x00),
6738 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
6739 },
6740 { .freq = 5770,
6741 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
6742 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
6743 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6744 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
6745 0x00, 0x05, 0x00, 0x6b, 0x00),
6746 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
6747 },
6748 { .freq = 5775,
6749 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
6750 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
6751 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6752 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
6753 0x00, 0x05, 0x00, 0x6b, 0x00),
6754 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
6755 },
6756 { .freq = 5780,
6757 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
6758 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
6759 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6760 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
6761 0x00, 0x05, 0x00, 0x6b, 0x00),
6762 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
6763 },
6764 { .freq = 5785,
6765 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
6766 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6767 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6768 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
6769 0x00, 0x05, 0x00, 0x6b, 0x00),
6770 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
6771 },
6772 { .freq = 5790,
6773 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
6774 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6775 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6776 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
6777 0x00, 0x05, 0x00, 0x6b, 0x00),
6778 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
6779 },
6780 { .freq = 5795,
6781 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
6782 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6783 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6784 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6785 0x00, 0x05, 0x00, 0x6b, 0x00),
6786 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
6787 },
6788 { .freq = 5800,
6789 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
6790 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6791 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6792 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6793 0x00, 0x05, 0x00, 0x6b, 0x00),
6794 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
6795 },
6796 { .freq = 5805,
6797 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
6798 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6799 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6800 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6801 0x00, 0x05, 0x00, 0x6a, 0x00),
6802 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
6803 },
6804 { .freq = 5810,
6805 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
6806 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6807 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6808 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6809 0x00, 0x05, 0x00, 0x6a, 0x00),
6810 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
6811 },
6812 { .freq = 5815,
6813 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
6814 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6815 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6816 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6817 0x00, 0x05, 0x00, 0x6a, 0x00),
6818 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
6819 },
6820 { .freq = 5820,
6821 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
6822 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6823 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6824 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6825 0x00, 0x05, 0x00, 0x6a, 0x00),
6826 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
6827 },
6828 { .freq = 5825,
6829 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
6830 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6831 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6832 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6833 0x00, 0x05, 0x00, 0x69, 0x00),
6834 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
6835 },
6836 { .freq = 5830,
6837 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
6838 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6839 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
6840 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6841 0x00, 0x05, 0x00, 0x69, 0x00),
6842 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
6843 },
6844 { .freq = 5840,
6845 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
6846 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
6847 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
6848 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6849 0x00, 0x04, 0x00, 0x69, 0x00),
6850 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
6851 },
6852 { .freq = 5850,
6853 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
6854 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
6855 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
6856 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6857 0x00, 0x04, 0x00, 0x69, 0x00),
6858 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
6859 },
6860 { .freq = 5860,
6861 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
6862 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
6863 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
6864 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6865 0x00, 0x04, 0x00, 0x69, 0x00),
6866 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
6867 },
6868 { .freq = 5870,
6869 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
6870 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
6871 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
6872 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6873 0x00, 0x04, 0x00, 0x68, 0x00),
6874 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
6875 },
6876 { .freq = 5880,
6877 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
6878 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
6879 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
6880 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6881 0x00, 0x04, 0x00, 0x68, 0x00),
6882 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
6883 },
6884 { .freq = 5890,
6885 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
6886 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
6887 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
6888 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6889 0x00, 0x04, 0x00, 0x68, 0x00),
6890 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
6891 },
6892 { .freq = 5900,
6893 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
6894 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
6895 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
6896 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6897 0x00, 0x04, 0x00, 0x68, 0x00),
6898 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
6899 },
6900 { .freq = 5910,
6901 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
6902 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
6903 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
6904 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
6905 0x00, 0x04, 0x00, 0x68, 0x00),
6906 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
6907 },
6908 { .freq = 2412,
6909 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
6910 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
6911 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
6912 0x0b, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
6913 0x70, 0x00, 0x0b, 0x00, 0x0a),
6914 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
6915 },
6916 { .freq = 2417,
6917 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
6918 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
6919 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
6920 0x0b, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
6921 0x70, 0x00, 0x0b, 0x00, 0x0a),
6922 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
6923 },
6924 { .freq = 2422,
6925 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
6926 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
6927 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
6928 0x0b, 0x00, 0x0a, 0x00, 0x67, 0x00, 0x03, 0x00,
6929 0x70, 0x00, 0x0b, 0x00, 0x0a),
6930 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
6931 },
6932 { .freq = 2427,
6933 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
6934 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
6935 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
6936 0x0a, 0x00, 0x0a, 0x00, 0x57, 0x00, 0x03, 0x00,
6937 0x70, 0x00, 0x0a, 0x00, 0x0a),
6938 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
6939 },
6940 { .freq = 2432,
6941 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
6942 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
6943 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
6944 0x0a, 0x00, 0x0a, 0x00, 0x56, 0x00, 0x03, 0x00,
6945 0x70, 0x00, 0x0a, 0x00, 0x0a),
6946 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
6947 },
6948 { .freq = 2437,
6949 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
6950 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
6951 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
6952 0x0a, 0x00, 0x0a, 0x00, 0x46, 0x00, 0x03, 0x00,
6953 0x70, 0x00, 0x0a, 0x00, 0x0a),
6954 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
6955 },
6956 { .freq = 2442,
6957 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
6958 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
6959 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
6960 0x0a, 0x00, 0x0a, 0x00, 0x45, 0x00, 0x02, 0x00,
6961 0x70, 0x00, 0x0a, 0x00, 0x0a),
6962 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
6963 },
6964 { .freq = 2447,
6965 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
6966 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6967 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
6968 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
6969 0x70, 0x00, 0x0a, 0x00, 0x09),
6970 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
6971 },
6972 { .freq = 2452,
6973 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
6974 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6975 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
6976 0x0a, 0x00, 0x09, 0x00, 0x23, 0x00, 0x02, 0x00,
6977 0x70, 0x00, 0x0a, 0x00, 0x09),
6978 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
6979 },
6980 { .freq = 2457,
6981 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
6982 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6983 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
6984 0x0a, 0x00, 0x09, 0x00, 0x12, 0x00, 0x02, 0x00,
6985 0x70, 0x00, 0x0a, 0x00, 0x09),
6986 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
6987 },
6988 { .freq = 2462,
6989 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
6990 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6991 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
6992 0x09, 0x00, 0x09, 0x00, 0x02, 0x00, 0x02, 0x00,
6993 0x70, 0x00, 0x09, 0x00, 0x09),
6994 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
6995 },
6996 { .freq = 2467,
6997 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
6998 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
6999 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
7000 0x09, 0x00, 0x09, 0x00, 0x01, 0x00, 0x02, 0x00,
7001 0x70, 0x00, 0x09, 0x00, 0x09),
7002 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
7003 },
7004 { .freq = 2472,
7005 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
7006 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
7007 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
7008 0x09, 0x00, 0x09, 0x00, 0x01, 0x00, 0x02, 0x00,
7009 0x70, 0x00, 0x09, 0x00, 0x09),
7010 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
7011 },
7012 { .freq = 2484,
7013 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
7014 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
7015 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
7016 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
7017 0x70, 0x00, 0x09, 0x00, 0x09),
7018 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
7019 },
7020};
7021
7022static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev7_9[] = {
7023 { .freq = 4920,
7024 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
7025 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
7026 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0f,
7027 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
7028 0x00, 0x0f, 0x00, 0x6f, 0x00),
7029 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
7030 },
7031 { .freq = 4930,
7032 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
7033 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
7034 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
7035 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
7036 0x00, 0x0e, 0x00, 0x6f, 0x00),
7037 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
7038 },
7039 { .freq = 4940,
7040 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
7041 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
7042 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
7043 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
7044 0x00, 0x0e, 0x00, 0x6f, 0x00),
7045 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
7046 },
7047 { .freq = 4950,
7048 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
7049 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
7050 0xff, 0xff, 0x00, 0x0b, 0x00, 0x70, 0x00, 0x0e,
7051 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0b, 0x00, 0x70,
7052 0x00, 0x0e, 0x00, 0x6f, 0x00),
7053 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
7054 },
7055 { .freq = 4960,
7056 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
7057 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7058 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0e,
7059 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
7060 0x00, 0x0e, 0x00, 0x6f, 0x00),
7061 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
7062 },
7063 { .freq = 4970,
7064 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
7065 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7066 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
7067 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
7068 0x00, 0x0d, 0x00, 0x6f, 0x00),
7069 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
7070 },
7071 { .freq = 4980,
7072 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
7073 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7074 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
7075 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
7076 0x00, 0x0d, 0x00, 0x6f, 0x00),
7077 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
7078 },
7079 { .freq = 4990,
7080 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
7081 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7082 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
7083 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
7084 0x00, 0x0d, 0x00, 0x6f, 0x00),
7085 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
7086 },
7087 { .freq = 5000,
7088 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
7089 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7090 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
7091 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
7092 0x00, 0x0d, 0x00, 0x6f, 0x00),
7093 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
7094 },
7095 { .freq = 5010,
7096 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
7097 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7098 0xff, 0xff, 0x00, 0x0a, 0x00, 0x70, 0x00, 0x0d,
7099 0x00, 0x9f, 0x00, 0xff, 0x00, 0x0a, 0x00, 0x70,
7100 0x00, 0x0d, 0x00, 0x6f, 0x00),
7101 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
7102 },
7103 { .freq = 5020,
7104 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
7105 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7106 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0d,
7107 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
7108 0x00, 0x0d, 0x00, 0x6f, 0x00),
7109 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
7110 },
7111 { .freq = 5030,
7112 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
7113 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7114 0xff, 0xff, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
7115 0x00, 0x9f, 0x00, 0xff, 0x00, 0x09, 0x00, 0x70,
7116 0x00, 0x0c, 0x00, 0x6f, 0x00),
7117 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
7118 },
7119 { .freq = 5040,
7120 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
7121 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7122 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
7123 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
7124 0x00, 0x0c, 0x00, 0x6f, 0x00),
7125 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
7126 },
7127 { .freq = 5050,
7128 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
7129 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7130 0xff, 0xfe, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
7131 0x00, 0x9f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x70,
7132 0x00, 0x0c, 0x00, 0x6f, 0x00),
7133 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
7134 },
7135 { .freq = 5060,
7136 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
7137 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7138 0xff, 0xfd, 0x00, 0x09, 0x00, 0x70, 0x00, 0x0c,
7139 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x70,
7140 0x00, 0x0c, 0x00, 0x6f, 0x00),
7141 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
7142 },
7143 { .freq = 5070,
7144 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
7145 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7146 0xff, 0xfd, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
7147 0x00, 0x9f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x70,
7148 0x00, 0x0b, 0x00, 0x6f, 0x00),
7149 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
7150 },
7151 { .freq = 5080,
7152 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
7153 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7154 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
7155 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
7156 0x00, 0x0b, 0x00, 0x6f, 0x00),
7157 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
7158 },
7159 { .freq = 5090,
7160 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
7161 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
7162 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
7163 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
7164 0x00, 0x0b, 0x00, 0x6f, 0x00),
7165 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
7166 },
7167 { .freq = 5100,
7168 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
7169 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7170 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
7171 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
7172 0x00, 0x0b, 0x00, 0x6f, 0x00),
7173 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
7174 },
7175 { .freq = 5110,
7176 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
7177 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7178 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
7179 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
7180 0x00, 0x0b, 0x00, 0x6f, 0x00),
7181 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
7182 },
7183 { .freq = 5120,
7184 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
7185 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7186 0xff, 0xfc, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0b,
7187 0x00, 0x9f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x70,
7188 0x00, 0x0b, 0x00, 0x6f, 0x00),
7189 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
7190 },
7191 { .freq = 5130,
7192 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
7193 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7194 0xff, 0xfb, 0x00, 0x08, 0x00, 0x70, 0x00, 0x0a,
7195 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x70,
7196 0x00, 0x0a, 0x00, 0x6f, 0x00),
7197 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
7198 },
7199 { .freq = 5140,
7200 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
7201 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7202 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x0a,
7203 0x00, 0x9f, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
7204 0x00, 0x0a, 0x00, 0x6f, 0x00),
7205 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
7206 },
7207 { .freq = 5160,
7208 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
7209 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7210 0xff, 0xfb, 0x00, 0x07, 0x00, 0x70, 0x00, 0x09,
7211 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x07, 0x00, 0x70,
7212 0x00, 0x09, 0x00, 0x6e, 0x00),
7213 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
7214 },
7215 { .freq = 5170,
7216 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
7217 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7218 0xff, 0xfb, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
7219 0x00, 0x9e, 0x00, 0xfb, 0x00, 0x06, 0x00, 0x70,
7220 0x00, 0x09, 0x00, 0x6e, 0x00),
7221 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
7222 },
7223 { .freq = 5180,
7224 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
7225 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7226 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
7227 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
7228 0x00, 0x09, 0x00, 0x6e, 0x00),
7229 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
7230 },
7231 { .freq = 5190,
7232 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
7233 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7234 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
7235 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
7236 0x00, 0x09, 0x00, 0x6e, 0x00),
7237 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
7238 },
7239 { .freq = 5200,
7240 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
7241 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7242 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
7243 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
7244 0x00, 0x09, 0x00, 0x6e, 0x00),
7245 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
7246 },
7247 { .freq = 5210,
7248 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
7249 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
7250 0xff, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
7251 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
7252 0x00, 0x09, 0x00, 0x6e, 0x00),
7253 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
7254 },
7255 { .freq = 5220,
7256 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
7257 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
7258 0xfe, 0xfa, 0x00, 0x06, 0x00, 0x70, 0x00, 0x09,
7259 0x00, 0x9e, 0x00, 0xfa, 0x00, 0x06, 0x00, 0x70,
7260 0x00, 0x09, 0x00, 0x6e, 0x00),
7261 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
7262 },
7263 { .freq = 5230,
7264 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
7265 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
7266 0xee, 0xea, 0x00, 0x06, 0x00, 0x70, 0x00, 0x08,
7267 0x00, 0x9e, 0x00, 0xea, 0x00, 0x06, 0x00, 0x70,
7268 0x00, 0x08, 0x00, 0x6e, 0x00),
7269 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
7270 },
7271 { .freq = 5240,
7272 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
7273 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
7274 0xee, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
7275 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
7276 0x00, 0x08, 0x00, 0x6d, 0x00),
7277 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
7278 },
7279 { .freq = 5250,
7280 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
7281 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
7282 0xed, 0xe9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
7283 0x00, 0x9d, 0x00, 0xe9, 0x00, 0x05, 0x00, 0x70,
7284 0x00, 0x08, 0x00, 0x6d, 0x00),
7285 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
7286 },
7287 { .freq = 5260,
7288 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
7289 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
7290 0xed, 0xd9, 0x00, 0x05, 0x00, 0x70, 0x00, 0x08,
7291 0x00, 0x9d, 0x00, 0xd9, 0x00, 0x05, 0x00, 0x70,
7292 0x00, 0x08, 0x00, 0x6d, 0x00),
7293 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
7294 },
7295 { .freq = 5270,
7296 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
7297 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
7298 0xed, 0xd8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
7299 0x00, 0x9c, 0x00, 0xd8, 0x00, 0x04, 0x00, 0x70,
7300 0x00, 0x07, 0x00, 0x6c, 0x00),
7301 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
7302 },
7303 { .freq = 5280,
7304 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
7305 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
7306 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
7307 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
7308 0x00, 0x07, 0x00, 0x6c, 0x00),
7309 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
7310 },
7311 { .freq = 5290,
7312 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
7313 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
7314 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
7315 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
7316 0x00, 0x07, 0x00, 0x6c, 0x00),
7317 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
7318 },
7319 { .freq = 5300,
7320 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
7321 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
7322 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
7323 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
7324 0x00, 0x07, 0x00, 0x6c, 0x00),
7325 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
7326 },
7327 { .freq = 5310,
7328 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
7329 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
7330 0xdc, 0xc8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
7331 0x00, 0x9c, 0x00, 0xc8, 0x00, 0x04, 0x00, 0x70,
7332 0x00, 0x07, 0x00, 0x6c, 0x00),
7333 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
7334 },
7335 { .freq = 5320,
7336 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
7337 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
7338 0xdb, 0xb8, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
7339 0x00, 0x9c, 0x00, 0xb8, 0x00, 0x04, 0x00, 0x70,
7340 0x00, 0x07, 0x00, 0x6c, 0x00),
7341 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
7342 },
7343 { .freq = 5330,
7344 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
7345 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
7346 0xcb, 0xb7, 0x00, 0x04, 0x00, 0x70, 0x00, 0x07,
7347 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x04, 0x00, 0x70,
7348 0x00, 0x07, 0x00, 0x6b, 0x00),
7349 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
7350 },
7351 { .freq = 5340,
7352 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
7353 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
7354 0xca, 0xb7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x07,
7355 0x00, 0x9b, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x70,
7356 0x00, 0x07, 0x00, 0x6b, 0x00),
7357 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
7358 },
7359 { .freq = 5350,
7360 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
7361 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
7362 0xca, 0xa7, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
7363 0x00, 0x9b, 0x00, 0xa7, 0x00, 0x03, 0x00, 0x70,
7364 0x00, 0x06, 0x00, 0x6b, 0x00),
7365 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
7366 },
7367 { .freq = 5360,
7368 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
7369 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
7370 0xc9, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
7371 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
7372 0x00, 0x06, 0x00, 0x6b, 0x00),
7373 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
7374 },
7375 { .freq = 5370,
7376 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
7377 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
7378 0xc9, 0xa6, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
7379 0x00, 0x9b, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x70,
7380 0x00, 0x06, 0x00, 0x7b, 0x00),
7381 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
7382 },
7383 { .freq = 5380,
7384 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
7385 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
7386 0xb8, 0x96, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
7387 0x00, 0x9a, 0x00, 0x96, 0x00, 0x03, 0x00, 0x70,
7388 0x00, 0x06, 0x00, 0x7a, 0x00),
7389 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
7390 },
7391 { .freq = 5390,
7392 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
7393 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
7394 0xb8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
7395 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
7396 0x00, 0x06, 0x00, 0x7a, 0x00),
7397 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
7398 },
7399 { .freq = 5400,
7400 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
7401 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
7402 0xb8, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x06,
7403 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
7404 0x00, 0x06, 0x00, 0x7a, 0x00),
7405 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
7406 },
7407 { .freq = 5410,
7408 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
7409 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
7410 0xb7, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
7411 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
7412 0x00, 0x05, 0x00, 0x7a, 0x00),
7413 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
7414 },
7415 { .freq = 5420,
7416 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
7417 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
7418 0xa7, 0x95, 0x00, 0x03, 0x00, 0x70, 0x00, 0x05,
7419 0x00, 0x9a, 0x00, 0x95, 0x00, 0x03, 0x00, 0x70,
7420 0x00, 0x05, 0x00, 0x7a, 0x00),
7421 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
7422 },
7423 { .freq = 5430,
7424 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
7425 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
7426 0xa6, 0x85, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
7427 0x00, 0x99, 0x00, 0x85, 0x00, 0x02, 0x00, 0x70,
7428 0x00, 0x05, 0x00, 0x79, 0x00),
7429 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
7430 },
7431 { .freq = 5440,
7432 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
7433 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
7434 0xa6, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
7435 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
7436 0x00, 0x05, 0x00, 0x79, 0x00),
7437 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
7438 },
7439 { .freq = 5450,
7440 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
7441 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
7442 0x95, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x05,
7443 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
7444 0x00, 0x05, 0x00, 0x79, 0x00),
7445 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
7446 },
7447 { .freq = 5460,
7448 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
7449 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
7450 0x95, 0x84, 0x00, 0x02, 0x00, 0x70, 0x00, 0x04,
7451 0x00, 0x99, 0x00, 0x84, 0x00, 0x02, 0x00, 0x70,
7452 0x00, 0x04, 0x00, 0x79, 0x00),
7453 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
7454 },
7455 { .freq = 5470,
7456 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
7457 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
7458 0x94, 0x74, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
7459 0x00, 0x99, 0x00, 0x74, 0x00, 0x01, 0x00, 0x70,
7460 0x00, 0x04, 0x00, 0x79, 0x00),
7461 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
7462 },
7463 { .freq = 5480,
7464 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
7465 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
7466 0x84, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
7467 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
7468 0x00, 0x04, 0x00, 0x78, 0x00),
7469 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
7470 },
7471 { .freq = 5490,
7472 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
7473 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
7474 0x83, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
7475 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
7476 0x00, 0x04, 0x00, 0x78, 0x00),
7477 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
7478 },
7479 { .freq = 5500,
7480 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
7481 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
7482 0x82, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
7483 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
7484 0x00, 0x04, 0x00, 0x78, 0x00),
7485 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
7486 },
7487 { .freq = 5510,
7488 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
7489 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
7490 0x82, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
7491 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
7492 0x00, 0x04, 0x00, 0x78, 0x00),
7493 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
7494 },
7495 { .freq = 5520,
7496 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
7497 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
7498 0x72, 0x73, 0x00, 0x01, 0x00, 0x70, 0x00, 0x04,
7499 0x00, 0x98, 0x00, 0x73, 0x00, 0x01, 0x00, 0x70,
7500 0x00, 0x04, 0x00, 0x78, 0x00),
7501 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
7502 },
7503 { .freq = 5530,
7504 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
7505 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
7506 0x72, 0x63, 0x00, 0x01, 0x00, 0x70, 0x00, 0x03,
7507 0x00, 0x98, 0x00, 0x63, 0x00, 0x01, 0x00, 0x70,
7508 0x00, 0x03, 0x00, 0x78, 0x00),
7509 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
7510 },
7511 { .freq = 5540,
7512 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
7513 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
7514 0x71, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
7515 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
7516 0x00, 0x03, 0x00, 0x77, 0x00),
7517 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
7518 },
7519 { .freq = 5550,
7520 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
7521 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
7522 0x61, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
7523 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
7524 0x00, 0x03, 0x00, 0x77, 0x00),
7525 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
7526 },
7527 { .freq = 5560,
7528 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
7529 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
7530 0x61, 0x62, 0x00, 0x00, 0x00, 0x70, 0x00, 0x03,
7531 0x00, 0x97, 0x00, 0x62, 0x00, 0x00, 0x00, 0x70,
7532 0x00, 0x03, 0x00, 0x77, 0x00),
7533 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
7534 },
7535 { .freq = 5570,
7536 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
7537 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
7538 0x61, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
7539 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
7540 0x00, 0x02, 0x00, 0x76, 0x00),
7541 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
7542 },
7543 { .freq = 5580,
7544 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
7545 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
7546 0x60, 0x52, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
7547 0x00, 0x96, 0x00, 0x52, 0x00, 0x00, 0x00, 0x70,
7548 0x00, 0x02, 0x00, 0x86, 0x00),
7549 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
7550 },
7551 { .freq = 5590,
7552 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
7553 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
7554 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
7555 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
7556 0x00, 0x02, 0x00, 0x86, 0x00),
7557 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
7558 },
7559 { .freq = 5600,
7560 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
7561 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
7562 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
7563 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
7564 0x00, 0x02, 0x00, 0x86, 0x00),
7565 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
7566 },
7567 { .freq = 5610,
7568 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
7569 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
7570 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
7571 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
7572 0x00, 0x02, 0x00, 0x86, 0x00),
7573 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
7574 },
7575 { .freq = 5620,
7576 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
7577 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
7578 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
7579 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
7580 0x00, 0x02, 0x00, 0x86, 0x00),
7581 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
7582 },
7583 { .freq = 5630,
7584 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
7585 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
7586 0x50, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
7587 0x00, 0x96, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
7588 0x00, 0x02, 0x00, 0x86, 0x00),
7589 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
7590 },
7591 { .freq = 5640,
7592 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
7593 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
7594 0x40, 0x51, 0x00, 0x00, 0x00, 0x70, 0x00, 0x02,
7595 0x00, 0x95, 0x00, 0x51, 0x00, 0x00, 0x00, 0x70,
7596 0x00, 0x02, 0x00, 0x85, 0x00),
7597 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
7598 },
7599 { .freq = 5650,
7600 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
7601 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
7602 0x40, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7603 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
7604 0x00, 0x01, 0x00, 0x85, 0x00),
7605 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
7606 },
7607 { .freq = 5660,
7608 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
7609 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
7610 0x40, 0x50, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7611 0x00, 0x95, 0x00, 0x50, 0x00, 0x00, 0x00, 0x70,
7612 0x00, 0x01, 0x00, 0x85, 0x00),
7613 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
7614 },
7615 { .freq = 5670,
7616 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
7617 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
7618 0x40, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7619 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
7620 0x00, 0x01, 0x00, 0x84, 0x00),
7621 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
7622 },
7623 { .freq = 5680,
7624 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
7625 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
7626 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7627 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
7628 0x00, 0x01, 0x00, 0x84, 0x00),
7629 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
7630 },
7631 { .freq = 5690,
7632 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
7633 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
7634 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7635 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
7636 0x00, 0x01, 0x00, 0x94, 0x00),
7637 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
7638 },
7639 { .freq = 5700,
7640 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
7641 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
7642 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7643 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
7644 0x00, 0x01, 0x00, 0x94, 0x00),
7645 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
7646 },
7647 { .freq = 5710,
7648 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
7649 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
7650 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7651 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
7652 0x00, 0x01, 0x00, 0x94, 0x00),
7653 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
7654 },
7655 { .freq = 5720,
7656 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
7657 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
7658 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7659 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
7660 0x00, 0x01, 0x00, 0x94, 0x00),
7661 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
7662 },
7663 { .freq = 5725,
7664 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
7665 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
7666 0x30, 0x40, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7667 0x00, 0x94, 0x00, 0x40, 0x00, 0x00, 0x00, 0x70,
7668 0x00, 0x01, 0x00, 0x94, 0x00),
7669 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
7670 },
7671 { .freq = 5730,
7672 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
7673 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
7674 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x01,
7675 0x00, 0x94, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
7676 0x00, 0x01, 0x00, 0x94, 0x00),
7677 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
7678 },
7679 { .freq = 5735,
7680 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
7681 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
7682 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7683 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
7684 0x00, 0x00, 0x00, 0x93, 0x00),
7685 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
7686 },
7687 { .freq = 5740,
7688 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
7689 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
7690 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7691 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
7692 0x00, 0x00, 0x00, 0x93, 0x00),
7693 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
7694 },
7695 { .freq = 5745,
7696 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
7697 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
7698 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7699 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
7700 0x00, 0x00, 0x00, 0x93, 0x00),
7701 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
7702 },
7703 { .freq = 5750,
7704 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
7705 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
7706 0x20, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7707 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
7708 0x00, 0x00, 0x00, 0x93, 0x00),
7709 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
7710 },
7711 { .freq = 5755,
7712 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
7713 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
7714 0x10, 0x30, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7715 0x00, 0x93, 0x00, 0x30, 0x00, 0x00, 0x00, 0x70,
7716 0x00, 0x00, 0x00, 0x93, 0x00),
7717 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
7718 },
7719 { .freq = 5760,
7720 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
7721 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
7722 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7723 0x00, 0x93, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
7724 0x00, 0x00, 0x00, 0x93, 0x00),
7725 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
7726 },
7727 { .freq = 5765,
7728 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
7729 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
7730 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7731 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
7732 0x00, 0x00, 0x00, 0x92, 0x00),
7733 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
7734 },
7735 { .freq = 5770,
7736 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
7737 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
7738 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7739 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
7740 0x00, 0x00, 0x00, 0x92, 0x00),
7741 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
7742 },
7743 { .freq = 5775,
7744 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
7745 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
7746 0x10, 0x20, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7747 0x00, 0x92, 0x00, 0x20, 0x00, 0x00, 0x00, 0x70,
7748 0x00, 0x00, 0x00, 0x92, 0x00),
7749 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
7750 },
7751 { .freq = 5780,
7752 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
7753 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
7754 0x10, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7755 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7756 0x00, 0x00, 0x00, 0x92, 0x00),
7757 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
7758 },
7759 { .freq = 5785,
7760 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
7761 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7762 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7763 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7764 0x00, 0x00, 0x00, 0x92, 0x00),
7765 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
7766 },
7767 { .freq = 5790,
7768 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
7769 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7770 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7771 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7772 0x00, 0x00, 0x00, 0x92, 0x00),
7773 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
7774 },
7775 { .freq = 5795,
7776 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
7777 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7778 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7779 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7780 0x00, 0x00, 0x00, 0x92, 0x00),
7781 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
7782 },
7783 { .freq = 5800,
7784 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
7785 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7786 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7787 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7788 0x00, 0x00, 0x00, 0x92, 0x00),
7789 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
7790 },
7791 { .freq = 5805,
7792 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
7793 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7794 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7795 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7796 0x00, 0x00, 0x00, 0x92, 0x00),
7797 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
7798 },
7799 { .freq = 5810,
7800 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
7801 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7802 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7803 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7804 0x00, 0x00, 0x00, 0x92, 0x00),
7805 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
7806 },
7807 { .freq = 5815,
7808 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
7809 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7810 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7811 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7812 0x00, 0x00, 0x00, 0x92, 0x00),
7813 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
7814 },
7815 { .freq = 5820,
7816 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
7817 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7818 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7819 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7820 0x00, 0x00, 0x00, 0x92, 0x00),
7821 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
7822 },
7823 { .freq = 5825,
7824 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
7825 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7826 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7827 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7828 0x00, 0x00, 0x00, 0x92, 0x00),
7829 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
7830 },
7831 { .freq = 5830,
7832 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
7833 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7834 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7835 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7836 0x00, 0x00, 0x00, 0x92, 0x00),
7837 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
7838 },
7839 { .freq = 5840,
7840 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
7841 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
7842 0x00, 0x10, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7843 0x00, 0x92, 0x00, 0x10, 0x00, 0x00, 0x00, 0x70,
7844 0x00, 0x00, 0x00, 0x92, 0x00),
7845 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
7846 },
7847 { .freq = 5850,
7848 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
7849 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
7850 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7851 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
7852 0x00, 0x00, 0x00, 0x92, 0x00),
7853 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
7854 },
7855 { .freq = 5860,
7856 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
7857 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
7858 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7859 0x00, 0x92, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
7860 0x00, 0x00, 0x00, 0x92, 0x00),
7861 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
7862 },
7863 { .freq = 5870,
7864 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
7865 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
7866 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7867 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
7868 0x00, 0x00, 0x00, 0x91, 0x00),
7869 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
7870 },
7871 { .freq = 5880,
7872 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
7873 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
7874 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7875 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
7876 0x00, 0x00, 0x00, 0x91, 0x00),
7877 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
7878 },
7879 { .freq = 5890,
7880 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
7881 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
7882 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7883 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
7884 0x00, 0x00, 0x00, 0x91, 0x00),
7885 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
7886 },
7887 { .freq = 5900,
7888 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
7889 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
7890 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7891 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
7892 0x00, 0x00, 0x00, 0x91, 0x00),
7893 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
7894 },
7895 { .freq = 5910,
7896 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
7897 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
7898 0x00, 0x00, 0x00, 0x00, 0x00, 0x70, 0x00, 0x00,
7899 0x00, 0x91, 0x00, 0x00, 0x00, 0x00, 0x00, 0x70,
7900 0x00, 0x00, 0x00, 0x91, 0x00),
7901 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
7902 },
7903 { .freq = 2412,
7904 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
7905 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
7906 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
7907 0x0f, 0x00, 0x0b, 0x00, 0x89, 0x00, 0x03, 0x00,
7908 0x70, 0x00, 0x0f, 0x00, 0x0b),
7909 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
7910 },
7911 { .freq = 2417,
7912 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
7913 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
7914 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
7915 0x0f, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
7916 0x70, 0x00, 0x0f, 0x00, 0x0a),
7917 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
7918 },
7919 { .freq = 2422,
7920 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
7921 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
7922 0x00, 0x00, 0x89, 0x00, 0x03, 0x00, 0x70, 0x00,
7923 0x0f, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
7924 0x70, 0x00, 0x0f, 0x00, 0x0a),
7925 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
7926 },
7927 { .freq = 2427,
7928 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
7929 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
7930 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
7931 0x0e, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
7932 0x70, 0x00, 0x0e, 0x00, 0x0a),
7933 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
7934 },
7935 { .freq = 2432,
7936 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
7937 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
7938 0x00, 0x00, 0x77, 0x00, 0x03, 0x00, 0x70, 0x00,
7939 0x0e, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
7940 0x70, 0x00, 0x0e, 0x00, 0x0a),
7941 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
7942 },
7943 { .freq = 2437,
7944 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
7945 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
7946 0x00, 0x00, 0x76, 0x00, 0x03, 0x00, 0x70, 0x00,
7947 0x0e, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
7948 0x70, 0x00, 0x0e, 0x00, 0x0a),
7949 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
7950 },
7951 { .freq = 2442,
7952 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
7953 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
7954 0x00, 0x00, 0x66, 0x00, 0x03, 0x00, 0x70, 0x00,
7955 0x0e, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x03, 0x00,
7956 0x70, 0x00, 0x0e, 0x00, 0x0a),
7957 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
7958 },
7959 { .freq = 2447,
7960 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
7961 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
7962 0x00, 0x00, 0x55, 0x00, 0x02, 0x00, 0x70, 0x00,
7963 0x0e, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
7964 0x70, 0x00, 0x0e, 0x00, 0x09),
7965 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
7966 },
7967 { .freq = 2452,
7968 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
7969 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
7970 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
7971 0x0e, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
7972 0x70, 0x00, 0x0e, 0x00, 0x09),
7973 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
7974 },
7975 { .freq = 2457,
7976 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
7977 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
7978 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
7979 0x0d, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
7980 0x70, 0x00, 0x0d, 0x00, 0x09),
7981 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
7982 },
7983 { .freq = 2462,
7984 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
7985 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
7986 0x00, 0x00, 0x33, 0x00, 0x02, 0x00, 0x70, 0x00,
7987 0x0d, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
7988 0x70, 0x00, 0x0d, 0x00, 0x09),
7989 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
7990 },
7991 { .freq = 2467,
7992 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
7993 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
7994 0x00, 0x00, 0x22, 0x00, 0x02, 0x00, 0x70, 0x00,
7995 0x0d, 0x00, 0x08, 0x00, 0x22, 0x00, 0x02, 0x00,
7996 0x70, 0x00, 0x0d, 0x00, 0x08),
7997 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
7998 },
7999 { .freq = 2472,
8000 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
8001 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
8002 0x00, 0x00, 0x11, 0x00, 0x02, 0x00, 0x70, 0x00,
8003 0x0d, 0x00, 0x08, 0x00, 0x11, 0x00, 0x02, 0x00,
8004 0x70, 0x00, 0x0d, 0x00, 0x08),
8005 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
8006 },
8007 { .freq = 2484,
8008 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
8009 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
8010 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
8011 0x0d, 0x00, 0x08, 0x00, 0x00, 0x00, 0x02, 0x00,
8012 0x70, 0x00, 0x0d, 0x00, 0x08),
8013 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
8014 },
8015};
8016
8017static const struct b43_nphy_channeltab_entry_rev3 b43_nphy_channeltab_rev8[] = {
8018 { .freq = 4920,
8019 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xec, 0x05, 0x05, 0x04,
8020 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
8021 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8022 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8023 0x00, 0x0f, 0x00, 0x6f, 0x00),
8024 PHYREGS(0x07b4, 0x07b0, 0x07ac, 0x0214, 0x0215, 0x0216),
8025 },
8026 { .freq = 4930,
8027 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xed, 0x05, 0x05, 0x04,
8028 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
8029 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8030 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8031 0x00, 0x0f, 0x00, 0x6f, 0x00),
8032 PHYREGS(0x07b8, 0x07b4, 0x07b0, 0x0213, 0x0214, 0x0215),
8033 },
8034 { .freq = 4940,
8035 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xee, 0x05, 0x05, 0x04,
8036 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
8037 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8038 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8039 0x00, 0x0f, 0x00, 0x6f, 0x00),
8040 PHYREGS(0x07bc, 0x07b8, 0x07b4, 0x0212, 0x0213, 0x0214),
8041 },
8042 { .freq = 4950,
8043 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xef, 0x05, 0x05, 0x04,
8044 0x0c, 0x01, 0x00, 0x00, 0x00, 0x8f, 0x0f, 0x00,
8045 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8046 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8047 0x00, 0x0f, 0x00, 0x6f, 0x00),
8048 PHYREGS(0x07c0, 0x07bc, 0x07b8, 0x0211, 0x0212, 0x0213),
8049 },
8050 { .freq = 4960,
8051 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf0, 0x05, 0x05, 0x04,
8052 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8053 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8054 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8055 0x00, 0x0f, 0x00, 0x6f, 0x00),
8056 PHYREGS(0x07c4, 0x07c0, 0x07bc, 0x020f, 0x0211, 0x0212),
8057 },
8058 { .freq = 4970,
8059 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf1, 0x05, 0x05, 0x04,
8060 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8061 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8062 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8063 0x00, 0x0f, 0x00, 0x6f, 0x00),
8064 PHYREGS(0x07c8, 0x07c4, 0x07c0, 0x020e, 0x020f, 0x0211),
8065 },
8066 { .freq = 4980,
8067 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf2, 0x05, 0x05, 0x04,
8068 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8069 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8070 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8071 0x00, 0x0f, 0x00, 0x6f, 0x00),
8072 PHYREGS(0x07cc, 0x07c8, 0x07c4, 0x020d, 0x020e, 0x020f),
8073 },
8074 { .freq = 4990,
8075 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf3, 0x05, 0x05, 0x04,
8076 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8077 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8078 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8079 0x00, 0x0f, 0x00, 0x6f, 0x00),
8080 PHYREGS(0x07d0, 0x07cc, 0x07c8, 0x020c, 0x020d, 0x020e),
8081 },
8082 { .freq = 5000,
8083 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf4, 0x05, 0x05, 0x04,
8084 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8085 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8086 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8087 0x00, 0x0f, 0x00, 0x6f, 0x00),
8088 PHYREGS(0x07d4, 0x07d0, 0x07cc, 0x020b, 0x020c, 0x020d),
8089 },
8090 { .freq = 5010,
8091 RADIOREGS3(0xff, 0x01, 0x01, 0x01, 0xf5, 0x05, 0x05, 0x04,
8092 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8093 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8094 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8095 0x00, 0x0f, 0x00, 0x6f, 0x00),
8096 PHYREGS(0x07d8, 0x07d4, 0x07d0, 0x020a, 0x020b, 0x020c),
8097 },
8098 { .freq = 5020,
8099 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf6, 0x05, 0x05, 0x04,
8100 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8101 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8102 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8103 0x00, 0x0f, 0x00, 0x6f, 0x00),
8104 PHYREGS(0x07dc, 0x07d8, 0x07d4, 0x0209, 0x020a, 0x020b),
8105 },
8106 { .freq = 5030,
8107 RADIOREGS3(0xf7, 0x01, 0x01, 0x01, 0xf7, 0x05, 0x05, 0x04,
8108 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8109 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8110 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8111 0x00, 0x0f, 0x00, 0x6f, 0x00),
8112 PHYREGS(0x07e0, 0x07dc, 0x07d8, 0x0208, 0x0209, 0x020a),
8113 },
8114 { .freq = 5040,
8115 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf8, 0x05, 0x05, 0x04,
8116 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8117 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8118 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8119 0x00, 0x0f, 0x00, 0x6f, 0x00),
8120 PHYREGS(0x07e4, 0x07e0, 0x07dc, 0x0207, 0x0208, 0x0209),
8121 },
8122 { .freq = 5050,
8123 RADIOREGS3(0xef, 0x01, 0x01, 0x01, 0xf9, 0x05, 0x05, 0x04,
8124 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8125 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8126 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8127 0x00, 0x0f, 0x00, 0x6f, 0x00),
8128 PHYREGS(0x07e8, 0x07e4, 0x07e0, 0x0206, 0x0207, 0x0208),
8129 },
8130 { .freq = 5060,
8131 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfa, 0x05, 0x05, 0x04,
8132 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8133 0xff, 0xfe, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8134 0x00, 0x6f, 0x00, 0xfe, 0x00, 0x09, 0x00, 0x77,
8135 0x00, 0x0f, 0x00, 0x6f, 0x00),
8136 PHYREGS(0x07ec, 0x07e8, 0x07e4, 0x0205, 0x0206, 0x0207),
8137 },
8138 { .freq = 5070,
8139 RADIOREGS3(0xe6, 0x01, 0x01, 0x01, 0xfb, 0x05, 0x05, 0x04,
8140 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8141 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8142 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
8143 0x00, 0x0f, 0x00, 0x6f, 0x00),
8144 PHYREGS(0x07f0, 0x07ec, 0x07e8, 0x0204, 0x0205, 0x0206),
8145 },
8146 { .freq = 5080,
8147 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfc, 0x05, 0x05, 0x04,
8148 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8149 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8150 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
8151 0x00, 0x0f, 0x00, 0x6f, 0x00),
8152 PHYREGS(0x07f4, 0x07f0, 0x07ec, 0x0203, 0x0204, 0x0205),
8153 },
8154 { .freq = 5090,
8155 RADIOREGS3(0xde, 0x01, 0x01, 0x01, 0xfd, 0x05, 0x05, 0x04,
8156 0x0c, 0x01, 0x01, 0x01, 0x01, 0x8f, 0x0f, 0x00,
8157 0xff, 0xfd, 0x00, 0x09, 0x00, 0x77, 0x00, 0x0f,
8158 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x09, 0x00, 0x77,
8159 0x00, 0x0f, 0x00, 0x6f, 0x00),
8160 PHYREGS(0x07f8, 0x07f4, 0x07f0, 0x0202, 0x0203, 0x0204),
8161 },
8162 { .freq = 5100,
8163 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xfe, 0x05, 0x05, 0x04,
8164 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8165 0xff, 0xfd, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
8166 0x00, 0x6f, 0x00, 0xfd, 0x00, 0x08, 0x00, 0x77,
8167 0x00, 0x0f, 0x00, 0x6f, 0x00),
8168 PHYREGS(0x07fc, 0x07f8, 0x07f4, 0x0201, 0x0202, 0x0203),
8169 },
8170 { .freq = 5110,
8171 RADIOREGS3(0xd6, 0x01, 0x01, 0x01, 0xff, 0x05, 0x05, 0x04,
8172 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8173 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
8174 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
8175 0x00, 0x0f, 0x00, 0x6f, 0x00),
8176 PHYREGS(0x0800, 0x07fc, 0x07f8, 0x0200, 0x0201, 0x0202),
8177 },
8178 { .freq = 5120,
8179 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x00, 0x05, 0x05, 0x04,
8180 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8181 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
8182 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
8183 0x00, 0x0f, 0x00, 0x6f, 0x00),
8184 PHYREGS(0x0804, 0x0800, 0x07fc, 0x01ff, 0x0200, 0x0201),
8185 },
8186 { .freq = 5130,
8187 RADIOREGS3(0xce, 0x01, 0x01, 0x02, 0x01, 0x05, 0x05, 0x04,
8188 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8189 0xff, 0xfc, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
8190 0x00, 0x6f, 0x00, 0xfc, 0x00, 0x08, 0x00, 0x77,
8191 0x00, 0x0f, 0x00, 0x6f, 0x00),
8192 PHYREGS(0x0808, 0x0804, 0x0800, 0x01fe, 0x01ff, 0x0200),
8193 },
8194 { .freq = 5140,
8195 RADIOREGS3(0xc6, 0x01, 0x01, 0x02, 0x02, 0x05, 0x05, 0x04,
8196 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8197 0xff, 0xfb, 0x00, 0x08, 0x00, 0x77, 0x00, 0x0f,
8198 0x00, 0x6f, 0x00, 0xfb, 0x00, 0x08, 0x00, 0x77,
8199 0x00, 0x0f, 0x00, 0x6f, 0x00),
8200 PHYREGS(0x080c, 0x0808, 0x0804, 0x01fd, 0x01fe, 0x01ff),
8201 },
8202 { .freq = 5160,
8203 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x04, 0x05, 0x05, 0x04,
8204 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8205 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
8206 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
8207 0x00, 0x0e, 0x00, 0x6f, 0x00),
8208 PHYREGS(0x0814, 0x0810, 0x080c, 0x01fb, 0x01fc, 0x01fd),
8209 },
8210 { .freq = 5170,
8211 RADIOREGS3(0xbe, 0x01, 0x01, 0x02, 0x05, 0x05, 0x05, 0x04,
8212 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8213 0xff, 0xfa, 0x00, 0x07, 0x00, 0x77, 0x00, 0x0e,
8214 0x00, 0x6f, 0x00, 0xfa, 0x00, 0x07, 0x00, 0x77,
8215 0x00, 0x0e, 0x00, 0x6f, 0x00),
8216 PHYREGS(0x0818, 0x0814, 0x0810, 0x01fa, 0x01fb, 0x01fc),
8217 },
8218 { .freq = 5180,
8219 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x06, 0x05, 0x05, 0x04,
8220 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8221 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0e,
8222 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
8223 0x00, 0x0e, 0x00, 0x6f, 0x00),
8224 PHYREGS(0x081c, 0x0818, 0x0814, 0x01f9, 0x01fa, 0x01fb),
8225 },
8226 { .freq = 5190,
8227 RADIOREGS3(0xb6, 0x01, 0x01, 0x02, 0x07, 0x05, 0x05, 0x04,
8228 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8229 0xff, 0xf9, 0x00, 0x06, 0x00, 0x77, 0x00, 0x0d,
8230 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x06, 0x00, 0x77,
8231 0x00, 0x0d, 0x00, 0x6f, 0x00),
8232 PHYREGS(0x0820, 0x081c, 0x0818, 0x01f8, 0x01f9, 0x01fa),
8233 },
8234 { .freq = 5200,
8235 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x08, 0x05, 0x05, 0x04,
8236 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8237 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
8238 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
8239 0x00, 0x0d, 0x00, 0x6f, 0x00),
8240 PHYREGS(0x0824, 0x0820, 0x081c, 0x01f7, 0x01f8, 0x01f9),
8241 },
8242 { .freq = 5210,
8243 RADIOREGS3(0xaf, 0x01, 0x01, 0x02, 0x09, 0x05, 0x05, 0x04,
8244 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8f, 0x0f, 0x00,
8245 0xff, 0xf9, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
8246 0x00, 0x6f, 0x00, 0xf9, 0x00, 0x05, 0x00, 0x77,
8247 0x00, 0x0d, 0x00, 0x6f, 0x00),
8248 PHYREGS(0x0828, 0x0824, 0x0820, 0x01f6, 0x01f7, 0x01f8),
8249 },
8250 { .freq = 5220,
8251 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0a, 0x05, 0x05, 0x04,
8252 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
8253 0xfe, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
8254 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
8255 0x00, 0x0d, 0x00, 0x6f, 0x00),
8256 PHYREGS(0x082c, 0x0828, 0x0824, 0x01f5, 0x01f6, 0x01f7),
8257 },
8258 { .freq = 5230,
8259 RADIOREGS3(0xa7, 0x01, 0x01, 0x02, 0x0b, 0x05, 0x05, 0x04,
8260 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
8261 0xee, 0xd8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
8262 0x00, 0x6f, 0x00, 0xd8, 0x00, 0x05, 0x00, 0x77,
8263 0x00, 0x0d, 0x00, 0x6f, 0x00),
8264 PHYREGS(0x0830, 0x082c, 0x0828, 0x01f4, 0x01f5, 0x01f6),
8265 },
8266 { .freq = 5240,
8267 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0c, 0x05, 0x05, 0x04,
8268 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
8269 0xee, 0xc8, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
8270 0x00, 0x6f, 0x00, 0xc8, 0x00, 0x05, 0x00, 0x77,
8271 0x00, 0x0d, 0x00, 0x6f, 0x00),
8272 PHYREGS(0x0834, 0x0830, 0x082c, 0x01f3, 0x01f4, 0x01f5),
8273 },
8274 { .freq = 5250,
8275 RADIOREGS3(0xa0, 0x01, 0x01, 0x02, 0x0d, 0x05, 0x05, 0x04,
8276 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0f, 0x00,
8277 0xed, 0xc7, 0x00, 0x05, 0x00, 0x77, 0x00, 0x0d,
8278 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x05, 0x00, 0x77,
8279 0x00, 0x0d, 0x00, 0x6f, 0x00),
8280 PHYREGS(0x0838, 0x0834, 0x0830, 0x01f2, 0x01f3, 0x01f4),
8281 },
8282 { .freq = 5260,
8283 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0e, 0x05, 0x05, 0x04,
8284 0x0c, 0x01, 0x02, 0x02, 0x02, 0x8e, 0x0e, 0x00,
8285 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0d,
8286 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
8287 0x00, 0x0d, 0x00, 0x6f, 0x00),
8288 PHYREGS(0x083c, 0x0838, 0x0834, 0x01f1, 0x01f2, 0x01f3),
8289 },
8290 { .freq = 5270,
8291 RADIOREGS3(0x98, 0x01, 0x01, 0x02, 0x0f, 0x05, 0x05, 0x04,
8292 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8e, 0x0e, 0x00,
8293 0xed, 0xc7, 0x00, 0x04, 0x00, 0x77, 0x00, 0x0c,
8294 0x00, 0x6f, 0x00, 0xc7, 0x00, 0x04, 0x00, 0x77,
8295 0x00, 0x0c, 0x00, 0x6f, 0x00),
8296 PHYREGS(0x0840, 0x083c, 0x0838, 0x01f0, 0x01f1, 0x01f2),
8297 },
8298 { .freq = 5280,
8299 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x10, 0x05, 0x05, 0x04,
8300 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
8301 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
8302 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
8303 0x00, 0x0c, 0x00, 0x6f, 0x00),
8304 PHYREGS(0x0844, 0x0840, 0x083c, 0x01f0, 0x01f0, 0x01f1),
8305 },
8306 { .freq = 5290,
8307 RADIOREGS3(0x91, 0x01, 0x01, 0x02, 0x11, 0x05, 0x05, 0x04,
8308 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
8309 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
8310 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
8311 0x00, 0x0c, 0x00, 0x6f, 0x00),
8312 PHYREGS(0x0848, 0x0844, 0x0840, 0x01ef, 0x01f0, 0x01f0),
8313 },
8314 { .freq = 5300,
8315 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x12, 0x05, 0x05, 0x04,
8316 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
8317 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
8318 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
8319 0x00, 0x0c, 0x00, 0x6f, 0x00),
8320 PHYREGS(0x084c, 0x0848, 0x0844, 0x01ee, 0x01ef, 0x01f0),
8321 },
8322 { .freq = 5310,
8323 RADIOREGS3(0x8a, 0x01, 0x01, 0x02, 0x13, 0x05, 0x05, 0x04,
8324 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
8325 0xdc, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
8326 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
8327 0x00, 0x0c, 0x00, 0x6f, 0x00),
8328 PHYREGS(0x0850, 0x084c, 0x0848, 0x01ed, 0x01ee, 0x01ef),
8329 },
8330 { .freq = 5320,
8331 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x14, 0x05, 0x05, 0x04,
8332 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0e, 0x00,
8333 0xdb, 0xb7, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0c,
8334 0x00, 0x6f, 0x00, 0xb7, 0x00, 0x03, 0x00, 0x77,
8335 0x00, 0x0c, 0x00, 0x6f, 0x00),
8336 PHYREGS(0x0854, 0x0850, 0x084c, 0x01ec, 0x01ed, 0x01ee),
8337 },
8338 { .freq = 5330,
8339 RADIOREGS3(0x83, 0x01, 0x01, 0x02, 0x15, 0x05, 0x05, 0x04,
8340 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
8341 0xcb, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
8342 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
8343 0x00, 0x0b, 0x00, 0x6f, 0x00),
8344 PHYREGS(0x0858, 0x0854, 0x0850, 0x01eb, 0x01ec, 0x01ed),
8345 },
8346 { .freq = 5340,
8347 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x16, 0x05, 0x05, 0x04,
8348 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8d, 0x0d, 0x00,
8349 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
8350 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
8351 0x00, 0x0b, 0x00, 0x6f, 0x00),
8352 PHYREGS(0x085c, 0x0858, 0x0854, 0x01ea, 0x01eb, 0x01ec),
8353 },
8354 { .freq = 5350,
8355 RADIOREGS3(0x7c, 0x01, 0x01, 0x02, 0x17, 0x05, 0x05, 0x04,
8356 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
8357 0xca, 0xa6, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0b,
8358 0x00, 0x6f, 0x00, 0xa6, 0x00, 0x03, 0x00, 0x77,
8359 0x00, 0x0b, 0x00, 0x6f, 0x00),
8360 PHYREGS(0x0860, 0x085c, 0x0858, 0x01e9, 0x01ea, 0x01eb),
8361 },
8362 { .freq = 5360,
8363 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x18, 0x05, 0x05, 0x04,
8364 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
8365 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
8366 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
8367 0x00, 0x0a, 0x00, 0x6f, 0x00),
8368 PHYREGS(0x0864, 0x0860, 0x085c, 0x01e8, 0x01e9, 0x01ea),
8369 },
8370 { .freq = 5370,
8371 RADIOREGS3(0x75, 0x01, 0x01, 0x02, 0x19, 0x05, 0x05, 0x04,
8372 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0d, 0x00,
8373 0xc9, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
8374 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
8375 0x00, 0x0a, 0x00, 0x6f, 0x00),
8376 PHYREGS(0x0868, 0x0864, 0x0860, 0x01e7, 0x01e8, 0x01e9),
8377 },
8378 { .freq = 5380,
8379 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1a, 0x05, 0x05, 0x04,
8380 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
8381 0xb8, 0x95, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
8382 0x00, 0x6f, 0x00, 0x95, 0x00, 0x03, 0x00, 0x77,
8383 0x00, 0x0a, 0x00, 0x6f, 0x00),
8384 PHYREGS(0x086c, 0x0868, 0x0864, 0x01e6, 0x01e7, 0x01e8),
8385 },
8386 { .freq = 5390,
8387 RADIOREGS3(0x6e, 0x01, 0x01, 0x02, 0x1b, 0x05, 0x05, 0x04,
8388 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
8389 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
8390 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
8391 0x00, 0x0a, 0x00, 0x6f, 0x00),
8392 PHYREGS(0x0870, 0x086c, 0x0868, 0x01e5, 0x01e6, 0x01e7),
8393 },
8394 { .freq = 5400,
8395 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1c, 0x05, 0x05, 0x04,
8396 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
8397 0xb8, 0x84, 0x00, 0x03, 0x00, 0x77, 0x00, 0x0a,
8398 0x00, 0x6f, 0x00, 0x84, 0x00, 0x03, 0x00, 0x77,
8399 0x00, 0x0a, 0x00, 0x6f, 0x00),
8400 PHYREGS(0x0874, 0x0870, 0x086c, 0x01e5, 0x01e5, 0x01e6),
8401 },
8402 { .freq = 5410,
8403 RADIOREGS3(0x67, 0x01, 0x01, 0x02, 0x1d, 0x05, 0x05, 0x04,
8404 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
8405 0xb7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
8406 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
8407 0x00, 0x0a, 0x00, 0x6f, 0x00),
8408 PHYREGS(0x0878, 0x0874, 0x0870, 0x01e4, 0x01e5, 0x01e5),
8409 },
8410 { .freq = 5420,
8411 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1e, 0x05, 0x05, 0x04,
8412 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0c, 0x00,
8413 0xa7, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
8414 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
8415 0x00, 0x0a, 0x00, 0x6f, 0x00),
8416 PHYREGS(0x087c, 0x0878, 0x0874, 0x01e3, 0x01e4, 0x01e5),
8417 },
8418 { .freq = 5430,
8419 RADIOREGS3(0x61, 0x01, 0x01, 0x02, 0x1f, 0x05, 0x05, 0x04,
8420 0x0c, 0x01, 0x03, 0x03, 0x03, 0x8c, 0x0b, 0x00,
8421 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x0a,
8422 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
8423 0x00, 0x0a, 0x00, 0x6f, 0x00),
8424 PHYREGS(0x0880, 0x087c, 0x0878, 0x01e2, 0x01e3, 0x01e4),
8425 },
8426 { .freq = 5440,
8427 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x20, 0x05, 0x05, 0x04,
8428 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
8429 0xa6, 0x84, 0x00, 0x02, 0x00, 0x77, 0x00, 0x09,
8430 0x00, 0x6f, 0x00, 0x84, 0x00, 0x02, 0x00, 0x77,
8431 0x00, 0x09, 0x00, 0x6f, 0x00),
8432 PHYREGS(0x0884, 0x0880, 0x087c, 0x01e1, 0x01e2, 0x01e3),
8433 },
8434 { .freq = 5450,
8435 RADIOREGS3(0x5a, 0x01, 0x01, 0x02, 0x21, 0x05, 0x05, 0x04,
8436 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
8437 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
8438 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
8439 0x00, 0x09, 0x00, 0x6f, 0x00),
8440 PHYREGS(0x0888, 0x0884, 0x0880, 0x01e0, 0x01e1, 0x01e2),
8441 },
8442 { .freq = 5460,
8443 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x22, 0x05, 0x05, 0x04,
8444 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
8445 0x95, 0x84, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
8446 0x00, 0x6f, 0x00, 0x84, 0x00, 0x01, 0x00, 0x77,
8447 0x00, 0x09, 0x00, 0x6f, 0x00),
8448 PHYREGS(0x088c, 0x0888, 0x0884, 0x01df, 0x01e0, 0x01e1),
8449 },
8450 { .freq = 5470,
8451 RADIOREGS3(0x53, 0x01, 0x01, 0x02, 0x23, 0x05, 0x05, 0x04,
8452 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8b, 0x0b, 0x00,
8453 0x94, 0x73, 0x00, 0x01, 0x00, 0x77, 0x00, 0x09,
8454 0x00, 0x6f, 0x00, 0x73, 0x00, 0x01, 0x00, 0x77,
8455 0x00, 0x09, 0x00, 0x6f, 0x00),
8456 PHYREGS(0x0890, 0x088c, 0x0888, 0x01de, 0x01df, 0x01e0),
8457 },
8458 { .freq = 5480,
8459 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x24, 0x05, 0x05, 0x04,
8460 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
8461 0x84, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8462 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8463 0x00, 0x09, 0x00, 0x6f, 0x00),
8464 PHYREGS(0x0894, 0x0890, 0x088c, 0x01dd, 0x01de, 0x01df),
8465 },
8466 { .freq = 5490,
8467 RADIOREGS3(0x4d, 0x01, 0x01, 0x02, 0x25, 0x05, 0x05, 0x04,
8468 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
8469 0x83, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8470 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8471 0x00, 0x09, 0x00, 0x6f, 0x00),
8472 PHYREGS(0x0898, 0x0894, 0x0890, 0x01dd, 0x01dd, 0x01de),
8473 },
8474 { .freq = 5500,
8475 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x26, 0x05, 0x05, 0x04,
8476 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
8477 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8478 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8479 0x00, 0x09, 0x00, 0x6f, 0x00),
8480 PHYREGS(0x089c, 0x0898, 0x0894, 0x01dc, 0x01dd, 0x01dd),
8481 },
8482 { .freq = 5510,
8483 RADIOREGS3(0x47, 0x01, 0x01, 0x02, 0x27, 0x05, 0x05, 0x04,
8484 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
8485 0x82, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8486 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8487 0x00, 0x09, 0x00, 0x6f, 0x00),
8488 PHYREGS(0x08a0, 0x089c, 0x0898, 0x01db, 0x01dc, 0x01dd),
8489 },
8490 { .freq = 5520,
8491 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x28, 0x05, 0x05, 0x04,
8492 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x0a, 0x00,
8493 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8494 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8495 0x00, 0x09, 0x00, 0x6f, 0x00),
8496 PHYREGS(0x08a4, 0x08a0, 0x089c, 0x01da, 0x01db, 0x01dc),
8497 },
8498 { .freq = 5530,
8499 RADIOREGS3(0x40, 0x01, 0x01, 0x02, 0x29, 0x05, 0x05, 0x04,
8500 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
8501 0x72, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8502 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8503 0x00, 0x09, 0x00, 0x6f, 0x00),
8504 PHYREGS(0x08a8, 0x08a4, 0x08a0, 0x01d9, 0x01da, 0x01db),
8505 },
8506 { .freq = 5540,
8507 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2a, 0x05, 0x05, 0x04,
8508 0x0c, 0x01, 0x04, 0x04, 0x04, 0x8a, 0x09, 0x00,
8509 0x71, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8510 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8511 0x00, 0x09, 0x00, 0x6f, 0x00),
8512 PHYREGS(0x08ac, 0x08a8, 0x08a4, 0x01d8, 0x01d9, 0x01da),
8513 },
8514 { .freq = 5550,
8515 RADIOREGS3(0x3a, 0x01, 0x01, 0x02, 0x2b, 0x05, 0x05, 0x04,
8516 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
8517 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8518 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8519 0x00, 0x09, 0x00, 0x6f, 0x00),
8520 PHYREGS(0x08b0, 0x08ac, 0x08a8, 0x01d7, 0x01d8, 0x01d9),
8521 },
8522 { .freq = 5560,
8523 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2c, 0x05, 0x05, 0x04,
8524 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
8525 0x61, 0x73, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8526 0x00, 0x6f, 0x00, 0x73, 0x00, 0x00, 0x00, 0x77,
8527 0x00, 0x09, 0x00, 0x6f, 0x00),
8528 PHYREGS(0x08b4, 0x08b0, 0x08ac, 0x01d7, 0x01d7, 0x01d8),
8529 },
8530 { .freq = 5570,
8531 RADIOREGS3(0x34, 0x01, 0x01, 0x02, 0x2d, 0x05, 0x05, 0x04,
8532 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x09, 0x00,
8533 0x61, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x09,
8534 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
8535 0x00, 0x09, 0x00, 0x6f, 0x00),
8536 PHYREGS(0x08b8, 0x08b4, 0x08b0, 0x01d6, 0x01d7, 0x01d7),
8537 },
8538 { .freq = 5580,
8539 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2e, 0x05, 0x05, 0x04,
8540 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
8541 0x60, 0x62, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
8542 0x00, 0x6f, 0x00, 0x62, 0x00, 0x00, 0x00, 0x77,
8543 0x00, 0x08, 0x00, 0x6f, 0x00),
8544 PHYREGS(0x08bc, 0x08b8, 0x08b4, 0x01d5, 0x01d6, 0x01d7),
8545 },
8546 { .freq = 5590,
8547 RADIOREGS3(0x2e, 0x01, 0x01, 0x02, 0x2f, 0x05, 0x05, 0x04,
8548 0x0c, 0x01, 0x04, 0x04, 0x04, 0x89, 0x08, 0x00,
8549 0x50, 0x61, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
8550 0x00, 0x6f, 0x00, 0x61, 0x00, 0x00, 0x00, 0x77,
8551 0x00, 0x08, 0x00, 0x6f, 0x00),
8552 PHYREGS(0x08c0, 0x08bc, 0x08b8, 0x01d4, 0x01d5, 0x01d6),
8553 },
8554 { .freq = 5600,
8555 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x30, 0x05, 0x05, 0x04,
8556 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
8557 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
8558 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
8559 0x00, 0x08, 0x00, 0x6f, 0x00),
8560 PHYREGS(0x08c4, 0x08c0, 0x08bc, 0x01d3, 0x01d4, 0x01d5),
8561 },
8562 { .freq = 5610,
8563 RADIOREGS3(0x28, 0x01, 0x01, 0x02, 0x31, 0x05, 0x05, 0x04,
8564 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
8565 0x50, 0x51, 0x00, 0x00, 0x00, 0x77, 0x00, 0x08,
8566 0x00, 0x6f, 0x00, 0x51, 0x00, 0x00, 0x00, 0x77,
8567 0x00, 0x08, 0x00, 0x6f, 0x00),
8568 PHYREGS(0x08c8, 0x08c4, 0x08c0, 0x01d2, 0x01d3, 0x01d4),
8569 },
8570 { .freq = 5620,
8571 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x32, 0x05, 0x05, 0x04,
8572 0x0c, 0x01, 0x05, 0x05, 0x05, 0x89, 0x08, 0x00,
8573 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
8574 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
8575 0x00, 0x07, 0x00, 0x6f, 0x00),
8576 PHYREGS(0x08cc, 0x08c8, 0x08c4, 0x01d2, 0x01d2, 0x01d3),
8577 },
8578 { .freq = 5630,
8579 RADIOREGS3(0x21, 0x01, 0x01, 0x02, 0x33, 0x05, 0x05, 0x04,
8580 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
8581 0x50, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
8582 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
8583 0x00, 0x07, 0x00, 0x6f, 0x00),
8584 PHYREGS(0x08d0, 0x08cc, 0x08c8, 0x01d1, 0x01d2, 0x01d2),
8585 },
8586 { .freq = 5640,
8587 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x34, 0x05, 0x05, 0x04,
8588 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
8589 0x40, 0x50, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
8590 0x00, 0x6f, 0x00, 0x50, 0x00, 0x00, 0x00, 0x77,
8591 0x00, 0x07, 0x00, 0x6f, 0x00),
8592 PHYREGS(0x08d4, 0x08d0, 0x08cc, 0x01d0, 0x01d1, 0x01d2),
8593 },
8594 { .freq = 5650,
8595 RADIOREGS3(0x1c, 0x01, 0x01, 0x02, 0x35, 0x05, 0x05, 0x04,
8596 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
8597 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x07,
8598 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
8599 0x00, 0x07, 0x00, 0x6f, 0x00),
8600 PHYREGS(0x08d8, 0x08d4, 0x08d0, 0x01cf, 0x01d0, 0x01d1),
8601 },
8602 { .freq = 5660,
8603 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x36, 0x05, 0x05, 0x04,
8604 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
8605 0x40, 0x40, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8606 0x00, 0x6f, 0x00, 0x40, 0x00, 0x00, 0x00, 0x77,
8607 0x00, 0x06, 0x00, 0x6f, 0x00),
8608 PHYREGS(0x08dc, 0x08d8, 0x08d4, 0x01ce, 0x01cf, 0x01d0),
8609 },
8610 { .freq = 5670,
8611 RADIOREGS3(0x16, 0x01, 0x01, 0x02, 0x37, 0x05, 0x05, 0x04,
8612 0x0c, 0x01, 0x05, 0x05, 0x05, 0x88, 0x07, 0x00,
8613 0x40, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8614 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8615 0x00, 0x06, 0x00, 0x6f, 0x00),
8616 PHYREGS(0x08e0, 0x08dc, 0x08d8, 0x01ce, 0x01ce, 0x01cf),
8617 },
8618 { .freq = 5680,
8619 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x38, 0x05, 0x05, 0x04,
8620 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
8621 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8622 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8623 0x00, 0x06, 0x00, 0x6f, 0x00),
8624 PHYREGS(0x08e4, 0x08e0, 0x08dc, 0x01cd, 0x01ce, 0x01ce),
8625 },
8626 { .freq = 5690,
8627 RADIOREGS3(0x10, 0x01, 0x01, 0x02, 0x39, 0x05, 0x05, 0x04,
8628 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
8629 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8630 0x00, 0x6f, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8631 0x00, 0x06, 0x00, 0x6f, 0x00),
8632 PHYREGS(0x08e8, 0x08e4, 0x08e0, 0x01cc, 0x01cd, 0x01ce),
8633 },
8634 { .freq = 5700,
8635 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3a, 0x05, 0x05, 0x04,
8636 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
8637 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8638 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8639 0x00, 0x06, 0x00, 0x6e, 0x00),
8640 PHYREGS(0x08ec, 0x08e8, 0x08e4, 0x01cb, 0x01cc, 0x01cd),
8641 },
8642 { .freq = 5710,
8643 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3b, 0x05, 0x05, 0x04,
8644 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
8645 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8646 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8647 0x00, 0x06, 0x00, 0x6e, 0x00),
8648 PHYREGS(0x08f0, 0x08ec, 0x08e8, 0x01ca, 0x01cb, 0x01cc),
8649 },
8650 { .freq = 5720,
8651 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3c, 0x05, 0x05, 0x04,
8652 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
8653 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8654 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8655 0x00, 0x06, 0x00, 0x6e, 0x00),
8656 PHYREGS(0x08f4, 0x08f0, 0x08ec, 0x01c9, 0x01ca, 0x01cb),
8657 },
8658 { .freq = 5725,
8659 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x79, 0x07, 0x07, 0x04,
8660 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x06, 0x00,
8661 0x30, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8662 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8663 0x00, 0x06, 0x00, 0x6e, 0x00),
8664 PHYREGS(0x08f6, 0x08f2, 0x08ee, 0x01c9, 0x01ca, 0x01cb),
8665 },
8666 { .freq = 5730,
8667 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3d, 0x05, 0x05, 0x04,
8668 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
8669 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8670 0x00, 0x6e, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8671 0x00, 0x06, 0x00, 0x6e, 0x00),
8672 PHYREGS(0x08f8, 0x08f4, 0x08f0, 0x01c9, 0x01c9, 0x01ca),
8673 },
8674 { .freq = 5735,
8675 RADIOREGS3(0x03, 0x01, 0x02, 0x04, 0x7b, 0x07, 0x07, 0x04,
8676 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
8677 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8678 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8679 0x00, 0x06, 0x00, 0x6d, 0x00),
8680 PHYREGS(0x08fa, 0x08f6, 0x08f2, 0x01c8, 0x01c9, 0x01ca),
8681 },
8682 { .freq = 5740,
8683 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3e, 0x05, 0x05, 0x04,
8684 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
8685 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8686 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8687 0x00, 0x06, 0x00, 0x6d, 0x00),
8688 PHYREGS(0x08fc, 0x08f8, 0x08f4, 0x01c8, 0x01c9, 0x01c9),
8689 },
8690 { .freq = 5745,
8691 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7d, 0x07, 0x07, 0x04,
8692 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
8693 0x20, 0x30, 0x00, 0x00, 0x00, 0x77, 0x00, 0x06,
8694 0x00, 0x6d, 0x00, 0x30, 0x00, 0x00, 0x00, 0x77,
8695 0x00, 0x06, 0x00, 0x6d, 0x00),
8696 PHYREGS(0x08fe, 0x08fa, 0x08f6, 0x01c8, 0x01c8, 0x01c9),
8697 },
8698 { .freq = 5750,
8699 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x3f, 0x05, 0x05, 0x04,
8700 0x0c, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
8701 0x20, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8702 0x00, 0x6d, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
8703 0x00, 0x05, 0x00, 0x6d, 0x00),
8704 PHYREGS(0x0900, 0x08fc, 0x08f8, 0x01c7, 0x01c8, 0x01c9),
8705 },
8706 { .freq = 5755,
8707 RADIOREGS3(0xfe, 0x00, 0x02, 0x04, 0x7f, 0x07, 0x07, 0x04,
8708 0x10, 0x01, 0x05, 0x05, 0x05, 0x87, 0x05, 0x00,
8709 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8710 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
8711 0x00, 0x05, 0x00, 0x6c, 0x00),
8712 PHYREGS(0x0902, 0x08fe, 0x08fa, 0x01c7, 0x01c8, 0x01c8),
8713 },
8714 { .freq = 5760,
8715 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x40, 0x05, 0x05, 0x04,
8716 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
8717 0x10, 0x20, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8718 0x00, 0x6c, 0x00, 0x20, 0x00, 0x00, 0x00, 0x77,
8719 0x00, 0x05, 0x00, 0x6c, 0x00),
8720 PHYREGS(0x0904, 0x0900, 0x08fc, 0x01c6, 0x01c7, 0x01c8),
8721 },
8722 { .freq = 5765,
8723 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x81, 0x07, 0x07, 0x04,
8724 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x05, 0x00,
8725 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8726 0x00, 0x6c, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
8727 0x00, 0x05, 0x00, 0x6c, 0x00),
8728 PHYREGS(0x0906, 0x0902, 0x08fe, 0x01c6, 0x01c7, 0x01c8),
8729 },
8730 { .freq = 5770,
8731 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x41, 0x05, 0x05, 0x04,
8732 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
8733 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8734 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
8735 0x00, 0x05, 0x00, 0x6b, 0x00),
8736 PHYREGS(0x0908, 0x0904, 0x0900, 0x01c6, 0x01c6, 0x01c7),
8737 },
8738 { .freq = 5775,
8739 RADIOREGS3(0xf8, 0x00, 0x02, 0x04, 0x83, 0x07, 0x07, 0x04,
8740 0x10, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
8741 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8742 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
8743 0x00, 0x05, 0x00, 0x6b, 0x00),
8744 PHYREGS(0x090a, 0x0906, 0x0902, 0x01c5, 0x01c6, 0x01c7),
8745 },
8746 { .freq = 5780,
8747 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x42, 0x05, 0x05, 0x04,
8748 0x0c, 0x01, 0x05, 0x05, 0x05, 0x86, 0x04, 0x00,
8749 0x10, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8750 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
8751 0x00, 0x05, 0x00, 0x6b, 0x00),
8752 PHYREGS(0x090c, 0x0908, 0x0904, 0x01c5, 0x01c6, 0x01c6),
8753 },
8754 { .freq = 5785,
8755 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x85, 0x07, 0x07, 0x04,
8756 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8757 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8758 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
8759 0x00, 0x05, 0x00, 0x6b, 0x00),
8760 PHYREGS(0x090e, 0x090a, 0x0906, 0x01c4, 0x01c5, 0x01c6),
8761 },
8762 { .freq = 5790,
8763 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x43, 0x05, 0x05, 0x04,
8764 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8765 0x00, 0x10, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8766 0x00, 0x6b, 0x00, 0x10, 0x00, 0x00, 0x00, 0x77,
8767 0x00, 0x05, 0x00, 0x6b, 0x00),
8768 PHYREGS(0x0910, 0x090c, 0x0908, 0x01c4, 0x01c5, 0x01c6),
8769 },
8770 { .freq = 5795,
8771 RADIOREGS3(0xf2, 0x00, 0x02, 0x04, 0x87, 0x07, 0x07, 0x04,
8772 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8773 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8774 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8775 0x00, 0x05, 0x00, 0x6b, 0x00),
8776 PHYREGS(0x0912, 0x090e, 0x090a, 0x01c4, 0x01c4, 0x01c5),
8777 },
8778 { .freq = 5800,
8779 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x44, 0x05, 0x05, 0x04,
8780 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8781 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8782 0x00, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8783 0x00, 0x05, 0x00, 0x6b, 0x00),
8784 PHYREGS(0x0914, 0x0910, 0x090c, 0x01c3, 0x01c4, 0x01c5),
8785 },
8786 { .freq = 5805,
8787 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x89, 0x07, 0x07, 0x04,
8788 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8789 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8790 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8791 0x00, 0x05, 0x00, 0x6a, 0x00),
8792 PHYREGS(0x0916, 0x0912, 0x090e, 0x01c3, 0x01c4, 0x01c4),
8793 },
8794 { .freq = 5810,
8795 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x45, 0x05, 0x05, 0x04,
8796 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8797 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8798 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8799 0x00, 0x05, 0x00, 0x6a, 0x00),
8800 PHYREGS(0x0918, 0x0914, 0x0910, 0x01c2, 0x01c3, 0x01c4),
8801 },
8802 { .freq = 5815,
8803 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8b, 0x07, 0x07, 0x04,
8804 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8805 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8806 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8807 0x00, 0x05, 0x00, 0x6a, 0x00),
8808 PHYREGS(0x091a, 0x0916, 0x0912, 0x01c2, 0x01c3, 0x01c4),
8809 },
8810 { .freq = 5820,
8811 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x46, 0x05, 0x05, 0x04,
8812 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8813 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8814 0x00, 0x6a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8815 0x00, 0x05, 0x00, 0x6a, 0x00),
8816 PHYREGS(0x091c, 0x0918, 0x0914, 0x01c2, 0x01c2, 0x01c3),
8817 },
8818 { .freq = 5825,
8819 RADIOREGS3(0xed, 0x00, 0x02, 0x04, 0x8d, 0x07, 0x07, 0x04,
8820 0x10, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8821 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8822 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8823 0x00, 0x05, 0x00, 0x69, 0x00),
8824 PHYREGS(0x091e, 0x091a, 0x0916, 0x01c1, 0x01c2, 0x01c3),
8825 },
8826 { .freq = 5830,
8827 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x47, 0x05, 0x05, 0x04,
8828 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8829 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x05,
8830 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8831 0x00, 0x05, 0x00, 0x69, 0x00),
8832 PHYREGS(0x0920, 0x091c, 0x0918, 0x01c1, 0x01c2, 0x01c2),
8833 },
8834 { .freq = 5840,
8835 RADIOREGS3(0x0a, 0x01, 0x01, 0x02, 0x48, 0x05, 0x05, 0x04,
8836 0x0c, 0x01, 0x06, 0x06, 0x06, 0x86, 0x04, 0x00,
8837 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
8838 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8839 0x00, 0x04, 0x00, 0x69, 0x00),
8840 PHYREGS(0x0924, 0x0920, 0x091c, 0x01c0, 0x01c1, 0x01c2),
8841 },
8842 { .freq = 5850,
8843 RADIOREGS3(0xe0, 0x00, 0x01, 0x02, 0x49, 0x05, 0x05, 0x04,
8844 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
8845 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
8846 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8847 0x00, 0x04, 0x00, 0x69, 0x00),
8848 PHYREGS(0x0928, 0x0924, 0x0920, 0x01bf, 0x01c0, 0x01c1),
8849 },
8850 { .freq = 5860,
8851 RADIOREGS3(0xde, 0x00, 0x01, 0x02, 0x4a, 0x05, 0x05, 0x04,
8852 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
8853 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
8854 0x00, 0x69, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8855 0x00, 0x04, 0x00, 0x69, 0x00),
8856 PHYREGS(0x092c, 0x0928, 0x0924, 0x01bf, 0x01bf, 0x01c0),
8857 },
8858 { .freq = 5870,
8859 RADIOREGS3(0xdb, 0x00, 0x01, 0x02, 0x4b, 0x05, 0x05, 0x04,
8860 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
8861 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
8862 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8863 0x00, 0x04, 0x00, 0x68, 0x00),
8864 PHYREGS(0x0930, 0x092c, 0x0928, 0x01be, 0x01bf, 0x01bf),
8865 },
8866 { .freq = 5880,
8867 RADIOREGS3(0xd8, 0x00, 0x01, 0x02, 0x4c, 0x05, 0x05, 0x04,
8868 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
8869 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
8870 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8871 0x00, 0x04, 0x00, 0x68, 0x00),
8872 PHYREGS(0x0934, 0x0930, 0x092c, 0x01bd, 0x01be, 0x01bf),
8873 },
8874 { .freq = 5890,
8875 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4d, 0x05, 0x05, 0x04,
8876 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
8877 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
8878 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8879 0x00, 0x04, 0x00, 0x68, 0x00),
8880 PHYREGS(0x0938, 0x0934, 0x0930, 0x01bc, 0x01bd, 0x01be),
8881 },
8882 { .freq = 5900,
8883 RADIOREGS3(0xd3, 0x00, 0x01, 0x02, 0x4e, 0x05, 0x05, 0x04,
8884 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
8885 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
8886 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8887 0x00, 0x04, 0x00, 0x68, 0x00),
8888 PHYREGS(0x093c, 0x0938, 0x0934, 0x01bc, 0x01bc, 0x01bd),
8889 },
8890 { .freq = 5910,
8891 RADIOREGS3(0xd6, 0x00, 0x01, 0x02, 0x4f, 0x05, 0x05, 0x04,
8892 0x0c, 0x01, 0x06, 0x06, 0x06, 0x85, 0x03, 0x00,
8893 0x00, 0x00, 0x00, 0x00, 0x00, 0x77, 0x00, 0x04,
8894 0x00, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x77,
8895 0x00, 0x04, 0x00, 0x68, 0x00),
8896 PHYREGS(0x0940, 0x093c, 0x0938, 0x01bb, 0x01bc, 0x01bc),
8897 },
8898 { .freq = 2412,
8899 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x6c, 0x08, 0x08, 0x04,
8900 0x16, 0x01, 0x04, 0x04, 0x04, 0x8f, 0x30, 0x00,
8901 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
8902 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
8903 0x70, 0x00, 0x0b, 0x00, 0x0a),
8904 PHYREGS(0x03c9, 0x03c5, 0x03c1, 0x043a, 0x043f, 0x0443),
8905 },
8906 { .freq = 2417,
8907 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x71, 0x08, 0x08, 0x04,
8908 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
8909 0x00, 0x00, 0x78, 0x00, 0x03, 0x00, 0x70, 0x00,
8910 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
8911 0x70, 0x00, 0x0b, 0x00, 0x0a),
8912 PHYREGS(0x03cb, 0x03c7, 0x03c3, 0x0438, 0x043d, 0x0441),
8913 },
8914 { .freq = 2422,
8915 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x76, 0x08, 0x08, 0x04,
8916 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
8917 0x00, 0x00, 0x67, 0x00, 0x03, 0x00, 0x70, 0x00,
8918 0x0b, 0x00, 0x0a, 0x00, 0x89, 0x00, 0x03, 0x00,
8919 0x70, 0x00, 0x0b, 0x00, 0x0a),
8920 PHYREGS(0x03cd, 0x03c9, 0x03c5, 0x0436, 0x043a, 0x043f),
8921 },
8922 { .freq = 2427,
8923 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x7b, 0x08, 0x08, 0x04,
8924 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
8925 0x00, 0x00, 0x57, 0x00, 0x03, 0x00, 0x70, 0x00,
8926 0x0a, 0x00, 0x0a, 0x00, 0x78, 0x00, 0x03, 0x00,
8927 0x70, 0x00, 0x0a, 0x00, 0x0a),
8928 PHYREGS(0x03cf, 0x03cb, 0x03c7, 0x0434, 0x0438, 0x043d),
8929 },
8930 { .freq = 2432,
8931 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x80, 0x08, 0x08, 0x04,
8932 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
8933 0x00, 0x00, 0x56, 0x00, 0x03, 0x00, 0x70, 0x00,
8934 0x0a, 0x00, 0x0a, 0x00, 0x77, 0x00, 0x03, 0x00,
8935 0x70, 0x00, 0x0a, 0x00, 0x0a),
8936 PHYREGS(0x03d1, 0x03cd, 0x03c9, 0x0431, 0x0436, 0x043a),
8937 },
8938 { .freq = 2437,
8939 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x85, 0x08, 0x08, 0x04,
8940 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
8941 0x00, 0x00, 0x46, 0x00, 0x03, 0x00, 0x70, 0x00,
8942 0x0a, 0x00, 0x0a, 0x00, 0x76, 0x00, 0x03, 0x00,
8943 0x70, 0x00, 0x0a, 0x00, 0x0a),
8944 PHYREGS(0x03d3, 0x03cf, 0x03cb, 0x042f, 0x0434, 0x0438),
8945 },
8946 { .freq = 2442,
8947 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8a, 0x08, 0x08, 0x04,
8948 0x16, 0x01, 0x05, 0x05, 0x05, 0x8f, 0x30, 0x00,
8949 0x00, 0x00, 0x45, 0x00, 0x02, 0x00, 0x70, 0x00,
8950 0x0a, 0x00, 0x0a, 0x00, 0x66, 0x00, 0x02, 0x00,
8951 0x70, 0x00, 0x0a, 0x00, 0x0a),
8952 PHYREGS(0x03d5, 0x03d1, 0x03cd, 0x042d, 0x0431, 0x0436),
8953 },
8954 { .freq = 2447,
8955 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x8f, 0x08, 0x08, 0x04,
8956 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
8957 0x00, 0x00, 0x34, 0x00, 0x02, 0x00, 0x70, 0x00,
8958 0x0a, 0x00, 0x09, 0x00, 0x55, 0x00, 0x02, 0x00,
8959 0x70, 0x00, 0x0a, 0x00, 0x09),
8960 PHYREGS(0x03d7, 0x03d3, 0x03cf, 0x042b, 0x042f, 0x0434),
8961 },
8962 { .freq = 2452,
8963 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x94, 0x08, 0x08, 0x04,
8964 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
8965 0x00, 0x00, 0x23, 0x00, 0x02, 0x00, 0x70, 0x00,
8966 0x0a, 0x00, 0x09, 0x00, 0x45, 0x00, 0x02, 0x00,
8967 0x70, 0x00, 0x0a, 0x00, 0x09),
8968 PHYREGS(0x03d9, 0x03d5, 0x03d1, 0x0429, 0x042d, 0x0431),
8969 },
8970 { .freq = 2457,
8971 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x99, 0x08, 0x08, 0x04,
8972 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
8973 0x00, 0x00, 0x12, 0x00, 0x02, 0x00, 0x70, 0x00,
8974 0x0a, 0x00, 0x09, 0x00, 0x34, 0x00, 0x02, 0x00,
8975 0x70, 0x00, 0x0a, 0x00, 0x09),
8976 PHYREGS(0x03db, 0x03d7, 0x03d3, 0x0427, 0x042b, 0x042f),
8977 },
8978 { .freq = 2462,
8979 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0x9e, 0x08, 0x08, 0x04,
8980 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
8981 0x00, 0x00, 0x02, 0x00, 0x02, 0x00, 0x70, 0x00,
8982 0x09, 0x00, 0x09, 0x00, 0x33, 0x00, 0x02, 0x00,
8983 0x70, 0x00, 0x09, 0x00, 0x09),
8984 PHYREGS(0x03dd, 0x03d9, 0x03d5, 0x0424, 0x0429, 0x042d),
8985 },
8986 { .freq = 2467,
8987 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa3, 0x08, 0x08, 0x04,
8988 0x16, 0x01, 0x06, 0x06, 0x06, 0x8f, 0x30, 0x00,
8989 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
8990 0x09, 0x00, 0x09, 0x00, 0x22, 0x00, 0x02, 0x00,
8991 0x70, 0x00, 0x09, 0x00, 0x09),
8992 PHYREGS(0x03df, 0x03db, 0x03d7, 0x0422, 0x0427, 0x042b),
8993 },
8994 { .freq = 2472,
8995 RADIOREGS3(0x00, 0x01, 0x03, 0x09, 0xa8, 0x08, 0x08, 0x04,
8996 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x30, 0x00,
8997 0x00, 0x00, 0x01, 0x00, 0x02, 0x00, 0x70, 0x00,
8998 0x09, 0x00, 0x09, 0x00, 0x11, 0x00, 0x02, 0x00,
8999 0x70, 0x00, 0x09, 0x00, 0x09),
9000 PHYREGS(0x03e1, 0x03dd, 0x03d9, 0x0420, 0x0424, 0x0429),
9001 },
9002 { .freq = 2484,
9003 RADIOREGS3(0xff, 0x01, 0x03, 0x09, 0xb4, 0x08, 0x08, 0x04,
9004 0x16, 0x01, 0x07, 0x07, 0x07, 0x8f, 0x20, 0x00,
9005 0x00, 0x00, 0x00, 0x00, 0x02, 0x00, 0x70, 0x00,
9006 0x09, 0x00, 0x09, 0x00, 0x00, 0x00, 0x02, 0x00,
9007 0x70, 0x00, 0x09, 0x00, 0x09),
9008 PHYREGS(0x03e6, 0x03e2, 0x03de, 0x041b, 0x041f, 0x0424),
9009 },
9010};
9011
9012static void b2056_upload_inittab(struct b43_wldev *dev, bool ghz5,
9013 bool ignore_uploadflag, u16 routing,
9014 const struct b2056_inittab_entry *e,
9015 unsigned int length)
9016{
9017 unsigned int i;
9018 u16 value;
9019
9020 for (i = 0; i < length; i++, e++) {
9021 if (!(e->flags & B2056_INITTAB_ENTRY_OK))
9022 continue;
9023 if ((e->flags & B2056_INITTAB_UPLOAD) || ignore_uploadflag) {
9024 if (ghz5)
9025 value = e->ghz5;
9026 else
9027 value = e->ghz2;
9028 b43_radio_write(dev, routing | i, value);
9029 }
9030 }
9031}
9032
9033void b2056_upload_inittabs(struct b43_wldev *dev,
9034 bool ghz5, bool ignore_uploadflag)
9035{
9036 struct b2056_inittabs_pts *pts;
9037
9038 if (dev->phy.rev >= ARRAY_SIZE(b2056_inittabs)) {
9039 B43_WARN_ON(1);
9040 return;
9041 }
9042 pts = &b2056_inittabs[dev->phy.rev];
9043
9044 b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
9045 B2056_SYN, pts->syn, pts->syn_length);
9046 b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
9047 B2056_TX0, pts->tx, pts->tx_length);
9048 b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
9049 B2056_TX1, pts->tx, pts->tx_length);
9050 b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
9051 B2056_RX0, pts->rx, pts->rx_length);
9052 b2056_upload_inittab(dev, ghz5, ignore_uploadflag,
9053 B2056_RX1, pts->rx, pts->rx_length);
9054}
9055
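The upload helper above walks an init table entry by entry: entries without the OK flag are skipped, and an entry is written only when it carries the UPLOAD flag or the caller forces the upload; the target register is the block routing prefix (SYN/TX0/TX1/RX0/RX1) OR'ed with the table index, and either the 2 GHz or the 5 GHz value is picked per band. The stand-alone sketch below mirrors that flag-gated loop outside the driver; the entry layout, the flag names and the printf stand-in for b43_radio_write() are illustrative assumptions, not the driver's definitions.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative analogue of struct b2056_inittab_entry: one entry per
 * register offset, carrying a 2 GHz value, a 5 GHz value and flags. */
#define ENTRY_OK     0x01
#define ENTRY_UPLOAD 0x02

struct inittab_entry {
	uint16_t ghz2;
	uint16_t ghz5;
	uint8_t flags;
};

/* Stand-in for b43_radio_write(): just print the register and value. */
static void radio_write(uint16_t reg, uint16_t value)
{
	printf("radio[0x%04x] = 0x%04x\n", (unsigned)reg, (unsigned)value);
}

/* Same shape as b2056_upload_inittab(): the table index is the register
 * offset, and 'routing' selects the SYN/TX/RX block. */
static void upload_inittab(bool ghz5, bool ignore_uploadflag,
			   uint16_t routing,
			   const struct inittab_entry *e, unsigned int length)
{
	unsigned int i;

	for (i = 0; i < length; i++, e++) {
		if (!(e->flags & ENTRY_OK))
			continue;
		if ((e->flags & ENTRY_UPLOAD) || ignore_uploadflag)
			radio_write(routing | i, ghz5 ? e->ghz5 : e->ghz2);
	}
}

int main(void)
{
	/* Tiny made-up table: entry 0 valid and uploaded, entry 1 valid only. */
	static const struct inittab_entry tab[] = {
		{ .ghz2 = 0x1234, .ghz5 = 0x5678, .flags = ENTRY_OK | ENTRY_UPLOAD },
		{ .ghz2 = 0x0001, .ghz5 = 0x0002, .flags = ENTRY_OK },
	};

	upload_inittab(true, false, 0x0 << 12, tab, 2); /* SYN block, 5 GHz */
	return 0;
}

The remaining hunk of radio_2056.c, reworking the existing rev3 channel-table lookup, follows.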
  30 9056	const struct b43_nphy_channeltab_entry_rev3 *
  31 9057	b43_nphy_get_chantabent_rev3(struct b43_wldev *dev, u16 freq)
  32 9058	{
  33 9059		const struct b43_nphy_channeltab_entry_rev3 *e;
  34     		unsigned int i;
     9060		unsigned int length, i;
9061
9062 switch (dev->phy.rev) {
9063 case 3:
9064 e = b43_nphy_channeltab_rev3;
9065 length = ARRAY_SIZE(b43_nphy_channeltab_rev3);
9066 break;
9067 case 4:
9068 e = b43_nphy_channeltab_rev4;
9069 length = ARRAY_SIZE(b43_nphy_channeltab_rev4);
9070 break;
9071 case 5:
9072 e = b43_nphy_channeltab_rev5;
9073 length = ARRAY_SIZE(b43_nphy_channeltab_rev5);
9074 break;
9075 case 6:
9076 e = b43_nphy_channeltab_rev6;
9077 length = ARRAY_SIZE(b43_nphy_channeltab_rev6);
9078 break;
9079 case 7:
9080 case 9:
9081 e = b43_nphy_channeltab_rev7_9;
9082 length = ARRAY_SIZE(b43_nphy_channeltab_rev7_9);
9083 break;
9084 case 8:
9085 e = b43_nphy_channeltab_rev8;
9086 length = ARRAY_SIZE(b43_nphy_channeltab_rev8);
9087 break;
9088 default:
9089 B43_WARN_ON(1);
9090 return NULL;
9091 }
35 9092
  36     		for (i = 0; i < ARRAY_SIZE(b43_nphy_channeltab_rev3); i++) {
  37     			e = &(b43_nphy_channeltab_rev3[i]);
     9093		for (i = 0; i < length; i++, e++) {
  38 9094			if (e->freq == freq)
  39 9095				return e;
  40 9096		}
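With the per-revision tables in place, the lookup above first selects the table and its length for dev->phy.rev and then scans it linearly for the requested center frequency, returning NULL for unknown revisions. Below is a minimal stand-alone sketch of that select-then-scan pattern; the struct layout, the table contents and the two handled revisions are made up purely for illustration and are not the driver's data.

#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

/* Minimal stand-in for struct b43_nphy_channeltab_entry_rev3. */
struct chantab_entry {
	uint16_t freq;		/* center frequency in MHz */
	uint16_t phy_regs[6];
};

/* Made-up per-revision tables, just to exercise the selection logic. */
static const struct chantab_entry tab_rev3[] = {
	{ .freq = 5180 }, { .freq = 2412 },
};
static const struct chantab_entry tab_rev4[] = {
	{ .freq = 5180 }, { .freq = 2412 }, { .freq = 2484 },
};

static const struct chantab_entry *get_chantabent(unsigned int phy_rev,
						  uint16_t freq)
{
	const struct chantab_entry *e;
	unsigned int length, i;

	/* Pick the table and its length for this PHY revision. */
	switch (phy_rev) {
	case 3:
		e = tab_rev3;
		length = ARRAY_SIZE(tab_rev3);
		break;
	case 4:
		e = tab_rev4;
		length = ARRAY_SIZE(tab_rev4);
		break;
	default:
		return NULL;	/* unknown revision */
	}

	/* Linear scan for the requested center frequency. */
	for (i = 0; i < length; i++, e++) {
		if (e->freq == freq)
			return e;
	}
	return NULL;
}

int main(void)
{
	const struct chantab_entry *e = get_chantabent(4, 2484);

	printf("2484 MHz on rev4: %s\n", e ? "found" : "not found");
	return 0;
}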
diff --git a/drivers/net/wireless/b43/radio_2056.h b/drivers/net/wireless/b43/radio_2056.h
index fda6dafecb8c..d601f6e7e313 100644
--- a/drivers/net/wireless/b43/radio_2056.h
+++ b/drivers/net/wireless/b43/radio_2056.h
@@ -4,6 +4,9 @@
4 4
   5    5	Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com>
6 6
7 Some parts of the code in this file are derived from the brcm80211
8 driver Copyright (c) 2010 Broadcom Corporation
9
   7   10	This program is free software; you can redistribute it and/or modify
   8   11	it under the terms of the GNU General Public License as published by
   9   12	the Free Software Foundation; either version 2 of the License, or
@@ -28,15 +31,1090 @@
28 31
  29   32	#include "tables_nphy.h"
30 33
34#define B2056_SYN (0x0 << 12)
35#define B2056_TX0 (0x2 << 12)
36#define B2056_TX1 (0x3 << 12)
37#define B2056_RX0 (0x6 << 12)
38#define B2056_RX1 (0x7 << 12)
39#define B2056_ALLTX (0xE << 12)
40#define B2056_ALLRX (0xF << 12)
41
42#define B2056_SYN_RESERVED_ADDR0 0x00
43#define B2056_SYN_IDCODE 0x01
44#define B2056_SYN_RESERVED_ADDR2 0x02
45#define B2056_SYN_RESERVED_ADDR3 0x03
46#define B2056_SYN_RESERVED_ADDR4 0x04
47#define B2056_SYN_RESERVED_ADDR5 0x05
48#define B2056_SYN_RESERVED_ADDR6 0x06
49#define B2056_SYN_RESERVED_ADDR7 0x07
50#define B2056_SYN_COM_CTRL 0x08
51#define B2056_SYN_COM_PU 0x09
52#define B2056_SYN_COM_OVR 0x0A
53#define B2056_SYN_COM_RESET 0x0B
54#define B2056_SYN_COM_RCAL 0x0C
55#define B2056_SYN_COM_RC_RXLPF 0x0D
56#define B2056_SYN_COM_RC_TXLPF 0x0E
57#define B2056_SYN_COM_RC_RXHPF 0x0F
58#define B2056_SYN_RESERVED_ADDR16 0x10
59#define B2056_SYN_RESERVED_ADDR17 0x11
60#define B2056_SYN_RESERVED_ADDR18 0x12
61#define B2056_SYN_RESERVED_ADDR19 0x13
62#define B2056_SYN_RESERVED_ADDR20 0x14
63#define B2056_SYN_RESERVED_ADDR21 0x15
64#define B2056_SYN_RESERVED_ADDR22 0x16
65#define B2056_SYN_RESERVED_ADDR23 0x17
66#define B2056_SYN_RESERVED_ADDR24 0x18
67#define B2056_SYN_RESERVED_ADDR25 0x19
68#define B2056_SYN_RESERVED_ADDR26 0x1A
69#define B2056_SYN_RESERVED_ADDR27 0x1B
70#define B2056_SYN_RESERVED_ADDR28 0x1C
71#define B2056_SYN_RESERVED_ADDR29 0x1D
72#define B2056_SYN_RESERVED_ADDR30 0x1E
73#define B2056_SYN_RESERVED_ADDR31 0x1F
74#define B2056_SYN_GPIO_MASTER1 0x20
75#define B2056_SYN_GPIO_MASTER2 0x21
76#define B2056_SYN_TOPBIAS_MASTER 0x22
77#define B2056_SYN_TOPBIAS_RCAL 0x23
78#define B2056_SYN_AFEREG 0x24
79#define B2056_SYN_TEMPPROCSENSE 0x25
80#define B2056_SYN_TEMPPROCSENSEIDAC 0x26
81#define B2056_SYN_TEMPPROCSENSERCAL 0x27
82#define B2056_SYN_LPO 0x28
83#define B2056_SYN_VDDCAL_MASTER 0x29
84#define B2056_SYN_VDDCAL_IDAC 0x2A
85#define B2056_SYN_VDDCAL_STATUS 0x2B
86#define B2056_SYN_RCAL_MASTER 0x2C
87#define B2056_SYN_RCAL_CODE_OUT 0x2D
88#define B2056_SYN_RCCAL_CTRL0 0x2E
89#define B2056_SYN_RCCAL_CTRL1 0x2F
90#define B2056_SYN_RCCAL_CTRL2 0x30
91#define B2056_SYN_RCCAL_CTRL3 0x31
92#define B2056_SYN_RCCAL_CTRL4 0x32
93#define B2056_SYN_RCCAL_CTRL5 0x33
94#define B2056_SYN_RCCAL_CTRL6 0x34
95#define B2056_SYN_RCCAL_CTRL7 0x35
96#define B2056_SYN_RCCAL_CTRL8 0x36
97#define B2056_SYN_RCCAL_CTRL9 0x37
98#define B2056_SYN_RCCAL_CTRL10 0x38
99#define B2056_SYN_RCCAL_CTRL11 0x39
100#define B2056_SYN_ZCAL_SPARE1 0x3A
101#define B2056_SYN_ZCAL_SPARE2 0x3B
102#define B2056_SYN_PLL_MAST1 0x3C
103#define B2056_SYN_PLL_MAST2 0x3D
104#define B2056_SYN_PLL_MAST3 0x3E
105#define B2056_SYN_PLL_BIAS_RESET 0x3F
106#define B2056_SYN_PLL_XTAL0 0x40
107#define B2056_SYN_PLL_XTAL1 0x41
108#define B2056_SYN_PLL_XTAL3 0x42
109#define B2056_SYN_PLL_XTAL4 0x43
110#define B2056_SYN_PLL_XTAL5 0x44
111#define B2056_SYN_PLL_XTAL6 0x45
112#define B2056_SYN_PLL_REFDIV 0x46
113#define B2056_SYN_PLL_PFD 0x47
114#define B2056_SYN_PLL_CP1 0x48
115#define B2056_SYN_PLL_CP2 0x49
116#define B2056_SYN_PLL_CP3 0x4A
117#define B2056_SYN_PLL_LOOPFILTER1 0x4B
118#define B2056_SYN_PLL_LOOPFILTER2 0x4C
119#define B2056_SYN_PLL_LOOPFILTER3 0x4D
120#define B2056_SYN_PLL_LOOPFILTER4 0x4E
121#define B2056_SYN_PLL_LOOPFILTER5 0x4F
122#define B2056_SYN_PLL_MMD1 0x50
123#define B2056_SYN_PLL_MMD2 0x51
124#define B2056_SYN_PLL_VCO1 0x52
125#define B2056_SYN_PLL_VCO2 0x53
126#define B2056_SYN_PLL_MONITOR1 0x54
127#define B2056_SYN_PLL_MONITOR2 0x55
128#define B2056_SYN_PLL_VCOCAL1 0x56
129#define B2056_SYN_PLL_VCOCAL2 0x57
130#define B2056_SYN_PLL_VCOCAL4 0x58
131#define B2056_SYN_PLL_VCOCAL5 0x59
132#define B2056_SYN_PLL_VCOCAL6 0x5A
133#define B2056_SYN_PLL_VCOCAL7 0x5B
134#define B2056_SYN_PLL_VCOCAL8 0x5C
135#define B2056_SYN_PLL_VCOCAL9 0x5D
136#define B2056_SYN_PLL_VCOCAL10 0x5E
137#define B2056_SYN_PLL_VCOCAL11 0x5F
138#define B2056_SYN_PLL_VCOCAL12 0x60
139#define B2056_SYN_PLL_VCOCAL13 0x61
140#define B2056_SYN_PLL_VREG 0x62
141#define B2056_SYN_PLL_STATUS1 0x63
142#define B2056_SYN_PLL_STATUS2 0x64
143#define B2056_SYN_PLL_STATUS3 0x65
144#define B2056_SYN_LOGEN_PU0 0x66
145#define B2056_SYN_LOGEN_PU1 0x67
146#define B2056_SYN_LOGEN_PU2 0x68
147#define B2056_SYN_LOGEN_PU3 0x69
148#define B2056_SYN_LOGEN_PU5 0x6A
149#define B2056_SYN_LOGEN_PU6 0x6B
150#define B2056_SYN_LOGEN_PU7 0x6C
151#define B2056_SYN_LOGEN_PU8 0x6D
152#define B2056_SYN_LOGEN_BIAS_RESET 0x6E
153#define B2056_SYN_LOGEN_RCCR1 0x6F
154#define B2056_SYN_LOGEN_VCOBUF1 0x70
155#define B2056_SYN_LOGEN_MIXER1 0x71
156#define B2056_SYN_LOGEN_MIXER2 0x72
157#define B2056_SYN_LOGEN_BUF1 0x73
158#define B2056_SYN_LOGENBUF2 0x74
159#define B2056_SYN_LOGEN_BUF3 0x75
160#define B2056_SYN_LOGEN_BUF4 0x76
161#define B2056_SYN_LOGEN_DIV1 0x77
162#define B2056_SYN_LOGEN_DIV2 0x78
163#define B2056_SYN_LOGEN_DIV3 0x79
164#define B2056_SYN_LOGEN_ACL1 0x7A
165#define B2056_SYN_LOGEN_ACL2 0x7B
166#define B2056_SYN_LOGEN_ACL3 0x7C
167#define B2056_SYN_LOGEN_ACL4 0x7D
168#define B2056_SYN_LOGEN_ACL5 0x7E
169#define B2056_SYN_LOGEN_ACL6 0x7F
170#define B2056_SYN_LOGEN_ACLOUT 0x80
171#define B2056_SYN_LOGEN_ACLCAL1 0x81
172#define B2056_SYN_LOGEN_ACLCAL2 0x82
173#define B2056_SYN_LOGEN_ACLCAL3 0x83
174#define B2056_SYN_CALEN 0x84
175#define B2056_SYN_LOGEN_PEAKDET1 0x85
176#define B2056_SYN_LOGEN_CORE_ACL_OVR 0x86
177#define B2056_SYN_LOGEN_RX_DIFF_ACL_OVR 0x87
178#define B2056_SYN_LOGEN_TX_DIFF_ACL_OVR 0x88
179#define B2056_SYN_LOGEN_RX_CMOS_ACL_OVR 0x89
180#define B2056_SYN_LOGEN_TX_CMOS_ACL_OVR 0x8A
181#define B2056_SYN_LOGEN_VCOBUF2 0x8B
182#define B2056_SYN_LOGEN_MIXER3 0x8C
183#define B2056_SYN_LOGEN_BUF5 0x8D
184#define B2056_SYN_LOGEN_BUF6 0x8E
185#define B2056_SYN_LOGEN_CBUFRX1 0x8F
186#define B2056_SYN_LOGEN_CBUFRX2 0x90
187#define B2056_SYN_LOGEN_CBUFRX3 0x91
188#define B2056_SYN_LOGEN_CBUFRX4 0x92
189#define B2056_SYN_LOGEN_CBUFTX1 0x93
190#define B2056_SYN_LOGEN_CBUFTX2 0x94
191#define B2056_SYN_LOGEN_CBUFTX3 0x95
192#define B2056_SYN_LOGEN_CBUFTX4 0x96
193#define B2056_SYN_LOGEN_CMOSRX1 0x97
194#define B2056_SYN_LOGEN_CMOSRX2 0x98
195#define B2056_SYN_LOGEN_CMOSRX3 0x99
196#define B2056_SYN_LOGEN_CMOSRX4 0x9A
197#define B2056_SYN_LOGEN_CMOSTX1 0x9B
198#define B2056_SYN_LOGEN_CMOSTX2 0x9C
199#define B2056_SYN_LOGEN_CMOSTX3 0x9D
200#define B2056_SYN_LOGEN_CMOSTX4 0x9E
201#define B2056_SYN_LOGEN_VCOBUF2_OVRVAL 0x9F
202#define B2056_SYN_LOGEN_MIXER3_OVRVAL 0xA0
203#define B2056_SYN_LOGEN_BUF5_OVRVAL 0xA1
204#define B2056_SYN_LOGEN_BUF6_OVRVAL 0xA2
205#define B2056_SYN_LOGEN_CBUFRX1_OVRVAL 0xA3
206#define B2056_SYN_LOGEN_CBUFRX2_OVRVAL 0xA4
207#define B2056_SYN_LOGEN_CBUFRX3_OVRVAL 0xA5
208#define B2056_SYN_LOGEN_CBUFRX4_OVRVAL 0xA6
209#define B2056_SYN_LOGEN_CBUFTX1_OVRVAL 0xA7
210#define B2056_SYN_LOGEN_CBUFTX2_OVRVAL 0xA8
211#define B2056_SYN_LOGEN_CBUFTX3_OVRVAL 0xA9
212#define B2056_SYN_LOGEN_CBUFTX4_OVRVAL 0xAA
213#define B2056_SYN_LOGEN_CMOSRX1_OVRVAL 0xAB
214#define B2056_SYN_LOGEN_CMOSRX2_OVRVAL 0xAC
215#define B2056_SYN_LOGEN_CMOSRX3_OVRVAL 0xAD
216#define B2056_SYN_LOGEN_CMOSRX4_OVRVAL 0xAE
217#define B2056_SYN_LOGEN_CMOSTX1_OVRVAL 0xAF
218#define B2056_SYN_LOGEN_CMOSTX2_OVRVAL 0xB0
219#define B2056_SYN_LOGEN_CMOSTX3_OVRVAL 0xB1
220#define B2056_SYN_LOGEN_CMOSTX4_OVRVAL 0xB2
221#define B2056_SYN_LOGEN_ACL_WAITCNT 0xB3
222#define B2056_SYN_LOGEN_CORE_CALVALID 0xB4
223#define B2056_SYN_LOGEN_RX_CMOS_CALVALID 0xB5
224#define B2056_SYN_LOGEN_TX_CMOS_VALID 0xB6
225
226#define B2056_TX_RESERVED_ADDR0 0x00
227#define B2056_TX_IDCODE 0x01
228#define B2056_TX_RESERVED_ADDR2 0x02
229#define B2056_TX_RESERVED_ADDR3 0x03
230#define B2056_TX_RESERVED_ADDR4 0x04
231#define B2056_TX_RESERVED_ADDR5 0x05
232#define B2056_TX_RESERVED_ADDR6 0x06
233#define B2056_TX_RESERVED_ADDR7 0x07
234#define B2056_TX_COM_CTRL 0x08
235#define B2056_TX_COM_PU 0x09
236#define B2056_TX_COM_OVR 0x0A
237#define B2056_TX_COM_RESET 0x0B
238#define B2056_TX_COM_RCAL 0x0C
239#define B2056_TX_COM_RC_RXLPF 0x0D
240#define B2056_TX_COM_RC_TXLPF 0x0E
241#define B2056_TX_COM_RC_RXHPF 0x0F
242#define B2056_TX_RESERVED_ADDR16 0x10
243#define B2056_TX_RESERVED_ADDR17 0x11
244#define B2056_TX_RESERVED_ADDR18 0x12
245#define B2056_TX_RESERVED_ADDR19 0x13
246#define B2056_TX_RESERVED_ADDR20 0x14
247#define B2056_TX_RESERVED_ADDR21 0x15
248#define B2056_TX_RESERVED_ADDR22 0x16
249#define B2056_TX_RESERVED_ADDR23 0x17
250#define B2056_TX_RESERVED_ADDR24 0x18
251#define B2056_TX_RESERVED_ADDR25 0x19
252#define B2056_TX_RESERVED_ADDR26 0x1A
253#define B2056_TX_RESERVED_ADDR27 0x1B
254#define B2056_TX_RESERVED_ADDR28 0x1C
255#define B2056_TX_RESERVED_ADDR29 0x1D
256#define B2056_TX_RESERVED_ADDR30 0x1E
257#define B2056_TX_RESERVED_ADDR31 0x1F
258#define B2056_TX_IQCAL_GAIN_BW 0x20
259#define B2056_TX_LOFT_FINE_I 0x21
260#define B2056_TX_LOFT_FINE_Q 0x22
261#define B2056_TX_LOFT_COARSE_I 0x23
262#define B2056_TX_LOFT_COARSE_Q 0x24
263#define B2056_TX_TX_COM_MASTER1 0x25
264#define B2056_TX_TX_COM_MASTER2 0x26
265#define B2056_TX_RXIQCAL_TXMUX 0x27
266#define B2056_TX_TX_SSI_MASTER 0x28
267#define B2056_TX_IQCAL_VCM_HG 0x29
268#define B2056_TX_IQCAL_IDAC 0x2A
269#define B2056_TX_TSSI_VCM 0x2B
270#define B2056_TX_TX_AMP_DET 0x2C
271#define B2056_TX_TX_SSI_MUX 0x2D
272#define B2056_TX_TSSIA 0x2E
273#define B2056_TX_TSSIG 0x2F
274#define B2056_TX_TSSI_MISC1 0x30
275#define B2056_TX_TSSI_MISC2 0x31
276#define B2056_TX_TSSI_MISC3 0x32
277#define B2056_TX_PA_SPARE1 0x33
278#define B2056_TX_PA_SPARE2 0x34
279#define B2056_TX_INTPAA_MASTER 0x35
280#define B2056_TX_INTPAA_GAIN 0x36
281#define B2056_TX_INTPAA_BOOST_TUNE 0x37
282#define B2056_TX_INTPAA_IAUX_STAT 0x38
283#define B2056_TX_INTPAA_IAUX_DYN 0x39
284#define B2056_TX_INTPAA_IMAIN_STAT 0x3A
285#define B2056_TX_INTPAA_IMAIN_DYN 0x3B
286#define B2056_TX_INTPAA_CASCBIAS 0x3C
287#define B2056_TX_INTPAA_PASLOPE 0x3D
288#define B2056_TX_INTPAA_PA_MISC 0x3E
289#define B2056_TX_INTPAG_MASTER 0x3F
290#define B2056_TX_INTPAG_GAIN 0x40
291#define B2056_TX_INTPAG_BOOST_TUNE 0x41
292#define B2056_TX_INTPAG_IAUX_STAT 0x42
293#define B2056_TX_INTPAG_IAUX_DYN 0x43
294#define B2056_TX_INTPAG_IMAIN_STAT 0x44
295#define B2056_TX_INTPAG_IMAIN_DYN 0x45
296#define B2056_TX_INTPAG_CASCBIAS 0x46
297#define B2056_TX_INTPAG_PASLOPE 0x47
298#define B2056_TX_INTPAG_PA_MISC 0x48
299#define B2056_TX_PADA_MASTER 0x49
300#define B2056_TX_PADA_IDAC 0x4A
301#define B2056_TX_PADA_CASCBIAS 0x4B
302#define B2056_TX_PADA_GAIN 0x4C
303#define B2056_TX_PADA_BOOST_TUNE 0x4D
304#define B2056_TX_PADA_SLOPE 0x4E
305#define B2056_TX_PADG_MASTER 0x4F
306#define B2056_TX_PADG_IDAC 0x50
307#define B2056_TX_PADG_CASCBIAS 0x51
308#define B2056_TX_PADG_GAIN 0x52
309#define B2056_TX_PADG_BOOST_TUNE 0x53
310#define B2056_TX_PADG_SLOPE 0x54
311#define B2056_TX_PGAA_MASTER 0x55
312#define B2056_TX_PGAA_IDAC 0x56
313#define B2056_TX_PGAA_GAIN 0x57
314#define B2056_TX_PGAA_BOOST_TUNE 0x58
315#define B2056_TX_PGAA_SLOPE 0x59
316#define B2056_TX_PGAA_MISC 0x5A
317#define B2056_TX_PGAG_MASTER 0x5B
318#define B2056_TX_PGAG_IDAC 0x5C
319#define B2056_TX_PGAG_GAIN 0x5D
320#define B2056_TX_PGAG_BOOST_TUNE 0x5E
321#define B2056_TX_PGAG_SLOPE 0x5F
322#define B2056_TX_PGAG_MISC 0x60
323#define B2056_TX_MIXA_MASTER 0x61
324#define B2056_TX_MIXA_BOOST_TUNE 0x62
325#define B2056_TX_MIXG 0x63
326#define B2056_TX_MIXG_BOOST_TUNE 0x64
327#define B2056_TX_BB_GM_MASTER 0x65
328#define B2056_TX_GMBB_GM 0x66
329#define B2056_TX_GMBB_IDAC 0x67
330#define B2056_TX_TXLPF_MASTER 0x68
331#define B2056_TX_TXLPF_RCCAL 0x69
332#define B2056_TX_TXLPF_RCCAL_OFF0 0x6A
333#define B2056_TX_TXLPF_RCCAL_OFF1 0x6B
334#define B2056_TX_TXLPF_RCCAL_OFF2 0x6C
335#define B2056_TX_TXLPF_RCCAL_OFF3 0x6D
336#define B2056_TX_TXLPF_RCCAL_OFF4 0x6E
337#define B2056_TX_TXLPF_RCCAL_OFF5 0x6F
338#define B2056_TX_TXLPF_RCCAL_OFF6 0x70
339#define B2056_TX_TXLPF_BW 0x71
340#define B2056_TX_TXLPF_GAIN 0x72
341#define B2056_TX_TXLPF_IDAC 0x73
342#define B2056_TX_TXLPF_IDAC_0 0x74
343#define B2056_TX_TXLPF_IDAC_1 0x75
344#define B2056_TX_TXLPF_IDAC_2 0x76
345#define B2056_TX_TXLPF_IDAC_3 0x77
346#define B2056_TX_TXLPF_IDAC_4 0x78
347#define B2056_TX_TXLPF_IDAC_5 0x79
348#define B2056_TX_TXLPF_IDAC_6 0x7A
349#define B2056_TX_TXLPF_OPAMP_IDAC 0x7B
350#define B2056_TX_TXLPF_MISC 0x7C
351#define B2056_TX_TXSPARE1 0x7D
352#define B2056_TX_TXSPARE2 0x7E
353#define B2056_TX_TXSPARE3 0x7F
354#define B2056_TX_TXSPARE4 0x80
355#define B2056_TX_TXSPARE5 0x81
356#define B2056_TX_TXSPARE6 0x82
357#define B2056_TX_TXSPARE7 0x83
358#define B2056_TX_TXSPARE8 0x84
359#define B2056_TX_TXSPARE9 0x85
360#define B2056_TX_TXSPARE10 0x86
361#define B2056_TX_TXSPARE11 0x87
362#define B2056_TX_TXSPARE12 0x88
363#define B2056_TX_TXSPARE13 0x89
364#define B2056_TX_TXSPARE14 0x8A
365#define B2056_TX_TXSPARE15 0x8B
366#define B2056_TX_TXSPARE16 0x8C
367#define B2056_TX_STATUS_INTPA_GAIN 0x8D
368#define B2056_TX_STATUS_PAD_GAIN 0x8E
369#define B2056_TX_STATUS_PGA_GAIN 0x8F
370#define B2056_TX_STATUS_GM_TXLPF_GAIN 0x90
371#define B2056_TX_STATUS_TXLPF_BW 0x91
372#define B2056_TX_STATUS_TXLPF_RC 0x92
373#define B2056_TX_GMBB_IDAC0 0x93
374#define B2056_TX_GMBB_IDAC1 0x94
375#define B2056_TX_GMBB_IDAC2 0x95
376#define B2056_TX_GMBB_IDAC3 0x96
377#define B2056_TX_GMBB_IDAC4 0x97
378#define B2056_TX_GMBB_IDAC5 0x98
379#define B2056_TX_GMBB_IDAC6 0x99
380#define B2056_TX_GMBB_IDAC7 0x9A
381
382#define B2056_RX_RESERVED_ADDR0 0x00
383#define B2056_RX_IDCODE 0x01
384#define B2056_RX_RESERVED_ADDR2 0x02
385#define B2056_RX_RESERVED_ADDR3 0x03
386#define B2056_RX_RESERVED_ADDR4 0x04
387#define B2056_RX_RESERVED_ADDR5 0x05
388#define B2056_RX_RESERVED_ADDR6 0x06
389#define B2056_RX_RESERVED_ADDR7 0x07
390#define B2056_RX_COM_CTRL 0x08
391#define B2056_RX_COM_PU 0x09
392#define B2056_RX_COM_OVR 0x0A
393#define B2056_RX_COM_RESET 0x0B
394#define B2056_RX_COM_RCAL 0x0C
395#define B2056_RX_COM_RC_RXLPF 0x0D
396#define B2056_RX_COM_RC_TXLPF 0x0E
397#define B2056_RX_COM_RC_RXHPF 0x0F
398#define B2056_RX_RESERVED_ADDR16 0x10
399#define B2056_RX_RESERVED_ADDR17 0x11
400#define B2056_RX_RESERVED_ADDR18 0x12
401#define B2056_RX_RESERVED_ADDR19 0x13
402#define B2056_RX_RESERVED_ADDR20 0x14
403#define B2056_RX_RESERVED_ADDR21 0x15
404#define B2056_RX_RESERVED_ADDR22 0x16
405#define B2056_RX_RESERVED_ADDR23 0x17
406#define B2056_RX_RESERVED_ADDR24 0x18
407#define B2056_RX_RESERVED_ADDR25 0x19
408#define B2056_RX_RESERVED_ADDR26 0x1A
409#define B2056_RX_RESERVED_ADDR27 0x1B
410#define B2056_RX_RESERVED_ADDR28 0x1C
411#define B2056_RX_RESERVED_ADDR29 0x1D
412#define B2056_RX_RESERVED_ADDR30 0x1E
413#define B2056_RX_RESERVED_ADDR31 0x1F
414#define B2056_RX_RXIQCAL_RXMUX 0x20
415#define B2056_RX_RSSI_PU 0x21
416#define B2056_RX_RSSI_SEL 0x22
417#define B2056_RX_RSSI_GAIN 0x23
418#define B2056_RX_RSSI_NB_IDAC 0x24
419#define B2056_RX_RSSI_WB2I_IDAC_1 0x25
420#define B2056_RX_RSSI_WB2I_IDAC_2 0x26
421#define B2056_RX_RSSI_WB2Q_IDAC_1 0x27
422#define B2056_RX_RSSI_WB2Q_IDAC_2 0x28
423#define B2056_RX_RSSI_POLE 0x29
424#define B2056_RX_RSSI_WB1_IDAC 0x2A
425#define B2056_RX_RSSI_MISC 0x2B
426#define B2056_RX_LNAA_MASTER 0x2C
427#define B2056_RX_LNAA_TUNE 0x2D
428#define B2056_RX_LNAA_GAIN 0x2E
429#define B2056_RX_LNA_A_SLOPE 0x2F
430#define B2056_RX_BIASPOLE_LNAA1_IDAC 0x30
431#define B2056_RX_LNAA2_IDAC 0x31
432#define B2056_RX_LNA1A_MISC 0x32
433#define B2056_RX_LNAG_MASTER 0x33
434#define B2056_RX_LNAG_TUNE 0x34
435#define B2056_RX_LNAG_GAIN 0x35
436#define B2056_RX_LNA_G_SLOPE 0x36
437#define B2056_RX_BIASPOLE_LNAG1_IDAC 0x37
438#define B2056_RX_LNAG2_IDAC 0x38
439#define B2056_RX_LNA1G_MISC 0x39
440#define B2056_RX_MIXA_MASTER 0x3A
441#define B2056_RX_MIXA_VCM 0x3B
442#define B2056_RX_MIXA_CTRLPTAT 0x3C
443#define B2056_RX_MIXA_LOB_BIAS 0x3D
444#define B2056_RX_MIXA_CORE_IDAC 0x3E
445#define B2056_RX_MIXA_CMFB_IDAC 0x3F
446#define B2056_RX_MIXA_BIAS_AUX 0x40
447#define B2056_RX_MIXA_BIAS_MAIN 0x41
448#define B2056_RX_MIXA_BIAS_MISC 0x42
449#define B2056_RX_MIXA_MAST_BIAS 0x43
450#define B2056_RX_MIXG_MASTER 0x44
451#define B2056_RX_MIXG_VCM 0x45
452#define B2056_RX_MIXG_CTRLPTAT 0x46
453#define B2056_RX_MIXG_LOB_BIAS 0x47
454#define B2056_RX_MIXG_CORE_IDAC 0x48
455#define B2056_RX_MIXG_CMFB_IDAC 0x49
456#define B2056_RX_MIXG_BIAS_AUX 0x4A
457#define B2056_RX_MIXG_BIAS_MAIN 0x4B
458#define B2056_RX_MIXG_BIAS_MISC 0x4C
459#define B2056_RX_MIXG_MAST_BIAS 0x4D
460#define B2056_RX_TIA_MASTER 0x4E
461#define B2056_RX_TIA_IOPAMP 0x4F
462#define B2056_RX_TIA_QOPAMP 0x50
463#define B2056_RX_TIA_IMISC 0x51
464#define B2056_RX_TIA_QMISC 0x52
465#define B2056_RX_TIA_GAIN 0x53
466#define B2056_RX_TIA_SPARE1 0x54
467#define B2056_RX_TIA_SPARE2 0x55
468#define B2056_RX_BB_LPF_MASTER 0x56
469#define B2056_RX_AACI_MASTER 0x57
470#define B2056_RX_RXLPF_IDAC 0x58
471#define B2056_RX_RXLPF_OPAMPBIAS_LOWQ 0x59
472#define B2056_RX_RXLPF_OPAMPBIAS_HIGHQ 0x5A
473#define B2056_RX_RXLPF_BIAS_DCCANCEL 0x5B
474#define B2056_RX_RXLPF_OUTVCM 0x5C
475#define B2056_RX_RXLPF_INVCM_BODY 0x5D
476#define B2056_RX_RXLPF_CC_OP 0x5E
477#define B2056_RX_RXLPF_GAIN 0x5F
478#define B2056_RX_RXLPF_Q_BW 0x60
479#define B2056_RX_RXLPF_HP_CORNER_BW 0x61
480#define B2056_RX_RXLPF_RCCAL_HPC 0x62
481#define B2056_RX_RXHPF_OFF0 0x63
482#define B2056_RX_RXHPF_OFF1 0x64
483#define B2056_RX_RXHPF_OFF2 0x65
484#define B2056_RX_RXHPF_OFF3 0x66
485#define B2056_RX_RXHPF_OFF4 0x67
486#define B2056_RX_RXHPF_OFF5 0x68
487#define B2056_RX_RXHPF_OFF6 0x69
488#define B2056_RX_RXHPF_OFF7 0x6A
489#define B2056_RX_RXLPF_RCCAL_LPC 0x6B
490#define B2056_RX_RXLPF_OFF_0 0x6C
491#define B2056_RX_RXLPF_OFF_1 0x6D
492#define B2056_RX_RXLPF_OFF_2 0x6E
493#define B2056_RX_RXLPF_OFF_3 0x6F
494#define B2056_RX_RXLPF_OFF_4 0x70
495#define B2056_RX_UNUSED 0x71
496#define B2056_RX_VGA_MASTER 0x72
497#define B2056_RX_VGA_BIAS 0x73
498#define B2056_RX_VGA_BIAS_DCCANCEL 0x74
499#define B2056_RX_VGA_GAIN 0x75
500#define B2056_RX_VGA_HP_CORNER_BW 0x76
501#define B2056_RX_VGABUF_BIAS 0x77
502#define B2056_RX_VGABUF_GAIN_BW 0x78
503#define B2056_RX_TXFBMIX_A 0x79
504#define B2056_RX_TXFBMIX_G 0x7A
505#define B2056_RX_RXSPARE1 0x7B
506#define B2056_RX_RXSPARE2 0x7C
507#define B2056_RX_RXSPARE3 0x7D
508#define B2056_RX_RXSPARE4 0x7E
509#define B2056_RX_RXSPARE5 0x7F
510#define B2056_RX_RXSPARE6 0x80
511#define B2056_RX_RXSPARE7 0x81
512#define B2056_RX_RXSPARE8 0x82
513#define B2056_RX_RXSPARE9 0x83
514#define B2056_RX_RXSPARE10 0x84
515#define B2056_RX_RXSPARE11 0x85
516#define B2056_RX_RXSPARE12 0x86
517#define B2056_RX_RXSPARE13 0x87
518#define B2056_RX_RXSPARE14 0x88
519#define B2056_RX_RXSPARE15 0x89
520#define B2056_RX_RXSPARE16 0x8A
521#define B2056_RX_STATUS_LNAA_GAIN 0x8B
522#define B2056_RX_STATUS_LNAG_GAIN 0x8C
523#define B2056_RX_STATUS_MIXTIA_GAIN 0x8D
524#define B2056_RX_STATUS_RXLPF_GAIN 0x8E
525#define B2056_RX_STATUS_VGA_BUF_GAIN 0x8F
526#define B2056_RX_STATUS_RXLPF_Q 0x90
527#define B2056_RX_STATUS_RXLPF_BUF_BW 0x91
528#define B2056_RX_STATUS_RXLPF_VGA_HPC 0x92
529#define B2056_RX_STATUS_RXLPF_RC 0x93
530#define B2056_RX_STATUS_HPC_RC 0x94
531
532#define B2056_LNA1_A_PU 0x01
533#define B2056_LNA2_A_PU 0x02
534#define B2056_LNA1_G_PU 0x01
535#define B2056_LNA2_G_PU 0x02
536#define B2056_MIXA_PU_I 0x01
537#define B2056_MIXA_PU_Q 0x02
538#define B2056_MIXA_PU_GM 0x10
539#define B2056_MIXG_PU_I 0x01
540#define B2056_MIXG_PU_Q 0x02
541#define B2056_MIXG_PU_GM 0x10
542#define B2056_TIA_PU 0x01
543#define B2056_BB_LPF_PU 0x20
544#define B2056_W1_PU 0x02
545#define B2056_W2_PU 0x04
546#define B2056_NB_PU 0x08
547#define B2056_RSSI_W1_SEL 0x02
548#define B2056_RSSI_W2_SEL 0x04
549#define B2056_RSSI_NB_SEL 0x08
550#define B2056_VCM_MASK 0x1C
551#define B2056_RSSI_VCM_SHIFT 0x02
552
553#define B2056_SYN (0x0 << 12)
554#define B2056_TX0 (0x2 << 12)
555#define B2056_TX1 (0x3 << 12)
556#define B2056_RX0 (0x6 << 12)
557#define B2056_RX1 (0x7 << 12)
558#define B2056_ALLTX (0xE << 12)
559#define B2056_ALLRX (0xF << 12)
560
561#define B2056_SYN_RESERVED_ADDR0 0x00
562#define B2056_SYN_IDCODE 0x01
563#define B2056_SYN_RESERVED_ADDR2 0x02
564#define B2056_SYN_RESERVED_ADDR3 0x03
565#define B2056_SYN_RESERVED_ADDR4 0x04
566#define B2056_SYN_RESERVED_ADDR5 0x05
567#define B2056_SYN_RESERVED_ADDR6 0x06
568#define B2056_SYN_RESERVED_ADDR7 0x07
569#define B2056_SYN_COM_CTRL 0x08
570#define B2056_SYN_COM_PU 0x09
571#define B2056_SYN_COM_OVR 0x0A
572#define B2056_SYN_COM_RESET 0x0B
573#define B2056_SYN_COM_RCAL 0x0C
574#define B2056_SYN_COM_RC_RXLPF 0x0D
575#define B2056_SYN_COM_RC_TXLPF 0x0E
576#define B2056_SYN_COM_RC_RXHPF 0x0F
577#define B2056_SYN_RESERVED_ADDR16 0x10
578#define B2056_SYN_RESERVED_ADDR17 0x11
579#define B2056_SYN_RESERVED_ADDR18 0x12
580#define B2056_SYN_RESERVED_ADDR19 0x13
581#define B2056_SYN_RESERVED_ADDR20 0x14
582#define B2056_SYN_RESERVED_ADDR21 0x15
583#define B2056_SYN_RESERVED_ADDR22 0x16
584#define B2056_SYN_RESERVED_ADDR23 0x17
585#define B2056_SYN_RESERVED_ADDR24 0x18
586#define B2056_SYN_RESERVED_ADDR25 0x19
587#define B2056_SYN_RESERVED_ADDR26 0x1A
588#define B2056_SYN_RESERVED_ADDR27 0x1B
589#define B2056_SYN_RESERVED_ADDR28 0x1C
590#define B2056_SYN_RESERVED_ADDR29 0x1D
591#define B2056_SYN_RESERVED_ADDR30 0x1E
592#define B2056_SYN_RESERVED_ADDR31 0x1F
593#define B2056_SYN_GPIO_MASTER1 0x20
594#define B2056_SYN_GPIO_MASTER2 0x21
595#define B2056_SYN_TOPBIAS_MASTER 0x22
596#define B2056_SYN_TOPBIAS_RCAL 0x23
597#define B2056_SYN_AFEREG 0x24
598#define B2056_SYN_TEMPPROCSENSE 0x25
599#define B2056_SYN_TEMPPROCSENSEIDAC 0x26
600#define B2056_SYN_TEMPPROCSENSERCAL 0x27
601#define B2056_SYN_LPO 0x28
602#define B2056_SYN_VDDCAL_MASTER 0x29
603#define B2056_SYN_VDDCAL_IDAC 0x2A
604#define B2056_SYN_VDDCAL_STATUS 0x2B
605#define B2056_SYN_RCAL_MASTER 0x2C
606#define B2056_SYN_RCAL_CODE_OUT 0x2D
607#define B2056_SYN_RCCAL_CTRL0 0x2E
608#define B2056_SYN_RCCAL_CTRL1 0x2F
609#define B2056_SYN_RCCAL_CTRL2 0x30
610#define B2056_SYN_RCCAL_CTRL3 0x31
611#define B2056_SYN_RCCAL_CTRL4 0x32
612#define B2056_SYN_RCCAL_CTRL5 0x33
613#define B2056_SYN_RCCAL_CTRL6 0x34
614#define B2056_SYN_RCCAL_CTRL7 0x35
615#define B2056_SYN_RCCAL_CTRL8 0x36
616#define B2056_SYN_RCCAL_CTRL9 0x37
617#define B2056_SYN_RCCAL_CTRL10 0x38
618#define B2056_SYN_RCCAL_CTRL11 0x39
619#define B2056_SYN_ZCAL_SPARE1 0x3A
620#define B2056_SYN_ZCAL_SPARE2 0x3B
621#define B2056_SYN_PLL_MAST1 0x3C
622#define B2056_SYN_PLL_MAST2 0x3D
623#define B2056_SYN_PLL_MAST3 0x3E
624#define B2056_SYN_PLL_BIAS_RESET 0x3F
625#define B2056_SYN_PLL_XTAL0 0x40
626#define B2056_SYN_PLL_XTAL1 0x41
627#define B2056_SYN_PLL_XTAL3 0x42
628#define B2056_SYN_PLL_XTAL4 0x43
629#define B2056_SYN_PLL_XTAL5 0x44
630#define B2056_SYN_PLL_XTAL6 0x45
631#define B2056_SYN_PLL_REFDIV 0x46
632#define B2056_SYN_PLL_PFD 0x47
633#define B2056_SYN_PLL_CP1 0x48
634#define B2056_SYN_PLL_CP2 0x49
635#define B2056_SYN_PLL_CP3 0x4A
636#define B2056_SYN_PLL_LOOPFILTER1 0x4B
637#define B2056_SYN_PLL_LOOPFILTER2 0x4C
638#define B2056_SYN_PLL_LOOPFILTER3 0x4D
639#define B2056_SYN_PLL_LOOPFILTER4 0x4E
640#define B2056_SYN_PLL_LOOPFILTER5 0x4F
641#define B2056_SYN_PLL_MMD1 0x50
642#define B2056_SYN_PLL_MMD2 0x51
643#define B2056_SYN_PLL_VCO1 0x52
644#define B2056_SYN_PLL_VCO2 0x53
645#define B2056_SYN_PLL_MONITOR1 0x54
646#define B2056_SYN_PLL_MONITOR2 0x55
647#define B2056_SYN_PLL_VCOCAL1 0x56
648#define B2056_SYN_PLL_VCOCAL2 0x57
649#define B2056_SYN_PLL_VCOCAL4 0x58
650#define B2056_SYN_PLL_VCOCAL5 0x59
651#define B2056_SYN_PLL_VCOCAL6 0x5A
652#define B2056_SYN_PLL_VCOCAL7 0x5B
653#define B2056_SYN_PLL_VCOCAL8 0x5C
654#define B2056_SYN_PLL_VCOCAL9 0x5D
655#define B2056_SYN_PLL_VCOCAL10 0x5E
656#define B2056_SYN_PLL_VCOCAL11 0x5F
657#define B2056_SYN_PLL_VCOCAL12 0x60
658#define B2056_SYN_PLL_VCOCAL13 0x61
659#define B2056_SYN_PLL_VREG 0x62
660#define B2056_SYN_PLL_STATUS1 0x63
661#define B2056_SYN_PLL_STATUS2 0x64
662#define B2056_SYN_PLL_STATUS3 0x65
663#define B2056_SYN_LOGEN_PU0 0x66
664#define B2056_SYN_LOGEN_PU1 0x67
665#define B2056_SYN_LOGEN_PU2 0x68
666#define B2056_SYN_LOGEN_PU3 0x69
667#define B2056_SYN_LOGEN_PU5 0x6A
668#define B2056_SYN_LOGEN_PU6 0x6B
669#define B2056_SYN_LOGEN_PU7 0x6C
670#define B2056_SYN_LOGEN_PU8 0x6D
671#define B2056_SYN_LOGEN_BIAS_RESET 0x6E
672#define B2056_SYN_LOGEN_RCCR1 0x6F
673#define B2056_SYN_LOGEN_VCOBUF1 0x70
674#define B2056_SYN_LOGEN_MIXER1 0x71
675#define B2056_SYN_LOGEN_MIXER2 0x72
676#define B2056_SYN_LOGEN_BUF1 0x73
677#define B2056_SYN_LOGENBUF2 0x74
678#define B2056_SYN_LOGEN_BUF3 0x75
679#define B2056_SYN_LOGEN_BUF4 0x76
680#define B2056_SYN_LOGEN_DIV1 0x77
681#define B2056_SYN_LOGEN_DIV2 0x78
682#define B2056_SYN_LOGEN_DIV3 0x79
683#define B2056_SYN_LOGEN_ACL1 0x7A
684#define B2056_SYN_LOGEN_ACL2 0x7B
685#define B2056_SYN_LOGEN_ACL3 0x7C
686#define B2056_SYN_LOGEN_ACL4 0x7D
687#define B2056_SYN_LOGEN_ACL5 0x7E
688#define B2056_SYN_LOGEN_ACL6 0x7F
689#define B2056_SYN_LOGEN_ACLOUT 0x80
690#define B2056_SYN_LOGEN_ACLCAL1 0x81
691#define B2056_SYN_LOGEN_ACLCAL2 0x82
692#define B2056_SYN_LOGEN_ACLCAL3 0x83
693#define B2056_SYN_CALEN 0x84
694#define B2056_SYN_LOGEN_PEAKDET1 0x85
695#define B2056_SYN_LOGEN_CORE_ACL_OVR 0x86
696#define B2056_SYN_LOGEN_RX_DIFF_ACL_OVR 0x87
697#define B2056_SYN_LOGEN_TX_DIFF_ACL_OVR 0x88
698#define B2056_SYN_LOGEN_RX_CMOS_ACL_OVR 0x89
699#define B2056_SYN_LOGEN_TX_CMOS_ACL_OVR 0x8A
700#define B2056_SYN_LOGEN_VCOBUF2 0x8B
701#define B2056_SYN_LOGEN_MIXER3 0x8C
702#define B2056_SYN_LOGEN_BUF5 0x8D
703#define B2056_SYN_LOGEN_BUF6 0x8E
704#define B2056_SYN_LOGEN_CBUFRX1 0x8F
705#define B2056_SYN_LOGEN_CBUFRX2 0x90
706#define B2056_SYN_LOGEN_CBUFRX3 0x91
707#define B2056_SYN_LOGEN_CBUFRX4 0x92
708#define B2056_SYN_LOGEN_CBUFTX1 0x93
709#define B2056_SYN_LOGEN_CBUFTX2 0x94
710#define B2056_SYN_LOGEN_CBUFTX3 0x95
711#define B2056_SYN_LOGEN_CBUFTX4 0x96
712#define B2056_SYN_LOGEN_CMOSRX1 0x97
713#define B2056_SYN_LOGEN_CMOSRX2 0x98
714#define B2056_SYN_LOGEN_CMOSRX3 0x99
715#define B2056_SYN_LOGEN_CMOSRX4 0x9A
716#define B2056_SYN_LOGEN_CMOSTX1 0x9B
717#define B2056_SYN_LOGEN_CMOSTX2 0x9C
718#define B2056_SYN_LOGEN_CMOSTX3 0x9D
719#define B2056_SYN_LOGEN_CMOSTX4 0x9E
720#define B2056_SYN_LOGEN_VCOBUF2_OVRVAL 0x9F
721#define B2056_SYN_LOGEN_MIXER3_OVRVAL 0xA0
722#define B2056_SYN_LOGEN_BUF5_OVRVAL 0xA1
723#define B2056_SYN_LOGEN_BUF6_OVRVAL 0xA2
724#define B2056_SYN_LOGEN_CBUFRX1_OVRVAL 0xA3
725#define B2056_SYN_LOGEN_CBUFRX2_OVRVAL 0xA4
726#define B2056_SYN_LOGEN_CBUFRX3_OVRVAL 0xA5
727#define B2056_SYN_LOGEN_CBUFRX4_OVRVAL 0xA6
728#define B2056_SYN_LOGEN_CBUFTX1_OVRVAL 0xA7
729#define B2056_SYN_LOGEN_CBUFTX2_OVRVAL 0xA8
730#define B2056_SYN_LOGEN_CBUFTX3_OVRVAL 0xA9
731#define B2056_SYN_LOGEN_CBUFTX4_OVRVAL 0xAA
732#define B2056_SYN_LOGEN_CMOSRX1_OVRVAL 0xAB
733#define B2056_SYN_LOGEN_CMOSRX2_OVRVAL 0xAC
734#define B2056_SYN_LOGEN_CMOSRX3_OVRVAL 0xAD
735#define B2056_SYN_LOGEN_CMOSRX4_OVRVAL 0xAE
736#define B2056_SYN_LOGEN_CMOSTX1_OVRVAL 0xAF
737#define B2056_SYN_LOGEN_CMOSTX2_OVRVAL 0xB0
738#define B2056_SYN_LOGEN_CMOSTX3_OVRVAL 0xB1
739#define B2056_SYN_LOGEN_CMOSTX4_OVRVAL 0xB2
740#define B2056_SYN_LOGEN_ACL_WAITCNT 0xB3
741#define B2056_SYN_LOGEN_CORE_CALVALID 0xB4
742#define B2056_SYN_LOGEN_RX_CMOS_CALVALID 0xB5
743#define B2056_SYN_LOGEN_TX_CMOS_VALID 0xB6
744
745#define B2056_TX_RESERVED_ADDR0 0x00
746#define B2056_TX_IDCODE 0x01
747#define B2056_TX_RESERVED_ADDR2 0x02
748#define B2056_TX_RESERVED_ADDR3 0x03
749#define B2056_TX_RESERVED_ADDR4 0x04
750#define B2056_TX_RESERVED_ADDR5 0x05
751#define B2056_TX_RESERVED_ADDR6 0x06
752#define B2056_TX_RESERVED_ADDR7 0x07
753#define B2056_TX_COM_CTRL 0x08
754#define B2056_TX_COM_PU 0x09
755#define B2056_TX_COM_OVR 0x0A
756#define B2056_TX_COM_RESET 0x0B
757#define B2056_TX_COM_RCAL 0x0C
758#define B2056_TX_COM_RC_RXLPF 0x0D
759#define B2056_TX_COM_RC_TXLPF 0x0E
760#define B2056_TX_COM_RC_RXHPF 0x0F
761#define B2056_TX_RESERVED_ADDR16 0x10
762#define B2056_TX_RESERVED_ADDR17 0x11
763#define B2056_TX_RESERVED_ADDR18 0x12
764#define B2056_TX_RESERVED_ADDR19 0x13
765#define B2056_TX_RESERVED_ADDR20 0x14
766#define B2056_TX_RESERVED_ADDR21 0x15
767#define B2056_TX_RESERVED_ADDR22 0x16
768#define B2056_TX_RESERVED_ADDR23 0x17
769#define B2056_TX_RESERVED_ADDR24 0x18
770#define B2056_TX_RESERVED_ADDR25 0x19
771#define B2056_TX_RESERVED_ADDR26 0x1A
772#define B2056_TX_RESERVED_ADDR27 0x1B
773#define B2056_TX_RESERVED_ADDR28 0x1C
774#define B2056_TX_RESERVED_ADDR29 0x1D
775#define B2056_TX_RESERVED_ADDR30 0x1E
776#define B2056_TX_RESERVED_ADDR31 0x1F
777#define B2056_TX_IQCAL_GAIN_BW 0x20
778#define B2056_TX_LOFT_FINE_I 0x21
779#define B2056_TX_LOFT_FINE_Q 0x22
780#define B2056_TX_LOFT_COARSE_I 0x23
781#define B2056_TX_LOFT_COARSE_Q 0x24
782#define B2056_TX_TX_COM_MASTER1 0x25
783#define B2056_TX_TX_COM_MASTER2 0x26
784#define B2056_TX_RXIQCAL_TXMUX 0x27
785#define B2056_TX_TX_SSI_MASTER 0x28
786#define B2056_TX_IQCAL_VCM_HG 0x29
787#define B2056_TX_IQCAL_IDAC 0x2A
788#define B2056_TX_TSSI_VCM 0x2B
789#define B2056_TX_TX_AMP_DET 0x2C
790#define B2056_TX_TX_SSI_MUX 0x2D
791#define B2056_TX_TSSIA 0x2E
792#define B2056_TX_TSSIG 0x2F
793#define B2056_TX_TSSI_MISC1 0x30
794#define B2056_TX_TSSI_MISC2 0x31
795#define B2056_TX_TSSI_MISC3 0x32
796#define B2056_TX_PA_SPARE1 0x33
797#define B2056_TX_PA_SPARE2 0x34
798#define B2056_TX_INTPAA_MASTER 0x35
799#define B2056_TX_INTPAA_GAIN 0x36
800#define B2056_TX_INTPAA_BOOST_TUNE 0x37
801#define B2056_TX_INTPAA_IAUX_STAT 0x38
802#define B2056_TX_INTPAA_IAUX_DYN 0x39
803#define B2056_TX_INTPAA_IMAIN_STAT 0x3A
804#define B2056_TX_INTPAA_IMAIN_DYN 0x3B
805#define B2056_TX_INTPAA_CASCBIAS 0x3C
806#define B2056_TX_INTPAA_PASLOPE 0x3D
807#define B2056_TX_INTPAA_PA_MISC 0x3E
808#define B2056_TX_INTPAG_MASTER 0x3F
809#define B2056_TX_INTPAG_GAIN 0x40
810#define B2056_TX_INTPAG_BOOST_TUNE 0x41
811#define B2056_TX_INTPAG_IAUX_STAT 0x42
812#define B2056_TX_INTPAG_IAUX_DYN 0x43
813#define B2056_TX_INTPAG_IMAIN_STAT 0x44
814#define B2056_TX_INTPAG_IMAIN_DYN 0x45
815#define B2056_TX_INTPAG_CASCBIAS 0x46
816#define B2056_TX_INTPAG_PASLOPE 0x47
817#define B2056_TX_INTPAG_PA_MISC 0x48
818#define B2056_TX_PADA_MASTER 0x49
819#define B2056_TX_PADA_IDAC 0x4A
820#define B2056_TX_PADA_CASCBIAS 0x4B
821#define B2056_TX_PADA_GAIN 0x4C
822#define B2056_TX_PADA_BOOST_TUNE 0x4D
823#define B2056_TX_PADA_SLOPE 0x4E
824#define B2056_TX_PADG_MASTER 0x4F
825#define B2056_TX_PADG_IDAC 0x50
826#define B2056_TX_PADG_CASCBIAS 0x51
827#define B2056_TX_PADG_GAIN 0x52
828#define B2056_TX_PADG_BOOST_TUNE 0x53
829#define B2056_TX_PADG_SLOPE 0x54
830#define B2056_TX_PGAA_MASTER 0x55
831#define B2056_TX_PGAA_IDAC 0x56
832#define B2056_TX_PGAA_GAIN 0x57
833#define B2056_TX_PGAA_BOOST_TUNE 0x58
834#define B2056_TX_PGAA_SLOPE 0x59
835#define B2056_TX_PGAA_MISC 0x5A
836#define B2056_TX_PGAG_MASTER 0x5B
837#define B2056_TX_PGAG_IDAC 0x5C
838#define B2056_TX_PGAG_GAIN 0x5D
839#define B2056_TX_PGAG_BOOST_TUNE 0x5E
840#define B2056_TX_PGAG_SLOPE 0x5F
841#define B2056_TX_PGAG_MISC 0x60
842#define B2056_TX_MIXA_MASTER 0x61
843#define B2056_TX_MIXA_BOOST_TUNE 0x62
844#define B2056_TX_MIXG 0x63
845#define B2056_TX_MIXG_BOOST_TUNE 0x64
846#define B2056_TX_BB_GM_MASTER 0x65
847#define B2056_TX_GMBB_GM 0x66
848#define B2056_TX_GMBB_IDAC 0x67
849#define B2056_TX_TXLPF_MASTER 0x68
850#define B2056_TX_TXLPF_RCCAL 0x69
851#define B2056_TX_TXLPF_RCCAL_OFF0 0x6A
852#define B2056_TX_TXLPF_RCCAL_OFF1 0x6B
853#define B2056_TX_TXLPF_RCCAL_OFF2 0x6C
854#define B2056_TX_TXLPF_RCCAL_OFF3 0x6D
855#define B2056_TX_TXLPF_RCCAL_OFF4 0x6E
856#define B2056_TX_TXLPF_RCCAL_OFF5 0x6F
857#define B2056_TX_TXLPF_RCCAL_OFF6 0x70
858#define B2056_TX_TXLPF_BW 0x71
859#define B2056_TX_TXLPF_GAIN 0x72
860#define B2056_TX_TXLPF_IDAC 0x73
861#define B2056_TX_TXLPF_IDAC_0 0x74
862#define B2056_TX_TXLPF_IDAC_1 0x75
863#define B2056_TX_TXLPF_IDAC_2 0x76
864#define B2056_TX_TXLPF_IDAC_3 0x77
865#define B2056_TX_TXLPF_IDAC_4 0x78
866#define B2056_TX_TXLPF_IDAC_5 0x79
867#define B2056_TX_TXLPF_IDAC_6 0x7A
868#define B2056_TX_TXLPF_OPAMP_IDAC 0x7B
869#define B2056_TX_TXLPF_MISC 0x7C
870#define B2056_TX_TXSPARE1 0x7D
871#define B2056_TX_TXSPARE2 0x7E
872#define B2056_TX_TXSPARE3 0x7F
873#define B2056_TX_TXSPARE4 0x80
874#define B2056_TX_TXSPARE5 0x81
875#define B2056_TX_TXSPARE6 0x82
876#define B2056_TX_TXSPARE7 0x83
877#define B2056_TX_TXSPARE8 0x84
878#define B2056_TX_TXSPARE9 0x85
879#define B2056_TX_TXSPARE10 0x86
880#define B2056_TX_TXSPARE11 0x87
881#define B2056_TX_TXSPARE12 0x88
882#define B2056_TX_TXSPARE13 0x89
883#define B2056_TX_TXSPARE14 0x8A
884#define B2056_TX_TXSPARE15 0x8B
885#define B2056_TX_TXSPARE16 0x8C
886#define B2056_TX_STATUS_INTPA_GAIN 0x8D
887#define B2056_TX_STATUS_PAD_GAIN 0x8E
888#define B2056_TX_STATUS_PGA_GAIN 0x8F
889#define B2056_TX_STATUS_GM_TXLPF_GAIN 0x90
890#define B2056_TX_STATUS_TXLPF_BW 0x91
891#define B2056_TX_STATUS_TXLPF_RC 0x92
892#define B2056_TX_GMBB_IDAC0 0x93
893#define B2056_TX_GMBB_IDAC1 0x94
894#define B2056_TX_GMBB_IDAC2 0x95
895#define B2056_TX_GMBB_IDAC3 0x96
896#define B2056_TX_GMBB_IDAC4 0x97
897#define B2056_TX_GMBB_IDAC5 0x98
898#define B2056_TX_GMBB_IDAC6 0x99
899#define B2056_TX_GMBB_IDAC7 0x9A
900
901#define B2056_RX_RESERVED_ADDR0 0x00
902#define B2056_RX_IDCODE 0x01
903#define B2056_RX_RESERVED_ADDR2 0x02
904#define B2056_RX_RESERVED_ADDR3 0x03
905#define B2056_RX_RESERVED_ADDR4 0x04
906#define B2056_RX_RESERVED_ADDR5 0x05
907#define B2056_RX_RESERVED_ADDR6 0x06
908#define B2056_RX_RESERVED_ADDR7 0x07
909#define B2056_RX_COM_CTRL 0x08
910#define B2056_RX_COM_PU 0x09
911#define B2056_RX_COM_OVR 0x0A
912#define B2056_RX_COM_RESET 0x0B
913#define B2056_RX_COM_RCAL 0x0C
914#define B2056_RX_COM_RC_RXLPF 0x0D
915#define B2056_RX_COM_RC_TXLPF 0x0E
916#define B2056_RX_COM_RC_RXHPF 0x0F
917#define B2056_RX_RESERVED_ADDR16 0x10
918#define B2056_RX_RESERVED_ADDR17 0x11
919#define B2056_RX_RESERVED_ADDR18 0x12
920#define B2056_RX_RESERVED_ADDR19 0x13
921#define B2056_RX_RESERVED_ADDR20 0x14
922#define B2056_RX_RESERVED_ADDR21 0x15
923#define B2056_RX_RESERVED_ADDR22 0x16
924#define B2056_RX_RESERVED_ADDR23 0x17
925#define B2056_RX_RESERVED_ADDR24 0x18
926#define B2056_RX_RESERVED_ADDR25 0x19
927#define B2056_RX_RESERVED_ADDR26 0x1A
928#define B2056_RX_RESERVED_ADDR27 0x1B
929#define B2056_RX_RESERVED_ADDR28 0x1C
930#define B2056_RX_RESERVED_ADDR29 0x1D
931#define B2056_RX_RESERVED_ADDR30 0x1E
932#define B2056_RX_RESERVED_ADDR31 0x1F
933#define B2056_RX_RXIQCAL_RXMUX 0x20
934#define B2056_RX_RSSI_PU 0x21
935#define B2056_RX_RSSI_SEL 0x22
936#define B2056_RX_RSSI_GAIN 0x23
937#define B2056_RX_RSSI_NB_IDAC 0x24
938#define B2056_RX_RSSI_WB2I_IDAC_1 0x25
939#define B2056_RX_RSSI_WB2I_IDAC_2 0x26
940#define B2056_RX_RSSI_WB2Q_IDAC_1 0x27
941#define B2056_RX_RSSI_WB2Q_IDAC_2 0x28
942#define B2056_RX_RSSI_POLE 0x29
943#define B2056_RX_RSSI_WB1_IDAC 0x2A
944#define B2056_RX_RSSI_MISC 0x2B
945#define B2056_RX_LNAA_MASTER 0x2C
946#define B2056_RX_LNAA_TUNE 0x2D
947#define B2056_RX_LNAA_GAIN 0x2E
948#define B2056_RX_LNA_A_SLOPE 0x2F
949#define B2056_RX_BIASPOLE_LNAA1_IDAC 0x30
950#define B2056_RX_LNAA2_IDAC 0x31
951#define B2056_RX_LNA1A_MISC 0x32
952#define B2056_RX_LNAG_MASTER 0x33
953#define B2056_RX_LNAG_TUNE 0x34
954#define B2056_RX_LNAG_GAIN 0x35
955#define B2056_RX_LNA_G_SLOPE 0x36
956#define B2056_RX_BIASPOLE_LNAG1_IDAC 0x37
957#define B2056_RX_LNAG2_IDAC 0x38
958#define B2056_RX_LNA1G_MISC 0x39
959#define B2056_RX_MIXA_MASTER 0x3A
960#define B2056_RX_MIXA_VCM 0x3B
961#define B2056_RX_MIXA_CTRLPTAT 0x3C
962#define B2056_RX_MIXA_LOB_BIAS 0x3D
963#define B2056_RX_MIXA_CORE_IDAC 0x3E
964#define B2056_RX_MIXA_CMFB_IDAC 0x3F
965#define B2056_RX_MIXA_BIAS_AUX 0x40
966#define B2056_RX_MIXA_BIAS_MAIN 0x41
967#define B2056_RX_MIXA_BIAS_MISC 0x42
968#define B2056_RX_MIXA_MAST_BIAS 0x43
969#define B2056_RX_MIXG_MASTER 0x44
970#define B2056_RX_MIXG_VCM 0x45
971#define B2056_RX_MIXG_CTRLPTAT 0x46
972#define B2056_RX_MIXG_LOB_BIAS 0x47
973#define B2056_RX_MIXG_CORE_IDAC 0x48
974#define B2056_RX_MIXG_CMFB_IDAC 0x49
975#define B2056_RX_MIXG_BIAS_AUX 0x4A
976#define B2056_RX_MIXG_BIAS_MAIN 0x4B
977#define B2056_RX_MIXG_BIAS_MISC 0x4C
978#define B2056_RX_MIXG_MAST_BIAS 0x4D
979#define B2056_RX_TIA_MASTER 0x4E
980#define B2056_RX_TIA_IOPAMP 0x4F
981#define B2056_RX_TIA_QOPAMP 0x50
982#define B2056_RX_TIA_IMISC 0x51
983#define B2056_RX_TIA_QMISC 0x52
984#define B2056_RX_TIA_GAIN 0x53
985#define B2056_RX_TIA_SPARE1 0x54
986#define B2056_RX_TIA_SPARE2 0x55
987#define B2056_RX_BB_LPF_MASTER 0x56
988#define B2056_RX_AACI_MASTER 0x57
989#define B2056_RX_RXLPF_IDAC 0x58
990#define B2056_RX_RXLPF_OPAMPBIAS_LOWQ 0x59
991#define B2056_RX_RXLPF_OPAMPBIAS_HIGHQ 0x5A
992#define B2056_RX_RXLPF_BIAS_DCCANCEL 0x5B
993#define B2056_RX_RXLPF_OUTVCM 0x5C
994#define B2056_RX_RXLPF_INVCM_BODY 0x5D
995#define B2056_RX_RXLPF_CC_OP 0x5E
996#define B2056_RX_RXLPF_GAIN 0x5F
997#define B2056_RX_RXLPF_Q_BW 0x60
998#define B2056_RX_RXLPF_HP_CORNER_BW 0x61
999#define B2056_RX_RXLPF_RCCAL_HPC 0x62
1000#define B2056_RX_RXHPF_OFF0 0x63
1001#define B2056_RX_RXHPF_OFF1 0x64
1002#define B2056_RX_RXHPF_OFF2 0x65
1003#define B2056_RX_RXHPF_OFF3 0x66
1004#define B2056_RX_RXHPF_OFF4 0x67
1005#define B2056_RX_RXHPF_OFF5 0x68
1006#define B2056_RX_RXHPF_OFF6 0x69
1007#define B2056_RX_RXHPF_OFF7 0x6A
1008#define B2056_RX_RXLPF_RCCAL_LPC 0x6B
1009#define B2056_RX_RXLPF_OFF_0 0x6C
1010#define B2056_RX_RXLPF_OFF_1 0x6D
1011#define B2056_RX_RXLPF_OFF_2 0x6E
1012#define B2056_RX_RXLPF_OFF_3 0x6F
1013#define B2056_RX_RXLPF_OFF_4 0x70
1014#define B2056_RX_UNUSED 0x71
1015#define B2056_RX_VGA_MASTER 0x72
1016#define B2056_RX_VGA_BIAS 0x73
1017#define B2056_RX_VGA_BIAS_DCCANCEL 0x74
1018#define B2056_RX_VGA_GAIN 0x75
1019#define B2056_RX_VGA_HP_CORNER_BW 0x76
1020#define B2056_RX_VGABUF_BIAS 0x77
1021#define B2056_RX_VGABUF_GAIN_BW 0x78
1022#define B2056_RX_TXFBMIX_A 0x79
1023#define B2056_RX_TXFBMIX_G 0x7A
1024#define B2056_RX_RXSPARE1 0x7B
1025#define B2056_RX_RXSPARE2 0x7C
1026#define B2056_RX_RXSPARE3 0x7D
1027#define B2056_RX_RXSPARE4 0x7E
1028#define B2056_RX_RXSPARE5 0x7F
1029#define B2056_RX_RXSPARE6 0x80
1030#define B2056_RX_RXSPARE7 0x81
1031#define B2056_RX_RXSPARE8 0x82
1032#define B2056_RX_RXSPARE9 0x83
1033#define B2056_RX_RXSPARE10 0x84
1034#define B2056_RX_RXSPARE11 0x85
1035#define B2056_RX_RXSPARE12 0x86
1036#define B2056_RX_RXSPARE13 0x87
1037#define B2056_RX_RXSPARE14 0x88
1038#define B2056_RX_RXSPARE15 0x89
1039#define B2056_RX_RXSPARE16 0x8A
1040#define B2056_RX_STATUS_LNAA_GAIN 0x8B
1041#define B2056_RX_STATUS_LNAG_GAIN 0x8C
1042#define B2056_RX_STATUS_MIXTIA_GAIN 0x8D
1043#define B2056_RX_STATUS_RXLPF_GAIN 0x8E
1044#define B2056_RX_STATUS_VGA_BUF_GAIN 0x8F
1045#define B2056_RX_STATUS_RXLPF_Q 0x90
1046#define B2056_RX_STATUS_RXLPF_BUF_BW 0x91
1047#define B2056_RX_STATUS_RXLPF_VGA_HPC 0x92
1048#define B2056_RX_STATUS_RXLPF_RC 0x93
1049#define B2056_RX_STATUS_HPC_RC 0x94
1050
1051#define B2056_LNA1_A_PU 0x01
1052#define B2056_LNA2_A_PU 0x02
1053#define B2056_LNA1_G_PU 0x01
1054#define B2056_LNA2_G_PU 0x02
1055#define B2056_MIXA_PU_I 0x01
1056#define B2056_MIXA_PU_Q 0x02
1057#define B2056_MIXA_PU_GM 0x10
1058#define B2056_MIXG_PU_I 0x01
1059#define B2056_MIXG_PU_Q 0x02
1060#define B2056_MIXG_PU_GM 0x10
1061#define B2056_TIA_PU 0x01
1062#define B2056_BB_LPF_PU 0x20
1063#define B2056_W1_PU 0x02
1064#define B2056_W2_PU 0x04
1065#define B2056_NB_PU 0x08
1066#define B2056_RSSI_W1_SEL 0x02
1067#define B2056_RSSI_W2_SEL 0x04
1068#define B2056_RSSI_NB_SEL 0x08
1069#define B2056_VCM_MASK 0x1C
1070#define B2056_RSSI_VCM_SHIFT 0x02
1071
31struct b43_nphy_channeltab_entry_rev3 { 1072struct b43_nphy_channeltab_entry_rev3 {
32 /* The channel number */
33 u8 channel;
34 /* The channel frequency in MHz */ 1073 /* The channel frequency in MHz */
35 u16 freq; 1074 u16 freq;
36 /* Radio register values on channelswitch */ 1075 /* Radio register values on channelswitch */
37 /* TODO */ 1076 u8 radio_syn_pll_vcocal1;
1077 u8 radio_syn_pll_vcocal2;
1078 u8 radio_syn_pll_refdiv;
1079 u8 radio_syn_pll_mmd2;
1080 u8 radio_syn_pll_mmd1;
1081 u8 radio_syn_pll_loopfilter1;
1082 u8 radio_syn_pll_loopfilter2;
1083 u8 radio_syn_pll_loopfilter3;
1084 u8 radio_syn_pll_loopfilter4;
1085 u8 radio_syn_pll_loopfilter5;
1086 u8 radio_syn_reserved_addr27;
1087 u8 radio_syn_reserved_addr28;
1088 u8 radio_syn_reserved_addr29;
1089 u8 radio_syn_logen_vcobuf1;
1090 u8 radio_syn_logen_mixer2;
1091 u8 radio_syn_logen_buf3;
1092 u8 radio_syn_logen_buf4;
1093 u8 radio_rx0_lnaa_tune;
1094 u8 radio_rx0_lnag_tune;
1095 u8 radio_tx0_intpaa_boost_tune;
1096 u8 radio_tx0_intpag_boost_tune;
1097 u8 radio_tx0_pada_boost_tune;
1098 u8 radio_tx0_padg_boost_tune;
1099 u8 radio_tx0_pgaa_boost_tune;
1100 u8 radio_tx0_pgag_boost_tune;
1101 u8 radio_tx0_mixa_boost_tune;
1102 u8 radio_tx0_mixg_boost_tune;
1103 u8 radio_rx1_lnaa_tune;
1104 u8 radio_rx1_lnag_tune;
1105 u8 radio_tx1_intpaa_boost_tune;
1106 u8 radio_tx1_intpag_boost_tune;
1107 u8 radio_tx1_pada_boost_tune;
1108 u8 radio_tx1_padg_boost_tune;
1109 u8 radio_tx1_pgaa_boost_tune;
1110 u8 radio_tx1_pgag_boost_tune;
1111 u8 radio_tx1_mixa_boost_tune;
1112 u8 radio_tx1_mixg_boost_tune;
38 /* PHY register values on channelswitch */ 1113 /* PHY register values on channelswitch */
39 struct b43_phy_n_sfo_cfg phy_regs; 1114 struct b43_phy_n_sfo_cfg phy_regs;
40}; 1115};
41 1116
1117void b2056_upload_inittabs(struct b43_wldev *dev,
1118 bool ghz5, bool ignore_uploadflag);
1119
42#endif /* B43_RADIO_2056_H_ */ 1120#endif /* B43_RADIO_2056_H_ */
diff --git a/drivers/net/wireless/b43/rfkill.c b/drivers/net/wireless/b43/rfkill.c
index 78016ae21c50..86bc0a0f735c 100644
--- a/drivers/net/wireless/b43/rfkill.c
+++ b/drivers/net/wireless/b43/rfkill.c
@@ -28,23 +28,8 @@
28/* Returns TRUE, if the radio is enabled in hardware. */ 28/* Returns TRUE, if the radio is enabled in hardware. */
29bool b43_is_hw_radio_enabled(struct b43_wldev *dev) 29bool b43_is_hw_radio_enabled(struct b43_wldev *dev)
30{ 30{
31 if (dev->phy.rev >= 3 || dev->phy.type == B43_PHYTYPE_LP) { 31 return !(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI)
32 if (!(b43_read32(dev, B43_MMIO_RADIO_HWENABLED_HI) 32 & B43_MMIO_RADIO_HWENABLED_HI_MASK);
33 & B43_MMIO_RADIO_HWENABLED_HI_MASK))
34 return 1;
35 } else {
36 /* To prevent CPU fault on PPC, do not read a register
37 * unless the interface is started; however, on resume
38 * for hibernation, this routine is entered early. When
39 * that happens, unconditionally return TRUE.
40 */
41 if (b43_status(dev) < B43_STAT_STARTED)
42 return 1;
43 if (b43_read16(dev, B43_MMIO_RADIO_HWENABLED_LO)
44 & B43_MMIO_RADIO_HWENABLED_LO_MASK)
45 return 1;
46 }
47 return 0;
48} 33}
49 34
50/* The poll callback for the hardware button. */ 35/* The poll callback for the hardware button. */
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index d60db078eae2..dc8ef09a8552 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -28,41 +28,41 @@
28#include "phy_n.h" 28#include "phy_n.h"
29 29
30static const u8 b43_ntab_adjustpower0[] = { 30static const u8 b43_ntab_adjustpower0[] = {
31 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 31 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
32 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 32 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
33 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 33 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
34 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, 34 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
35 0x08, 0x08, 0x08, 0x08, 0x09, 0x09, 0x09, 0x09, 35 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
36 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B, 36 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
37 0x0C, 0x0C, 0x0C, 0x0C, 0x0D, 0x0D, 0x0D, 0x0D, 37 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
38 0x0E, 0x0E, 0x0E, 0x0E, 0x0F, 0x0F, 0x0F, 0x0F, 38 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
39 0x10, 0x10, 0x10, 0x10, 0x11, 0x11, 0x11, 0x11, 39 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
40 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13, 40 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
41 0x14, 0x14, 0x14, 0x14, 0x15, 0x15, 0x15, 0x15, 41 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
42 0x16, 0x16, 0x16, 0x16, 0x17, 0x17, 0x17, 0x17, 42 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
43 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, 0x19, 0x19, 43 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
44 0x1A, 0x1A, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1B, 44 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
45 0x1C, 0x1C, 0x1C, 0x1C, 0x1D, 0x1D, 0x1D, 0x1D, 45 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
46 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, 46 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
47}; 47};
48 48
49static const u8 b43_ntab_adjustpower1[] = { 49static const u8 b43_ntab_adjustpower1[] = {
50 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 50 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
51 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 51 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
52 0x04, 0x04, 0x04, 0x04, 0x05, 0x05, 0x05, 0x05, 52 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
53 0x06, 0x06, 0x06, 0x06, 0x07, 0x07, 0x07, 0x07, 53 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
54 0x08, 0x08, 0x08, 0x08, 0x09, 0x09, 0x09, 0x09, 54 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
55 0x0A, 0x0A, 0x0A, 0x0A, 0x0B, 0x0B, 0x0B, 0x0B, 55 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
56 0x0C, 0x0C, 0x0C, 0x0C, 0x0D, 0x0D, 0x0D, 0x0D, 56 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
57 0x0E, 0x0E, 0x0E, 0x0E, 0x0F, 0x0F, 0x0F, 0x0F, 57 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
58 0x10, 0x10, 0x10, 0x10, 0x11, 0x11, 0x11, 0x11, 58 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
59 0x12, 0x12, 0x12, 0x12, 0x13, 0x13, 0x13, 0x13, 59 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
60 0x14, 0x14, 0x14, 0x14, 0x15, 0x15, 0x15, 0x15, 60 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
61 0x16, 0x16, 0x16, 0x16, 0x17, 0x17, 0x17, 0x17, 61 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
62 0x18, 0x18, 0x18, 0x18, 0x19, 0x19, 0x19, 0x19, 62 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
63 0x1A, 0x1A, 0x1A, 0x1A, 0x1B, 0x1B, 0x1B, 0x1B, 63 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
64 0x1C, 0x1C, 0x1C, 0x1C, 0x1D, 0x1D, 0x1D, 0x1D, 64 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
65 0x1E, 0x1E, 0x1E, 0x1E, 0x1F, 0x1F, 0x1F, 0x1F, 65 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
66}; 66};
67 67
68static const u16 b43_ntab_bdi[] = { 68static const u16 b43_ntab_bdi[] = {
@@ -130,8 +130,8 @@ static const u32 b43_ntab_framestruct[] = {
130 0x09804506, 0x00100030, 0x09804507, 0x00100030, 130 0x09804506, 0x00100030, 0x09804507, 0x00100030,
131 0x00000000, 0x00000000, 0x00000000, 0x00000000, 131 0x00000000, 0x00000000, 0x00000000, 0x00000000,
132 0x00000000, 0x00000000, 0x00000000, 0x00000000, 132 0x00000000, 0x00000000, 0x00000000, 0x00000000,
133 0x08004A0C, 0x00100008, 0x01000A0D, 0x00100028, 133 0x08004A0C, 0x00100004, 0x01000A0D, 0x00100024,
134 0x0980450E, 0x00100038, 0x0980450F, 0x00100038, 134 0x0980450E, 0x00100034, 0x0980450F, 0x00100034,
135 0x00000000, 0x00000000, 0x00000000, 0x00000000, 135 0x00000000, 0x00000000, 0x00000000, 0x00000000,
136 0x00000000, 0x00000000, 0x00000000, 0x00000000, 136 0x00000000, 0x00000000, 0x00000000, 0x00000000,
137 0x00000A04, 0x00100000, 0x11008A05, 0x00100020, 137 0x00000A04, 0x00100000, 0x11008A05, 0x00100020,
@@ -202,13 +202,13 @@ static const u32 b43_ntab_framestruct[] = {
202 0x53028A06, 0x01900060, 0x53028A07, 0x01900060, 202 0x53028A06, 0x01900060, 0x53028A07, 0x01900060,
203 0x00000000, 0x00000000, 0x00000000, 0x00000000, 203 0x00000000, 0x00000000, 0x00000000, 0x00000000,
204 0x00000000, 0x00000000, 0x00000000, 0x00000000, 204 0x00000000, 0x00000000, 0x00000000, 0x00000000,
205 0x4002140C, 0x000F4810, 0x6203140D, 0x00100050, 205 0x4002140C, 0x000F4808, 0x6203140D, 0x00100048,
206 0x53028A0E, 0x01900070, 0x53028A0F, 0x01900070, 206 0x53028A0E, 0x01900068, 0x53028A0F, 0x01900068,
207 0x00000000, 0x00000000, 0x00000000, 0x00000000, 207 0x00000000, 0x00000000, 0x00000000, 0x00000000,
208 0x00000000, 0x00000000, 0x00000000, 0x00000000, 208 0x00000000, 0x00000000, 0x00000000, 0x00000000,
209 0x00000A0C, 0x00100008, 0x11008A0D, 0x00100028, 209 0x00000A0C, 0x00100004, 0x11008A0D, 0x00100024,
210 0x1980C50E, 0x00100038, 0x2181050E, 0x00100038, 210 0x1980C50E, 0x00100034, 0x2181050E, 0x00100034,
211 0x2181050E, 0x00100038, 0x0180050C, 0x00100038, 211 0x2181050E, 0x00100034, 0x0180050C, 0x00100038,
212 0x1180850D, 0x00100038, 0x1181850D, 0x00100038, 212 0x1180850D, 0x00100038, 0x1181850D, 0x00100038,
213 0x2981450F, 0x01100038, 0x00000000, 0x00000000, 213 0x2981450F, 0x01100038, 0x00000000, 0x00000000,
214 0x00000000, 0x00000000, 0x00000000, 0x00000000, 214 0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -238,9 +238,9 @@ static const u32 b43_ntab_framestruct[] = {
238 0x00000000, 0x00000000, 0x00000000, 0x00000000, 238 0x00000000, 0x00000000, 0x00000000, 0x00000000,
239 0x00000000, 0x00000000, 0x00000000, 0x00000000, 239 0x00000000, 0x00000000, 0x00000000, 0x00000000,
240 0x00000000, 0x00000000, 0x00000000, 0x00000000, 240 0x00000000, 0x00000000, 0x00000000, 0x00000000,
241 0x4002140C, 0x00100010, 0x0200140D, 0x00100050, 241 0x4002140C, 0x00100008, 0x0200140D, 0x00100048,
242 0x0B004A0E, 0x01900070, 0x13008A0E, 0x01900070, 242 0x0B004A0E, 0x01900068, 0x13008A0E, 0x01900068,
243 0x13008A0E, 0x01900070, 0x43020A0C, 0x00100070, 243 0x13008A0E, 0x01900068, 0x43020A0C, 0x00100070,
244 0x1B00CA0D, 0x00100070, 0x1B014A0D, 0x00100070, 244 0x1B00CA0D, 0x00100070, 0x1B014A0D, 0x00100070,
245 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, 245 0x23010A0F, 0x01500070, 0x00000000, 0x00000000,
246 0x00000000, 0x00000000, 0x00000000, 0x00000000, 246 0x00000000, 0x00000000, 0x00000000, 0x00000000,
@@ -337,73 +337,73 @@ static const u32 b43_ntab_framestruct[] = {
337}; 337};
338 338
339static const u32 b43_ntab_gainctl0[] = { 339static const u32 b43_ntab_gainctl0[] = {
340 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, 340 0x03CC2B44, 0x03CC2B42, 0x03CC2B40, 0x03CC2B3E,
341 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, 341 0x03CC2B3D, 0x03CC2B3B, 0x03C82B44, 0x03C82B42,
342 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, 342 0x03C82B40, 0x03C82B3E, 0x03C82B3D, 0x03C82B3B,
343 0x00730C39, 0x00720D39, 0x00710E38, 0x00700F38, 343 0x03C82B39, 0x03C82B38, 0x03C82B36, 0x03C82B34,
344 0x006F0037, 0x006E0137, 0x006D0236, 0x006C0336, 344 0x03C42B44, 0x03C42B42, 0x03C42B40, 0x03C42B3E,
345 0x006B0435, 0x006A0535, 0x00690634, 0x00680734, 345 0x03C42B3D, 0x03C42B3B, 0x03C42B39, 0x03C42B38,
346 0x00670833, 0x00660933, 0x00650A32, 0x00640B32, 346 0x03C42B36, 0x03C42B34, 0x03C42B33, 0x03C42B32,
347 0x00630C31, 0x00620D31, 0x00610E30, 0x00600F30, 347 0x03C42B30, 0x03C42B2F, 0x03C42B2D, 0x03C02B44,
348 0x005F002F, 0x005E012F, 0x005D022E, 0x005C032E, 348 0x03C02B42, 0x03C02B40, 0x03C02B3E, 0x03C02B3D,
349 0x005B042D, 0x005A052D, 0x0059062C, 0x0058072C, 349 0x03C02B3B, 0x03C02B39, 0x03C02B38, 0x03C02B36,
350 0x0057082B, 0x0056092B, 0x00550A2A, 0x00540B2A, 350 0x03C02B34, 0x03B02B44, 0x03B02B42, 0x03B02B40,
351 0x00530C29, 0x00520D29, 0x00510E28, 0x00500F28, 351 0x03B02B3E, 0x03B02B3D, 0x03B02B3B, 0x03B02B39,
352 0x004F0027, 0x004E0127, 0x004D0226, 0x004C0326, 352 0x03B02B38, 0x03B02B36, 0x03B02B34, 0x03B02B33,
353 0x004B0425, 0x004A0525, 0x00490624, 0x00480724, 353 0x03B02B32, 0x03B02B30, 0x03B02B2F, 0x03B02B2D,
354 0x00470823, 0x00460923, 0x00450A22, 0x00440B22, 354 0x03A02B44, 0x03A02B42, 0x03A02B40, 0x03A02B3E,
355 0x00430C21, 0x00420D21, 0x00410E20, 0x00400F20, 355 0x03A02B3D, 0x03A02B3B, 0x03A02B39, 0x03A02B38,
356 0x003F001F, 0x003E011F, 0x003D021E, 0x003C031E, 356 0x03A02B36, 0x03A02B34, 0x03902B44, 0x03902B42,
357 0x003B041D, 0x003A051D, 0x0039061C, 0x0038071C, 357 0x03902B40, 0x03902B3E, 0x03902B3D, 0x03902B3B,
358 0x0037081B, 0x0036091B, 0x00350A1A, 0x00340B1A, 358 0x03902B39, 0x03902B38, 0x03902B36, 0x03902B34,
359 0x00330C19, 0x00320D19, 0x00310E18, 0x00300F18, 359 0x03902B33, 0x03902B32, 0x03902B30, 0x03802B44,
360 0x002F0017, 0x002E0117, 0x002D0216, 0x002C0316, 360 0x03802B42, 0x03802B40, 0x03802B3E, 0x03802B3D,
361 0x002B0415, 0x002A0515, 0x00290614, 0x00280714, 361 0x03802B3B, 0x03802B39, 0x03802B38, 0x03802B36,
362 0x00270813, 0x00260913, 0x00250A12, 0x00240B12, 362 0x03802B34, 0x03802B33, 0x03802B32, 0x03802B30,
363 0x00230C11, 0x00220D11, 0x00210E10, 0x00200F10, 363 0x03802B2F, 0x03802B2D, 0x03802B2C, 0x03802B2B,
364 0x001F000F, 0x001E010F, 0x001D020E, 0x001C030E, 364 0x03802B2A, 0x03802B29, 0x03802B27, 0x03802B26,
365 0x001B040D, 0x001A050D, 0x0019060C, 0x0018070C, 365 0x03802B25, 0x03802B24, 0x03802B23, 0x03802B22,
366 0x0017080B, 0x0016090B, 0x00150A0A, 0x00140B0A, 366 0x03802B21, 0x03802B20, 0x03802B1F, 0x03802B1E,
367 0x00130C09, 0x00120D09, 0x00110E08, 0x00100F08, 367 0x03802B1E, 0x03802B1D, 0x03802B1C, 0x03802B1B,
368 0x000F0007, 0x000E0107, 0x000D0206, 0x000C0306, 368 0x03802B1A, 0x03802B1A, 0x03802B19, 0x03802B18,
369 0x000B0405, 0x000A0505, 0x00090604, 0x00080704, 369 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18,
370 0x00070803, 0x00060903, 0x00050A02, 0x00040B02, 370 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18,
371 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, 371 0x03802B18, 0x03802B18, 0x03802B18, 0x00002B00,
372}; 372};
373 373
374static const u32 b43_ntab_gainctl1[] = { 374static const u32 b43_ntab_gainctl1[] = {
375 0x007F003F, 0x007E013F, 0x007D023E, 0x007C033E, 375 0x03CC2B44, 0x03CC2B42, 0x03CC2B40, 0x03CC2B3E,
376 0x007B043D, 0x007A053D, 0x0079063C, 0x0078073C, 376 0x03CC2B3D, 0x03CC2B3B, 0x03C82B44, 0x03C82B42,
377 0x0077083B, 0x0076093B, 0x00750A3A, 0x00740B3A, 377 0x03C82B40, 0x03C82B3E, 0x03C82B3D, 0x03C82B3B,
378 0x00730C39, 0x00720D39, 0x00710E38, 0x00700F38, 378 0x03C82B39, 0x03C82B38, 0x03C82B36, 0x03C82B34,
379 0x006F0037, 0x006E0137, 0x006D0236, 0x006C0336, 379 0x03C42B44, 0x03C42B42, 0x03C42B40, 0x03C42B3E,
380 0x006B0435, 0x006A0535, 0x00690634, 0x00680734, 380 0x03C42B3D, 0x03C42B3B, 0x03C42B39, 0x03C42B38,
381 0x00670833, 0x00660933, 0x00650A32, 0x00640B32, 381 0x03C42B36, 0x03C42B34, 0x03C42B33, 0x03C42B32,
382 0x00630C31, 0x00620D31, 0x00610E30, 0x00600F30, 382 0x03C42B30, 0x03C42B2F, 0x03C42B2D, 0x03C02B44,
383 0x005F002F, 0x005E012F, 0x005D022E, 0x005C032E, 383 0x03C02B42, 0x03C02B40, 0x03C02B3E, 0x03C02B3D,
384 0x005B042D, 0x005A052D, 0x0059062C, 0x0058072C, 384 0x03C02B3B, 0x03C02B39, 0x03C02B38, 0x03C02B36,
385 0x0057082B, 0x0056092B, 0x00550A2A, 0x00540B2A, 385 0x03C02B34, 0x03B02B44, 0x03B02B42, 0x03B02B40,
386 0x00530C29, 0x00520D29, 0x00510E28, 0x00500F28, 386 0x03B02B3E, 0x03B02B3D, 0x03B02B3B, 0x03B02B39,
387 0x004F0027, 0x004E0127, 0x004D0226, 0x004C0326, 387 0x03B02B38, 0x03B02B36, 0x03B02B34, 0x03B02B33,
388 0x004B0425, 0x004A0525, 0x00490624, 0x00480724, 388 0x03B02B32, 0x03B02B30, 0x03B02B2F, 0x03B02B2D,
389 0x00470823, 0x00460923, 0x00450A22, 0x00440B22, 389 0x03A02B44, 0x03A02B42, 0x03A02B40, 0x03A02B3E,
390 0x00430C21, 0x00420D21, 0x00410E20, 0x00400F20, 390 0x03A02B3D, 0x03A02B3B, 0x03A02B39, 0x03A02B38,
391 0x003F001F, 0x003E011F, 0x003D021E, 0x003C031E, 391 0x03A02B36, 0x03A02B34, 0x03902B44, 0x03902B42,
392 0x003B041D, 0x003A051D, 0x0039061C, 0x0038071C, 392 0x03902B40, 0x03902B3E, 0x03902B3D, 0x03902B3B,
393 0x0037081B, 0x0036091B, 0x00350A1A, 0x00340B1A, 393 0x03902B39, 0x03902B38, 0x03902B36, 0x03902B34,
394 0x00330C19, 0x00320D19, 0x00310E18, 0x00300F18, 394 0x03902B33, 0x03902B32, 0x03902B30, 0x03802B44,
395 0x002F0017, 0x002E0117, 0x002D0216, 0x002C0316, 395 0x03802B42, 0x03802B40, 0x03802B3E, 0x03802B3D,
396 0x002B0415, 0x002A0515, 0x00290614, 0x00280714, 396 0x03802B3B, 0x03802B39, 0x03802B38, 0x03802B36,
397 0x00270813, 0x00260913, 0x00250A12, 0x00240B12, 397 0x03802B34, 0x03802B33, 0x03802B32, 0x03802B30,
398 0x00230C11, 0x00220D11, 0x00210E10, 0x00200F10, 398 0x03802B2F, 0x03802B2D, 0x03802B2C, 0x03802B2B,
399 0x001F000F, 0x001E010F, 0x001D020E, 0x001C030E, 399 0x03802B2A, 0x03802B29, 0x03802B27, 0x03802B26,
400 0x001B040D, 0x001A050D, 0x0019060C, 0x0018070C, 400 0x03802B25, 0x03802B24, 0x03802B23, 0x03802B22,
401 0x0017080B, 0x0016090B, 0x00150A0A, 0x00140B0A, 401 0x03802B21, 0x03802B20, 0x03802B1F, 0x03802B1E,
402 0x00130C09, 0x00120D09, 0x00110E08, 0x00100F08, 402 0x03802B1E, 0x03802B1D, 0x03802B1C, 0x03802B1B,
403 0x000F0007, 0x000E0107, 0x000D0206, 0x000C0306, 403 0x03802B1A, 0x03802B1A, 0x03802B19, 0x03802B18,
404 0x000B0405, 0x000A0505, 0x00090604, 0x00080704, 404 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18,
405 0x00070803, 0x00060903, 0x00050A02, 0x00040B02, 405 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18,
406 0x00030C01, 0x00020D01, 0x00010E00, 0x00000F00, 406 0x03802B18, 0x03802B18, 0x03802B18, 0x00002B00,
407}; 407};
408 408
409static const u32 b43_ntab_intlevel[] = { 409static const u32 b43_ntab_intlevel[] = {
@@ -1811,9 +1811,7 @@ void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
1811} 1811}
1812 1812
1813#define ntab_upload(dev, offset, data) do { \ 1813#define ntab_upload(dev, offset, data) do { \
1814 unsigned int i; \ 1814 b43_ntab_write_bulk(dev, offset, offset##_SIZE, data); \
1815 for (i = 0; i < (offset##_SIZE); i++) \
1816 b43_ntab_write(dev, (offset) + i, (data)[i]); \
1817 } while (0) 1815 } while (0)
1818 1816
1819void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev) 1817void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
@@ -1825,24 +1823,24 @@ void b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
1825 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn); 1823 ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
1826 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel); 1824 ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
1827 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot); 1825 ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
1828 ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
1829 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0); 1826 ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
1830 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1); 1827 ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
1831 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0); 1828 ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
1832 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1); 1829 ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
1833 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
1834 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest); 1830 ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
1835 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs); 1831 ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
1836
1837 /* Volatile tables */
1838 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10); 1832 ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
1839 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11); 1833 ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
1834
1835 /* Volatile tables */
1836 ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
1837 ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
1838 ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
1839 ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
1840 ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0); 1840 ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
1841 ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1); 1841 ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
1842 ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0); 1842 ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
1843 ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1); 1843 ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
1844 ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
1845 ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
1846 ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0); 1844 ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
1847 ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1); 1845 ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
1848 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0); 1846 ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 67f18ecdb3bf..1f11e1670bf0 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -181,52 +181,75 @@ static int b43legacy_ratelimit(struct b43legacy_wl *wl)
181 181
182void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...) 182void b43legacyinfo(struct b43legacy_wl *wl, const char *fmt, ...)
183{ 183{
184 struct va_format vaf;
184 va_list args; 185 va_list args;
185 186
186 if (!b43legacy_ratelimit(wl)) 187 if (!b43legacy_ratelimit(wl))
187 return; 188 return;
189
188 va_start(args, fmt); 190 va_start(args, fmt);
189 printk(KERN_INFO "b43legacy-%s: ", 191
190 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 192 vaf.fmt = fmt;
191 vprintk(fmt, args); 193 vaf.va = &args;
194
195 printk(KERN_INFO "b43legacy-%s: %pV",
196 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
197
192 va_end(args); 198 va_end(args);
193} 199}
194 200
195void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...) 201void b43legacyerr(struct b43legacy_wl *wl, const char *fmt, ...)
196{ 202{
203 struct va_format vaf;
197 va_list args; 204 va_list args;
198 205
199 if (!b43legacy_ratelimit(wl)) 206 if (!b43legacy_ratelimit(wl))
200 return; 207 return;
208
201 va_start(args, fmt); 209 va_start(args, fmt);
202 printk(KERN_ERR "b43legacy-%s ERROR: ", 210
203 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 211 vaf.fmt = fmt;
204 vprintk(fmt, args); 212 vaf.va = &args;
213
214 printk(KERN_ERR "b43legacy-%s ERROR: %pV",
215 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
216
205 va_end(args); 217 va_end(args);
206} 218}
207 219
208void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...) 220void b43legacywarn(struct b43legacy_wl *wl, const char *fmt, ...)
209{ 221{
222 struct va_format vaf;
210 va_list args; 223 va_list args;
211 224
212 if (!b43legacy_ratelimit(wl)) 225 if (!b43legacy_ratelimit(wl))
213 return; 226 return;
227
214 va_start(args, fmt); 228 va_start(args, fmt);
215 printk(KERN_WARNING "b43legacy-%s warning: ", 229
216 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 230 vaf.fmt = fmt;
217 vprintk(fmt, args); 231 vaf.va = &args;
232
233 printk(KERN_WARNING "b43legacy-%s warning: %pV",
234 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
235
218 va_end(args); 236 va_end(args);
219} 237}
220 238
221#if B43legacy_DEBUG 239#if B43legacy_DEBUG
222void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...) 240void b43legacydbg(struct b43legacy_wl *wl, const char *fmt, ...)
223{ 241{
242 struct va_format vaf;
224 va_list args; 243 va_list args;
225 244
226 va_start(args, fmt); 245 va_start(args, fmt);
227 printk(KERN_DEBUG "b43legacy-%s debug: ", 246
228 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan"); 247 vaf.fmt = fmt;
229 vprintk(fmt, args); 248 vaf.va = &args;
249
250 printk(KERN_DEBUG "b43legacy-%s debug: %pV",
251 (wl && wl->hw) ? wiphy_name(wl->hw->wiphy) : "wlan", &vaf);
252
230 va_end(args); 253 va_end(args);
231} 254}
232#endif /* DEBUG */ 255#endif /* DEBUG */
diff --git a/drivers/net/wireless/b43legacy/rfkill.c b/drivers/net/wireless/b43legacy/rfkill.c
index d579df72b783..b90f223fb31c 100644
--- a/drivers/net/wireless/b43legacy/rfkill.c
+++ b/drivers/net/wireless/b43legacy/rfkill.c
@@ -29,7 +29,7 @@
29/* Returns TRUE, if the radio is enabled in hardware. */ 29/* Returns TRUE, if the radio is enabled in hardware. */
30bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev) 30bool b43legacy_is_hw_radio_enabled(struct b43legacy_wldev *dev)
31{ 31{
32 if (dev->phy.rev >= 3) { 32 if (dev->dev->id.revision >= 3) {
33 if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI) 33 if (!(b43legacy_read32(dev, B43legacy_MMIO_RADIO_HWENABLED_HI)
34 & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK)) 34 & B43legacy_MMIO_RADIO_HWENABLED_HI_MASK))
35 return 1; 35 return 1;
diff --git a/drivers/net/wireless/hostap/hostap_ap.c b/drivers/net/wireless/hostap/hostap_ap.c
index dbb986946e1a..18d63f57777d 100644
--- a/drivers/net/wireless/hostap/hostap_ap.c
+++ b/drivers/net/wireless/hostap/hostap_ap.c
@@ -858,7 +858,10 @@ void hostap_free_data(struct ap_data *ap)
858 return; 858 return;
859 } 859 }
860 860
861 flush_work_sync(&ap->add_sta_proc_queue);
862
861#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT 863#ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT
864 flush_work_sync(&ap->wds_oper_queue);
862 if (ap->crypt) 865 if (ap->crypt)
863 ap->crypt->deinit(ap->crypt_priv); 866 ap->crypt->deinit(ap->crypt_priv);
864 ap->crypt = ap->crypt_priv = NULL; 867 ap->crypt = ap->crypt_priv = NULL;
diff --git a/drivers/net/wireless/hostap/hostap_hw.c b/drivers/net/wireless/hostap/hostap_hw.c
index b7cb165d612b..a8bddd81b4d1 100644
--- a/drivers/net/wireless/hostap/hostap_hw.c
+++ b/drivers/net/wireless/hostap/hostap_hw.c
@@ -3317,7 +3317,13 @@ static void prism2_free_local_data(struct net_device *dev)
3317 3317
3318 unregister_netdev(local->dev); 3318 unregister_netdev(local->dev);
3319 3319
3320 flush_scheduled_work(); 3320 flush_work_sync(&local->reset_queue);
3321 flush_work_sync(&local->set_multicast_list_queue);
3322 flush_work_sync(&local->set_tim_queue);
3323#ifndef PRISM2_NO_STATION_MODES
3324 flush_work_sync(&local->info_queue);
3325#endif
3326 flush_work_sync(&local->comms_qual_update);
3321 3327
3322 lib80211_crypt_info_free(&local->crypt_info); 3328 lib80211_crypt_info_free(&local->crypt_info);
3323 3329
diff --git a/drivers/net/wireless/hostap/hostap_main.c b/drivers/net/wireless/hostap/hostap_main.c
index 25a2722c8a98..1d9aed645723 100644
--- a/drivers/net/wireless/hostap/hostap_main.c
+++ b/drivers/net/wireless/hostap/hostap_main.c
@@ -891,7 +891,6 @@ void hostap_setup_dev(struct net_device *dev, local_info_t *local,
891 891
892 SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops); 892 SET_ETHTOOL_OPS(dev, &prism2_ethtool_ops);
893 893
894 netif_stop_queue(dev);
895} 894}
896 895
897static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked) 896static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked)
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index b82364258dc5..ed424574160e 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -106,6 +106,9 @@ config IWL5000
106 Intel WiFi Link 1000BGN 106 Intel WiFi Link 1000BGN
107 Intel Wireless WiFi 5150AGN 107 Intel Wireless WiFi 5150AGN
108 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN 108 Intel Wireless WiFi 5100AGN, 5300AGN, and 5350AGN
109 Intel 6000 Gen 2 Series Wi-Fi Adapters (6000G2A and 6000G2B)
110 Intel WIreless WiFi Link 6050BGN Gen 2 Adapter
111 Intel 100 Series Wi-Fi Adapters (100BGN and 130BGN)
109 112
110config IWL3945 113config IWL3945
111 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)" 114 tristate "Intel PRO/Wireless 3945ABG/BG Network Connection (iwl3945)"
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 63edbe2e557f..93380f97835f 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -2,20 +2,27 @@ obj-$(CONFIG_IWLWIFI) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o 3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o
4iwlcore-objs += iwl-scan.o iwl-led.o 4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWL3945) += iwl-legacy.o
6iwlcore-$(CONFIG_IWL4965) += iwl-legacy.o
5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 7iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 8iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
7 9
10# If 3945 is selected only, iwl-legacy.o will be added
11# to iwlcore-m above, but it needs to be built in.
12iwlcore-objs += $(iwlcore-m)
13
8CFLAGS_iwl-devtrace.o := -I$(src) 14CFLAGS_iwl-devtrace.o := -I$(src)
9 15
10# AGN 16# AGN
11obj-$(CONFIG_IWLAGN) += iwlagn.o 17obj-$(CONFIG_IWLAGN) += iwlagn.o
12iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o iwl-agn-ict.o 18iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
13iwlagn-objs += iwl-agn-ucode.o iwl-agn-hcmd.o iwl-agn-tx.o 19iwlagn-objs += iwl-agn-ucode.o iwl-agn-tx.o
14iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o 20iwlagn-objs += iwl-agn-lib.o iwl-agn-rx.o iwl-agn-calib.o
15iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o 21iwlagn-objs += iwl-agn-tt.o iwl-agn-sta.o iwl-agn-eeprom.o
16iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o 22iwlagn-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-agn-debugfs.o
17 23
18iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 24iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
25iwlagn-$(CONFIG_IWL5000) += iwl-agn-rxon.o iwl-agn-hcmd.o iwl-agn-ict.o
19iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 26iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
20iwlagn-$(CONFIG_IWL5000) += iwl-6000.o 27iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
21iwlagn-$(CONFIG_IWL5000) += iwl-1000.o 28iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index db540910b110..ba78bc8a259f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -147,7 +147,11 @@ static int iwl1000_hw_set_hw_params(struct iwl_priv *priv)
147 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 147 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
148 148
149 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 149 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
150 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); 150 if (priv->cfg->rx_with_siso_diversity)
151 priv->hw_params.rx_chains_num = 1;
152 else
153 priv->hw_params.rx_chains_num =
154 num_of_ant(priv->cfg->valid_rx_ant);
151 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 155 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
152 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 156 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
153 157
@@ -211,14 +215,16 @@ static struct iwl_lib_ops iwl1000_lib = {
211 .calib_version = iwlagn_eeprom_calib_version, 215 .calib_version = iwlagn_eeprom_calib_version,
212 .query_addr = iwlagn_eeprom_query_addr, 216 .query_addr = iwlagn_eeprom_query_addr,
213 }, 217 },
214 .post_associate = iwl_post_associate, 218 .isr_ops = {
215 .isr = iwl_isr_ict, 219 .isr = iwl_isr_ict,
216 .config_ap = iwl_config_ap, 220 .free = iwl_free_isr_ict,
221 .alloc = iwl_alloc_isr_ict,
222 .reset = iwl_reset_ict,
223 .disable = iwl_disable_ict,
224 },
217 .temp_ops = { 225 .temp_ops = {
218 .temperature = iwlagn_temperature, 226 .temperature = iwlagn_temperature,
219 }, 227 },
220 .manage_ibss_station = iwlagn_manage_ibss_station,
221 .update_bcast_stations = iwl_update_bcast_stations,
222 .debugfs_ops = { 228 .debugfs_ops = {
223 .rx_stats_read = iwl_ucode_rx_stats_read, 229 .rx_stats_read = iwl_ucode_rx_stats_read,
224 .tx_stats_read = iwl_ucode_tx_stats_read, 230 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -226,7 +232,6 @@ static struct iwl_lib_ops iwl1000_lib = {
226 .bt_stats_read = iwl_ucode_bt_stats_read, 232 .bt_stats_read = iwl_ucode_bt_stats_read,
227 .reply_tx_error = iwl_reply_tx_error_read, 233 .reply_tx_error = iwl_reply_tx_error_read,
228 }, 234 },
229 .recover_from_tx_stall = iwl_bg_monitor_recover,
230 .check_plcp_health = iwl_good_plcp_health, 235 .check_plcp_health = iwl_good_plcp_health,
231 .check_ack_health = iwl_good_ack_health, 236 .check_ack_health = iwl_good_ack_health,
232 .txfifo_flush = iwlagn_txfifo_flush, 237 .txfifo_flush = iwlagn_txfifo_flush,
@@ -243,6 +248,7 @@ static const struct iwl_ops iwl1000_ops = {
243 .hcmd = &iwlagn_hcmd, 248 .hcmd = &iwlagn_hcmd,
244 .utils = &iwlagn_hcmd_utils, 249 .utils = &iwlagn_hcmd_utils,
245 .led = &iwlagn_led_ops, 250 .led = &iwlagn_led_ops,
251 .ieee80211_ops = &iwlagn_hw_ops,
246}; 252};
247 253
248static struct iwl_base_params iwl1000_base_params = { 254static struct iwl_base_params iwl1000_base_params = {
@@ -259,7 +265,7 @@ static struct iwl_base_params iwl1000_base_params = {
259 .support_ct_kill_exit = true, 265 .support_ct_kill_exit = true,
260 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF, 266 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_EXT_LONG_THRESHOLD_DEF,
261 .chain_noise_scale = 1000, 267 .chain_noise_scale = 1000,
262 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, 268 .wd_timeout = IWL_DEF_WD_TIMEOUT,
263 .max_event_log_size = 128, 269 .max_event_log_size = 128,
264 .ucode_tracing = true, 270 .ucode_tracing = true,
265 .sensitivity_calib_by_driver = true, 271 .sensitivity_calib_by_driver = true,
@@ -270,66 +276,49 @@ static struct iwl_ht_params iwl1000_ht_params = {
270 .use_rts_for_aggregation = true, /* use rts/cts protection */ 276 .use_rts_for_aggregation = true, /* use rts/cts protection */
271}; 277};
272 278
279#define IWL_DEVICE_1000 \
280 .fw_name_pre = IWL1000_FW_PRE, \
281 .ucode_api_max = IWL1000_UCODE_API_MAX, \
282 .ucode_api_min = IWL1000_UCODE_API_MIN, \
283 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
284 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
285 .ops = &iwl1000_ops, \
286 .mod_params = &iwlagn_mod_params, \
287 .base_params = &iwl1000_base_params, \
288 .led_mode = IWL_LED_BLINK
289
273struct iwl_cfg iwl1000_bgn_cfg = { 290struct iwl_cfg iwl1000_bgn_cfg = {
274 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN", 291 .name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
275 .fw_name_pre = IWL1000_FW_PRE, 292 IWL_DEVICE_1000,
276 .ucode_api_max = IWL1000_UCODE_API_MAX,
277 .ucode_api_min = IWL1000_UCODE_API_MIN,
278 .sku = IWL_SKU_G|IWL_SKU_N,
279 .valid_tx_ant = ANT_A,
280 .valid_rx_ant = ANT_AB,
281 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
282 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
283 .ops = &iwl1000_ops,
284 .mod_params = &iwlagn_mod_params,
285 .base_params = &iwl1000_base_params,
286 .ht_params = &iwl1000_ht_params, 293 .ht_params = &iwl1000_ht_params,
287}; 294};
288 295
289struct iwl_cfg iwl1000_bg_cfg = { 296struct iwl_cfg iwl1000_bg_cfg = {
290 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG", 297 .name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
291 .fw_name_pre = IWL1000_FW_PRE, 298 IWL_DEVICE_1000,
292 .ucode_api_max = IWL1000_UCODE_API_MAX,
293 .ucode_api_min = IWL1000_UCODE_API_MIN,
294 .sku = IWL_SKU_G,
295 .valid_tx_ant = ANT_A,
296 .valid_rx_ant = ANT_AB,
297 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
298 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
299 .ops = &iwl1000_ops,
300 .mod_params = &iwlagn_mod_params,
301 .base_params = &iwl1000_base_params,
302}; 299};
303 300
301#define IWL_DEVICE_100 \
302 .fw_name_pre = IWL100_FW_PRE, \
303 .ucode_api_max = IWL100_UCODE_API_MAX, \
304 .ucode_api_min = IWL100_UCODE_API_MIN, \
305 .eeprom_ver = EEPROM_1000_EEPROM_VERSION, \
306 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION, \
307 .ops = &iwl1000_ops, \
308 .mod_params = &iwlagn_mod_params, \
309 .base_params = &iwl1000_base_params, \
310 .led_mode = IWL_LED_RF_STATE, \
311 .rx_with_siso_diversity = true
312
304struct iwl_cfg iwl100_bgn_cfg = { 313struct iwl_cfg iwl100_bgn_cfg = {
305 .name = "Intel(R) 100 Series 1x1 BGN", 314 .name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
306 .fw_name_pre = IWL100_FW_PRE, 315 IWL_DEVICE_100,
307 .ucode_api_max = IWL100_UCODE_API_MAX,
308 .ucode_api_min = IWL100_UCODE_API_MIN,
309 .sku = IWL_SKU_G|IWL_SKU_N,
310 .valid_tx_ant = ANT_A,
311 .valid_rx_ant = ANT_A,
312 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
313 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
314 .ops = &iwl1000_ops,
315 .mod_params = &iwlagn_mod_params,
316 .base_params = &iwl1000_base_params,
317 .ht_params = &iwl1000_ht_params, 316 .ht_params = &iwl1000_ht_params,
318}; 317};
319 318
320struct iwl_cfg iwl100_bg_cfg = { 319struct iwl_cfg iwl100_bg_cfg = {
321 .name = "Intel(R) 100 Series 1x1 BG", 320 .name = "Intel(R) Centrino(R) Wireless-N 100 BG",
322 .fw_name_pre = IWL100_FW_PRE, 321 IWL_DEVICE_100,
323 .ucode_api_max = IWL100_UCODE_API_MAX,
324 .ucode_api_min = IWL100_UCODE_API_MIN,
325 .sku = IWL_SKU_G,
326 .valid_tx_ant = ANT_A,
327 .valid_rx_ant = ANT_A,
328 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
329 .eeprom_calib_ver = EEPROM_1000_TX_POWER_VERSION,
330 .ops = &iwl1000_ops,
331 .mod_params = &iwlagn_mod_params,
332 .base_params = &iwl1000_base_params,
333}; 322};
334 323
335MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX)); 324MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 176e52577673..a9b852be4509 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -51,6 +51,7 @@
51#include "iwl-led.h" 51#include "iwl-led.h"
52#include "iwl-3945-led.h" 52#include "iwl-3945-led.h"
53#include "iwl-3945-debugfs.h" 53#include "iwl-3945-debugfs.h"
54#include "iwl-legacy.h"
54 55
55#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 56#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
56 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 57 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -115,7 +116,7 @@ void iwl3945_disable_events(struct iwl_priv *priv)
115 u32 base; /* SRAM address of event log header */ 116 u32 base; /* SRAM address of event log header */
116 u32 disable_ptr; /* SRAM address of event-disable bitmap array */ 117 u32 disable_ptr; /* SRAM address of event-disable bitmap array */
117 u32 array_size; /* # of u32 entries in array */ 118 u32 array_size; /* # of u32 entries in array */
118 u32 evt_disable[IWL_EVT_DISABLE_SIZE] = { 119 static const u32 evt_disable[IWL_EVT_DISABLE_SIZE] = {
119 0x00000000, /* 31 - 0 Event id numbers */ 120 0x00000000, /* 31 - 0 Event id numbers */
120 0x00000000, /* 63 - 32 */ 121 0x00000000, /* 63 - 32 */
121 0x00000000, /* 95 - 64 */ 122 0x00000000, /* 95 - 64 */
@@ -296,7 +297,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
296 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) && 297 if (iwl_queue_space(q) > q->low_mark && (txq_id >= 0) &&
297 (txq_id != IWL39_CMD_QUEUE_NUM) && 298 (txq_id != IWL39_CMD_QUEUE_NUM) &&
298 priv->mac80211_registered) 299 priv->mac80211_registered)
299 iwl_wake_queue(priv, txq_id); 300 iwl_wake_queue(priv, txq);
300} 301}
301 302
302/** 303/**
@@ -324,6 +325,7 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
324 return; 325 return;
325 } 326 }
326 327
328 txq->time_stamp = jiffies;
327 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 329 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
328 ieee80211_tx_info_clear_status(info); 330 ieee80211_tx_info_clear_status(info);
329 331
@@ -1451,6 +1453,10 @@ static int iwl3945_send_tx_power(struct iwl_priv *priv)
1451 }; 1453 };
1452 u16 chan; 1454 u16 chan;
1453 1455
1456 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1457 "TX Power requested while scanning!\n"))
1458 return -EAGAIN;
1459
1454 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel); 1460 chan = le16_to_cpu(priv->contexts[IWL_RXON_CTX_BSS].active.channel);
1455 1461
1456 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1; 1462 txpower.band = (priv->band == IEEE80211_BAND_5GHZ) ? 0 : 1;
@@ -1779,6 +1785,9 @@ int iwl3945_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1779 int rc = 0; 1785 int rc = 0;
1780 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK); 1786 bool new_assoc = !!(staging_rxon->filter_flags & RXON_FILTER_ASSOC_MSK);
1781 1787
1788 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1789 return -EINVAL;
1790
1782 if (!iwl_is_alive(priv)) 1791 if (!iwl_is_alive(priv))
1783 return -1; 1792 return -1;
1784 1793
@@ -2722,11 +2731,9 @@ static struct iwl_lib_ops iwl3945_lib = {
2722 }, 2731 },
2723 .send_tx_power = iwl3945_send_tx_power, 2732 .send_tx_power = iwl3945_send_tx_power,
2724 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr, 2733 .is_valid_rtc_data_addr = iwl3945_hw_valid_rtc_data_addr,
2725 .post_associate = iwl3945_post_associate, 2734 .isr_ops = {
2726 .isr = iwl_isr_legacy, 2735 .isr = iwl_isr_legacy,
2727 .config_ap = iwl3945_config_ap, 2736 },
2728 .manage_ibss_station = iwl3945_manage_ibss_station,
2729 .recover_from_tx_stall = iwl_bg_monitor_recover,
2730 .check_plcp_health = iwl3945_good_plcp_health, 2737 .check_plcp_health = iwl3945_good_plcp_health,
2731 2738
2732 .debugfs_ops = { 2739 .debugfs_ops = {
@@ -2736,10 +2743,16 @@ static struct iwl_lib_ops iwl3945_lib = {
2736 }, 2743 },
2737}; 2744};
2738 2745
2746static const struct iwl_legacy_ops iwl3945_legacy_ops = {
2747 .post_associate = iwl3945_post_associate,
2748 .config_ap = iwl3945_config_ap,
2749 .manage_ibss_station = iwl3945_manage_ibss_station,
2750};
2751
2739static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2752static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2740 .get_hcmd_size = iwl3945_get_hcmd_size, 2753 .get_hcmd_size = iwl3945_get_hcmd_size,
2741 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2754 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2742 .tx_cmd_protection = iwlcore_tx_cmd_protection, 2755 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2743 .request_scan = iwl3945_request_scan, 2756 .request_scan = iwl3945_request_scan,
2744 .post_scan = iwl3945_post_scan, 2757 .post_scan = iwl3945_post_scan,
2745}; 2758};
@@ -2749,6 +2762,8 @@ static const struct iwl_ops iwl3945_ops = {
2749 .hcmd = &iwl3945_hcmd, 2762 .hcmd = &iwl3945_hcmd,
2750 .utils = &iwl3945_hcmd_utils, 2763 .utils = &iwl3945_hcmd_utils,
2751 .led = &iwl3945_led_ops, 2764 .led = &iwl3945_led_ops,
2765 .legacy = &iwl3945_legacy_ops,
2766 .ieee80211_ops = &iwl3945_hw_ops,
2752}; 2767};
2753 2768
2754static struct iwl_base_params iwl3945_base_params = { 2769static struct iwl_base_params iwl3945_base_params = {
@@ -2761,7 +2776,7 @@ static struct iwl_base_params iwl3945_base_params = {
2761 .led_compensation = 64, 2776 .led_compensation = 64,
2762 .broken_powersave = true, 2777 .broken_powersave = true,
2763 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 2778 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
2764 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, 2779 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2765 .max_event_log_size = 512, 2780 .max_event_log_size = 512,
2766 .tx_power_by_driver = true, 2781 .tx_power_by_driver = true,
2767}; 2782};
@@ -2776,6 +2791,7 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2776 .ops = &iwl3945_ops, 2791 .ops = &iwl3945_ops,
2777 .mod_params = &iwl3945_mod_params, 2792 .mod_params = &iwl3945_mod_params,
2778 .base_params = &iwl3945_base_params, 2793 .base_params = &iwl3945_base_params,
2794 .led_mode = IWL_LED_BLINK,
2779}; 2795};
2780 2796
2781static struct iwl_cfg iwl3945_abg_cfg = { 2797static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2788,6 +2804,7 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2788 .ops = &iwl3945_ops, 2804 .ops = &iwl3945_ops,
2789 .mod_params = &iwl3945_mod_params, 2805 .mod_params = &iwl3945_mod_params,
2790 .base_params = &iwl3945_base_params, 2806 .base_params = &iwl3945_base_params,
2807 .led_mode = IWL_LED_BLINK,
2791}; 2808};
2792 2809
2793DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = { 2810DEFINE_PCI_DEVICE_TABLE(iwl3945_hw_card_ids) = {
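The iwl-3945.c hunks above pull the mac80211-facing callbacks (post_associate, config_ap, manage_ibss_station) out of struct iwl_lib_ops into a separate iwl_legacy_ops table, group the interrupt handler under .isr_ops, and hang both the legacy table and the driver's ieee80211_ops off the top-level iwl3945_ops. The stand-alone sketch below only illustrates that nested ops-table pattern; every demo_* name is hypothetical and is not an iwlwifi type.

/*
 * Illustrative sketch only: a stripped-down model of the nested ops-table
 * layout introduced above.  All demo_* names are hypothetical and do not
 * exist in iwlwifi.
 */
#include <stdio.h>

struct demo_isr_ops {
	void (*isr)(void *priv);		/* grouped like .isr_ops = { ... } */
};

struct demo_legacy_ops {			/* 3945/4965-only callbacks */
	void (*post_associate)(void *priv);
	void (*config_ap)(void *priv);
};

struct demo_ops {
	struct demo_isr_ops isr_ops;
	const struct demo_legacy_ops *legacy;
};

static void demo_isr(void *priv)            { (void)priv; puts("isr"); }
static void demo_post_associate(void *priv) { (void)priv; puts("post_associate"); }
static void demo_config_ap(void *priv)      { (void)priv; puts("config_ap"); }

static const struct demo_legacy_ops demo_legacy_ops = {
	.post_associate	= demo_post_associate,
	.config_ap	= demo_config_ap,
};

static const struct demo_ops demo_ops = {
	.isr_ops	= { .isr = demo_isr },
	.legacy		= &demo_legacy_ops,
};

int main(void)
{
	demo_ops.isr_ops.isr(NULL);		/* interrupt path */
	demo_ops.legacy->post_associate(NULL);	/* legacy-only path */
	return 0;
}

Grouping the 3945/4965-only callbacks behind one pointer presumably lets code shared with the newer agn devices leave .legacy unset rather than carry a flat list of unused fields.
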
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 09391f0ee61f..3eef1eb74a78 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -264,10 +264,8 @@ void iwl3945_reply_statistics(struct iwl_priv *priv,
264 struct iwl_rx_mem_buffer *rxb); 264 struct iwl_rx_mem_buffer *rxb);
265extern void iwl3945_disable_events(struct iwl_priv *priv); 265extern void iwl3945_disable_events(struct iwl_priv *priv);
266extern int iwl4965_get_temperature(const struct iwl_priv *priv); 266extern int iwl4965_get_temperature(const struct iwl_priv *priv);
267extern void iwl3945_post_associate(struct iwl_priv *priv, 267extern void iwl3945_post_associate(struct iwl_priv *priv);
268 struct ieee80211_vif *vif); 268extern void iwl3945_config_ap(struct iwl_priv *priv);
269extern void iwl3945_config_ap(struct iwl_priv *priv,
270 struct ieee80211_vif *vif);
271 269
272extern int iwl3945_commit_rxon(struct iwl_priv *priv, 270extern int iwl3945_commit_rxon(struct iwl_priv *priv,
273 struct iwl_rxon_context *ctx); 271 struct iwl_rxon_context *ctx);
@@ -282,6 +280,8 @@ extern int iwl3945_commit_rxon(struct iwl_priv *priv,
282 */ 280 */
283extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); 281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
284 282
283extern struct ieee80211_ops iwl3945_hw_ops;
284
285/* 285/*
286 * Forward declare iwl-3945.c functions for iwl-base.c 286 * Forward declare iwl-3945.c functions for iwl-base.c
287 */ 287 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index b207e3e9299f..3f1e5f1bf847 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -48,6 +48,7 @@
48#include "iwl-agn-led.h" 48#include "iwl-agn-led.h"
49#include "iwl-agn.h" 49#include "iwl-agn.h"
50#include "iwl-agn-debugfs.h" 50#include "iwl-agn-debugfs.h"
51#include "iwl-legacy.h"
51 52
52static int iwl4965_send_tx_power(struct iwl_priv *priv); 53static int iwl4965_send_tx_power(struct iwl_priv *priv);
53static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 54static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -1377,13 +1378,9 @@ static int iwl4965_send_tx_power(struct iwl_priv *priv)
1377 u8 ctrl_chan_high = 0; 1378 u8 ctrl_chan_high = 0;
1378 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 1379 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
1379 1380
1380 if (test_bit(STATUS_SCANNING, &priv->status)) { 1381 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
1381 /* If this gets hit a lot, switch it to a BUG() and catch 1382 "TX Power requested while scanning!\n"))
1382 * the stack trace to find out who is calling this during
1383 * a scan. */
1384 IWL_WARN(priv, "TX Power requested while scanning!\n");
1385 return -EAGAIN; 1383 return -EAGAIN;
1386 }
1387 1384
1388 band = priv->band == IEEE80211_BAND_2GHZ; 1385 band = priv->band == IEEE80211_BAND_2GHZ;
1389 1386
@@ -1447,6 +1444,142 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv,
1447 return ret; 1444 return ret;
1448} 1445}
1449 1446
1447static int iwl4965_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1448{
1449 /* cast away the const for active_rxon in this function */
1450 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
1451 int ret;
1452 bool new_assoc =
1453 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
1454
1455 if (!iwl_is_alive(priv))
1456 return -EBUSY;
1457
1458 if (!ctx->is_active)
1459 return 0;
1460
1461 /* always get timestamp with Rx frame */
1462 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
1463
1464 ret = iwl_check_rxon_cmd(priv, ctx);
1465 if (ret) {
1466 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
1467 return -EINVAL;
1468 }
1469
1470 /*
1471 * receive commit_rxon request
1472 * abort any previous channel switch if still in process
1473 */
1474 if (priv->switch_rxon.switch_in_progress &&
1475 (priv->switch_rxon.channel != ctx->staging.channel)) {
1476 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
1477 le16_to_cpu(priv->switch_rxon.channel));
1478 iwl_chswitch_done(priv, false);
1479 }
1480
1481 /* If we don't need to send a full RXON, we can use
1482 * iwl_rxon_assoc_cmd which is used to reconfigure filter
1483 * and other flags for the current radio configuration. */
1484 if (!iwl_full_rxon_required(priv, ctx)) {
1485 ret = iwl_send_rxon_assoc(priv, ctx);
1486 if (ret) {
1487 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
1488 return ret;
1489 }
1490
1491 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1492 iwl_print_rx_config_cmd(priv, ctx);
1493 return 0;
1494 }
1495
1496 /* If we are currently associated and the new config requires
1497 * an RXON_ASSOC and the new config wants the associated mask enabled,
1498 * we must clear the associated from the active configuration
1499 * before we apply the new config */
1500 if (iwl_is_associated_ctx(ctx) && new_assoc) {
1501 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
1502 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1503
1504 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1505 sizeof(struct iwl_rxon_cmd),
1506 active_rxon);
1507
1508 /* If the mask clearing failed then we set
1509 * active_rxon back to what it was previously */
1510 if (ret) {
1511 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
1512 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
1513 return ret;
1514 }
1515 iwl_clear_ucode_stations(priv, ctx);
1516 iwl_restore_stations(priv, ctx);
1517 ret = iwl_restore_default_wep_keys(priv, ctx);
1518 if (ret) {
1519 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1520 return ret;
1521 }
1522 }
1523
1524 IWL_DEBUG_INFO(priv, "Sending RXON\n"
1525 "* with%s RXON_FILTER_ASSOC_MSK\n"
1526 "* channel = %d\n"
1527 "* bssid = %pM\n",
1528 (new_assoc ? "" : "out"),
1529 le16_to_cpu(ctx->staging.channel),
1530 ctx->staging.bssid_addr);
1531
1532 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
1533
1534 /* Apply the new configuration
1535 * RXON unassoc clears the station table in uCode so restoration of
1536 * stations is needed after it (the RXON command) completes
1537 */
1538 if (!new_assoc) {
1539 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1540 sizeof(struct iwl_rxon_cmd), &ctx->staging);
1541 if (ret) {
1542 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1543 return ret;
1544 }
1545 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
1546 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1547 iwl_clear_ucode_stations(priv, ctx);
1548 iwl_restore_stations(priv, ctx);
1549 ret = iwl_restore_default_wep_keys(priv, ctx);
1550 if (ret) {
1551 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
1552 return ret;
1553 }
1554 }
1555 if (new_assoc) {
1556 priv->start_calib = 0;
1557 /* Apply the new configuration
1558 * RXON assoc doesn't clear the station table in uCode,
1559 */
1560 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
1561 sizeof(struct iwl_rxon_cmd), &ctx->staging);
1562 if (ret) {
1563 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
1564 return ret;
1565 }
1566 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
1567 }
1568 iwl_print_rx_config_cmd(priv, ctx);
1569
1570 iwl_init_sensitivity(priv);
1571
1572 /* If we issue a new RXON command which required a tune then we must
1573 * send a new TXPOWER command or we won't be able to Tx any frames */
1574 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
1575 if (ret) {
1576 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
1577 return ret;
1578 }
1579
1580 return 0;
1581}
1582
1450static int iwl4965_hw_channel_switch(struct iwl_priv *priv, 1583static int iwl4965_hw_channel_switch(struct iwl_priv *priv,
1451 struct ieee80211_channel_switch *ch_switch) 1584 struct ieee80211_channel_switch *ch_switch)
1452{ 1585{
@@ -1554,22 +1687,6 @@ static void iwl4965_txq_update_byte_cnt_tbl(struct iwl_priv *priv,
1554} 1687}
1555 1688
1556/** 1689/**
1557 * sign_extend - Sign extend a value using specified bit as sign-bit
1558 *
1559 * Example: sign_extend(9, 3) would return -7 as bit3 of 1001b is 1
1560 * and bit0..2 is 001b which when sign extended to 1111111111111001b is -7.
1561 *
1562 * @param oper value to sign extend
1563 * @param index 0 based bit index (0<=index<32) to sign bit
1564 */
1565static s32 sign_extend(u32 oper, int index)
1566{
1567 u8 shift = 31 - index;
1568
1569 return (s32)(oper << shift) >> shift;
1570}
1571
1572/**
1573 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin) 1690 * iwl4965_hw_get_temperature - return the calibrated temperature (in Kelvin)
1574 * @statistics: Provides the temperature reading from the uCode 1691 * @statistics: Provides the temperature reading from the uCode
1575 * 1692 *
@@ -1606,9 +1723,9 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv)
1606 * "initialize" ALIVE response. 1723 * "initialize" ALIVE response.
1607 */ 1724 */
1608 if (!test_bit(STATUS_TEMPERATURE, &priv->status)) 1725 if (!test_bit(STATUS_TEMPERATURE, &priv->status))
1609 vt = sign_extend(R4, 23); 1726 vt = sign_extend32(R4, 23);
1610 else 1727 else
1611 vt = sign_extend(le32_to_cpu(priv->_agn.statistics. 1728 vt = sign_extend32(le32_to_cpu(priv->_agn.statistics.
1612 general.common.temperature), 23); 1729 general.common.temperature), 23);
1613 1730
1614 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt); 1731 IWL_DEBUG_TEMP(priv, "Calib values R[1-3]: %d %d %d R4: %d\n", R1, R2, R3, vt);
@@ -2081,6 +2198,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2081 return; 2198 return;
2082 } 2199 }
2083 2200
2201 txq->time_stamp = jiffies;
2084 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 2202 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
2085 memset(&info->status, 0, sizeof(info->status)); 2203 memset(&info->status, 0, sizeof(info->status));
2086 2204
@@ -2121,12 +2239,8 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2121 2239
2122 if (priv->mac80211_registered && 2240 if (priv->mac80211_registered &&
2123 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 2241 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
2124 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 2242 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
2125 if (agg->state == IWL_AGG_OFF) 2243 iwl_wake_queue(priv, txq);
2126 iwl_wake_queue(priv, txq_id);
2127 else
2128 iwl_wake_queue(priv, txq->swq_id);
2129 }
2130 } 2244 }
2131 } else { 2245 } else {
2132 info->status.rates[0].count = tx_resp->failure_frame + 1; 2246 info->status.rates[0].count = tx_resp->failure_frame + 1;
@@ -2150,7 +2264,7 @@ static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2150 2264
2151 if (priv->mac80211_registered && 2265 if (priv->mac80211_registered &&
2152 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 2266 (iwl_queue_space(&txq->q) > txq->q.low_mark))
2153 iwl_wake_queue(priv, txq_id); 2267 iwl_wake_queue(priv, txq);
2154 } 2268 }
2155 if (qc && likely(sta_id != IWL_INVALID_STATION)) 2269 if (qc && likely(sta_id != IWL_INVALID_STATION))
2156 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 2270 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
@@ -2216,7 +2330,7 @@ static void iwl4965_cancel_deferred_work(struct iwl_priv *priv)
2216 2330
2217static struct iwl_hcmd_ops iwl4965_hcmd = { 2331static struct iwl_hcmd_ops iwl4965_hcmd = {
2218 .rxon_assoc = iwl4965_send_rxon_assoc, 2332 .rxon_assoc = iwl4965_send_rxon_assoc,
2219 .commit_rxon = iwlagn_commit_rxon, 2333 .commit_rxon = iwl4965_commit_rxon,
2220 .set_rxon_chain = iwlagn_set_rxon_chain, 2334 .set_rxon_chain = iwlagn_set_rxon_chain,
2221 .send_bt_config = iwl_send_bt_config, 2335 .send_bt_config = iwl_send_bt_config,
2222}; 2336};
@@ -2233,12 +2347,155 @@ static void iwl4965_post_scan(struct iwl_priv *priv)
2233 iwlcore_commit_rxon(priv, ctx); 2347 iwlcore_commit_rxon(priv, ctx);
2234} 2348}
2235 2349
2350static void iwl4965_post_associate(struct iwl_priv *priv)
2351{
2352 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2353 struct ieee80211_vif *vif = ctx->vif;
2354 struct ieee80211_conf *conf = NULL;
2355 int ret = 0;
2356
2357 if (!vif || !priv->is_open)
2358 return;
2359
2360 if (vif->type == NL80211_IFTYPE_AP) {
2361 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
2362 return;
2363 }
2364
2365 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2366 return;
2367
2368 iwl_scan_cancel_timeout(priv, 200);
2369
2370 conf = ieee80211_get_hw_conf(priv->hw);
2371
2372 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2373 iwlcore_commit_rxon(priv, ctx);
2374
2375 ret = iwl_send_rxon_timing(priv, ctx);
2376 if (ret)
2377 IWL_WARN(priv, "RXON timing - "
2378 "Attempting to continue.\n");
2379
2380 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2381
2382 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2383
2384 if (priv->cfg->ops->hcmd->set_rxon_chain)
2385 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2386
2387 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
2388
2389 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
2390 vif->bss_conf.aid, vif->bss_conf.beacon_int);
2391
2392 if (vif->bss_conf.use_short_preamble)
2393 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
2394 else
2395 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
2396
2397 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2398 if (vif->bss_conf.use_short_slot)
2399 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
2400 else
2401 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
2402 }
2403
2404 iwlcore_commit_rxon(priv, ctx);
2405
2406 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
2407 vif->bss_conf.aid, ctx->active.bssid_addr);
2408
2409 switch (vif->type) {
2410 case NL80211_IFTYPE_STATION:
2411 break;
2412 case NL80211_IFTYPE_ADHOC:
2413 iwlagn_send_beacon_cmd(priv);
2414 break;
2415 default:
2416 IWL_ERR(priv, "%s Should not be called in %d mode\n",
2417 __func__, vif->type);
2418 break;
2419 }
2420
 2421	/* the chain noise calibration will enable PM upon completion
2422 * If chain noise has already been run, then we need to enable
2423 * power management here */
2424 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
2425 iwl_power_update_mode(priv, false);
2426
2427 /* Enable Rx differential gain and sensitivity calibrations */
2428 iwl_chain_noise_reset(priv);
2429 priv->start_calib = 1;
2430}
2431
2432static void iwl4965_config_ap(struct iwl_priv *priv)
2433{
2434 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2435 struct ieee80211_vif *vif = ctx->vif;
2436 int ret = 0;
2437
2438 lockdep_assert_held(&priv->mutex);
2439
2440 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2441 return;
2442
2443 /* The following should be done only at AP bring up */
2444 if (!iwl_is_associated_ctx(ctx)) {
2445
2446 /* RXON - unassoc (to set timing command) */
2447 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2448 iwlcore_commit_rxon(priv, ctx);
2449
2450 /* RXON Timing */
2451 ret = iwl_send_rxon_timing(priv, ctx);
2452 if (ret)
2453 IWL_WARN(priv, "RXON timing failed - "
2454 "Attempting to continue.\n");
2455
2456 /* AP has all antennas */
2457 priv->chain_noise_data.active_chains =
2458 priv->hw_params.valid_rx_ant;
2459 iwl_set_rxon_ht(priv, &priv->current_ht_config);
2460 if (priv->cfg->ops->hcmd->set_rxon_chain)
2461 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
2462
2463 ctx->staging.assoc_id = 0;
2464
2465 if (vif->bss_conf.use_short_preamble)
2466 ctx->staging.flags |=
2467 RXON_FLG_SHORT_PREAMBLE_MSK;
2468 else
2469 ctx->staging.flags &=
2470 ~RXON_FLG_SHORT_PREAMBLE_MSK;
2471
2472 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
2473 if (vif->bss_conf.use_short_slot)
2474 ctx->staging.flags |=
2475 RXON_FLG_SHORT_SLOT_MSK;
2476 else
2477 ctx->staging.flags &=
2478 ~RXON_FLG_SHORT_SLOT_MSK;
2479 }
2480 /* need to send beacon cmd before committing assoc RXON! */
2481 iwlagn_send_beacon_cmd(priv);
2482 /* restore RXON assoc */
2483 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
2484 iwlcore_commit_rxon(priv, ctx);
2485 }
2486 iwlagn_send_beacon_cmd(priv);
2487
2488 /* FIXME - we need to add code here to detect a totally new
2489 * configuration, reset the AP, unassoc, rxon timing, assoc,
2490 * clear sta table, add BCAST sta... */
2491}
2492
2236static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = { 2493static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2237 .get_hcmd_size = iwl4965_get_hcmd_size, 2494 .get_hcmd_size = iwl4965_get_hcmd_size,
2238 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2495 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2239 .chain_noise_reset = iwl4965_chain_noise_reset, 2496 .chain_noise_reset = iwl4965_chain_noise_reset,
2240 .gain_computation = iwl4965_gain_computation, 2497 .gain_computation = iwl4965_gain_computation,
2241 .tx_cmd_protection = iwlcore_tx_cmd_protection, 2498 .tx_cmd_protection = iwl_legacy_tx_cmd_protection,
2242 .calc_rssi = iwl4965_calc_rssi, 2499 .calc_rssi = iwl4965_calc_rssi,
2243 .request_scan = iwlagn_request_scan, 2500 .request_scan = iwlagn_request_scan,
2244 .post_scan = iwl4965_post_scan, 2501 .post_scan = iwl4965_post_scan,
@@ -2285,14 +2542,12 @@ static struct iwl_lib_ops iwl4965_lib = {
2285 }, 2542 },
2286 .send_tx_power = iwl4965_send_tx_power, 2543 .send_tx_power = iwl4965_send_tx_power,
2287 .update_chain_flags = iwl_update_chain_flags, 2544 .update_chain_flags = iwl_update_chain_flags,
2288 .post_associate = iwl_post_associate, 2545 .isr_ops = {
2289 .config_ap = iwl_config_ap, 2546 .isr = iwl_isr_legacy,
2290 .isr = iwl_isr_legacy, 2547 },
2291 .temp_ops = { 2548 .temp_ops = {
2292 .temperature = iwl4965_temperature_calib, 2549 .temperature = iwl4965_temperature_calib,
2293 }, 2550 },
2294 .manage_ibss_station = iwlagn_manage_ibss_station,
2295 .update_bcast_stations = iwl_update_bcast_stations,
2296 .debugfs_ops = { 2551 .debugfs_ops = {
2297 .rx_stats_read = iwl_ucode_rx_stats_read, 2552 .rx_stats_read = iwl_ucode_rx_stats_read,
2298 .tx_stats_read = iwl_ucode_tx_stats_read, 2553 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -2300,15 +2555,46 @@ static struct iwl_lib_ops iwl4965_lib = {
2300 .bt_stats_read = iwl_ucode_bt_stats_read, 2555 .bt_stats_read = iwl_ucode_bt_stats_read,
2301 .reply_tx_error = iwl_reply_tx_error_read, 2556 .reply_tx_error = iwl_reply_tx_error_read,
2302 }, 2557 },
2303 .recover_from_tx_stall = iwl_bg_monitor_recover,
2304 .check_plcp_health = iwl_good_plcp_health, 2558 .check_plcp_health = iwl_good_plcp_health,
2305}; 2559};
2306 2560
2561static const struct iwl_legacy_ops iwl4965_legacy_ops = {
2562 .post_associate = iwl4965_post_associate,
2563 .config_ap = iwl4965_config_ap,
2564 .manage_ibss_station = iwlagn_manage_ibss_station,
2565 .update_bcast_stations = iwl_update_bcast_stations,
2566};
2567
2568struct ieee80211_ops iwl4965_hw_ops = {
2569 .tx = iwlagn_mac_tx,
2570 .start = iwlagn_mac_start,
2571 .stop = iwlagn_mac_stop,
2572 .add_interface = iwl_mac_add_interface,
2573 .remove_interface = iwl_mac_remove_interface,
2574 .change_interface = iwl_mac_change_interface,
2575 .config = iwl_legacy_mac_config,
2576 .configure_filter = iwlagn_configure_filter,
2577 .set_key = iwlagn_mac_set_key,
2578 .update_tkip_key = iwlagn_mac_update_tkip_key,
2579 .conf_tx = iwl_mac_conf_tx,
2580 .reset_tsf = iwl_legacy_mac_reset_tsf,
2581 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
2582 .ampdu_action = iwlagn_mac_ampdu_action,
2583 .hw_scan = iwl_mac_hw_scan,
2584 .sta_add = iwlagn_mac_sta_add,
2585 .sta_remove = iwl_mac_sta_remove,
2586 .channel_switch = iwlagn_mac_channel_switch,
2587 .flush = iwlagn_mac_flush,
2588 .tx_last_beacon = iwl_mac_tx_last_beacon,
2589};
2590
2307static const struct iwl_ops iwl4965_ops = { 2591static const struct iwl_ops iwl4965_ops = {
2308 .lib = &iwl4965_lib, 2592 .lib = &iwl4965_lib,
2309 .hcmd = &iwl4965_hcmd, 2593 .hcmd = &iwl4965_hcmd,
2310 .utils = &iwl4965_hcmd_utils, 2594 .utils = &iwl4965_hcmd_utils,
2311 .led = &iwlagn_led_ops, 2595 .led = &iwlagn_led_ops,
2596 .legacy = &iwl4965_legacy_ops,
2597 .ieee80211_ops = &iwl4965_hw_ops,
2312}; 2598};
2313 2599
2314static struct iwl_base_params iwl4965_base_params = { 2600static struct iwl_base_params iwl4965_base_params = {
@@ -2323,13 +2609,14 @@ static struct iwl_base_params iwl4965_base_params = {
2323 .led_compensation = 61, 2609 .led_compensation = 61,
2324 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS, 2610 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2325 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 2611 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
2326 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, 2612 .wd_timeout = IWL_DEF_WD_TIMEOUT,
2327 .temperature_kelvin = true, 2613 .temperature_kelvin = true,
2328 .max_event_log_size = 512, 2614 .max_event_log_size = 512,
2329 .tx_power_by_driver = true, 2615 .tx_power_by_driver = true,
2330 .ucode_tracing = true, 2616 .ucode_tracing = true,
2331 .sensitivity_calib_by_driver = true, 2617 .sensitivity_calib_by_driver = true,
2332 .chain_noise_calib_by_driver = true, 2618 .chain_noise_calib_by_driver = true,
2619 .no_agg_framecnt_info = true,
2333}; 2620};
2334 2621
2335struct iwl_cfg iwl4965_agn_cfg = { 2622struct iwl_cfg iwl4965_agn_cfg = {
@@ -2337,7 +2624,6 @@ struct iwl_cfg iwl4965_agn_cfg = {
2337 .fw_name_pre = IWL4965_FW_PRE, 2624 .fw_name_pre = IWL4965_FW_PRE,
2338 .ucode_api_max = IWL4965_UCODE_API_MAX, 2625 .ucode_api_max = IWL4965_UCODE_API_MAX,
2339 .ucode_api_min = IWL4965_UCODE_API_MIN, 2626 .ucode_api_min = IWL4965_UCODE_API_MIN,
2340 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
2341 .valid_tx_ant = ANT_AB, 2627 .valid_tx_ant = ANT_AB,
2342 .valid_rx_ant = ANT_ABC, 2628 .valid_rx_ant = ANT_ABC,
2343 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2629 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
@@ -2345,6 +2631,7 @@ struct iwl_cfg iwl4965_agn_cfg = {
2345 .ops = &iwl4965_ops, 2631 .ops = &iwl4965_ops,
2346 .mod_params = &iwlagn_mod_params, 2632 .mod_params = &iwlagn_mod_params,
2347 .base_params = &iwl4965_base_params, 2633 .base_params = &iwl4965_base_params,
2634 .led_mode = IWL_LED_BLINK,
2348 /* 2635 /*
2349 * Force use of chains B and C for scan RX on 5 GHz band 2636 * Force use of chains B and C for scan RX on 5 GHz band
2350 * because the device has off-channel reception on chain A. 2637 * because the device has off-channel reception on chain A.
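Besides forking commit_rxon, post_associate and config_ap for the 4965, the hunks above drop the driver-local sign_extend() helper in favor of the kernel-wide sign_extend32(); the removed kerneldoc gives the example sign_extend(9, 3) == -7. The userspace sketch below reimplements the same arithmetic only to show the equivalence; it is a stand-in, not the kernel header.

/*
 * Userspace stand-in for the kernel's sign_extend32() (declared in
 * <linux/bitops.h>), shown only to demonstrate the arithmetic of the
 * removed local helper.  Like the original, it relies on an arithmetic
 * right shift of a signed value.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static int32_t demo_sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;	/* move the chosen sign bit to bit 31 */

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	/* Example from the removed kerneldoc: bit 3 of 1001b is set, so 9
	 * sign-extends to ...11111001b == -7. */
	assert(demo_sign_extend32(9, 3) == -7);
	printf("%d\n", demo_sign_extend32(9, 3));
	return 0;
}

Both versions shift the chosen sign bit up to bit 31 and then shift back, letting the arithmetic right shift replicate it across the upper bits.
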
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index fd9fbc93ea1b..79ab0a6b1386 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -385,14 +385,16 @@ static struct iwl_lib_ops iwl5000_lib = {
385 .calib_version = iwlagn_eeprom_calib_version, 385 .calib_version = iwlagn_eeprom_calib_version,
386 .query_addr = iwlagn_eeprom_query_addr, 386 .query_addr = iwlagn_eeprom_query_addr,
387 }, 387 },
388 .post_associate = iwl_post_associate, 388 .isr_ops = {
389 .isr = iwl_isr_ict, 389 .isr = iwl_isr_ict,
390 .config_ap = iwl_config_ap, 390 .free = iwl_free_isr_ict,
391 .alloc = iwl_alloc_isr_ict,
392 .reset = iwl_reset_ict,
393 .disable = iwl_disable_ict,
394 },
391 .temp_ops = { 395 .temp_ops = {
392 .temperature = iwlagn_temperature, 396 .temperature = iwlagn_temperature,
393 }, 397 },
394 .manage_ibss_station = iwlagn_manage_ibss_station,
395 .update_bcast_stations = iwl_update_bcast_stations,
396 .debugfs_ops = { 398 .debugfs_ops = {
397 .rx_stats_read = iwl_ucode_rx_stats_read, 399 .rx_stats_read = iwl_ucode_rx_stats_read,
398 .tx_stats_read = iwl_ucode_tx_stats_read, 400 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -400,7 +402,6 @@ static struct iwl_lib_ops iwl5000_lib = {
400 .bt_stats_read = iwl_ucode_bt_stats_read, 402 .bt_stats_read = iwl_ucode_bt_stats_read,
401 .reply_tx_error = iwl_reply_tx_error_read, 403 .reply_tx_error = iwl_reply_tx_error_read,
402 }, 404 },
403 .recover_from_tx_stall = iwl_bg_monitor_recover,
404 .check_plcp_health = iwl_good_plcp_health, 405 .check_plcp_health = iwl_good_plcp_health,
405 .check_ack_health = iwl_good_ack_health, 406 .check_ack_health = iwl_good_ack_health,
406 .txfifo_flush = iwlagn_txfifo_flush, 407 .txfifo_flush = iwlagn_txfifo_flush,
@@ -453,14 +454,16 @@ static struct iwl_lib_ops iwl5150_lib = {
453 .calib_version = iwlagn_eeprom_calib_version, 454 .calib_version = iwlagn_eeprom_calib_version,
454 .query_addr = iwlagn_eeprom_query_addr, 455 .query_addr = iwlagn_eeprom_query_addr,
455 }, 456 },
456 .post_associate = iwl_post_associate, 457 .isr_ops = {
457 .isr = iwl_isr_ict, 458 .isr = iwl_isr_ict,
458 .config_ap = iwl_config_ap, 459 .free = iwl_free_isr_ict,
460 .alloc = iwl_alloc_isr_ict,
461 .reset = iwl_reset_ict,
462 .disable = iwl_disable_ict,
463 },
459 .temp_ops = { 464 .temp_ops = {
460 .temperature = iwl5150_temperature, 465 .temperature = iwl5150_temperature,
461 }, 466 },
462 .manage_ibss_station = iwlagn_manage_ibss_station,
463 .update_bcast_stations = iwl_update_bcast_stations,
464 .debugfs_ops = { 467 .debugfs_ops = {
465 .rx_stats_read = iwl_ucode_rx_stats_read, 468 .rx_stats_read = iwl_ucode_rx_stats_read,
466 .tx_stats_read = iwl_ucode_tx_stats_read, 469 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -468,7 +471,6 @@ static struct iwl_lib_ops iwl5150_lib = {
468 .bt_stats_read = iwl_ucode_bt_stats_read, 471 .bt_stats_read = iwl_ucode_bt_stats_read,
469 .reply_tx_error = iwl_reply_tx_error_read, 472 .reply_tx_error = iwl_reply_tx_error_read,
470 }, 473 },
471 .recover_from_tx_stall = iwl_bg_monitor_recover,
472 .check_plcp_health = iwl_good_plcp_health, 474 .check_plcp_health = iwl_good_plcp_health,
473 .check_ack_health = iwl_good_ack_health, 475 .check_ack_health = iwl_good_ack_health,
474 .txfifo_flush = iwlagn_txfifo_flush, 476 .txfifo_flush = iwlagn_txfifo_flush,
@@ -485,6 +487,7 @@ static const struct iwl_ops iwl5000_ops = {
485 .hcmd = &iwlagn_hcmd, 487 .hcmd = &iwlagn_hcmd,
486 .utils = &iwlagn_hcmd_utils, 488 .utils = &iwlagn_hcmd_utils,
487 .led = &iwlagn_led_ops, 489 .led = &iwlagn_led_ops,
490 .ieee80211_ops = &iwlagn_hw_ops,
488}; 491};
489 492
490static const struct iwl_ops iwl5150_ops = { 493static const struct iwl_ops iwl5150_ops = {
@@ -492,6 +495,7 @@ static const struct iwl_ops iwl5150_ops = {
492 .hcmd = &iwlagn_hcmd, 495 .hcmd = &iwlagn_hcmd,
493 .utils = &iwlagn_hcmd_utils, 496 .utils = &iwlagn_hcmd_utils,
494 .led = &iwlagn_led_ops, 497 .led = &iwlagn_led_ops,
498 .ieee80211_ops = &iwlagn_hw_ops,
495}; 499};
496 500
497static struct iwl_base_params iwl5000_base_params = { 501static struct iwl_base_params iwl5000_base_params = {
@@ -505,7 +509,7 @@ static struct iwl_base_params iwl5000_base_params = {
505 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 509 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
506 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF, 510 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_LONG_THRESHOLD_DEF,
507 .chain_noise_scale = 1000, 511 .chain_noise_scale = 1000,
508 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, 512 .wd_timeout = IWL_LONG_WD_TIMEOUT,
509 .max_event_log_size = 512, 513 .max_event_log_size = 512,
510 .ucode_tracing = true, 514 .ucode_tracing = true,
511 .sensitivity_calib_by_driver = true, 515 .sensitivity_calib_by_driver = true,
@@ -516,66 +520,43 @@ static struct iwl_ht_params iwl5000_ht_params = {
516 .use_rts_for_aggregation = true, /* use rts/cts protection */ 520 .use_rts_for_aggregation = true, /* use rts/cts protection */
517}; 521};
518 522
523#define IWL_DEVICE_5000 \
524 .fw_name_pre = IWL5000_FW_PRE, \
525 .ucode_api_max = IWL5000_UCODE_API_MAX, \
526 .ucode_api_min = IWL5000_UCODE_API_MIN, \
527 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, \
528 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, \
529 .ops = &iwl5000_ops, \
530 .mod_params = &iwlagn_mod_params, \
531 .base_params = &iwl5000_base_params, \
532 .led_mode = IWL_LED_BLINK
533
519struct iwl_cfg iwl5300_agn_cfg = { 534struct iwl_cfg iwl5300_agn_cfg = {
520 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN", 535 .name = "Intel(R) Ultimate N WiFi Link 5300 AGN",
521 .fw_name_pre = IWL5000_FW_PRE, 536 IWL_DEVICE_5000,
522 .ucode_api_max = IWL5000_UCODE_API_MAX,
523 .ucode_api_min = IWL5000_UCODE_API_MIN,
524 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
525 .valid_tx_ant = ANT_ABC,
526 .valid_rx_ant = ANT_ABC,
527 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
528 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
529 .ops = &iwl5000_ops,
530 .mod_params = &iwlagn_mod_params,
531 .base_params = &iwl5000_base_params,
532 .ht_params = &iwl5000_ht_params, 537 .ht_params = &iwl5000_ht_params,
533}; 538};
534 539
535struct iwl_cfg iwl5100_bgn_cfg = { 540struct iwl_cfg iwl5100_bgn_cfg = {
536 .name = "Intel(R) WiFi Link 5100 BGN", 541 .name = "Intel(R) WiFi Link 5100 BGN",
537 .fw_name_pre = IWL5000_FW_PRE, 542 IWL_DEVICE_5000,
538 .ucode_api_max = IWL5000_UCODE_API_MAX, 543 .valid_tx_ant = ANT_B, /* .cfg overwrite */
539 .ucode_api_min = IWL5000_UCODE_API_MIN, 544 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
540 .sku = IWL_SKU_G|IWL_SKU_N,
541 .valid_tx_ant = ANT_B,
542 .valid_rx_ant = ANT_AB,
543 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
544 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
545 .ops = &iwl5000_ops,
546 .mod_params = &iwlagn_mod_params,
547 .base_params = &iwl5000_base_params,
548 .ht_params = &iwl5000_ht_params, 545 .ht_params = &iwl5000_ht_params,
549}; 546};
550 547
551struct iwl_cfg iwl5100_abg_cfg = { 548struct iwl_cfg iwl5100_abg_cfg = {
552 .name = "Intel(R) WiFi Link 5100 ABG", 549 .name = "Intel(R) WiFi Link 5100 ABG",
553 .fw_name_pre = IWL5000_FW_PRE, 550 IWL_DEVICE_5000,
554 .ucode_api_max = IWL5000_UCODE_API_MAX, 551 .valid_tx_ant = ANT_B, /* .cfg overwrite */
555 .ucode_api_min = IWL5000_UCODE_API_MIN, 552 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
556 .sku = IWL_SKU_A|IWL_SKU_G,
557 .valid_tx_ant = ANT_B,
558 .valid_rx_ant = ANT_AB,
559 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
560 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
561 .ops = &iwl5000_ops,
562 .mod_params = &iwlagn_mod_params,
563 .base_params = &iwl5000_base_params,
564}; 553};
565 554
566struct iwl_cfg iwl5100_agn_cfg = { 555struct iwl_cfg iwl5100_agn_cfg = {
567 .name = "Intel(R) WiFi Link 5100 AGN", 556 .name = "Intel(R) WiFi Link 5100 AGN",
568 .fw_name_pre = IWL5000_FW_PRE, 557 IWL_DEVICE_5000,
569 .ucode_api_max = IWL5000_UCODE_API_MAX, 558 .valid_tx_ant = ANT_B, /* .cfg overwrite */
570 .ucode_api_min = IWL5000_UCODE_API_MIN, 559 .valid_rx_ant = ANT_AB, /* .cfg overwrite */
571 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
572 .valid_tx_ant = ANT_B,
573 .valid_rx_ant = ANT_AB,
574 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
575 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
576 .ops = &iwl5000_ops,
577 .mod_params = &iwlagn_mod_params,
578 .base_params = &iwl5000_base_params,
579 .ht_params = &iwl5000_ht_params, 560 .ht_params = &iwl5000_ht_params,
580}; 561};
581 562
@@ -584,48 +565,39 @@ struct iwl_cfg iwl5350_agn_cfg = {
584 .fw_name_pre = IWL5000_FW_PRE, 565 .fw_name_pre = IWL5000_FW_PRE,
585 .ucode_api_max = IWL5000_UCODE_API_MAX, 566 .ucode_api_max = IWL5000_UCODE_API_MAX,
586 .ucode_api_min = IWL5000_UCODE_API_MIN, 567 .ucode_api_min = IWL5000_UCODE_API_MIN,
587 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
588 .valid_tx_ant = ANT_ABC,
589 .valid_rx_ant = ANT_ABC,
590 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 568 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
591 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 569 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
592 .ops = &iwl5000_ops, 570 .ops = &iwl5000_ops,
593 .mod_params = &iwlagn_mod_params, 571 .mod_params = &iwlagn_mod_params,
594 .base_params = &iwl5000_base_params, 572 .base_params = &iwl5000_base_params,
595 .ht_params = &iwl5000_ht_params, 573 .ht_params = &iwl5000_ht_params,
574 .led_mode = IWL_LED_BLINK,
575 .internal_wimax_coex = true,
596}; 576};
597 577
578#define IWL_DEVICE_5150 \
579 .fw_name_pre = IWL5150_FW_PRE, \
580 .ucode_api_max = IWL5150_UCODE_API_MAX, \
581 .ucode_api_min = IWL5150_UCODE_API_MIN, \
582 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, \
583 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, \
584 .ops = &iwl5150_ops, \
585 .mod_params = &iwlagn_mod_params, \
586 .base_params = &iwl5000_base_params, \
587 .need_dc_calib = true, \
588 .led_mode = IWL_LED_BLINK, \
589 .internal_wimax_coex = true
590
598struct iwl_cfg iwl5150_agn_cfg = { 591struct iwl_cfg iwl5150_agn_cfg = {
599 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN", 592 .name = "Intel(R) WiMAX/WiFi Link 5150 AGN",
600 .fw_name_pre = IWL5150_FW_PRE, 593 IWL_DEVICE_5150,
601 .ucode_api_max = IWL5150_UCODE_API_MAX,
602 .ucode_api_min = IWL5150_UCODE_API_MIN,
603 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
604 .valid_tx_ant = ANT_A,
605 .valid_rx_ant = ANT_AB,
606 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
607 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
608 .ops = &iwl5150_ops,
609 .mod_params = &iwlagn_mod_params,
610 .base_params = &iwl5000_base_params,
611 .ht_params = &iwl5000_ht_params, 594 .ht_params = &iwl5000_ht_params,
612 .need_dc_calib = true, 595
613}; 596};
614 597
615struct iwl_cfg iwl5150_abg_cfg = { 598struct iwl_cfg iwl5150_abg_cfg = {
616 .name = "Intel(R) WiMAX/WiFi Link 5150 ABG", 599 .name = "Intel(R) WiMAX/WiFi Link 5150 ABG",
617 .fw_name_pre = IWL5150_FW_PRE, 600 IWL_DEVICE_5150,
618 .ucode_api_max = IWL5150_UCODE_API_MAX,
619 .ucode_api_min = IWL5150_UCODE_API_MIN,
620 .sku = IWL_SKU_A|IWL_SKU_G,
621 .valid_tx_ant = ANT_A,
622 .valid_rx_ant = ANT_AB,
623 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
624 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
625 .ops = &iwl5150_ops,
626 .mod_params = &iwlagn_mod_params,
627 .base_params = &iwl5000_base_params,
628 .need_dc_calib = true,
629}; 601};
630 602
631MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 603MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
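The iwl-5000.c hunks above collapse the per-device iwl_cfg initializers into IWL_DEVICE_5000 and IWL_DEVICE_5150 macros that carry the shared fields, leaving each struct with only its name and device-specific entries. A minimal, compilable sketch of that designated-initializer macro pattern follows; the DEMO_* names and values are invented for illustration and are not the real iwlwifi configuration.

/*
 * Minimal sketch of the shared-field macro pattern used by
 * IWL_DEVICE_5000/IWL_DEVICE_5150 above.  The DEMO_* names and values are
 * invented for illustration and are not the real iwlwifi configuration.
 */
#include <stdio.h>

struct demo_cfg {
	const char *name;
	const char *fw_name_pre;
	int ucode_api_max;
	int ucode_api_min;
	int valid_tx_ant;
};

/* Fields common to every device of the family live in one macro ... */
#define DEMO_DEVICE_5000			\
	.fw_name_pre	= "demo-5000-",		\
	.ucode_api_max	= 5,			\
	.ucode_api_min	= 1

/* ... and each config adds only its name and per-device fields. */
static const struct demo_cfg demo5300_cfg = {
	.name = "demo 5300 AGN",
	DEMO_DEVICE_5000,
	.valid_tx_ant = 0x7,
};

static const struct demo_cfg demo5100_cfg = {
	.name = "demo 5100 AGN",
	DEMO_DEVICE_5000,
	.valid_tx_ant = 0x2,
};

int main(void)
{
	printf("%s: fw prefix %s, tx ant mask 0x%x\n", demo5300_cfg.name,
	       demo5300_cfg.fw_name_pre, demo5300_cfg.valid_tx_ant);
	printf("%s: fw prefix %s, tx ant mask 0x%x\n", demo5100_cfg.name,
	       demo5100_cfg.fw_name_pre, demo5100_cfg.valid_tx_ant);
	return 0;
}

The same consolidation appears elsewhere in this patch for the 1000-series (IWL_DEVICE_100) and, in the iwl-6000.c hunks below, for the 6005/6030/6000i configs.
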
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 11e6532fc573..af505bcd7ae0 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -53,13 +53,11 @@
53#define IWL6000_UCODE_API_MAX 4 53#define IWL6000_UCODE_API_MAX 4
54#define IWL6050_UCODE_API_MAX 5 54#define IWL6050_UCODE_API_MAX 5
55#define IWL6000G2_UCODE_API_MAX 5 55#define IWL6000G2_UCODE_API_MAX 5
56#define IWL130_UCODE_API_MAX 5
57 56
58/* Lowest firmware API version supported */ 57/* Lowest firmware API version supported */
59#define IWL6000_UCODE_API_MIN 4 58#define IWL6000_UCODE_API_MIN 4
60#define IWL6050_UCODE_API_MIN 4 59#define IWL6050_UCODE_API_MIN 4
61#define IWL6000G2_UCODE_API_MIN 4 60#define IWL6000G2_UCODE_API_MIN 4
62#define IWL130_UCODE_API_MIN 5
63 61
64#define IWL6000_FW_PRE "iwlwifi-6000-" 62#define IWL6000_FW_PRE "iwlwifi-6000-"
65#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 63#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -77,10 +75,6 @@
77#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode" 75#define _IWL6000G2B_MODULE_FIRMWARE(api) IWL6000G2B_FW_PRE #api ".ucode"
78#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api) 76#define IWL6000G2B_MODULE_FIRMWARE(api) _IWL6000G2B_MODULE_FIRMWARE(api)
79 77
80#define IWL130_FW_PRE "iwlwifi-130-"
81#define _IWL130_MODULE_FIRMWARE(api) IWL130_FW_PRE #api ".ucode"
82#define IWL130_MODULE_FIRMWARE(api) _IWL130_MODULE_FIRMWARE(api)
83
84static void iwl6000_set_ct_threshold(struct iwl_priv *priv) 78static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
85{ 79{
86 /* want Celsius */ 80 /* want Celsius */
@@ -188,7 +182,11 @@ static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
188 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 182 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
189 183
190 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant); 184 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
191 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant); 185 if (priv->cfg->rx_with_siso_diversity)
186 priv->hw_params.rx_chains_num = 1;
187 else
188 priv->hw_params.rx_chains_num =
189 num_of_ant(priv->cfg->valid_rx_ant);
192 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant; 190 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
193 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant; 191 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
194 192
@@ -328,14 +326,16 @@ static struct iwl_lib_ops iwl6000_lib = {
328 .query_addr = iwlagn_eeprom_query_addr, 326 .query_addr = iwlagn_eeprom_query_addr,
329 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 327 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
330 }, 328 },
331 .post_associate = iwl_post_associate, 329 .isr_ops = {
332 .isr = iwl_isr_ict, 330 .isr = iwl_isr_ict,
333 .config_ap = iwl_config_ap, 331 .free = iwl_free_isr_ict,
332 .alloc = iwl_alloc_isr_ict,
333 .reset = iwl_reset_ict,
334 .disable = iwl_disable_ict,
335 },
334 .temp_ops = { 336 .temp_ops = {
335 .temperature = iwlagn_temperature, 337 .temperature = iwlagn_temperature,
336 }, 338 },
337 .manage_ibss_station = iwlagn_manage_ibss_station,
338 .update_bcast_stations = iwl_update_bcast_stations,
339 .debugfs_ops = { 339 .debugfs_ops = {
340 .rx_stats_read = iwl_ucode_rx_stats_read, 340 .rx_stats_read = iwl_ucode_rx_stats_read,
341 .tx_stats_read = iwl_ucode_tx_stats_read, 341 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -343,7 +343,6 @@ static struct iwl_lib_ops iwl6000_lib = {
343 .bt_stats_read = iwl_ucode_bt_stats_read, 343 .bt_stats_read = iwl_ucode_bt_stats_read,
344 .reply_tx_error = iwl_reply_tx_error_read, 344 .reply_tx_error = iwl_reply_tx_error_read,
345 }, 345 },
346 .recover_from_tx_stall = iwl_bg_monitor_recover,
347 .check_plcp_health = iwl_good_plcp_health, 346 .check_plcp_health = iwl_good_plcp_health,
348 .check_ack_health = iwl_good_ack_health, 347 .check_ack_health = iwl_good_ack_health,
349 .txfifo_flush = iwlagn_txfifo_flush, 348 .txfifo_flush = iwlagn_txfifo_flush,
@@ -399,14 +398,16 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
399 .query_addr = iwlagn_eeprom_query_addr, 398 .query_addr = iwlagn_eeprom_query_addr,
400 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower, 399 .update_enhanced_txpower = iwlcore_eeprom_enhanced_txpower,
401 }, 400 },
402 .post_associate = iwl_post_associate, 401 .isr_ops = {
403 .isr = iwl_isr_ict, 402 .isr = iwl_isr_ict,
404 .config_ap = iwl_config_ap, 403 .free = iwl_free_isr_ict,
404 .alloc = iwl_alloc_isr_ict,
405 .reset = iwl_reset_ict,
406 .disable = iwl_disable_ict,
407 },
405 .temp_ops = { 408 .temp_ops = {
406 .temperature = iwlagn_temperature, 409 .temperature = iwlagn_temperature,
407 }, 410 },
408 .manage_ibss_station = iwlagn_manage_ibss_station,
409 .update_bcast_stations = iwl_update_bcast_stations,
410 .debugfs_ops = { 411 .debugfs_ops = {
411 .rx_stats_read = iwl_ucode_rx_stats_read, 412 .rx_stats_read = iwl_ucode_rx_stats_read,
412 .tx_stats_read = iwl_ucode_tx_stats_read, 413 .tx_stats_read = iwl_ucode_tx_stats_read,
@@ -414,7 +415,6 @@ static struct iwl_lib_ops iwl6000g2b_lib = {
414 .bt_stats_read = iwl_ucode_bt_stats_read, 415 .bt_stats_read = iwl_ucode_bt_stats_read,
415 .reply_tx_error = iwl_reply_tx_error_read, 416 .reply_tx_error = iwl_reply_tx_error_read,
416 }, 417 },
417 .recover_from_tx_stall = iwl_bg_monitor_recover,
418 .check_plcp_health = iwl_good_plcp_health, 418 .check_plcp_health = iwl_good_plcp_health,
419 .check_ack_health = iwl_good_ack_health, 419 .check_ack_health = iwl_good_ack_health,
420 .txfifo_flush = iwlagn_txfifo_flush, 420 .txfifo_flush = iwlagn_txfifo_flush,
@@ -439,6 +439,7 @@ static const struct iwl_ops iwl6000_ops = {
439 .hcmd = &iwlagn_hcmd, 439 .hcmd = &iwlagn_hcmd,
440 .utils = &iwlagn_hcmd_utils, 440 .utils = &iwlagn_hcmd_utils,
441 .led = &iwlagn_led_ops, 441 .led = &iwlagn_led_ops,
442 .ieee80211_ops = &iwlagn_hw_ops,
442}; 443};
443 444
444static const struct iwl_ops iwl6050_ops = { 445static const struct iwl_ops iwl6050_ops = {
@@ -447,6 +448,7 @@ static const struct iwl_ops iwl6050_ops = {
447 .utils = &iwlagn_hcmd_utils, 448 .utils = &iwlagn_hcmd_utils,
448 .led = &iwlagn_led_ops, 449 .led = &iwlagn_led_ops,
449 .nic = &iwl6050_nic_ops, 450 .nic = &iwl6050_nic_ops,
451 .ieee80211_ops = &iwlagn_hw_ops,
450}; 452};
451 453
452static const struct iwl_ops iwl6050g2_ops = { 454static const struct iwl_ops iwl6050g2_ops = {
@@ -455,6 +457,7 @@ static const struct iwl_ops iwl6050g2_ops = {
455 .utils = &iwlagn_hcmd_utils, 457 .utils = &iwlagn_hcmd_utils,
456 .led = &iwlagn_led_ops, 458 .led = &iwlagn_led_ops,
457 .nic = &iwl6050g2_nic_ops, 459 .nic = &iwl6050g2_nic_ops,
460 .ieee80211_ops = &iwlagn_hw_ops,
458}; 461};
459 462
460static const struct iwl_ops iwl6000g2b_ops = { 463static const struct iwl_ops iwl6000g2b_ops = {
@@ -462,6 +465,7 @@ static const struct iwl_ops iwl6000g2b_ops = {
462 .hcmd = &iwlagn_bt_hcmd, 465 .hcmd = &iwlagn_bt_hcmd,
463 .utils = &iwlagn_hcmd_utils, 466 .utils = &iwlagn_hcmd_utils,
464 .led = &iwlagn_led_ops, 467 .led = &iwlagn_led_ops,
468 .ieee80211_ops = &iwlagn_hw_ops,
465}; 469};
466 470
467static struct iwl_base_params iwl6000_base_params = { 471static struct iwl_base_params iwl6000_base_params = {
@@ -480,11 +484,12 @@ static struct iwl_base_params iwl6000_base_params = {
480 .support_ct_kill_exit = true, 484 .support_ct_kill_exit = true,
481 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 485 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
482 .chain_noise_scale = 1000, 486 .chain_noise_scale = 1000,
483 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, 487 .wd_timeout = IWL_DEF_WD_TIMEOUT,
484 .max_event_log_size = 512, 488 .max_event_log_size = 512,
485 .ucode_tracing = true, 489 .ucode_tracing = true,
486 .sensitivity_calib_by_driver = true, 490 .sensitivity_calib_by_driver = true,
487 .chain_noise_calib_by_driver = true, 491 .chain_noise_calib_by_driver = true,
492 .shadow_reg_enable = true,
488}; 493};
489 494
490static struct iwl_base_params iwl6050_base_params = { 495static struct iwl_base_params iwl6050_base_params = {
@@ -503,13 +508,14 @@ static struct iwl_base_params iwl6050_base_params = {
503 .support_ct_kill_exit = true, 508 .support_ct_kill_exit = true,
504 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 509 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
505 .chain_noise_scale = 1500, 510 .chain_noise_scale = 1500,
506 .monitor_recover_period = IWL_DEF_MONITORING_PERIOD, 511 .wd_timeout = IWL_DEF_WD_TIMEOUT,
507 .max_event_log_size = 1024, 512 .max_event_log_size = 1024,
508 .ucode_tracing = true, 513 .ucode_tracing = true,
509 .sensitivity_calib_by_driver = true, 514 .sensitivity_calib_by_driver = true,
510 .chain_noise_calib_by_driver = true, 515 .chain_noise_calib_by_driver = true,
516 .shadow_reg_enable = true,
511}; 517};
512static struct iwl_base_params iwl6000_coex_base_params = { 518static struct iwl_base_params iwl6000_g2_base_params = {
513 .eeprom_size = OTP_LOW_IMAGE_SIZE, 519 .eeprom_size = OTP_LOW_IMAGE_SIZE,
514 .num_of_queues = IWLAGN_NUM_QUEUES, 520 .num_of_queues = IWLAGN_NUM_QUEUES,
515 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES, 521 .num_of_ampdu_queues = IWLAGN_NUM_AMPDU_QUEUES,
@@ -518,18 +524,19 @@ static struct iwl_base_params iwl6000_coex_base_params = {
518 .use_bsm = false, 524 .use_bsm = false,
519 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 525 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
520 .shadow_ram_support = true, 526 .shadow_ram_support = true,
521 .led_compensation = 51, 527 .led_compensation = 57,
522 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS, 528 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
523 .supports_idle = true, 529 .supports_idle = true,
524 .adv_thermal_throttle = true, 530 .adv_thermal_throttle = true,
525 .support_ct_kill_exit = true, 531 .support_ct_kill_exit = true,
526 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF, 532 .plcp_delta_threshold = IWL_MAX_PLCP_ERR_THRESHOLD_DEF,
527 .chain_noise_scale = 1000, 533 .chain_noise_scale = 1000,
528 .monitor_recover_period = IWL_LONG_MONITORING_PERIOD, 534 .wd_timeout = IWL_LONG_WD_TIMEOUT,
529 .max_event_log_size = 512, 535 .max_event_log_size = 512,
530 .ucode_tracing = true, 536 .ucode_tracing = true,
531 .sensitivity_calib_by_driver = true, 537 .sensitivity_calib_by_driver = true,
532 .chain_noise_calib_by_driver = true, 538 .chain_noise_calib_by_driver = true,
539 .shadow_reg_enable = true,
533}; 540};
534 541
535static struct iwl_ht_params iwl6000_ht_params = { 542static struct iwl_ht_params iwl6000_ht_params = {
@@ -541,262 +548,164 @@ static struct iwl_bt_params iwl6000_bt_params = {
541 .bt_statistics = true, 548 .bt_statistics = true,
542 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 549 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
543 .advanced_bt_coexist = true, 550 .advanced_bt_coexist = true,
551 .agg_time_limit = BT_AGG_THRESHOLD_DEF,
544 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE, 552 .bt_init_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_NONE,
545 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT, 553 .bt_prio_boost = IWLAGN_BT_PRIO_BOOST_DEFAULT,
554 .bt_sco_disable = true,
555};
556
557#define IWL_DEVICE_6005 \
558 .fw_name_pre = IWL6000G2A_FW_PRE, \
559 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
560 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
561 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \
562 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \
563 .ops = &iwl6000_ops, \
564 .mod_params = &iwlagn_mod_params, \
565 .base_params = &iwl6000_g2_base_params, \
566 .need_dc_calib = true, \
567 .need_temp_offset_calib = true, \
568 .led_mode = IWL_LED_RF_STATE
569
570struct iwl_cfg iwl6005_2agn_cfg = {
571 .name = "Intel(R) Centrino(R) Advanced-N 6205 AGN",
572 IWL_DEVICE_6005,
573 .ht_params = &iwl6000_ht_params,
546}; 574};
547 575
548struct iwl_cfg iwl6000g2a_2agn_cfg = { 576struct iwl_cfg iwl6005_2abg_cfg = {
549 .name = "6000 Series 2x2 AGN Gen2a", 577 .name = "Intel(R) Centrino(R) Advanced-N 6205 ABG",
550 .fw_name_pre = IWL6000G2A_FW_PRE, 578 IWL_DEVICE_6005,
551 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 579};
552 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 580
553 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 581struct iwl_cfg iwl6005_2bg_cfg = {
554 .valid_tx_ant = ANT_AB, 582 .name = "Intel(R) Centrino(R) Advanced-N 6205 BG",
555 .valid_rx_ant = ANT_AB, 583 IWL_DEVICE_6005,
556 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, 584};
557 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, 585
558 .ops = &iwl6000_ops, 586#define IWL_DEVICE_6030 \
559 .mod_params = &iwlagn_mod_params, 587 .fw_name_pre = IWL6000G2B_FW_PRE, \
560 .base_params = &iwl6000_base_params, 588 .ucode_api_max = IWL6000G2_UCODE_API_MAX, \
589 .ucode_api_min = IWL6000G2_UCODE_API_MIN, \
590 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION, \
591 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION, \
592 .ops = &iwl6000g2b_ops, \
593 .mod_params = &iwlagn_mod_params, \
594 .base_params = &iwl6000_g2_base_params, \
595 .bt_params = &iwl6000_bt_params, \
596 .need_dc_calib = true, \
597 .need_temp_offset_calib = true, \
598 .led_mode = IWL_LED_RF_STATE, \
599 .adv_pm = true \
600
601struct iwl_cfg iwl6030_2agn_cfg = {
602 .name = "Intel(R) Centrino(R) Advanced-N 6230 AGN",
603 IWL_DEVICE_6030,
561 .ht_params = &iwl6000_ht_params, 604 .ht_params = &iwl6000_ht_params,
562 .need_dc_calib = true, 605};
563 .need_temp_offset_calib = true, 606
564}; 607struct iwl_cfg iwl6030_2abg_cfg = {
565 608 .name = "Intel(R) Centrino(R) Advanced-N 6230 ABG",
566struct iwl_cfg iwl6000g2a_2abg_cfg = { 609 IWL_DEVICE_6030,
567 .name = "6000 Series 2x2 ABG Gen2a", 610};
568 .fw_name_pre = IWL6000G2A_FW_PRE, 611
569 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 612struct iwl_cfg iwl6030_2bgn_cfg = {
570 .ucode_api_min = IWL6000G2_UCODE_API_MIN, 613 .name = "Intel(R) Centrino(R) Advanced-N 6230 BGN",
571 .sku = IWL_SKU_A|IWL_SKU_G, 614 IWL_DEVICE_6030,
572 .valid_tx_ant = ANT_AB,
573 .valid_rx_ant = ANT_AB,
574 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
575 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
576 .ops = &iwl6000_ops,
577 .mod_params = &iwlagn_mod_params,
578 .base_params = &iwl6000_base_params,
579 .need_dc_calib = true,
580 .need_temp_offset_calib = true,
581};
582
583struct iwl_cfg iwl6000g2a_2bg_cfg = {
584 .name = "6000 Series 2x2 BG Gen2a",
585 .fw_name_pre = IWL6000G2A_FW_PRE,
586 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
587 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
588 .sku = IWL_SKU_G,
589 .valid_tx_ant = ANT_AB,
590 .valid_rx_ant = ANT_AB,
591 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
592 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
593 .ops = &iwl6000_ops,
594 .mod_params = &iwlagn_mod_params,
595 .base_params = &iwl6000_base_params,
596 .need_dc_calib = true,
597 .need_temp_offset_calib = true,
598};
599
600struct iwl_cfg iwl6000g2b_2agn_cfg = {
601 .name = "6000 Series 2x2 AGN Gen2b",
602 .fw_name_pre = IWL6000G2B_FW_PRE,
603 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
604 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
605 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
606 .valid_tx_ant = ANT_AB,
607 .valid_rx_ant = ANT_AB,
608 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
609 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
610 .ops = &iwl6000g2b_ops,
611 .mod_params = &iwlagn_mod_params,
612 .base_params = &iwl6000_coex_base_params,
613 .bt_params = &iwl6000_bt_params,
614 .ht_params = &iwl6000_ht_params, 615 .ht_params = &iwl6000_ht_params,
615 .need_dc_calib = true, 616};
616 .need_temp_offset_calib = true, 617
617 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 618struct iwl_cfg iwl6030_2bg_cfg = {
618 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 619 .name = "Intel(R) Centrino(R) Advanced-N 6230 BG",
619}; 620 IWL_DEVICE_6030,
620 621};
621struct iwl_cfg iwl6000g2b_2abg_cfg = { 622
622 .name = "6000 Series 2x2 ABG Gen2b", 623struct iwl_cfg iwl1030_bgn_cfg = {
623 .fw_name_pre = IWL6000G2B_FW_PRE, 624 .name = "Intel(R) Centrino(R) Wireless-N 1030 BGN",
624 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 625 IWL_DEVICE_6030,
625 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
626 .sku = IWL_SKU_A|IWL_SKU_G,
627 .valid_tx_ant = ANT_AB,
628 .valid_rx_ant = ANT_AB,
629 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
630 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
631 .ops = &iwl6000g2b_ops,
632 .mod_params = &iwlagn_mod_params,
633 .base_params = &iwl6000_coex_base_params,
634 .bt_params = &iwl6000_bt_params,
635 .need_dc_calib = true,
636 .need_temp_offset_calib = true,
637 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
638 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
639};
640
641struct iwl_cfg iwl6000g2b_2bgn_cfg = {
642 .name = "6000 Series 2x2 BGN Gen2b",
643 .fw_name_pre = IWL6000G2B_FW_PRE,
644 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
645 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
646 .sku = IWL_SKU_G|IWL_SKU_N,
647 .valid_tx_ant = ANT_AB,
648 .valid_rx_ant = ANT_AB,
649 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
650 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
651 .ops = &iwl6000g2b_ops,
652 .mod_params = &iwlagn_mod_params,
653 .base_params = &iwl6000_coex_base_params,
654 .bt_params = &iwl6000_bt_params,
655 .ht_params = &iwl6000_ht_params, 626 .ht_params = &iwl6000_ht_params,
656 .need_dc_calib = true, 627};
657 .need_temp_offset_calib = true, 628
658 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 629struct iwl_cfg iwl1030_bg_cfg = {
659 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 630 .name = "Intel(R) Centrino(R) Wireless-N 1030 BG",
660}; 631 IWL_DEVICE_6030,
661 632};
662struct iwl_cfg iwl6000g2b_2bg_cfg = { 633
663 .name = "6000 Series 2x2 BG Gen2b", 634struct iwl_cfg iwl130_bgn_cfg = {
664 .fw_name_pre = IWL6000G2B_FW_PRE, 635 .name = "Intel(R) Centrino(R) Wireless-N 130 BGN",
665 .ucode_api_max = IWL6000G2_UCODE_API_MAX, 636 IWL_DEVICE_6030,
666 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
667 .sku = IWL_SKU_G,
668 .valid_tx_ant = ANT_AB,
669 .valid_rx_ant = ANT_AB,
670 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
671 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
672 .ops = &iwl6000g2b_ops,
673 .mod_params = &iwlagn_mod_params,
674 .base_params = &iwl6000_coex_base_params,
675 .bt_params = &iwl6000_bt_params,
676 .need_dc_calib = true,
677 .need_temp_offset_calib = true,
678 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
679 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
680};
681
682struct iwl_cfg iwl6000g2b_bgn_cfg = {
683 .name = "6000 Series 1x2 BGN Gen2b",
684 .fw_name_pre = IWL6000G2B_FW_PRE,
685 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
686 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
687 .sku = IWL_SKU_G|IWL_SKU_N,
688 .valid_tx_ant = ANT_A,
689 .valid_rx_ant = ANT_AB,
690 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
691 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
692 .ops = &iwl6000g2b_ops,
693 .mod_params = &iwlagn_mod_params,
694 .base_params = &iwl6000_coex_base_params,
695 .bt_params = &iwl6000_bt_params,
696 .ht_params = &iwl6000_ht_params, 637 .ht_params = &iwl6000_ht_params,
697 .need_dc_calib = true, 638 .rx_with_siso_diversity = true,
698 .need_temp_offset_calib = true, 639};
699 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */ 640
700 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A, 641struct iwl_cfg iwl130_bg_cfg = {
701}; 642 .name = "Intel(R) Centrino(R) Wireless-N 130 BG",
702 643 IWL_DEVICE_6030,
703struct iwl_cfg iwl6000g2b_bg_cfg = { 644 .rx_with_siso_diversity = true,
704 .name = "6000 Series 1x2 BG Gen2b",
705 .fw_name_pre = IWL6000G2B_FW_PRE,
706 .ucode_api_max = IWL6000G2_UCODE_API_MAX,
707 .ucode_api_min = IWL6000G2_UCODE_API_MIN,
708 .sku = IWL_SKU_G,
709 .valid_tx_ant = ANT_A,
710 .valid_rx_ant = ANT_AB,
711 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
712 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
713 .ops = &iwl6000g2b_ops,
714 .mod_params = &iwlagn_mod_params,
715 .base_params = &iwl6000_coex_base_params,
716 .bt_params = &iwl6000_bt_params,
717 .need_dc_calib = true,
718 .need_temp_offset_calib = true,
719 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
720 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
721}; 645};
722 646
723/* 647/*
724 * "i": Internal configuration, use internal Power Amplifier 648 * "i": Internal configuration, use internal Power Amplifier
725 */ 649 */
650#define IWL_DEVICE_6000i \
651 .fw_name_pre = IWL6000_FW_PRE, \
652 .ucode_api_max = IWL6000_UCODE_API_MAX, \
653 .ucode_api_min = IWL6000_UCODE_API_MIN, \
654 .valid_tx_ant = ANT_BC, /* .cfg overwrite */ \
655 .valid_rx_ant = ANT_BC, /* .cfg overwrite */ \
656 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, \
657 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, \
658 .ops = &iwl6000_ops, \
659 .mod_params = &iwlagn_mod_params, \
660 .base_params = &iwl6000_base_params, \
661 .pa_type = IWL_PA_INTERNAL, \
662 .led_mode = IWL_LED_BLINK
663
726struct iwl_cfg iwl6000i_2agn_cfg = { 664struct iwl_cfg iwl6000i_2agn_cfg = {
727 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN", 665 .name = "Intel(R) Centrino(R) Advanced-N 6200 AGN",
728 .fw_name_pre = IWL6000_FW_PRE, 666 IWL_DEVICE_6000i,
729 .ucode_api_max = IWL6000_UCODE_API_MAX,
730 .ucode_api_min = IWL6000_UCODE_API_MIN,
731 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
732 .valid_tx_ant = ANT_BC,
733 .valid_rx_ant = ANT_BC,
734 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
735 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
736 .ops = &iwl6000_ops,
737 .mod_params = &iwlagn_mod_params,
738 .base_params = &iwl6000_base_params,
739 .ht_params = &iwl6000_ht_params, 667 .ht_params = &iwl6000_ht_params,
740 .pa_type = IWL_PA_INTERNAL,
741}; 668};
742 669
743struct iwl_cfg iwl6000i_2abg_cfg = { 670struct iwl_cfg iwl6000i_2abg_cfg = {
744 .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG", 671 .name = "Intel(R) Centrino(R) Advanced-N 6200 ABG",
745 .fw_name_pre = IWL6000_FW_PRE, 672 IWL_DEVICE_6000i,
746 .ucode_api_max = IWL6000_UCODE_API_MAX,
747 .ucode_api_min = IWL6000_UCODE_API_MIN,
748 .sku = IWL_SKU_A|IWL_SKU_G,
749 .valid_tx_ant = ANT_BC,
750 .valid_rx_ant = ANT_BC,
751 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
752 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
753 .ops = &iwl6000_ops,
754 .mod_params = &iwlagn_mod_params,
755 .base_params = &iwl6000_base_params,
756 .pa_type = IWL_PA_INTERNAL,
757}; 673};
758 674
759struct iwl_cfg iwl6000i_2bg_cfg = { 675struct iwl_cfg iwl6000i_2bg_cfg = {
760 .name = "Intel(R) Centrino(R) Advanced-N 6200 BG", 676 .name = "Intel(R) Centrino(R) Advanced-N 6200 BG",
761 .fw_name_pre = IWL6000_FW_PRE, 677 IWL_DEVICE_6000i,
762 .ucode_api_max = IWL6000_UCODE_API_MAX, 678};
763 .ucode_api_min = IWL6000_UCODE_API_MIN, 679
764 .sku = IWL_SKU_G, 680#define IWL_DEVICE_6050 \
765 .valid_tx_ant = ANT_BC, 681 .fw_name_pre = IWL6050_FW_PRE, \
766 .valid_rx_ant = ANT_BC, 682 .ucode_api_max = IWL6050_UCODE_API_MAX, \
767 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 683 .ucode_api_min = IWL6050_UCODE_API_MIN, \
768 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 684 .ops = &iwl6050_ops, \
769 .ops = &iwl6000_ops, 685 .eeprom_ver = EEPROM_6050_EEPROM_VERSION, \
770 .mod_params = &iwlagn_mod_params, 686 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION, \
771 .base_params = &iwl6000_base_params, 687 .mod_params = &iwlagn_mod_params, \
772 .pa_type = IWL_PA_INTERNAL, 688 .base_params = &iwl6050_base_params, \
773}; 689 .need_dc_calib = true, \
690 .led_mode = IWL_LED_BLINK, \
691 .internal_wimax_coex = true
774 692
775struct iwl_cfg iwl6050_2agn_cfg = { 693struct iwl_cfg iwl6050_2agn_cfg = {
776 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN", 694 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 AGN",
777 .fw_name_pre = IWL6050_FW_PRE, 695 IWL_DEVICE_6050,
778 .ucode_api_max = IWL6050_UCODE_API_MAX,
779 .ucode_api_min = IWL6050_UCODE_API_MIN,
780 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
781 .valid_tx_ant = ANT_AB,
782 .valid_rx_ant = ANT_AB,
783 .ops = &iwl6050_ops,
784 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
785 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
786 .mod_params = &iwlagn_mod_params,
787 .base_params = &iwl6050_base_params,
788 .ht_params = &iwl6000_ht_params, 696 .ht_params = &iwl6000_ht_params,
789 .need_dc_calib = true,
790}; 697};
791 698
792struct iwl_cfg iwl6050g2_bgn_cfg = { 699struct iwl_cfg iwl6050_2abg_cfg = {
793 .name = "6050 Series 1x2 BGN Gen2", 700 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
701 IWL_DEVICE_6050,
702};
703
704struct iwl_cfg iwl6150_bgn_cfg = {
705 .name = "Intel(R) Centrino(R) Wireless-N + WiMAX 6150 BGN",
794 .fw_name_pre = IWL6050_FW_PRE, 706 .fw_name_pre = IWL6050_FW_PRE,
795 .ucode_api_max = IWL6050_UCODE_API_MAX, 707 .ucode_api_max = IWL6050_UCODE_API_MAX,
796 .ucode_api_min = IWL6050_UCODE_API_MIN, 708 .ucode_api_min = IWL6050_UCODE_API_MIN,
797 .sku = IWL_SKU_G|IWL_SKU_N,
798 .valid_tx_ant = ANT_A,
799 .valid_rx_ant = ANT_AB,
800 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION, 709 .eeprom_ver = EEPROM_6050G2_EEPROM_VERSION,
801 .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION, 710 .eeprom_calib_ver = EEPROM_6050G2_TX_POWER_VERSION,
802 .ops = &iwl6050g2_ops, 711 .ops = &iwl6050g2_ops,
@@ -804,22 +713,8 @@ struct iwl_cfg iwl6050g2_bgn_cfg = {
804 .base_params = &iwl6050_base_params, 713 .base_params = &iwl6050_base_params,
805 .ht_params = &iwl6000_ht_params, 714 .ht_params = &iwl6000_ht_params,
806 .need_dc_calib = true, 715 .need_dc_calib = true,
807}; 716 .led_mode = IWL_LED_RF_STATE,
808 717 .internal_wimax_coex = true,
809struct iwl_cfg iwl6050_2abg_cfg = {
810 .name = "Intel(R) Centrino(R) Advanced-N + WiMAX 6250 ABG",
811 .fw_name_pre = IWL6050_FW_PRE,
812 .ucode_api_max = IWL6050_UCODE_API_MAX,
813 .ucode_api_min = IWL6050_UCODE_API_MIN,
814 .sku = IWL_SKU_A|IWL_SKU_G,
815 .valid_tx_ant = ANT_AB,
816 .valid_rx_ant = ANT_AB,
817 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
818 .eeprom_calib_ver = EEPROM_6050_TX_POWER_VERSION,
819 .ops = &iwl6050_ops,
820 .mod_params = &iwlagn_mod_params,
821 .base_params = &iwl6050_base_params,
822 .need_dc_calib = true,
823}; 718};
824 719
825struct iwl_cfg iwl6000_3agn_cfg = { 720struct iwl_cfg iwl6000_3agn_cfg = {
@@ -827,9 +722,6 @@ struct iwl_cfg iwl6000_3agn_cfg = {
827 .fw_name_pre = IWL6000_FW_PRE, 722 .fw_name_pre = IWL6000_FW_PRE,
828 .ucode_api_max = IWL6000_UCODE_API_MAX, 723 .ucode_api_max = IWL6000_UCODE_API_MAX,
829 .ucode_api_min = IWL6000_UCODE_API_MIN, 724 .ucode_api_min = IWL6000_UCODE_API_MIN,
830 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
831 .valid_tx_ant = ANT_ABC,
832 .valid_rx_ant = ANT_ABC,
833 .eeprom_ver = EEPROM_6000_EEPROM_VERSION, 725 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
834 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION, 726 .eeprom_calib_ver = EEPROM_6000_TX_POWER_VERSION,
835 .ops = &iwl6000_ops, 727 .ops = &iwl6000_ops,
@@ -837,49 +729,10 @@ struct iwl_cfg iwl6000_3agn_cfg = {
837 .base_params = &iwl6000_base_params, 729 .base_params = &iwl6000_base_params,
838 .ht_params = &iwl6000_ht_params, 730 .ht_params = &iwl6000_ht_params,
839 .need_dc_calib = true, 731 .need_dc_calib = true,
840}; 732 .led_mode = IWL_LED_BLINK,
841
842struct iwl_cfg iwl130_bgn_cfg = {
843 .name = "Intel(R) 130 Series 1x1 BGN",
844 .fw_name_pre = IWL6000G2B_FW_PRE,
845 .ucode_api_max = IWL130_UCODE_API_MAX,
846 .ucode_api_min = IWL130_UCODE_API_MIN,
847 .sku = IWL_SKU_G|IWL_SKU_N,
848 .valid_tx_ant = ANT_A,
849 .valid_rx_ant = ANT_A,
850 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
851 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
852 .ops = &iwl6000g2b_ops,
853 .mod_params = &iwlagn_mod_params,
854 .base_params = &iwl6000_coex_base_params,
855 .bt_params = &iwl6000_bt_params,
856 .ht_params = &iwl6000_ht_params,
857 .need_dc_calib = true,
858 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
859 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
860};
861
862struct iwl_cfg iwl130_bg_cfg = {
863 .name = "Intel(R) 130 Series 1x2 BG",
864 .fw_name_pre = IWL6000G2B_FW_PRE,
865 .ucode_api_max = IWL130_UCODE_API_MAX,
866 .ucode_api_min = IWL130_UCODE_API_MIN,
867 .sku = IWL_SKU_G,
868 .valid_tx_ant = ANT_A,
869 .valid_rx_ant = ANT_A,
870 .eeprom_ver = EEPROM_6000G2_EEPROM_VERSION,
871 .eeprom_calib_ver = EEPROM_6000G2_TX_POWER_VERSION,
872 .ops = &iwl6000g2b_ops,
873 .mod_params = &iwlagn_mod_params,
874 .base_params = &iwl6000_coex_base_params,
875 .bt_params = &iwl6000_bt_params,
876 .need_dc_calib = true,
877 /* Due to bluetooth, we transmit 2.4 GHz probes only on antenna A */
878 .scan_tx_antennas[IEEE80211_BAND_2GHZ] = ANT_A,
879}; 733};
880 734
881MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 735MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
882MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX)); 736MODULE_FIRMWARE(IWL6050_MODULE_FIRMWARE(IWL6050_UCODE_API_MAX));
883MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 737MODULE_FIRMWARE(IWL6000G2A_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
884MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX)); 738MODULE_FIRMWARE(IWL6000G2B_MODULE_FIRMWARE(IWL6000G2_UCODE_API_MAX));
885MODULE_FIRMWARE(IWL130_MODULE_FIRMWARE(IWL130_UCODE_API_MAX));
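The hunks above collapse the long per-device iwl_cfg initializers in iwl-6000.c into shared IWL_DEVICE_6000i, IWL_DEVICE_6050 and IWL_DEVICE_6030 macros, so each configuration only spells out what actually differs (.name, .ht_params, .rx_with_siso_diversity, and so on). The sketch below shows the same designated-initializer macro pattern as a standalone C program; the demo_cfg type, the firmware prefix and the numeric values are invented for illustration and are not the driver's real definitions.

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the driver's config struct (hypothetical). */
struct demo_cfg {
        const char *name;
        const char *fw_name_pre;
        int ucode_api_max;
        bool need_dc_calib;
        bool ht_enabled;
        bool rx_with_siso_diversity;
};

/* Fields shared by one device family, expanded inside an initializer. */
#define DEMO_DEVICE_6030                        \
        .fw_name_pre = "demo-6000g2b-",         \
        .ucode_api_max = 17,                    \
        .need_dc_calib = true

/* Each cfg now lists only what differs from the family defaults. */
static const struct demo_cfg demo_6030_bgn = {
        .name = "demo 6030 BGN",
        DEMO_DEVICE_6030,
        .ht_enabled = true,
};

static const struct demo_cfg demo_130_bg = {
        .name = "demo 130 BG",
        DEMO_DEVICE_6030,
        .rx_with_siso_diversity = true,
};

int main(void)
{
        printf("%s uses firmware prefix %s\n",
               demo_130_bg.name, demo_130_bg.fw_name_pre);
        return 0;
}

Used this way, adding one more SKU of the same family is a three-line structure instead of a dozen repeated assignments, and a family-wide change touches a single macro.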
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
index e2019e756936..d16bb5ede014 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-calib.c
@@ -732,8 +732,122 @@ static inline u8 find_first_chain(u8 mask)
732 return CHAIN_C; 732 return CHAIN_C;
733} 733}
734 734
735/**
736 * Run disconnected antenna algorithm to find out which antennas are
737 * disconnected.
738 */
739static void iwl_find_disconn_antenna(struct iwl_priv *priv, u32* average_sig,
740 struct iwl_chain_noise_data *data)
741{
742 u32 active_chains = 0;
743 u32 max_average_sig;
744 u16 max_average_sig_antenna_i;
745 u8 num_tx_chains;
746 u8 first_chain;
747 u16 i = 0;
748
749 average_sig[0] = data->chain_signal_a /
750 priv->cfg->base_params->chain_noise_num_beacons;
751 average_sig[1] = data->chain_signal_b /
752 priv->cfg->base_params->chain_noise_num_beacons;
753 average_sig[2] = data->chain_signal_c /
754 priv->cfg->base_params->chain_noise_num_beacons;
755
756 if (average_sig[0] >= average_sig[1]) {
757 max_average_sig = average_sig[0];
758 max_average_sig_antenna_i = 0;
759 active_chains = (1 << max_average_sig_antenna_i);
760 } else {
761 max_average_sig = average_sig[1];
762 max_average_sig_antenna_i = 1;
763 active_chains = (1 << max_average_sig_antenna_i);
764 }
765
766 if (average_sig[2] >= max_average_sig) {
767 max_average_sig = average_sig[2];
768 max_average_sig_antenna_i = 2;
769 active_chains = (1 << max_average_sig_antenna_i);
770 }
771
772 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
773 average_sig[0], average_sig[1], average_sig[2]);
774 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
775 max_average_sig, max_average_sig_antenna_i);
776
777 /* Compare signal strengths for all 3 receivers. */
778 for (i = 0; i < NUM_RX_CHAINS; i++) {
779 if (i != max_average_sig_antenna_i) {
780 s32 rssi_delta = (max_average_sig - average_sig[i]);
781
782 /* If signal is very weak, compared with
783 * strongest, mark it as disconnected. */
784 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
785 data->disconn_array[i] = 1;
786 else
787 active_chains |= (1 << i);
788 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
789 "disconn_array[i] = %d\n",
790 i, rssi_delta, data->disconn_array[i]);
791 }
792 }
793
794 /*
795 * The above algorithm sometimes fails when the ucode
796 * reports 0 for all chains. It's not clear why that
797 * happens to start with, but it is then causing trouble
798 * because this can make us enable more chains than the
799 * hardware really has.
800 *
801 * To be safe, simply mask out any chains that we know
802 * are not on the device.
803 */
804 active_chains &= priv->hw_params.valid_rx_ant;
805
806 num_tx_chains = 0;
807 for (i = 0; i < NUM_RX_CHAINS; i++) {
808 /* loops on all the bits of
809 * priv->hw_setting.valid_tx_ant */
810 u8 ant_msk = (1 << i);
811 if (!(priv->hw_params.valid_tx_ant & ant_msk))
812 continue;
813
814 num_tx_chains++;
815 if (data->disconn_array[i] == 0)
816 /* there is a Tx antenna connected */
817 break;
818 if (num_tx_chains == priv->hw_params.tx_chains_num &&
819 data->disconn_array[i]) {
820 /*
821 * If all chains are disconnected
822 * connect the first valid tx chain
823 */
824 first_chain =
825 find_first_chain(priv->cfg->valid_tx_ant);
826 data->disconn_array[first_chain] = 0;
827 active_chains |= BIT(first_chain);
828 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected \
829 W/A - declare %d as connected\n",
830 first_chain);
831 break;
832 }
833 }
834
835 if (active_chains != priv->hw_params.valid_rx_ant &&
836 active_chains != priv->chain_noise_data.active_chains)
837 IWL_DEBUG_CALIB(priv,
838 "Detected that not all antennas are connected! "
839 "Connected: %#x, valid: %#x.\n",
840 active_chains, priv->hw_params.valid_rx_ant);
841
842 /* Save for use within RXON, TX, SCAN commands, etc. */
843 data->active_chains = active_chains;
844 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
845 active_chains);
846}
847
848
735/* 849/*
736 * Accumulate 20 beacons of signal and noise statistics for each of 850 * Accumulate 16 beacons of signal and noise statistics for each of
737 * 3 receivers/antennas/rx-chains, then figure out: 851 * 3 receivers/antennas/rx-chains, then figure out:
738 * 1) Which antennas are connected. 852 * 1) Which antennas are connected.
739 * 2) Differential rx gain settings to balance the 3 receivers. 853 * 2) Differential rx gain settings to balance the 3 receivers.
@@ -750,8 +864,6 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
750 u32 chain_sig_c; 864 u32 chain_sig_c;
751 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 865 u32 average_sig[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
752 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE}; 866 u32 average_noise[NUM_RX_CHAINS] = {INITIALIZATION_VALUE};
753 u32 max_average_sig;
754 u16 max_average_sig_antenna_i;
755 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE; 867 u32 min_average_noise = MIN_AVERAGE_NOISE_MAX_VALUE;
756 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE; 868 u16 min_average_noise_antenna_i = INITIALIZATION_VALUE;
757 u16 i = 0; 869 u16 i = 0;
@@ -759,11 +871,9 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
759 u16 stat_chnum = INITIALIZATION_VALUE; 871 u16 stat_chnum = INITIALIZATION_VALUE;
760 u8 rxon_band24; 872 u8 rxon_band24;
761 u8 stat_band24; 873 u8 stat_band24;
762 u32 active_chains = 0;
763 u8 num_tx_chains;
764 unsigned long flags; 874 unsigned long flags;
765 struct statistics_rx_non_phy *rx_info; 875 struct statistics_rx_non_phy *rx_info;
766 u8 first_chain; 876
767 /* 877 /*
768 * MULTI-FIXME: 878 * MULTI-FIXME:
769 * When we support multiple interfaces on different channels, 879 * When we support multiple interfaces on different channels,
@@ -869,108 +979,16 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv, void *stat_resp)
869 return; 979 return;
870 980
871 /* Analyze signal for disconnected antenna */ 981 /* Analyze signal for disconnected antenna */
872 average_sig[0] = data->chain_signal_a /
873 priv->cfg->base_params->chain_noise_num_beacons;
874 average_sig[1] = data->chain_signal_b /
875 priv->cfg->base_params->chain_noise_num_beacons;
876 average_sig[2] = data->chain_signal_c /
877 priv->cfg->base_params->chain_noise_num_beacons;
878
879 if (average_sig[0] >= average_sig[1]) {
880 max_average_sig = average_sig[0];
881 max_average_sig_antenna_i = 0;
882 active_chains = (1 << max_average_sig_antenna_i);
883 } else {
884 max_average_sig = average_sig[1];
885 max_average_sig_antenna_i = 1;
886 active_chains = (1 << max_average_sig_antenna_i);
887 }
888
889 if (average_sig[2] >= max_average_sig) {
890 max_average_sig = average_sig[2];
891 max_average_sig_antenna_i = 2;
892 active_chains = (1 << max_average_sig_antenna_i);
893 }
894
895 IWL_DEBUG_CALIB(priv, "average_sig: a %d b %d c %d\n",
896 average_sig[0], average_sig[1], average_sig[2]);
897 IWL_DEBUG_CALIB(priv, "max_average_sig = %d, antenna %d\n",
898 max_average_sig, max_average_sig_antenna_i);
899
900 /* Compare signal strengths for all 3 receivers. */
901 for (i = 0; i < NUM_RX_CHAINS; i++) {
902 if (i != max_average_sig_antenna_i) {
903 s32 rssi_delta = (max_average_sig - average_sig[i]);
904
905 /* If signal is very weak, compared with
906 * strongest, mark it as disconnected. */
907 if (rssi_delta > MAXIMUM_ALLOWED_PATHLOSS)
908 data->disconn_array[i] = 1;
909 else
910 active_chains |= (1 << i);
911 IWL_DEBUG_CALIB(priv, "i = %d rssiDelta = %d "
912 "disconn_array[i] = %d\n",
913 i, rssi_delta, data->disconn_array[i]);
914 }
915 }
916
917 /*
918 * The above algorithm sometimes fails when the ucode
919 * reports 0 for all chains. It's not clear why that
920 * happens to start with, but it is then causing trouble
921 * because this can make us enable more chains than the
922 * hardware really has.
923 *
924 * To be safe, simply mask out any chains that we know
925 * are not on the device.
926 */
927 if (priv->cfg->bt_params && 982 if (priv->cfg->bt_params &&
928 priv->cfg->bt_params->advanced_bt_coexist && 983 priv->cfg->bt_params->advanced_bt_coexist) {
929 priv->bt_full_concurrent) { 984 /* Disable disconnected antenna algorithm for advanced
930 /* operated as 1x1 in full concurrency mode */ 985 bt coex, assuming valid antennas are connected */
931 active_chains &= first_antenna(priv->hw_params.valid_rx_ant); 986 data->active_chains = priv->hw_params.valid_rx_ant;
987 for (i = 0; i < NUM_RX_CHAINS; i++)
988 if (!(data->active_chains & (1<<i)))
989 data->disconn_array[i] = 1;
932 } else 990 } else
933 active_chains &= priv->hw_params.valid_rx_ant; 991 iwl_find_disconn_antenna(priv, average_sig, data);
934
935 num_tx_chains = 0;
936 for (i = 0; i < NUM_RX_CHAINS; i++) {
937 /* loops on all the bits of
938 * priv->hw_setting.valid_tx_ant */
939 u8 ant_msk = (1 << i);
940 if (!(priv->hw_params.valid_tx_ant & ant_msk))
941 continue;
942
943 num_tx_chains++;
944 if (data->disconn_array[i] == 0)
945 /* there is a Tx antenna connected */
946 break;
947 if (num_tx_chains == priv->hw_params.tx_chains_num &&
948 data->disconn_array[i]) {
949 /*
950 * If all chains are disconnected
951 * connect the first valid tx chain
952 */
953 first_chain =
954 find_first_chain(priv->cfg->valid_tx_ant);
955 data->disconn_array[first_chain] = 0;
956 active_chains |= BIT(first_chain);
957 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - declare %d as connected\n",
958 first_chain);
959 break;
960 }
961 }
962
963 if (active_chains != priv->hw_params.valid_rx_ant &&
964 active_chains != priv->chain_noise_data.active_chains)
965 IWL_DEBUG_CALIB(priv,
966 "Detected that not all antennas are connected! "
967 "Connected: %#x, valid: %#x.\n",
968 active_chains, priv->hw_params.valid_rx_ant);
969
970 /* Save for use within RXON, TX, SCAN commands, etc. */
971 priv->chain_noise_data.active_chains = active_chains;
972 IWL_DEBUG_CALIB(priv, "active_chains (bitwise) = 0x%x\n",
973 active_chains);
974 992
975 /* Analyze noise for rx balance */ 993 /* Analyze noise for rx balance */
976 average_noise[0] = data->chain_noise_a / 994 average_noise[0] = data->chain_noise_a /
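In the iwl-agn-calib.c hunks above, the disconnected-antenna detection moves into the new helper iwl_find_disconn_antenna(), and is skipped entirely when advanced BT coexistence is active: in that case every valid RX antenna is assumed connected. The core of the detection is a comparison against the strongest chain; any chain whose average signal is more than MAXIMUM_ALLOWED_PATHLOSS below the best one is marked disconnected. A compact userspace sketch of that comparison follows; the threshold value and the types here are placeholders, not the driver's.

#include <stdio.h>
#include <stdint.h>

#define NUM_RX_CHAINS 3
/* Hypothetical threshold; the driver defines MAXIMUM_ALLOWED_PATHLOSS itself. */
#define MAX_ALLOWED_PATHLOSS 15

/* Returns a bitmask of chains considered connected. */
static uint32_t find_connected_chains(const int32_t avg_sig[NUM_RX_CHAINS],
                                      int disconn[NUM_RX_CHAINS])
{
        int strongest = 0;
        uint32_t active;
        int i;

        /* Find the chain with the highest average signal. */
        for (i = 1; i < NUM_RX_CHAINS; i++)
                if (avg_sig[i] >= avg_sig[strongest])
                        strongest = i;

        active = 1u << strongest;

        /* Any chain far below the strongest one is treated as disconnected. */
        for (i = 0; i < NUM_RX_CHAINS; i++) {
                if (i == strongest)
                        continue;
                if (avg_sig[strongest] - avg_sig[i] > MAX_ALLOWED_PATHLOSS)
                        disconn[i] = 1;
                else
                        active |= 1u << i;
        }
        return active;
}

int main(void)
{
        int32_t sig[NUM_RX_CHAINS] = { 60, 58, 20 };    /* chain C far weaker */
        int disconn[NUM_RX_CHAINS] = { 0 };
        uint32_t active = find_connected_chains(sig, disconn);

        printf("active chains bitmask: 0x%x\n", (unsigned)active); /* expect 0x3 */
        return 0;
}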
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
index a358d4334a1a..a6dbd8983dac 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-debugfs.c
@@ -856,6 +856,9 @@ ssize_t iwl_ucode_bt_stats_read(struct file *file,
856 if (!iwl_is_alive(priv)) 856 if (!iwl_is_alive(priv))
857 return -EAGAIN; 857 return -EAGAIN;
858 858
859 if (!priv->bt_enable_flag)
860 return -EINVAL;
861
859 /* make request to uCode to retrieve statistics information */ 862 /* make request to uCode to retrieve statistics information */
860 mutex_lock(&priv->mutex); 863 mutex_lock(&priv->mutex);
861 ret = iwl_send_statistics_request(priv, CMD_SYNC, false); 864 ret = iwl_send_statistics_request(priv, CMD_SYNC, false);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
index a650baba0809..97906dd442e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-eeprom.c
@@ -75,109 +75,6 @@
75#include "iwl-agn.h" 75#include "iwl-agn.h"
76#include "iwl-io.h" 76#include "iwl-io.h"
77 77
78/************************** EEPROM BANDS ****************************
79 *
80 * The iwl_eeprom_band definitions below provide the mapping from the
81 * EEPROM contents to the specific channel number supported for each
82 * band.
83 *
84 * For example, iwl_priv->eeprom.band_3_channels[4] from the band_3
85 * definition below maps to physical channel 42 in the 5.2GHz spectrum.
86 * The specific geography and calibration information for that channel
87 * is contained in the eeprom map itself.
88 *
89 * During init, we copy the eeprom information and channel map
90 * information into priv->channel_info_24/52 and priv->channel_map_24/52
91 *
92 * channel_map_24/52 provides the index in the channel_info array for a
93 * given channel. We have to have two separate maps as there is channel
94 * overlap with the 2.4GHz and 5.2GHz spectrum as seen in band_1 and
95 * band_2
96 *
97 * A value of 0xff stored in the channel_map indicates that the channel
98 * is not supported by the hardware at all.
99 *
100 * A value of 0xfe in the channel_map indicates that the channel is not
101 * valid for Tx with the current hardware. This means that
102 * while the system can tune and receive on a given channel, it may not
103 * be able to associate or transmit any frames on that
104 * channel. There is no corresponding channel information for that
105 * entry.
106 *
107 *********************************************************************/
108
109/**
110 * struct iwl_txpwr_section: eeprom section information
111 * @offset: indirect address into eeprom image
112 * @count: number of "struct iwl_eeprom_enhanced_txpwr" in this section
113 * @band: band type for the section
114 * @is_common - true: common section, false: channel section
115 * @is_cck - true: cck section, false: not cck section
116 * @is_ht_40 - true: all channel in the section are HT40 channel,
117 * false: legacy or HT 20 MHz
118 * ignore if it is common section
119 * @iwl_eeprom_section_channel: channel array in the section,
120 * ignore if common section
121 */
122struct iwl_txpwr_section {
123 u32 offset;
124 u8 count;
125 enum ieee80211_band band;
126 bool is_common;
127 bool is_cck;
128 bool is_ht40;
129 u8 iwl_eeprom_section_channel[EEPROM_MAX_TXPOWER_SECTION_ELEMENTS];
130};
131
132/**
133 * section 1 - 3 are regulatory tx power apply to all channels based on
134 * modulation: CCK, OFDM
135 * Band: 2.4GHz, 5.2GHz
136 * section 4 - 10 are regulatory tx power apply to specified channels
137 * For example:
138 * 1L - Channel 1 Legacy
139 * 1HT - Channel 1 HT
140 * (1,+1) - Channel 1 HT40 "_above_"
141 *
142 * Section 1: all CCK channels
143 * Section 2: all 2.4 GHz OFDM (Legacy, HT and HT40) channels
144 * Section 3: all 5.2 GHz OFDM (Legacy, HT and HT40) channels
145 * Section 4: 2.4 GHz 20MHz channels: 1L, 1HT, 2L, 2HT, 10L, 10HT, 11L, 11HT
146 * Section 5: 2.4 GHz 40MHz channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1)
147 * Section 6: 5.2 GHz 20MHz channels: 36L, 64L, 100L, 36HT, 64HT, 100HT
148 * Section 7: 5.2 GHz 40MHz channels: (36,+1) (60,+1) (100,+1)
149 * Section 8: 2.4 GHz channel: 13L, 13HT
150 * Section 9: 2.4 GHz channel: 140L, 140HT
151 * Section 10: 2.4 GHz 40MHz channels: (132,+1) (44,+1)
152 *
153 */
154static const struct iwl_txpwr_section enhinfo[] = {
155 { EEPROM_LB_CCK_20_COMMON, 1, IEEE80211_BAND_2GHZ, true, true, false },
156 { EEPROM_LB_OFDM_COMMON, 3, IEEE80211_BAND_2GHZ, true, false, false },
157 { EEPROM_HB_OFDM_COMMON, 3, IEEE80211_BAND_5GHZ, true, false, false },
158 { EEPROM_LB_OFDM_20_BAND, 8, IEEE80211_BAND_2GHZ,
159 false, false, false,
160 {1, 1, 2, 2, 10, 10, 11, 11 } },
161 { EEPROM_LB_OFDM_HT40_BAND, 5, IEEE80211_BAND_2GHZ,
162 false, false, true,
163 { 1, 2, 6, 7, 9 } },
164 { EEPROM_HB_OFDM_20_BAND, 6, IEEE80211_BAND_5GHZ,
165 false, false, false,
166 { 36, 64, 100, 36, 64, 100 } },
167 { EEPROM_HB_OFDM_HT40_BAND, 3, IEEE80211_BAND_5GHZ,
168 false, false, true,
169 { 36, 60, 100 } },
170 { EEPROM_LB_OFDM_20_CHANNEL_13, 2, IEEE80211_BAND_2GHZ,
171 false, false, false,
172 { 13, 13 } },
173 { EEPROM_HB_OFDM_20_CHANNEL_140, 2, IEEE80211_BAND_5GHZ,
174 false, false, false,
175 { 140, 140 } },
176 { EEPROM_HB_OFDM_HT40_BAND_1, 2, IEEE80211_BAND_5GHZ,
177 false, false, true,
178 { 132, 44 } },
179};
180
181/****************************************************************************** 78/******************************************************************************
182 * 79 *
183 * EEPROM related functions 80 * EEPROM related functions
@@ -248,6 +145,47 @@ err:
248 145
249} 146}
250 147
148int iwl_eeprom_check_sku(struct iwl_priv *priv)
149{
150 u16 eeprom_sku;
151 u16 radio_cfg;
152
153 eeprom_sku = iwl_eeprom_query16(priv, EEPROM_SKU_CAP);
154
155 priv->cfg->sku = ((eeprom_sku & EEPROM_SKU_CAP_BAND_SELECTION) >>
156 EEPROM_SKU_CAP_BAND_POS);
157 if (eeprom_sku & EEPROM_SKU_CAP_11N_ENABLE)
158 priv->cfg->sku |= IWL_SKU_N;
159
160 if (!priv->cfg->sku) {
161 IWL_ERR(priv, "Invalid device sku\n");
162 return -EINVAL;
163 }
164
165 IWL_INFO(priv, "Device SKU: 0X%x\n", priv->cfg->sku);
166
167 if (!priv->cfg->valid_tx_ant && !priv->cfg->valid_rx_ant) {
168 /* not using .cfg overwrite */
169 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
170 priv->cfg->valid_tx_ant = EEPROM_RF_CFG_TX_ANT_MSK(radio_cfg);
 171 priv->cfg->valid_rx_ant = EEPROM_RF_CFG_RX_ANT_MSK(radio_cfg);
172 if (!priv->cfg->valid_tx_ant || !priv->cfg->valid_rx_ant) {
173 IWL_ERR(priv, "Invalid chain (0X%x, 0X%x)\n",
174 priv->cfg->valid_tx_ant,
175 priv->cfg->valid_rx_ant);
176 return -EINVAL;
177 }
178 IWL_INFO(priv, "Valid Tx ant: 0X%x, Valid Rx ant: 0X%x\n",
179 priv->cfg->valid_tx_ant, priv->cfg->valid_rx_ant);
180 }
181 /*
 182 * For some special cases, the EEPROM does not reflect the correct
 183 * antenna setting, so the valid tx/rx antennas are taken from the
 184 * .cfg overwrite instead.
185 */
186 return 0;
187}
188
251void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac) 189void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac)
252{ 190{
253 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv, 191 const u8 *addr = priv->cfg->ops->lib->eeprom_ops.query_addr(priv,
@@ -265,15 +203,6 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
265{ 203{
266 s8 max_txpower_avg = 0; /* (dBm) */ 204 s8 max_txpower_avg = 0; /* (dBm) */
267 205
268 IWL_DEBUG_INFO(priv, "%d - "
269 "chain_a: %d dB chain_b: %d dB "
270 "chain_c: %d dB mimo2: %d dB mimo3: %d dB\n",
271 element,
272 enhanced_txpower[element].chain_a_max >> 1,
273 enhanced_txpower[element].chain_b_max >> 1,
274 enhanced_txpower[element].chain_c_max >> 1,
275 enhanced_txpower[element].mimo2_max >> 1,
276 enhanced_txpower[element].mimo3_max >> 1);
277 /* Take the highest tx power from any valid chains */ 206 /* Take the highest tx power from any valid chains */
278 if ((priv->cfg->valid_tx_ant & ANT_A) && 207 if ((priv->cfg->valid_tx_ant & ANT_A) &&
279 (enhanced_txpower[element].chain_a_max > max_txpower_avg)) 208 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
@@ -303,152 +232,106 @@ static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
303 return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1); 232 return (max_txpower_avg & 0x01) + (max_txpower_avg >> 1);
304} 233}
305 234
306/** 235static void
307 * iwl_update_common_txpower: update channel tx power 236iwlcore_eeprom_enh_txp_read_element(struct iwl_priv *priv,
308 * update tx power per band based on EEPROM enhanced tx power info. 237 struct iwl_eeprom_enhanced_txpwr *txp,
309 */ 238 s8 max_txpower_avg)
310static s8 iwl_update_common_txpower(struct iwl_priv *priv,
311 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
312 int section, int element, s8 *max_txpower_in_half_dbm)
313{ 239{
314 struct iwl_channel_info *ch_info; 240 int ch_idx;
315 int ch; 241 bool is_ht40 = txp->flags & IWL_EEPROM_ENH_TXP_FL_40MHZ;
316 bool is_ht40 = false; 242 enum ieee80211_band band;
317 s8 max_txpower_avg; /* (dBm) */ 243
318 244 band = txp->flags & IWL_EEPROM_ENH_TXP_FL_BAND_52G ?
319 /* it is common section, contain all type (Legacy, HT and HT40) 245 IEEE80211_BAND_5GHZ : IEEE80211_BAND_2GHZ;
320 * based on the element in the section to determine 246
321 * is it HT 40 or not 247 for (ch_idx = 0; ch_idx < priv->channel_count; ch_idx++) {
322 */ 248 struct iwl_channel_info *ch_info = &priv->channel_info[ch_idx];
323 if (element == EEPROM_TXPOWER_COMMON_HT40_INDEX) 249
324 is_ht40 = true; 250 /* update matching channel or from common data only */
325 max_txpower_avg = 251 if (txp->channel != 0 && ch_info->channel != txp->channel)
326 iwl_get_max_txpower_avg(priv, enhanced_txpower, 252 continue;
327 element, max_txpower_in_half_dbm); 253
328 254 /* update matching band only */
329 ch_info = priv->channel_info; 255 if (band != ch_info->band)
330 256 continue;
331 for (ch = 0; ch < priv->channel_count; ch++) { 257
332 /* find matching band and update tx power if needed */ 258 if (ch_info->max_power_avg < max_txpower_avg && !is_ht40) {
333 if ((ch_info->band == enhinfo[section].band) && 259 ch_info->max_power_avg = max_txpower_avg;
334 (ch_info->max_power_avg < max_txpower_avg) && 260 ch_info->curr_txpow = max_txpower_avg;
335 (!is_ht40)) {
336 /* Update regulatory-based run-time data */
337 ch_info->max_power_avg = ch_info->curr_txpow =
338 max_txpower_avg;
339 ch_info->scan_power = max_txpower_avg; 261 ch_info->scan_power = max_txpower_avg;
340 } 262 }
341 if ((ch_info->band == enhinfo[section].band) && is_ht40 && 263
342 (ch_info->ht40_max_power_avg < max_txpower_avg)) { 264 if (is_ht40 && ch_info->ht40_max_power_avg < max_txpower_avg)
343 /* Update regulatory-based run-time data */
344 ch_info->ht40_max_power_avg = max_txpower_avg; 265 ch_info->ht40_max_power_avg = max_txpower_avg;
345 }
346 ch_info++;
347 } 266 }
348 return max_txpower_avg;
349} 267}
350 268
351/** 269#define EEPROM_TXP_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT)
352 * iwl_update_channel_txpower: update channel tx power 270#define EEPROM_TXP_ENTRY_LEN sizeof(struct iwl_eeprom_enhanced_txpwr)
353 * update channel tx power based on EEPROM enhanced tx power info. 271#define EEPROM_TXP_SZ_OFFS (0x00 | INDIRECT_ADDRESS | INDIRECT_TXP_LIMIT_SIZE)
354 */ 272
355static s8 iwl_update_channel_txpower(struct iwl_priv *priv, 273#define TXP_CHECK_AND_PRINT(x) ((txp->flags & IWL_EEPROM_ENH_TXP_FL_##x) \
356 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, 274 ? # x " " : "")
357 int section, int element, s8 *max_txpower_in_half_dbm)
358{
359 struct iwl_channel_info *ch_info;
360 int ch;
361 u8 channel;
362 s8 max_txpower_avg; /* (dBm) */
363
364 channel = enhinfo[section].iwl_eeprom_section_channel[element];
365 max_txpower_avg =
366 iwl_get_max_txpower_avg(priv, enhanced_txpower,
367 element, max_txpower_in_half_dbm);
368
369 ch_info = priv->channel_info;
370 for (ch = 0; ch < priv->channel_count; ch++) {
371 /* find matching channel and update tx power if needed */
372 if (ch_info->channel == channel) {
373 if ((ch_info->max_power_avg < max_txpower_avg) &&
374 (!enhinfo[section].is_ht40)) {
375 /* Update regulatory-based run-time data */
376 ch_info->max_power_avg = max_txpower_avg;
377 ch_info->curr_txpow = max_txpower_avg;
378 ch_info->scan_power = max_txpower_avg;
379 }
380 if ((enhinfo[section].is_ht40) &&
381 (ch_info->ht40_max_power_avg < max_txpower_avg)) {
382 /* Update regulatory-based run-time data */
383 ch_info->ht40_max_power_avg = max_txpower_avg;
384 }
385 break;
386 }
387 ch_info++;
388 }
389 return max_txpower_avg;
390}
391 275
392/**
393 * iwlcore_eeprom_enhanced_txpower: process enhanced tx power info
394 */
395void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv) 276void iwlcore_eeprom_enhanced_txpower(struct iwl_priv *priv)
396{ 277{
397 int eeprom_section_count = 0; 278 struct iwl_eeprom_enhanced_txpwr *txp_array, *txp;
398 int section, element; 279 int idx, entries;
399 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower; 280 __le16 *txp_len;
400 u32 offset; 281 s8 max_txp_avg, max_txp_avg_halfdbm;
401 s8 max_txpower_avg; /* (dBm) */ 282
402 s8 max_txpower_in_half_dbm; /* (half-dBm) */ 283 BUILD_BUG_ON(sizeof(struct iwl_eeprom_enhanced_txpwr) != 8);
403 284
404 /* Loop through all the sections 285 /* the length is in 16-bit words, but we want entries */
405 * adjust bands and channel's max tx power 286 txp_len = (__le16 *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_SZ_OFFS);
406 * Set the tx_power_user_lmt to the highest power 287 entries = le16_to_cpup(txp_len) * 2 / EEPROM_TXP_ENTRY_LEN;
407 * supported by any channels and chains 288
408 */ 289 txp_array = (void *) iwlagn_eeprom_query_addr(priv, EEPROM_TXP_OFFS);
409 for (section = 0; section < ARRAY_SIZE(enhinfo); section++) { 290
410 eeprom_section_count = enhinfo[section].count; 291 for (idx = 0; idx < entries; idx++) {
411 offset = enhinfo[section].offset; 292 txp = &txp_array[idx];
412 enhanced_txpower = (struct iwl_eeprom_enhanced_txpwr *) 293 /* skip invalid entries */
413 iwl_eeprom_query_addr(priv, offset); 294 if (!(txp->flags & IWL_EEPROM_ENH_TXP_FL_VALID))
295 continue;
296
297 IWL_DEBUG_EEPROM(priv, "%s %d:\t %s%s%s%s%s%s%s%s (0x%02x)\n",
298 (txp->channel && (txp->flags &
299 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE)) ?
300 "Common " : (txp->channel) ?
301 "Channel" : "Common",
302 (txp->channel),
303 TXP_CHECK_AND_PRINT(VALID),
304 TXP_CHECK_AND_PRINT(BAND_52G),
305 TXP_CHECK_AND_PRINT(OFDM),
306 TXP_CHECK_AND_PRINT(40MHZ),
307 TXP_CHECK_AND_PRINT(HT_AP),
308 TXP_CHECK_AND_PRINT(RES1),
309 TXP_CHECK_AND_PRINT(RES2),
310 TXP_CHECK_AND_PRINT(COMMON_TYPE),
311 txp->flags);
312 IWL_DEBUG_EEPROM(priv, "\t\t chain_A: 0x%02x "
313 "chain_B: 0X%02x chain_C: 0X%02x\n",
314 txp->chain_a_max, txp->chain_b_max,
315 txp->chain_c_max);
316 IWL_DEBUG_EEPROM(priv, "\t\t MIMO2: 0x%02x "
317 "MIMO3: 0x%02x High 20_on_40: 0x%02x "
318 "Low 20_on_40: 0x%02x\n",
319 txp->mimo2_max, txp->mimo3_max,
320 ((txp->delta_20_in_40 & 0xf0) >> 4),
321 (txp->delta_20_in_40 & 0x0f));
322
323 max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
324 &max_txp_avg_halfdbm);
414 325
415 /* 326 /*
 416 * check for valid entry - 327 * Update the user limit values to the highest
417 * different version of EEPROM might contain different set 328 * power supported by any channel
418 * of enhanced tx power table
419 * always check for valid entry before process
420 * the information
421 */ 329 */
422 if (!enhanced_txpower->common || enhanced_txpower->reserved) 330 if (max_txp_avg > priv->tx_power_user_lmt)
423 continue; 331 priv->tx_power_user_lmt = max_txp_avg;
332 if (max_txp_avg_halfdbm > priv->tx_power_lmt_in_half_dbm)
333 priv->tx_power_lmt_in_half_dbm = max_txp_avg_halfdbm;
424 334
425 for (element = 0; element < eeprom_section_count; element++) { 335 iwlcore_eeprom_enh_txp_read_element(priv, txp, max_txp_avg);
426 if (enhinfo[section].is_common)
427 max_txpower_avg =
428 iwl_update_common_txpower(priv,
429 enhanced_txpower, section,
430 element,
431 &max_txpower_in_half_dbm);
432 else
433 max_txpower_avg =
434 iwl_update_channel_txpower(priv,
435 enhanced_txpower, section,
436 element,
437 &max_txpower_in_half_dbm);
438
439 /* Update the tx_power_user_lmt to the highest power
440 * supported by any channel */
441 if (max_txpower_avg > priv->tx_power_user_lmt)
442 priv->tx_power_user_lmt = max_txpower_avg;
443
444 /*
445 * Update the tx_power_lmt_in_half_dbm to
446 * the highest power supported by any channel
447 */
448 if (max_txpower_in_half_dbm >
449 priv->tx_power_lmt_in_half_dbm)
450 priv->tx_power_lmt_in_half_dbm =
451 max_txpower_in_half_dbm;
452 }
453 } 336 }
454} 337}
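The iwl-agn-eeprom.c rewrite above drops the hard-coded enhinfo[] section table: the enhanced TX power data is now read as one flat array of iwl_eeprom_enhanced_txpwr entries. The stored length is in 16-bit words, so the entry count is length * 2 / sizeof(entry), and any entry without the VALID flag is skipped before the channel maxima and the user TX power limits are updated. A simplified sketch of that conversion and filtering, using a hypothetical 8-byte entry layout rather than the real EEPROM format:

#include <stdint.h>
#include <stdio.h>

/* Simplified 8-byte entry; the real iwl_eeprom_enhanced_txpwr has more fields. */
struct txp_entry {
        uint8_t flags;
        uint8_t channel;
        int8_t  chain_a_max;    /* half-dBm units in the driver */
        int8_t  chain_b_max;
        int8_t  chain_c_max;
        int8_t  mimo2_max;
        int8_t  mimo3_max;
        uint8_t delta_20_in_40;
};

#define TXP_FL_VALID 0x01

int main(void)
{
        /* Pretend this came from the EEPROM: length stored in 16-bit words. */
        uint16_t len_in_words = 8;      /* 16 bytes -> two 8-byte entries */
        struct txp_entry table[] = {
                { .flags = TXP_FL_VALID, .channel = 0,  .chain_a_max = 40 },
                { .flags = 0,            .channel = 36, .chain_a_max = 38 },
        };
        int entries = len_in_words * 2 / sizeof(struct txp_entry);
        int idx;

        for (idx = 0; idx < entries; idx++) {
                if (!(table[idx].flags & TXP_FL_VALID))
                        continue;       /* skip invalid entries, as the driver does */
                printf("entry %d: channel %u, chain A max %d (half-dBm)\n",
                       idx, (unsigned)table[idx].channel, table[idx].chain_a_max);
        }
        return 0;
}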
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
index ffb2f4111ad0..366340f3fb0f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-hcmd.c
@@ -307,6 +307,7 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
307 307
308 if (ctx_bss->vif && ctx_pan->vif) { 308 if (ctx_bss->vif && ctx_pan->vif) {
309 int bcnint = ctx_pan->vif->bss_conf.beacon_int; 309 int bcnint = ctx_pan->vif->bss_conf.beacon_int;
310 int dtim = ctx_pan->vif->bss_conf.dtim_period ?: 1;
310 311
311 /* should be set, but seems unused?? */ 312 /* should be set, but seems unused?? */
312 cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE); 313 cmd.flags |= cpu_to_le16(IWL_WIPAN_PARAMS_FLG_SLOTTED_MODE);
@@ -329,10 +330,10 @@ static int iwlagn_set_pan_params(struct iwl_priv *priv)
329 if (test_bit(STATUS_SCAN_HW, &priv->status) || 330 if (test_bit(STATUS_SCAN_HW, &priv->status) ||
330 (!ctx_bss->vif->bss_conf.idle && 331 (!ctx_bss->vif->bss_conf.idle &&
331 !ctx_bss->vif->bss_conf.assoc)) { 332 !ctx_bss->vif->bss_conf.assoc)) {
332 slot0 = bcnint * 3 - 20; 333 slot0 = dtim * bcnint * 3 - 20;
333 slot1 = 20; 334 slot1 = 20;
334 } else if (!ctx_pan->vif->bss_conf.idle && 335 } else if (!ctx_pan->vif->bss_conf.idle &&
335 !ctx_pan->vif->bss_conf.assoc) { 336 !ctx_pan->vif->bss_conf.assoc) {
336 slot1 = bcnint * 3 - 20; 337 slot1 = bcnint * 3 - 20;
337 slot0 = 20; 338 slot0 = 20;
338 } 339 }
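The iwl-agn-hcmd.c hunk above makes the PAN parameter slot split DTIM-aware: slot0 grows from bcnint * 3 - 20 to dtim * bcnint * 3 - 20, with dtim falling back to 1 when the DTIM period is not set. With a 100 TU beacon interval this gives 280 TU at DTIM 1 and 580 TU at DTIM 2. A tiny sketch of the computation:

#include <stdio.h>

/* Sketch of the slot0 computation; values are in TU (time units). */
static int pan_slot0(int beacon_int, int dtim_period)
{
        int dtim = dtim_period ? dtim_period : 1;       /* the ?: 1 fallback */

        return dtim * beacon_int * 3 - 20;
}

int main(void)
{
        printf("bcnint=100 dtim=1 -> slot0=%d TU\n", pan_slot0(100, 1)); /* 280 */
        printf("bcnint=100 dtim=2 -> slot0=%d TU\n", pan_slot0(100, 2)); /* 580 */
        return 0;
}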
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index b555edd53354..3dee87e8f55d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -405,6 +405,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
405 return; 405 return;
406 } 406 }
407 407
408 txq->time_stamp = jiffies;
408 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb); 409 info = IEEE80211_SKB_CB(txq->txb[txq->q.read_ptr].skb);
409 memset(&info->status, 0, sizeof(info->status)); 410 memset(&info->status, 0, sizeof(info->status));
410 411
@@ -445,22 +446,17 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
445 446
446 if (priv->mac80211_registered && 447 if (priv->mac80211_registered &&
447 (iwl_queue_space(&txq->q) > txq->q.low_mark) && 448 (iwl_queue_space(&txq->q) > txq->q.low_mark) &&
448 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) { 449 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
449 if (agg->state == IWL_AGG_OFF) 450 iwl_wake_queue(priv, txq);
450 iwl_wake_queue(priv, txq_id);
451 else
452 iwl_wake_queue(priv, txq->swq_id);
453 }
454 } 451 }
455 } else { 452 } else {
456 BUG_ON(txq_id != txq->swq_id);
457 iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false); 453 iwlagn_set_tx_status(priv, info, tx_resp, txq_id, false);
458 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index); 454 freed = iwlagn_tx_queue_reclaim(priv, txq_id, index);
459 iwl_free_tfds_in_queue(priv, sta_id, tid, freed); 455 iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
460 456
461 if (priv->mac80211_registered && 457 if (priv->mac80211_registered &&
462 (iwl_queue_space(&txq->q) > txq->q.low_mark)) 458 (iwl_queue_space(&txq->q) > txq->q.low_mark))
463 iwl_wake_queue(priv, txq_id); 459 iwl_wake_queue(priv, txq);
464 } 460 }
465 461
466 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id); 462 iwlagn_txq_check_empty(priv, sta_id, tid, txq_id);
@@ -496,6 +492,10 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
496 struct iwlagn_tx_power_dbm_cmd tx_power_cmd; 492 struct iwlagn_tx_power_dbm_cmd tx_power_cmd;
497 u8 tx_ant_cfg_cmd; 493 u8 tx_ant_cfg_cmd;
498 494
495 if (WARN_ONCE(test_bit(STATUS_SCAN_HW, &priv->status),
496 "TX Power requested while scanning!\n"))
497 return -EAGAIN;
498
499 /* half dBm need to multiply */ 499 /* half dBm need to multiply */
500 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt); 500 tx_power_cmd.global_lmt = (s8)(2 * priv->tx_power_user_lmt);
501 501
@@ -522,9 +522,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
522 else 522 else
523 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD; 523 tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
524 524
525 return iwl_send_cmd_pdu_async(priv, tx_ant_cfg_cmd, 525 return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
526 sizeof(tx_power_cmd), &tx_power_cmd, 526 &tx_power_cmd);
527 NULL);
528} 527}
529 528
530void iwlagn_temperature(struct iwl_priv *priv) 529void iwlagn_temperature(struct iwl_priv *priv)
@@ -569,6 +568,12 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
569 case INDIRECT_REGULATORY: 568 case INDIRECT_REGULATORY:
570 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY); 569 offset = iwl_eeprom_query16(priv, EEPROM_LINK_REGULATORY);
571 break; 570 break;
571 case INDIRECT_TXP_LIMIT:
572 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT);
573 break;
574 case INDIRECT_TXP_LIMIT_SIZE:
575 offset = iwl_eeprom_query16(priv, EEPROM_LINK_TXP_LIMIT_SIZE);
576 break;
572 case INDIRECT_CALIBRATION: 577 case INDIRECT_CALIBRATION:
573 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION); 578 offset = iwl_eeprom_query16(priv, EEPROM_LINK_CALIBRATION);
574 break; 579 break;
@@ -750,6 +755,12 @@ int iwlagn_hw_nic_init(struct iwl_priv *priv)
750 } else 755 } else
751 iwlagn_txq_ctx_reset(priv); 756 iwlagn_txq_ctx_reset(priv);
752 757
758 if (priv->cfg->base_params->shadow_reg_enable) {
759 /* enable shadow regs in HW */
760 iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
761 0x800FFFFF);
762 }
763
753 set_bit(STATUS_INIT, &priv->status); 764 set_bit(STATUS_INIT, &priv->status);
754 765
755 return 0; 766 return 0;
@@ -1481,15 +1492,11 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1481 if (priv->cfg->scan_rx_antennas[band]) 1492 if (priv->cfg->scan_rx_antennas[band])
1482 rx_ant = priv->cfg->scan_rx_antennas[band]; 1493 rx_ant = priv->cfg->scan_rx_antennas[band];
1483 1494
1484 if (priv->cfg->scan_tx_antennas[band]) 1495 if (band == IEEE80211_BAND_2GHZ &&
1485 scan_tx_antennas = priv->cfg->scan_tx_antennas[band]; 1496 priv->cfg->bt_params &&
1486 1497 priv->cfg->bt_params->advanced_bt_coexist) {
1487 if (priv->cfg->bt_params && 1498 /* transmit 2.4 GHz probes only on first antenna */
1488 priv->cfg->bt_params->advanced_bt_coexist && 1499 scan_tx_antennas = first_antenna(scan_tx_antennas);
1489 priv->bt_full_concurrent) {
1490 /* operated as 1x1 in full concurrency mode */
1491 scan_tx_antennas = first_antenna(
1492 priv->cfg->scan_tx_antennas[band]);
1493 } 1500 }
1494 1501
1495 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band], 1502 priv->scan_tx_ant[band] = iwl_toggle_tx_ant(priv, priv->scan_tx_ant[band],
@@ -1584,22 +1591,6 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
1584 return ret; 1591 return ret;
1585} 1592}
1586 1593
1587void iwlagn_post_scan(struct iwl_priv *priv)
1588{
1589 struct iwl_rxon_context *ctx;
1590
1591 /*
1592 * Since setting the RXON may have been deferred while
1593 * performing the scan, fire one off if needed
1594 */
1595 for_each_context(priv, ctx)
1596 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
1597 iwlagn_commit_rxon(priv, ctx);
1598
1599 if (priv->cfg->ops->hcmd->set_pan_params)
1600 priv->cfg->ops->hcmd->set_pan_params(priv);
1601}
1602
1603int iwlagn_manage_ibss_station(struct iwl_priv *priv, 1594int iwlagn_manage_ibss_station(struct iwl_priv *priv,
1604 struct ieee80211_vif *vif, bool add) 1595 struct ieee80211_vif *vif, bool add)
1605{ 1596{
@@ -1790,7 +1781,7 @@ static const __le32 iwlagn_def_3w_lookup[12] = {
1790 cpu_to_le32(0xc0004000), 1781 cpu_to_le32(0xc0004000),
1791 cpu_to_le32(0x00004000), 1782 cpu_to_le32(0x00004000),
1792 cpu_to_le32(0xf0005000), 1783 cpu_to_le32(0xf0005000),
1793 cpu_to_le32(0xf0004000), 1784 cpu_to_le32(0xf0005000),
1794}; 1785};
1795 1786
1796static const __le32 iwlagn_concurrent_lookup[12] = { 1787static const __le32 iwlagn_concurrent_lookup[12] = {
@@ -1826,6 +1817,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1826 bt_cmd.prio_boost = 0; 1817 bt_cmd.prio_boost = 0;
1827 bt_cmd.kill_ack_mask = priv->kill_ack_mask; 1818 bt_cmd.kill_ack_mask = priv->kill_ack_mask;
1828 bt_cmd.kill_cts_mask = priv->kill_cts_mask; 1819 bt_cmd.kill_cts_mask = priv->kill_cts_mask;
1820
1829 bt_cmd.valid = priv->bt_valid; 1821 bt_cmd.valid = priv->bt_valid;
1830 bt_cmd.tx_prio_boost = 0; 1822 bt_cmd.tx_prio_boost = 0;
1831 bt_cmd.rx_prio_boost = 0; 1823 bt_cmd.rx_prio_boost = 0;
@@ -1841,10 +1833,15 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
1841 } else { 1833 } else {
1842 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W << 1834 bt_cmd.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
1843 IWLAGN_BT_FLAG_COEX_MODE_SHIFT; 1835 IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
1836 if (priv->cfg->bt_params &&
1837 priv->cfg->bt_params->bt_sco_disable)
1838 bt_cmd.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
1839
1844 if (priv->bt_ch_announce) 1840 if (priv->bt_ch_announce)
1845 bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION; 1841 bt_cmd.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
1846 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags); 1842 IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", bt_cmd.flags);
1847 } 1843 }
1844 priv->bt_enable_flag = bt_cmd.flags;
1848 if (priv->bt_full_concurrent) 1845 if (priv->bt_full_concurrent)
1849 memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup, 1846 memcpy(bt_cmd.bt3_lookup_table, iwlagn_concurrent_lookup,
1850 sizeof(iwlagn_concurrent_lookup)); 1847 sizeof(iwlagn_concurrent_lookup));
@@ -1884,12 +1881,20 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1884 struct iwl_rxon_context *ctx; 1881 struct iwl_rxon_context *ctx;
1885 int smps_request = -1; 1882 int smps_request = -1;
1886 1883
1884 /*
1885 * Note: bt_traffic_load can be overridden by scan complete and
 1886 * coex profile notifications. Ignore that, since the only bad
 1887 * consequence is a debug print that does not match the actual state.
1888 */
1887 IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n", 1889 IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
1888 priv->bt_traffic_load); 1890 priv->bt_traffic_load);
1889 1891
1890 switch (priv->bt_traffic_load) { 1892 switch (priv->bt_traffic_load) {
1891 case IWL_BT_COEX_TRAFFIC_LOAD_NONE: 1893 case IWL_BT_COEX_TRAFFIC_LOAD_NONE:
1892 smps_request = IEEE80211_SMPS_AUTOMATIC; 1894 if (priv->bt_status)
1895 smps_request = IEEE80211_SMPS_DYNAMIC;
1896 else
1897 smps_request = IEEE80211_SMPS_AUTOMATIC;
1893 break; 1898 break;
1894 case IWL_BT_COEX_TRAFFIC_LOAD_LOW: 1899 case IWL_BT_COEX_TRAFFIC_LOAD_LOW:
1895 smps_request = IEEE80211_SMPS_DYNAMIC; 1900 smps_request = IEEE80211_SMPS_DYNAMIC;
@@ -1906,6 +1911,16 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1906 1911
1907 mutex_lock(&priv->mutex); 1912 mutex_lock(&priv->mutex);
1908 1913
1914 /*
 1915 * We cannot send commands to the firmware while scanning. When the
 1916 * scan completes we will schedule this work again. We check with the
 1917 * mutex locked to prevent a new scan request from arriving. We do not
 1918 * check STATUS_SCANNING, to avoid a race when this work is queued twice
 1919 * from different notifications; we simply quit and do no work at all.
1920 */
1921 if (test_bit(STATUS_SCAN_HW, &priv->status))
1922 goto out;
1923
1909 if (priv->cfg->ops->lib->update_chain_flags) 1924 if (priv->cfg->ops->lib->update_chain_flags)
1910 priv->cfg->ops->lib->update_chain_flags(priv); 1925 priv->cfg->ops->lib->update_chain_flags(priv);
1911 1926
@@ -1915,7 +1930,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
1915 ieee80211_request_smps(ctx->vif, smps_request); 1930 ieee80211_request_smps(ctx->vif, smps_request);
1916 } 1931 }
1917 } 1932 }
1918 1933out:
1919 mutex_unlock(&priv->mutex); 1934 mutex_unlock(&priv->mutex);
1920} 1935}
1921 1936
@@ -1986,24 +2001,29 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
1986 BT_UART_MSG_FRAME7CONNECTABLE_POS); 2001 BT_UART_MSG_FRAME7CONNECTABLE_POS);
1987} 2002}
1988 2003
1989static void iwlagn_set_kill_ack_msk(struct iwl_priv *priv, 2004static void iwlagn_set_kill_msk(struct iwl_priv *priv,
1990 struct iwl_bt_uart_msg *uart_msg) 2005 struct iwl_bt_uart_msg *uart_msg)
1991{ 2006{
1992 u8 kill_ack_msk; 2007 u8 kill_msk;
1993 __le32 bt_kill_ack_msg[2] = { 2008 static const __le32 bt_kill_ack_msg[2] = {
1994 cpu_to_le32(0xFFFFFFF), cpu_to_le32(0xFFFFFC00) }; 2009 IWLAGN_BT_KILL_ACK_MASK_DEFAULT,
1995 2010 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
1996 kill_ack_msk = (((BT_UART_MSG_FRAME3A2DP_MSK | 2011 static const __le32 bt_kill_cts_msg[2] = {
1997 BT_UART_MSG_FRAME3SNIFF_MSK | 2012 IWLAGN_BT_KILL_CTS_MASK_DEFAULT,
1998 BT_UART_MSG_FRAME3SCOESCO_MSK) & 2013 IWLAGN_BT_KILL_ACK_CTS_MASK_SCO };
1999 uart_msg->frame3) == 0) ? 1 : 0; 2014
2000 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_ack_msk]) { 2015 kill_msk = (BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3)
2016 ? 1 : 0;
2017 if (priv->kill_ack_mask != bt_kill_ack_msg[kill_msk] ||
2018 priv->kill_cts_mask != bt_kill_cts_msg[kill_msk]) {
2001 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK; 2019 priv->bt_valid |= IWLAGN_BT_VALID_KILL_ACK_MASK;
2002 priv->kill_ack_mask = bt_kill_ack_msg[kill_ack_msk]; 2020 priv->kill_ack_mask = bt_kill_ack_msg[kill_msk];
2021 priv->bt_valid |= IWLAGN_BT_VALID_KILL_CTS_MASK;
2022 priv->kill_cts_mask = bt_kill_cts_msg[kill_msk];
2023
2003 /* schedule to send runtime bt_config */ 2024 /* schedule to send runtime bt_config */
2004 queue_work(priv->workqueue, &priv->bt_runtime_config); 2025 queue_work(priv->workqueue, &priv->bt_runtime_config);
2005 } 2026 }
2006
2007} 2027}
2008 2028
2009void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv, 2029void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
@@ -2014,7 +2034,6 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2014 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif; 2034 struct iwl_bt_coex_profile_notif *coex = &pkt->u.bt_coex_profile_notif;
2015 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 }; 2035 struct iwlagn_bt_sco_cmd sco_cmd = { .flags = 0 };
2016 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg; 2036 struct iwl_bt_uart_msg *uart_msg = &coex->last_bt_uart_msg;
2017 u8 last_traffic_load;
2018 2037
2019 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n"); 2038 IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
2020 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status); 2039 IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
@@ -2023,11 +2042,10 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2023 coex->bt_ci_compliance); 2042 coex->bt_ci_compliance);
2024 iwlagn_print_uartmsg(priv, uart_msg); 2043 iwlagn_print_uartmsg(priv, uart_msg);
2025 2044
2026 last_traffic_load = priv->notif_bt_traffic_load; 2045 priv->last_bt_traffic_load = priv->bt_traffic_load;
2027 priv->notif_bt_traffic_load = coex->bt_traffic_load;
2028 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) { 2046 if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
2029 if (priv->bt_status != coex->bt_status || 2047 if (priv->bt_status != coex->bt_status ||
2030 last_traffic_load != coex->bt_traffic_load) { 2048 priv->last_bt_traffic_load != coex->bt_traffic_load) {
2031 if (coex->bt_status) { 2049 if (coex->bt_status) {
2032 /* BT on */ 2050 /* BT on */
2033 if (!priv->bt_ch_announce) 2051 if (!priv->bt_ch_announce)
@@ -2056,7 +2074,7 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
2056 } 2074 }
2057 } 2075 }
2058 2076
2059 iwlagn_set_kill_ack_msk(priv, uart_msg); 2077 iwlagn_set_kill_msk(priv, uart_msg);
2060 2078
2061 /* FIXME: based on notification, adjust the prio_boost */ 2079 /* FIXME: based on notification, adjust the prio_boost */
2062 2080
@@ -2276,7 +2294,7 @@ static const char *get_csr_string(int cmd)
2276void iwl_dump_csr(struct iwl_priv *priv) 2294void iwl_dump_csr(struct iwl_priv *priv)
2277{ 2295{
2278 int i; 2296 int i;
2279 u32 csr_tbl[] = { 2297 static const u32 csr_tbl[] = {
2280 CSR_HW_IF_CONFIG_REG, 2298 CSR_HW_IF_CONFIG_REG,
2281 CSR_INT_COALESCING, 2299 CSR_INT_COALESCING,
2282 CSR_INT, 2300 CSR_INT,
@@ -2335,7 +2353,7 @@ int iwl_dump_fh(struct iwl_priv *priv, char **buf, bool display)
2335 int pos = 0; 2353 int pos = 0;
2336 size_t bufsz = 0; 2354 size_t bufsz = 0;
2337#endif 2355#endif
2338 u32 fh_tbl[] = { 2356 static const u32 fh_tbl[] = {
2339 FH_RSCSR_CHNL0_STTS_WPTR_REG, 2357 FH_RSCSR_CHNL0_STTS_WPTR_REG,
2340 FH_RSCSR_CHNL0_RBDCB_BASE_REG, 2358 FH_RSCSR_CHNL0_RBDCB_BASE_REG,
2341 FH_RSCSR_CHNL0_WPTR, 2359 FH_RSCSR_CHNL0_WPTR,
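Among the iwl-agn-lib.c changes above, iwlagn_set_kill_ack_msk() becomes iwlagn_set_kill_msk(): the index into the mask tables is now chosen purely by whether frame3 of the BT UART message reports an SCO/eSCO link, and the ACK and CTS kill masks are updated together, with the runtime bt_config only scheduled when either mask actually changes. A sketch of that selection logic; the mask values and the frame3 bit below are placeholders, not the real IWLAGN_BT_KILL_* constants:

#include <stdint.h>
#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for the driver's mask values and frame3 bit. */
#define FRAME3_SCO_ESCO_MSK     0x01
#define KILL_ACK_DEFAULT        0xffff0000u
#define KILL_ACK_SCO            0xffffff00u
#define KILL_CTS_DEFAULT        0xff000000u
#define KILL_CTS_SCO            0xffffff00u

struct coex_state {
        uint32_t kill_ack_mask;
        uint32_t kill_cts_mask;
        bool need_bt_config;    /* set when the runtime BT config must be resent */
};

/* Pick ACK and CTS kill masks together, keyed on whether SCO/eSCO is active. */
static void set_kill_masks(struct coex_state *st, uint8_t uart_frame3)
{
        static const uint32_t ack_msg[2] = { KILL_ACK_DEFAULT, KILL_ACK_SCO };
        static const uint32_t cts_msg[2] = { KILL_CTS_DEFAULT, KILL_CTS_SCO };
        int idx = (uart_frame3 & FRAME3_SCO_ESCO_MSK) ? 1 : 0;

        if (st->kill_ack_mask != ack_msg[idx] ||
            st->kill_cts_mask != cts_msg[idx]) {
                st->kill_ack_mask = ack_msg[idx];
                st->kill_cts_mask = cts_msg[idx];
                st->need_bt_config = true;
        }
}

int main(void)
{
        struct coex_state st = { KILL_ACK_DEFAULT, KILL_CTS_DEFAULT, false };

        set_kill_masks(&st, FRAME3_SCO_ESCO_MSK);       /* SCO link reported */
        printf("ack=0x%x cts=0x%x resend=%d\n",
               (unsigned)st.kill_ack_mask, (unsigned)st.kill_cts_mask,
               st.need_bt_config);
        return 0;
}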
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 065553629de5..75fcd30a7c13 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -387,7 +387,7 @@ static int rs_tl_turn_on_agg_for_tid(struct iwl_priv *priv,
387 if (load > IWL_AGG_LOAD_THRESHOLD) { 387 if (load > IWL_AGG_LOAD_THRESHOLD) {
388 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n", 388 IWL_DEBUG_HT(priv, "Starting Tx agg: STA: %pM tid: %d\n",
389 sta->addr, tid); 389 sta->addr, tid);
390 ret = ieee80211_start_tx_ba_session(sta, tid); 390 ret = ieee80211_start_tx_ba_session(sta, tid, 5000);
391 if (ret == -EAGAIN) { 391 if (ret == -EAGAIN) {
392 /* 392 /*
393 * driver and mac80211 is out of sync 393 * driver and mac80211 is out of sync
@@ -833,17 +833,23 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
833 struct iwl_lq_sta *lq_sta) 833 struct iwl_lq_sta *lq_sta)
834{ 834{
835 struct iwl_scale_tbl_info *tbl; 835 struct iwl_scale_tbl_info *tbl;
836 bool full_concurrent; 836 bool full_concurrent = priv->bt_full_concurrent;
837 unsigned long flags; 837 unsigned long flags;
838 838
839 spin_lock_irqsave(&priv->lock, flags); 839 if (priv->bt_ant_couple_ok) {
840 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok) 840 /*
841 full_concurrent = true; 841 * Is there a need to switch between
842 else 842 * full concurrency and 3-wire?
843 full_concurrent = false; 843 */
844 spin_unlock_irqrestore(&priv->lock, flags); 844 spin_lock_irqsave(&priv->lock, flags);
845 845 if (priv->bt_ci_compliance && priv->bt_ant_couple_ok)
846 if (priv->bt_full_concurrent != full_concurrent) { 846 full_concurrent = true;
847 else
848 full_concurrent = false;
849 spin_unlock_irqrestore(&priv->lock, flags);
850 }
851 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
852 (priv->bt_full_concurrent != full_concurrent)) {
847 priv->bt_full_concurrent = full_concurrent; 853 priv->bt_full_concurrent = full_concurrent;
848 854
849 /* Update uCode's rate table. */ 855 /* Update uCode's rate table. */
@@ -1040,8 +1046,7 @@ done:
1040 if (sta && sta->supp_rates[sband->band]) 1046 if (sta && sta->supp_rates[sband->band])
1041 rs_rate_scale_perform(priv, skb, sta, lq_sta); 1047 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1042 1048
1043 /* Is there a need to switch between full concurrency and 3-wire? */ 1049 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1044 if (priv->bt_ant_couple_ok)
1045 rs_bt_update_lq(priv, ctx, lq_sta); 1050 rs_bt_update_lq(priv, ctx, lq_sta);
1046} 1051}
1047 1052
@@ -2868,6 +2873,10 @@ void iwl_rs_rate_init(struct iwl_priv *priv, struct ieee80211_sta *sta, u8 sta_i
2868 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2873 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2869 lq_sta->is_agg = 0; 2874 lq_sta->is_agg = 0;
2870 2875
2876#ifdef CONFIG_MAC80211_DEBUGFS
2877 lq_sta->dbg_fixed_rate = 0;
2878#endif
2879
2871 rs_initialize_lq(priv, conf, sta, lq_sta); 2880 rs_initialize_lq(priv, conf, sta, lq_sta);
2872} 2881}
2873 2882
@@ -3010,10 +3019,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
3010 */ 3019 */
3011 if (priv && priv->cfg->bt_params && 3020 if (priv && priv->cfg->bt_params &&
3012 priv->cfg->bt_params->agg_time_limit && 3021 priv->cfg->bt_params->agg_time_limit &&
3013 priv->cfg->bt_params->agg_time_limit >= 3022 priv->bt_traffic_load >= IWL_BT_COEX_TRAFFIC_LOAD_HIGH)
3014 LINK_QUAL_AGG_TIME_LIMIT_MIN &&
3015 priv->cfg->bt_params->agg_time_limit <=
3016 LINK_QUAL_AGG_TIME_LIMIT_MAX)
3017 lq_cmd->agg_params.agg_time_limit = 3023 lq_cmd->agg_params.agg_time_limit =
3018 cpu_to_le16(priv->cfg->bt_params->agg_time_limit); 3024 cpu_to_le16(priv->cfg->bt_params->agg_time_limit);
3019} 3025}
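For context on the rs_bt_update_lq rework above: the full-concurrency decision is now sampled under the lock only when antenna coupling allows it, and the rate table is reprogrammed only when the derived value (or the BT traffic load) actually changed. A standalone sketch of that pattern, with a pthread mutex standing in for the driver's spinlock and every name invented for illustration:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct bt_state {
        pthread_mutex_t lock;
        bool ci_compliance;
        bool ant_couple_ok;
        bool full_concurrent;     /* value last pushed to the rate table */
        int  traffic_load;
        int  last_traffic_load;
};

static void bt_update(struct bt_state *s)
{
        bool full = s->full_concurrent;

        if (s->ant_couple_ok) {
                /* Is a switch between full concurrency and 3-wire needed? */
                pthread_mutex_lock(&s->lock);
                full = s->ci_compliance;
                pthread_mutex_unlock(&s->lock);
        }

        if (s->traffic_load != s->last_traffic_load ||
            s->full_concurrent != full) {
                s->full_concurrent = full;
                s->last_traffic_load = s->traffic_load;
                printf("reprogram rate table, full_concurrent=%d\n", full);
        }
}

int main(void)
{
        struct bt_state s = { .lock = PTHREAD_MUTEX_INITIALIZER,
                              .ci_compliance = true, .ant_couple_ok = true };

        bt_update(&s);  /* switches to full concurrency once */
        bt_update(&s);  /* no change, nothing reprogrammed */
        return 0;
}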
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
new file mode 100644
index 000000000000..6d140bd53291
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rxon.c
@@ -0,0 +1,642 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2010 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include "iwl-dev.h"
28#include "iwl-agn.h"
29#include "iwl-sta.h"
30#include "iwl-core.h"
31#include "iwl-agn-calib.h"
32
33static int iwlagn_disable_bss(struct iwl_priv *priv,
34 struct iwl_rxon_context *ctx,
35 struct iwl_rxon_cmd *send)
36{
37 __le32 old_filter = send->filter_flags;
38 int ret;
39
40 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
41 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
42
43 send->filter_flags = old_filter;
44
45 if (ret)
46 IWL_ERR(priv, "Error clearing ASSOC_MSK on BSS (%d)\n", ret);
47
48 return ret;
49}
50
51static int iwlagn_disable_pan(struct iwl_priv *priv,
52 struct iwl_rxon_context *ctx,
53 struct iwl_rxon_cmd *send)
54{
55 __le32 old_filter = send->filter_flags;
56 u8 old_dev_type = send->dev_type;
57 int ret;
58
59 send->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
60 send->dev_type = RXON_DEV_TYPE_P2P;
61 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd, sizeof(*send), send);
62
63 send->filter_flags = old_filter;
64 send->dev_type = old_dev_type;
65
66 if (ret)
67 IWL_ERR(priv, "Error disabling PAN (%d)\n", ret);
68
69 /* FIXME: WAIT FOR PAN DISABLE */
70 msleep(300);
71
72 return ret;
73}
74
75static void iwlagn_update_qos(struct iwl_priv *priv,
76 struct iwl_rxon_context *ctx)
77{
78 int ret;
79
80 if (!ctx->is_active)
81 return;
82
83 ctx->qos_data.def_qos_parm.qos_flags = 0;
84
85 if (ctx->qos_data.qos_active)
86 ctx->qos_data.def_qos_parm.qos_flags |=
87 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
88
89 if (ctx->ht.enabled)
90 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
91
92 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
93 ctx->qos_data.qos_active,
94 ctx->qos_data.def_qos_parm.qos_flags);
95
96 ret = iwl_send_cmd_pdu(priv, ctx->qos_cmd,
97 sizeof(struct iwl_qosparam_cmd),
98 &ctx->qos_data.def_qos_parm);
99 if (ret)
100 IWL_ERR(priv, "Failed to update QoS\n");
101}
102
103static int iwlagn_update_beacon(struct iwl_priv *priv,
104 struct ieee80211_vif *vif)
105{
106 lockdep_assert_held(&priv->mutex);
107
108 dev_kfree_skb(priv->beacon_skb);
109 priv->beacon_skb = ieee80211_beacon_get(priv->hw, vif);
110 if (!priv->beacon_skb)
111 return -ENOMEM;
112 return iwlagn_send_beacon_cmd(priv);
113}
114
115/**
116 * iwlagn_commit_rxon - commit staging_rxon to hardware
117 *
118 * The RXON command in staging_rxon is committed to the hardware and
119 * the active_rxon structure is updated with the new data. This
120 * function correctly transitions out of the RXON_ASSOC_MSK state if
121 * a HW tune is required based on the RXON structure changes.
122 */
123int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
124{
125 /* cast away the const for active_rxon in this function */
126 struct iwl_rxon_cmd *active = (void *)&ctx->active;
127 bool new_assoc = !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
128 bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
129 int ret;
130
131 lockdep_assert_held(&priv->mutex);
132
133 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
134 return -EINVAL;
135
136 if (!iwl_is_alive(priv))
137 return -EBUSY;
138
139 /* This function hardcodes a bunch of dual-mode assumptions */
140 BUILD_BUG_ON(NUM_IWL_RXON_CTX != 2);
141
142 if (!ctx->is_active)
143 return 0;
144
145 /* always get timestamp with Rx frame */
146 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
147
148 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
149 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
150 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
151 else
152 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
153
154 ret = iwl_check_rxon_cmd(priv, ctx);
155 if (ret) {
156 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
157 return -EINVAL;
158 }
159
160 /*
 161 * commit_rxon request received:
 162 * abort any previous channel switch that is still in progress
163 */
164 if (priv->switch_rxon.switch_in_progress &&
165 (priv->switch_rxon.channel != ctx->staging.channel)) {
166 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
167 le16_to_cpu(priv->switch_rxon.channel));
168 iwl_chswitch_done(priv, false);
169 }
170
171 /*
172 * If we don't need to send a full RXON, we can use
173 * iwl_rxon_assoc_cmd which is used to reconfigure filter
174 * and other flags for the current radio configuration.
175 */
176 if (!iwl_full_rxon_required(priv, ctx)) {
177 ret = iwl_send_rxon_assoc(priv, ctx);
178 if (ret) {
179 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
180 return ret;
181 }
182
183 memcpy(active, &ctx->staging, sizeof(*active));
184 iwl_print_rx_config_cmd(priv, ctx);
185 return 0;
186 }
187
188 if (priv->cfg->ops->hcmd->set_pan_params) {
189 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
190 if (ret)
191 return ret;
192 }
193
194 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
195
196 IWL_DEBUG_INFO(priv,
197 "Going to commit RXON\n"
198 " * with%s RXON_FILTER_ASSOC_MSK\n"
199 " * channel = %d\n"
200 " * bssid = %pM\n",
201 (new_assoc ? "" : "out"),
202 le16_to_cpu(ctx->staging.channel),
203 ctx->staging.bssid_addr);
204
205 /*
206 * Always clear associated first, but with the correct config.
 207 * This is required because, for example, the AP station must be
 208 * added only after the BSSID is set, so that the filters in the
 209 * device are set up correctly.
210 */
211 if ((old_assoc && new_assoc) || !new_assoc) {
212 if (ctx->ctxid == IWL_RXON_CTX_BSS)
213 ret = iwlagn_disable_bss(priv, ctx, &ctx->staging);
214 else
215 ret = iwlagn_disable_pan(priv, ctx, &ctx->staging);
216 if (ret)
217 return ret;
218
219 memcpy(active, &ctx->staging, sizeof(*active));
220
221 /*
222 * Un-assoc RXON clears the station table and WEP
223 * keys, so we have to restore those afterwards.
224 */
225 iwl_clear_ucode_stations(priv, ctx);
226 iwl_restore_stations(priv, ctx);
227 ret = iwl_restore_default_wep_keys(priv, ctx);
228 if (ret) {
229 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
230 return ret;
231 }
232 }
233
234 /* RXON timing must be before associated RXON */
235 ret = iwl_send_rxon_timing(priv, ctx);
236 if (ret) {
237 IWL_ERR(priv, "Failed to send timing (%d)!\n", ret);
238 return ret;
239 }
240
241 if (new_assoc) {
242 /* QoS info may be cleared by previous un-assoc RXON */
243 iwlagn_update_qos(priv, ctx);
244
245 /*
246 * We'll run into this code path when beaconing is
247 * enabled, but then we also need to send the beacon
248 * to the device.
249 */
250 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_AP)) {
251 ret = iwlagn_update_beacon(priv, ctx->vif);
252 if (ret) {
253 IWL_ERR(priv,
254 "Error sending required beacon (%d)!\n",
255 ret);
256 return ret;
257 }
258 }
259
260 priv->start_calib = 0;
261 /*
262 * Apply the new configuration.
263 *
264 * Associated RXON doesn't clear the station table in uCode,
265 * so we don't need to restore stations etc. after this.
266 */
267 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
268 sizeof(struct iwl_rxon_cmd), &ctx->staging);
269 if (ret) {
270 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
271 return ret;
272 }
273 memcpy(active, &ctx->staging, sizeof(*active));
274
275 iwl_reprogram_ap_sta(priv, ctx);
276
277 /* IBSS beacon needs to be sent after setting assoc */
278 if (ctx->vif && (ctx->vif->type == NL80211_IFTYPE_ADHOC))
279 if (iwlagn_update_beacon(priv, ctx->vif))
280 IWL_ERR(priv, "Error sending IBSS beacon\n");
281 }
282
283 iwl_print_rx_config_cmd(priv, ctx);
284
285 iwl_init_sensitivity(priv);
286
287 /*
288 * If we issue a new RXON command which required a tune then we must
289 * send a new TXPOWER command or we won't be able to Tx any frames.
290 *
291 * FIXME: which RXON requires a tune? Can we optimise this out in
292 * some cases?
293 */
294 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
295 if (ret) {
296 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
297 return ret;
298 }
299
300 return 0;
301}
302
303int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed)
304{
305 struct iwl_priv *priv = hw->priv;
306 struct iwl_rxon_context *ctx;
307 struct ieee80211_conf *conf = &hw->conf;
308 struct ieee80211_channel *channel = conf->channel;
309 const struct iwl_channel_info *ch_info;
310 int ret = 0;
311 bool ht_changed[NUM_IWL_RXON_CTX] = {};
312
313 IWL_DEBUG_MAC80211(priv, "changed %#x", changed);
314
315 mutex_lock(&priv->mutex);
316
317 if (unlikely(test_bit(STATUS_SCANNING, &priv->status))) {
318 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
319 goto out;
320 }
321
322 if (!iwl_is_ready(priv)) {
323 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
324 goto out;
325 }
326
327 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
328 IEEE80211_CONF_CHANGE_CHANNEL)) {
329 /* mac80211 uses static for non-HT which is what we want */
330 priv->current_ht_config.smps = conf->smps_mode;
331
332 /*
333 * Recalculate chain counts.
334 *
335 * If monitor mode is enabled then mac80211 will
336 * set up the SM PS mode to OFF if an HT channel is
337 * configured.
338 */
339 if (priv->cfg->ops->hcmd->set_rxon_chain)
340 for_each_context(priv, ctx)
341 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
342 }
343
344 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
345 unsigned long flags;
346
347 ch_info = iwl_get_channel_info(priv, channel->band,
348 channel->hw_value);
349 if (!is_channel_valid(ch_info)) {
350 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
351 ret = -EINVAL;
352 goto out;
353 }
354
355 spin_lock_irqsave(&priv->lock, flags);
356
357 for_each_context(priv, ctx) {
358 /* Configure HT40 channels */
359 if (ctx->ht.enabled != conf_is_ht(conf)) {
360 ctx->ht.enabled = conf_is_ht(conf);
361 ht_changed[ctx->ctxid] = true;
362 }
363
364 if (ctx->ht.enabled) {
365 if (conf_is_ht40_minus(conf)) {
366 ctx->ht.extension_chan_offset =
367 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
368 ctx->ht.is_40mhz = true;
369 } else if (conf_is_ht40_plus(conf)) {
370 ctx->ht.extension_chan_offset =
371 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
372 ctx->ht.is_40mhz = true;
373 } else {
374 ctx->ht.extension_chan_offset =
375 IEEE80211_HT_PARAM_CHA_SEC_NONE;
376 ctx->ht.is_40mhz = false;
377 }
378 } else
379 ctx->ht.is_40mhz = false;
380
381 /*
382 * Default to no protection. Protection mode will
383 * later be set from BSS config in iwl_ht_conf
384 */
385 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
386
387 /* if we are switching from ht to 2.4 clear flags
388 * from any ht related info since 2.4 does not
389 * support ht */
390 if (le16_to_cpu(ctx->staging.channel) !=
391 channel->hw_value)
392 ctx->staging.flags = 0;
393
394 iwl_set_rxon_channel(priv, channel, ctx);
395 iwl_set_rxon_ht(priv, &priv->current_ht_config);
396
397 iwl_set_flags_for_band(priv, ctx, channel->band,
398 ctx->vif);
399 }
400
401 spin_unlock_irqrestore(&priv->lock, flags);
402
403 iwl_update_bcast_stations(priv);
404
405 /*
406 * The list of supported rates and rate mask can be different
407 * for each band; since the band may have changed, reset
408 * the rate mask to what mac80211 lists.
409 */
410 iwl_set_rate(priv);
411 }
412
413 if (changed & (IEEE80211_CONF_CHANGE_PS |
414 IEEE80211_CONF_CHANGE_IDLE)) {
415 ret = iwl_power_update_mode(priv, false);
416 if (ret)
417 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
418 }
419
420 if (changed & IEEE80211_CONF_CHANGE_POWER) {
421 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
422 priv->tx_power_user_lmt, conf->power_level);
423
424 iwl_set_tx_power(priv, conf->power_level, false);
425 }
426
427 for_each_context(priv, ctx) {
428 if (!memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
429 continue;
430 iwlagn_commit_rxon(priv, ctx);
431 if (ht_changed[ctx->ctxid])
432 iwlagn_update_qos(priv, ctx);
433 }
434 out:
435 mutex_unlock(&priv->mutex);
436 return ret;
437}
438
439static void iwlagn_check_needed_chains(struct iwl_priv *priv,
440 struct iwl_rxon_context *ctx,
441 struct ieee80211_bss_conf *bss_conf)
442{
443 struct ieee80211_vif *vif = ctx->vif;
444 struct iwl_rxon_context *tmp;
445 struct ieee80211_sta *sta;
446 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
447 bool need_multiple;
448
449 lockdep_assert_held(&priv->mutex);
450
451 switch (vif->type) {
452 case NL80211_IFTYPE_STATION:
453 rcu_read_lock();
454 sta = ieee80211_find_sta(vif, bss_conf->bssid);
455 if (sta) {
456 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
457 int maxstreams;
458
459 maxstreams = (ht_cap->mcs.tx_params &
460 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
461 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
462 maxstreams += 1;
463
464 need_multiple = true;
465
466 if ((ht_cap->mcs.rx_mask[1] == 0) &&
467 (ht_cap->mcs.rx_mask[2] == 0))
468 need_multiple = false;
469 if (maxstreams <= 1)
470 need_multiple = false;
471 } else {
472 /*
473 * If at all, this can only happen through a race
474 * when the AP disconnects us while we're still
 475 * setting up the connection; in that case mac80211
 476 * will soon tell us about it.
477 */
478 need_multiple = false;
479 }
480 rcu_read_unlock();
481 break;
482 case NL80211_IFTYPE_ADHOC:
483 /* currently */
484 need_multiple = false;
485 break;
486 default:
487 /* only AP really */
488 need_multiple = true;
489 break;
490 }
491
492 ctx->ht_need_multiple_chains = need_multiple;
493
494 if (!need_multiple) {
495 /* check all contexts */
496 for_each_context(priv, tmp) {
497 if (!tmp->vif)
498 continue;
499 if (tmp->ht_need_multiple_chains) {
500 need_multiple = true;
501 break;
502 }
503 }
504 }
505
506 ht_conf->single_chain_sufficient = !need_multiple;
507}
508
509void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
510 struct ieee80211_vif *vif,
511 struct ieee80211_bss_conf *bss_conf,
512 u32 changes)
513{
514 struct iwl_priv *priv = hw->priv;
515 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
516 int ret;
517 bool force = false;
518
519 mutex_lock(&priv->mutex);
520
521 if (unlikely(!iwl_is_ready(priv))) {
522 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
523 mutex_unlock(&priv->mutex);
524 return;
525 }
526
527 if (unlikely(!ctx->vif)) {
528 IWL_DEBUG_MAC80211(priv, "leave - vif is NULL\n");
529 mutex_unlock(&priv->mutex);
530 return;
531 }
532
533 if (changes & BSS_CHANGED_BEACON_INT)
534 force = true;
535
536 if (changes & BSS_CHANGED_QOS) {
537 ctx->qos_data.qos_active = bss_conf->qos;
538 iwlagn_update_qos(priv, ctx);
539 }
540
541 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
542 if (vif->bss_conf.use_short_preamble)
543 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
544 else
545 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
546
547 if (changes & BSS_CHANGED_ASSOC) {
548 if (bss_conf->assoc) {
549 iwl_led_associate(priv);
550 priv->timestamp = bss_conf->timestamp;
551 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
552 } else {
553 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
554 iwl_led_disassociate(priv);
555 }
556 }
557
558 if (ctx->ht.enabled) {
559 ctx->ht.protection = bss_conf->ht_operation_mode &
560 IEEE80211_HT_OP_MODE_PROTECTION;
561 ctx->ht.non_gf_sta_present = !!(bss_conf->ht_operation_mode &
562 IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
563 iwlagn_check_needed_chains(priv, ctx, bss_conf);
564 iwl_set_rxon_ht(priv, &priv->current_ht_config);
565 }
566
567 if (priv->cfg->ops->hcmd->set_rxon_chain)
568 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
569
570 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
571 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
572 else
573 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
574
575 if (bss_conf->use_cts_prot)
576 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
577 else
578 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
579
580 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
581
582 if (vif->type == NL80211_IFTYPE_AP ||
583 vif->type == NL80211_IFTYPE_ADHOC) {
584 if (vif->bss_conf.enable_beacon) {
585 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
586 priv->beacon_ctx = ctx;
587 } else {
588 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
589 priv->beacon_ctx = NULL;
590 }
591 }
592
593 if (force || memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
594 iwlagn_commit_rxon(priv, ctx);
595
596 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
597 /*
598 * The chain noise calibration will enable PM upon
599 * completion. If calibration has already been run
600 * then we need to enable power management here.
601 */
602 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
603 iwl_power_update_mode(priv, false);
604
605 /* Enable RX differential gain and sensitivity calibrations */
606 iwl_chain_noise_reset(priv);
607 priv->start_calib = 1;
608 }
609
610 if (changes & BSS_CHANGED_IBSS) {
611 ret = iwlagn_manage_ibss_station(priv, vif,
612 bss_conf->ibss_joined);
613 if (ret)
614 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
615 bss_conf->ibss_joined ? "add" : "remove",
616 bss_conf->bssid);
617 }
618
619 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_ADHOC &&
620 priv->beacon_ctx) {
621 if (iwlagn_update_beacon(priv, vif))
622 IWL_ERR(priv, "Error sending IBSS beacon\n");
623 }
624
625 mutex_unlock(&priv->mutex);
626}
627
628void iwlagn_post_scan(struct iwl_priv *priv)
629{
630 struct iwl_rxon_context *ctx;
631
632 /*
633 * Since setting the RXON may have been deferred while
634 * performing the scan, fire one off if needed
635 */
636 for_each_context(priv, ctx)
637 if (memcmp(&ctx->staging, &ctx->active, sizeof(ctx->staging)))
638 iwlagn_commit_rxon(priv, ctx);
639
640 if (priv->cfg->ops->hcmd->set_pan_params)
641 priv->cfg->ops->hcmd->set_pan_params(priv);
642}
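The new file above centralises the RXON commit sequencing. Reduced to its essentials, the ordering it enforces looks roughly like the sketch below; the stub helpers are stand-ins, not the driver's API:

#include <stdbool.h>
#include <string.h>

struct rxon { bool assoc; };

static int send_unassoc(struct rxon *r) { r->assoc = false; return 0; }
static int send_timing(void)            { return 0; }
static int send_assoc(struct rxon *r)   { r->assoc = true;  return 0; }

static int commit(struct rxon *active, struct rxon *staging)
{
        int ret;

        /* 1. Always drop association first, so filters and stations can
         *    be set up against the new BSSID. */
        if (active->assoc || !staging->assoc) {
                ret = send_unassoc(active);
                if (ret)
                        return ret;
        }

        /* 2. Timing must reach the device before an associated RXON. */
        ret = send_timing();
        if (ret)
                return ret;

        /* 3. Only then commit the associated configuration. */
        if (staging->assoc) {
                ret = send_assoc(active);
                if (ret)
                        return ret;
        }

        memcpy(active, staging, sizeof(*active));
        return 0;
}

int main(void)
{
        struct rxon active = { false }, staging = { true };

        return commit(&active, &staging);
}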
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index 35a30d2e0734..35f085ac336b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -684,7 +684,7 @@ int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
684 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC); 684 return iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
685} 685}
686 686
687void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id) 687static void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id)
688{ 688{
689 unsigned long flags; 689 unsigned long flags;
690 690
@@ -714,3 +714,33 @@ void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt)
714 spin_unlock_irqrestore(&priv->sta_lock, flags); 714 spin_unlock_irqrestore(&priv->sta_lock, flags);
715 715
716} 716}
717
718void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
719 struct ieee80211_vif *vif,
720 enum sta_notify_cmd cmd,
721 struct ieee80211_sta *sta)
722{
723 struct iwl_priv *priv = hw->priv;
724 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
725 int sta_id;
726
727 switch (cmd) {
728 case STA_NOTIFY_SLEEP:
729 WARN_ON(!sta_priv->client);
730 sta_priv->asleep = true;
731 if (atomic_read(&sta_priv->pending_frames) > 0)
732 ieee80211_sta_block_awake(hw, sta, true);
733 break;
734 case STA_NOTIFY_AWAKE:
735 WARN_ON(!sta_priv->client);
736 if (!sta_priv->asleep)
737 break;
738 sta_priv->asleep = false;
739 sta_id = iwl_sta_id(sta);
740 if (sta_id != IWL_INVALID_STATION)
741 iwl_sta_modify_ps_wake(priv, sta_id);
742 break;
743 default:
744 break;
745 }
746}
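A condensed, standalone illustration of the sleep/awake handling added above; the types and helpers are stand-ins for the mac80211 and driver equivalents:

#include <stdbool.h>
#include <stdio.h>

struct sta_priv {
        bool asleep;
        int  pending_frames;
};

enum notify { NOTIFY_SLEEP, NOTIFY_AWAKE };

static void block_awake(bool block) { printf("block=%d\n", block); }
static void fw_wake(void)           { printf("wake station in fw\n"); }

static void sta_notify(struct sta_priv *sp, enum notify cmd)
{
        switch (cmd) {
        case NOTIFY_SLEEP:
                sp->asleep = true;
                if (sp->pending_frames > 0)
                        block_awake(true);  /* drain in-flight frames first */
                break;
        case NOTIFY_AWAKE:
                if (!sp->asleep)
                        break;              /* spurious notification */
                sp->asleep = false;
                fw_wake();
                break;
        }
}

int main(void)
{
        struct sta_priv sp = { .pending_frames = 2 };

        sta_notify(&sp, NOTIFY_SLEEP);
        sta_notify(&sp, NOTIFY_AWAKE);
        return 0;
}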
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
index 2b078a995729..24a11b8f73bc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-tx.c
@@ -67,8 +67,14 @@
67 */ 67 */
68 68
69static const u8 tid_to_ac[] = { 69static const u8 tid_to_ac[] = {
70 /* this matches the mac80211 numbers */ 70 IEEE80211_AC_BE,
71 2, 3, 3, 2, 1, 1, 0, 0 71 IEEE80211_AC_BK,
72 IEEE80211_AC_BK,
73 IEEE80211_AC_BE,
74 IEEE80211_AC_VI,
75 IEEE80211_AC_VI,
76 IEEE80211_AC_VO,
77 IEEE80211_AC_VO
72}; 78};
73 79
74static inline int get_ac_from_tid(u16 tid) 80static inline int get_ac_from_tid(u16 tid)
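The table above replaces bare numbers with the mac80211 AC constants it was always meant to mirror (BE, BK, BK, BE, VI, VI, VO, VO). A self-contained version of the same lookup, using an illustrative enum in mac80211's numbering (VO=0, VI=1, BE=2, BK=3) and a bounds check that is an assumption of this sketch, not the driver's behaviour:

#include <stdio.h>

enum ac { AC_VO, AC_VI, AC_BE, AC_BK };

static const enum ac tid_to_ac[] = {
        AC_BE, AC_BK, AC_BK, AC_BE, AC_VI, AC_VI, AC_VO, AC_VO
};

static inline enum ac get_ac_from_tid(unsigned int tid)
{
        if (tid < sizeof(tid_to_ac) / sizeof(tid_to_ac[0]))
                return tid_to_ac[tid];
        return AC_BE;   /* invalid TID: fall back to best effort */
}

int main(void)
{
        printf("tid 5 -> AC %d\n", get_ac_from_tid(5));
        return 0;
}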
@@ -518,11 +524,11 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
518 struct iwl_cmd_meta *out_meta; 524 struct iwl_cmd_meta *out_meta;
519 struct iwl_tx_cmd *tx_cmd; 525 struct iwl_tx_cmd *tx_cmd;
520 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 526 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
521 int swq_id, txq_id; 527 int txq_id;
522 dma_addr_t phys_addr; 528 dma_addr_t phys_addr;
523 dma_addr_t txcmd_phys; 529 dma_addr_t txcmd_phys;
524 dma_addr_t scratch_phys; 530 dma_addr_t scratch_phys;
525 u16 len, len_org, firstlen, secondlen; 531 u16 len, firstlen, secondlen;
526 u16 seq_number = 0; 532 u16 seq_number = 0;
527 __le16 fc; 533 __le16 fc;
528 u8 hdr_len; 534 u8 hdr_len;
@@ -531,6 +537,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
531 u8 tid = 0; 537 u8 tid = 0;
532 u8 *qc = NULL; 538 u8 *qc = NULL;
533 unsigned long flags; 539 unsigned long flags;
540 bool is_agg = false;
534 541
535 if (info->control.vif) 542 if (info->control.vif)
536 ctx = iwl_rxon_ctx_from_vif(info->control.vif); 543 ctx = iwl_rxon_ctx_from_vif(info->control.vif);
@@ -567,8 +574,8 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
567 if (sta) 574 if (sta)
568 sta_priv = (void *)sta->drv_priv; 575 sta_priv = (void *)sta->drv_priv;
569 576
570 if (sta_priv && sta_priv->asleep) { 577 if (sta_priv && sta_priv->asleep &&
571 WARN_ON(!(info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)); 578 (info->flags & IEEE80211_TX_CTL_PSPOLL_RESPONSE)) {
572 /* 579 /*
573 * This sends an asynchronous command to the device, 580 * This sends an asynchronous command to the device,
574 * but we can rely on it being processed before the 581 * but we can rely on it being processed before the
@@ -616,11 +623,11 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
616 if (info->flags & IEEE80211_TX_CTL_AMPDU && 623 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
617 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) { 624 priv->stations[sta_id].tid[tid].agg.state == IWL_AGG_ON) {
618 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id; 625 txq_id = priv->stations[sta_id].tid[tid].agg.txq_id;
626 is_agg = true;
619 } 627 }
620 } 628 }
621 629
622 txq = &priv->txq[txq_id]; 630 txq = &priv->txq[txq_id];
623 swq_id = txq->swq_id;
624 q = &txq->q; 631 q = &txq->q;
625 632
626 if (unlikely(iwl_queue_space(q) < q->high_mark)) { 633 if (unlikely(iwl_queue_space(q) < q->high_mark)) {
@@ -687,30 +694,23 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
687 */ 694 */
688 len = sizeof(struct iwl_tx_cmd) + 695 len = sizeof(struct iwl_tx_cmd) +
689 sizeof(struct iwl_cmd_header) + hdr_len; 696 sizeof(struct iwl_cmd_header) + hdr_len;
690 697 firstlen = (len + 3) & ~3;
691 len_org = len;
692 firstlen = len = (len + 3) & ~3;
693
694 if (len_org != len)
695 len_org = 1;
696 else
697 len_org = 0;
698 698
699 /* Tell NIC about any 2-byte padding after MAC header */ 699 /* Tell NIC about any 2-byte padding after MAC header */
700 if (len_org) 700 if (firstlen != len)
701 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK; 701 tx_cmd->tx_flags |= TX_CMD_FLG_MH_PAD_MSK;
702 702
703 /* Physical address of this Tx command's header (not MAC header!), 703 /* Physical address of this Tx command's header (not MAC header!),
704 * within command buffer array. */ 704 * within command buffer array. */
705 txcmd_phys = pci_map_single(priv->pci_dev, 705 txcmd_phys = pci_map_single(priv->pci_dev,
706 &out_cmd->hdr, len, 706 &out_cmd->hdr, firstlen,
707 PCI_DMA_BIDIRECTIONAL); 707 PCI_DMA_BIDIRECTIONAL);
708 dma_unmap_addr_set(out_meta, mapping, txcmd_phys); 708 dma_unmap_addr_set(out_meta, mapping, txcmd_phys);
709 dma_unmap_len_set(out_meta, len, len); 709 dma_unmap_len_set(out_meta, len, firstlen);
710 /* Add buffer containing Tx command and MAC(!) header to TFD's 710 /* Add buffer containing Tx command and MAC(!) header to TFD's
711 * first entry */ 711 * first entry */
712 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 712 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
713 txcmd_phys, len, 1, 0); 713 txcmd_phys, firstlen, 1, 0);
714 714
715 if (!ieee80211_has_morefrags(hdr->frame_control)) { 715 if (!ieee80211_has_morefrags(hdr->frame_control)) {
716 txq->need_update = 1; 716 txq->need_update = 1;
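The hunk above collapses the old len_org bookkeeping into a single rounded length. The arithmetic it keeps is just 4-byte alignment of the Tx command plus MAC header, with a flag telling the device a pad was inserted. Sketched standalone below; the flag bit and sizes are placeholders:

#include <stdint.h>
#include <stdio.h>

#define TX_CMD_FLG_MH_PAD (1u << 20)   /* stand-in for the real bit */

int main(void)
{
        uint16_t len = 12 + 4 + 26;            /* cmd hdr + tx cmd + MAC hdr */
        uint16_t firstlen = (len + 3) & ~3;    /* round up to 4 bytes */
        uint32_t tx_flags = 0;

        if (firstlen != len)
                tx_flags |= TX_CMD_FLG_MH_PAD; /* 2-byte pad present */

        printf("len=%u firstlen=%u pad=%u\n", len, firstlen, firstlen - len);
        return 0;
}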
@@ -721,23 +721,21 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
721 721
722 /* Set up TFD's 2nd entry to point directly to remainder of skb, 722 /* Set up TFD's 2nd entry to point directly to remainder of skb,
723 * if any (802.11 null frames have no payload). */ 723 * if any (802.11 null frames have no payload). */
724 secondlen = len = skb->len - hdr_len; 724 secondlen = skb->len - hdr_len;
725 if (len) { 725 if (secondlen > 0) {
726 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 726 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
727 len, PCI_DMA_TODEVICE); 727 secondlen, PCI_DMA_TODEVICE);
728 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 728 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
729 phys_addr, len, 729 phys_addr, secondlen,
730 0, 0); 730 0, 0);
731 } 731 }
732 732
733 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) + 733 scratch_phys = txcmd_phys + sizeof(struct iwl_cmd_header) +
734 offsetof(struct iwl_tx_cmd, scratch); 734 offsetof(struct iwl_tx_cmd, scratch);
735 735
736 len = sizeof(struct iwl_tx_cmd) +
737 sizeof(struct iwl_cmd_header) + hdr_len;
738 /* take back ownership of DMA buffer to enable update */ 736 /* take back ownership of DMA buffer to enable update */
739 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys, 737 pci_dma_sync_single_for_cpu(priv->pci_dev, txcmd_phys,
740 len, PCI_DMA_BIDIRECTIONAL); 738 firstlen, PCI_DMA_BIDIRECTIONAL);
741 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 739 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
742 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 740 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
743 741
@@ -753,7 +751,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
753 le16_to_cpu(tx_cmd->len)); 751 le16_to_cpu(tx_cmd->len));
754 752
755 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, 753 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
756 len, PCI_DMA_BIDIRECTIONAL); 754 firstlen, PCI_DMA_BIDIRECTIONAL);
757 755
758 trace_iwlwifi_dev_tx(priv, 756 trace_iwlwifi_dev_tx(priv,
759 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr], 757 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
@@ -773,8 +771,14 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
773 * whether or not we should update the write pointer. 771 * whether or not we should update the write pointer.
774 */ 772 */
775 773
776 /* avoid atomic ops if it isn't an associated client */ 774 /*
777 if (sta_priv && sta_priv->client) 775 * Avoid atomic ops if it isn't an associated client.
776 * Also, if this is a packet for aggregation, don't
777 * increase the counter because the ucode will stop
778 * aggregation queues when their respective station
779 * goes to sleep.
780 */
781 if (sta_priv && sta_priv->client && !is_agg)
778 atomic_inc(&sta_priv->pending_frames); 782 atomic_inc(&sta_priv->pending_frames);
779 783
780 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) { 784 if ((iwl_queue_space(q) < q->high_mark) && priv->mac80211_registered) {
@@ -784,7 +788,7 @@ int iwlagn_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
784 iwl_txq_update_write_ptr(priv, txq); 788 iwl_txq_update_write_ptr(priv, txq);
785 spin_unlock_irqrestore(&priv->lock, flags); 789 spin_unlock_irqrestore(&priv->lock, flags);
786 } else { 790 } else {
787 iwl_stop_queue(priv, txq->swq_id); 791 iwl_stop_queue(priv, txq);
788 } 792 }
789 } 793 }
790 794
@@ -1013,7 +1017,7 @@ int iwlagn_tx_agg_start(struct iwl_priv *priv, struct ieee80211_vif *vif,
1013 tid_data = &priv->stations[sta_id].tid[tid]; 1017 tid_data = &priv->stations[sta_id].tid[tid];
1014 *ssn = SEQ_TO_SN(tid_data->seq_number); 1018 *ssn = SEQ_TO_SN(tid_data->seq_number);
1015 tid_data->agg.txq_id = txq_id; 1019 tid_data->agg.txq_id = txq_id;
1016 priv->txq[txq_id].swq_id = iwl_virtual_agg_queue_num(get_ac_from_tid(tid), txq_id); 1020 iwl_set_swq_id(&priv->txq[txq_id], get_ac_from_tid(tid), txq_id);
1017 spin_unlock_irqrestore(&priv->sta_lock, flags); 1021 spin_unlock_irqrestore(&priv->sta_lock, flags);
1018 1022
1019 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo, 1023 ret = priv->cfg->ops->lib->txq_agg_enable(priv, txq_id, tx_fifo,
@@ -1153,14 +1157,15 @@ int iwlagn_txq_check_empty(struct iwl_priv *priv,
1153 return 0; 1157 return 0;
1154} 1158}
1155 1159
1156static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info) 1160static void iwlagn_non_agg_tx_status(struct iwl_priv *priv,
1161 struct iwl_rxon_context *ctx,
1162 const u8 *addr1)
1157{ 1163{
1158 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1159 struct ieee80211_sta *sta; 1164 struct ieee80211_sta *sta;
1160 struct iwl_station_priv *sta_priv; 1165 struct iwl_station_priv *sta_priv;
1161 1166
1162 rcu_read_lock(); 1167 rcu_read_lock();
1163 sta = ieee80211_find_sta(tx_info->ctx->vif, hdr->addr1); 1168 sta = ieee80211_find_sta(ctx->vif, addr1);
1164 if (sta) { 1169 if (sta) {
1165 sta_priv = (void *)sta->drv_priv; 1170 sta_priv = (void *)sta->drv_priv;
1166 /* avoid atomic ops if this isn't a client */ 1171 /* avoid atomic ops if this isn't a client */
@@ -1169,6 +1174,15 @@ static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info)
1169 ieee80211_sta_block_awake(priv->hw, sta, false); 1174 ieee80211_sta_block_awake(priv->hw, sta, false);
1170 } 1175 }
1171 rcu_read_unlock(); 1176 rcu_read_unlock();
1177}
1178
1179static void iwlagn_tx_status(struct iwl_priv *priv, struct iwl_tx_info *tx_info,
1180 bool is_agg)
1181{
1182 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) tx_info->skb->data;
1183
1184 if (!is_agg)
1185 iwlagn_non_agg_tx_status(priv, tx_info->ctx, hdr->addr1);
1172 1186
1173 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb); 1187 ieee80211_tx_status_irqsafe(priv->hw, tx_info->skb);
1174} 1188}
@@ -1193,7 +1207,8 @@ int iwlagn_tx_queue_reclaim(struct iwl_priv *priv, int txq_id, int index)
1193 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1207 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1194 1208
1195 tx_info = &txq->txb[txq->q.read_ptr]; 1209 tx_info = &txq->txb[txq->q.read_ptr];
1196 iwlagn_tx_status(priv, tx_info); 1210 iwlagn_tx_status(priv, tx_info,
1211 txq_id >= IWLAGN_FIRST_AMPDU_QUEUE);
1197 1212
1198 hdr = (struct ieee80211_hdr *)tx_info->skb->data; 1213 hdr = (struct ieee80211_hdr *)tx_info->skb->data;
1199 if (hdr && ieee80211_is_data_qos(hdr->frame_control)) 1214 if (hdr && ieee80211_is_data_qos(hdr->frame_control))
@@ -1222,7 +1237,6 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1222 int i, sh, ack; 1237 int i, sh, ack;
1223 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl); 1238 u16 seq_ctl = le16_to_cpu(ba_resp->seq_ctl);
1224 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow); 1239 u16 scd_flow = le16_to_cpu(ba_resp->scd_flow);
1225 u64 bitmap, sent_bitmap;
1226 int successes = 0; 1240 int successes = 0;
1227 struct ieee80211_tx_info *info; 1241 struct ieee80211_tx_info *info;
1228 1242
@@ -1241,40 +1255,68 @@ static int iwlagn_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1241 if (sh < 0) /* tbw something is wrong with indices */ 1255 if (sh < 0) /* tbw something is wrong with indices */
1242 sh += 0x100; 1256 sh += 0x100;
1243 1257
1244 /* don't use 64-bit values for now */
1245 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1246
1247 if (agg->frame_count > (64 - sh)) { 1258 if (agg->frame_count > (64 - sh)) {
1248 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size"); 1259 IWL_DEBUG_TX_REPLY(priv, "more frames than bitmap size");
1249 return -1; 1260 return -1;
1250 } 1261 }
1262 if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
1263 /*
 1264 * sent and acked counts are provided by the uCode;
 1265 * use them instead of working them out ourselves
1266 */
1267 if (ba_resp->txed_2_done > ba_resp->txed) {
1268 IWL_DEBUG_TX_REPLY(priv,
1269 "bogus sent(%d) and ack(%d) count\n",
1270 ba_resp->txed, ba_resp->txed_2_done);
1271 /*
 1272 * clamp txed down to txed_2_done,
 1273 * so the bogus count won't skew rate scaling
1274 */
1275 ba_resp->txed = ba_resp->txed_2_done;
1276 }
1277 IWL_DEBUG_HT(priv, "agg frames sent:%d, acked:%d\n",
1278 ba_resp->txed, ba_resp->txed_2_done);
1279 } else {
1280 u64 bitmap, sent_bitmap;
1281
1282 /* don't use 64-bit values for now */
1283 bitmap = le64_to_cpu(ba_resp->bitmap) >> sh;
1284
1285 /* check for success or failure according to the
1286 * transmitted bitmap and block-ack bitmap */
1287 sent_bitmap = bitmap & agg->bitmap;
1288
1289 /* For each frame attempted in aggregation,
1290 * update driver's record of tx frame's status. */
1291 i = 0;
1292 while (sent_bitmap) {
1293 ack = sent_bitmap & 1ULL;
1294 successes += ack;
1295 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1296 ack ? "ACK" : "NACK", i,
1297 (agg->start_idx + i) & 0xff,
1298 agg->start_idx + i);
1299 sent_bitmap >>= 1;
1300 ++i;
1301 }
1251 1302
1252 /* check for success or failure according to the 1303 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n",
1253 * transmitted bitmap and block-ack bitmap */ 1304 (unsigned long long)bitmap);
1254 sent_bitmap = bitmap & agg->bitmap;
1255
1256 /* For each frame attempted in aggregation,
1257 * update driver's record of tx frame's status. */
1258 i = 0;
1259 while (sent_bitmap) {
1260 ack = sent_bitmap & 1ULL;
1261 successes += ack;
1262 IWL_DEBUG_TX_REPLY(priv, "%s ON i=%d idx=%d raw=%d\n",
1263 ack ? "ACK" : "NACK", i, (agg->start_idx + i) & 0xff,
1264 agg->start_idx + i);
1265 sent_bitmap >>= 1;
1266 ++i;
1267 } 1305 }
1268 1306
1269 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb); 1307 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb);
1270 memset(&info->status, 0, sizeof(info->status)); 1308 memset(&info->status, 0, sizeof(info->status));
1271 info->flags |= IEEE80211_TX_STAT_ACK; 1309 info->flags |= IEEE80211_TX_STAT_ACK;
1272 info->flags |= IEEE80211_TX_STAT_AMPDU; 1310 info->flags |= IEEE80211_TX_STAT_AMPDU;
1273 info->status.ampdu_ack_len = successes; 1311 if (!priv->cfg->base_params->no_agg_framecnt_info && ba_resp->txed) {
1274 info->status.ampdu_len = agg->frame_count; 1312 info->status.ampdu_ack_len = ba_resp->txed_2_done;
1275 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info); 1313 info->status.ampdu_len = ba_resp->txed;
1276 1314
1277 IWL_DEBUG_TX_REPLY(priv, "Bitmap %llx\n", (unsigned long long)bitmap); 1315 } else {
1316 info->status.ampdu_ack_len = successes;
1317 info->status.ampdu_len = agg->frame_count;
1318 }
1319 iwlagn_hwrate_to_tx_control(priv, agg->rate_n_flags, info);
1278 1320
1279 return 0; 1321 return 0;
1280} 1322}
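When the firmware does not report sent/acked counts, the fallback path above still derives the number of successes from the bitmaps: shift the block-ack bitmap by the sequence offset, intersect it with the driver's record of transmitted frames, and count the set bits. A standalone sketch of that counting (the helper name is invented):

#include <stdint.h>
#include <stdio.h>

static int count_ba_successes(uint64_t ba_bitmap, uint64_t agg_bitmap,
                              int shift)
{
        uint64_t sent = (ba_bitmap >> shift) & agg_bitmap;
        int successes = 0;

        while (sent) {
                successes += sent & 1ULL;
                sent >>= 1;
        }
        return successes;
}

int main(void)
{
        /* 4 frames attempted, frames 0, 1 and 3 acked -> 3 successes */
        printf("%d\n", count_ba_successes(0x0bULL, 0x0fULL, 0));
        return 0;
}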
@@ -1385,7 +1427,7 @@ void iwlagn_rx_reply_compressed_ba(struct iwl_priv *priv,
1385 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) && 1427 if ((iwl_queue_space(&txq->q) > txq->q.low_mark) &&
1386 priv->mac80211_registered && 1428 priv->mac80211_registered &&
1387 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA)) 1429 (agg->state != IWL_EMPTYING_HW_QUEUE_DELBA))
1388 iwl_wake_queue(priv, txq->swq_id); 1430 iwl_wake_queue(priv, txq);
1389 1431
1390 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow); 1432 iwlagn_txq_check_empty(priv, sta_id, tid, scd_flow);
1391 } 1433 }
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
index 703621107dac..24dabcd2a36c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-ucode.c
@@ -40,30 +40,36 @@
40#include "iwl-agn.h" 40#include "iwl-agn.h"
41#include "iwl-agn-calib.h" 41#include "iwl-agn-calib.h"
42 42
43static const s8 iwlagn_default_queue_to_tx_fifo[] = { 43#define IWL_AC_UNSET -1
44 IWL_TX_FIFO_VO, 44
45 IWL_TX_FIFO_VI, 45struct queue_to_fifo_ac {
46 IWL_TX_FIFO_BE, 46 s8 fifo, ac;
47 IWL_TX_FIFO_BK, 47};
48 IWLAGN_CMD_FIFO_NUM, 48
49 IWL_TX_FIFO_UNUSED, 49static const struct queue_to_fifo_ac iwlagn_default_queue_to_tx_fifo[] = {
50 IWL_TX_FIFO_UNUSED, 50 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
51 IWL_TX_FIFO_UNUSED, 51 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
52 IWL_TX_FIFO_UNUSED, 52 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
53 IWL_TX_FIFO_UNUSED, 53 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
54 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
55 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
56 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
57 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
58 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
59 { IWL_TX_FIFO_UNUSED, IWL_AC_UNSET, },
54}; 60};
55 61
56static const s8 iwlagn_ipan_queue_to_tx_fifo[] = { 62static const struct queue_to_fifo_ac iwlagn_ipan_queue_to_tx_fifo[] = {
57 IWL_TX_FIFO_VO, 63 { IWL_TX_FIFO_VO, IEEE80211_AC_VO, },
58 IWL_TX_FIFO_VI, 64 { IWL_TX_FIFO_VI, IEEE80211_AC_VI, },
59 IWL_TX_FIFO_BE, 65 { IWL_TX_FIFO_BE, IEEE80211_AC_BE, },
60 IWL_TX_FIFO_BK, 66 { IWL_TX_FIFO_BK, IEEE80211_AC_BK, },
61 IWL_TX_FIFO_BK_IPAN, 67 { IWL_TX_FIFO_BK_IPAN, IEEE80211_AC_BK, },
62 IWL_TX_FIFO_BE_IPAN, 68 { IWL_TX_FIFO_BE_IPAN, IEEE80211_AC_BE, },
63 IWL_TX_FIFO_VI_IPAN, 69 { IWL_TX_FIFO_VI_IPAN, IEEE80211_AC_VI, },
64 IWL_TX_FIFO_VO_IPAN, 70 { IWL_TX_FIFO_VO_IPAN, IEEE80211_AC_VO, },
65 IWL_TX_FIFO_BE_IPAN, 71 { IWL_TX_FIFO_BE_IPAN, 2, },
66 IWLAGN_CMD_FIFO_NUM, 72 { IWLAGN_CMD_FIFO_NUM, IWL_AC_UNSET, },
67}; 73};
68 74
69static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = { 75static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
@@ -429,7 +435,7 @@ void iwlagn_send_bt_env(struct iwl_priv *priv, u8 action, u8 type)
429 435
430int iwlagn_alive_notify(struct iwl_priv *priv) 436int iwlagn_alive_notify(struct iwl_priv *priv)
431{ 437{
432 const s8 *queues; 438 const struct queue_to_fifo_ac *queue_to_fifo;
433 u32 a; 439 u32 a;
434 unsigned long flags; 440 unsigned long flags;
435 int i, chan; 441 int i, chan;
@@ -492,9 +498,9 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
492 498
493 /* map queues to FIFOs */ 499 /* map queues to FIFOs */
494 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)) 500 if (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS))
495 queues = iwlagn_ipan_queue_to_tx_fifo; 501 queue_to_fifo = iwlagn_ipan_queue_to_tx_fifo;
496 else 502 else
497 queues = iwlagn_default_queue_to_tx_fifo; 503 queue_to_fifo = iwlagn_default_queue_to_tx_fifo;
498 504
499 iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0); 505 iwlagn_set_wr_ptrs(priv, priv->cmd_queue, 0);
500 506
@@ -510,18 +516,25 @@ int iwlagn_alive_notify(struct iwl_priv *priv)
510 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10); 516 BUILD_BUG_ON(ARRAY_SIZE(iwlagn_ipan_queue_to_tx_fifo) != 10);
511 517
512 for (i = 0; i < 10; i++) { 518 for (i = 0; i < 10; i++) {
513 int ac = queues[i]; 519 int fifo = queue_to_fifo[i].fifo;
520 int ac = queue_to_fifo[i].ac;
514 521
515 iwl_txq_ctx_activate(priv, i); 522 iwl_txq_ctx_activate(priv, i);
516 523
517 if (ac == IWL_TX_FIFO_UNUSED) 524 if (fifo == IWL_TX_FIFO_UNUSED)
518 continue; 525 continue;
519 526
520 iwlagn_tx_queue_set_status(priv, &priv->txq[i], ac, 0); 527 if (ac != IWL_AC_UNSET)
528 iwl_set_swq_id(&priv->txq[i], ac, i);
529 iwlagn_tx_queue_set_status(priv, &priv->txq[i], fifo, 0);
521 } 530 }
522 531
523 spin_unlock_irqrestore(&priv->lock, flags); 532 spin_unlock_irqrestore(&priv->lock, flags);
524 533
534 /* Enable L1-Active */
535 iwl_clear_bits_prph(priv, APMG_PCIDEV_STT_REG,
536 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
537
525 iwlagn_send_wimax_coex(priv); 538 iwlagn_send_wimax_coex(priv);
526 539
527 iwlagn_set_Xtal_calib(priv); 540 iwlagn_set_Xtal_calib(priv);
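The queue_to_fifo_ac change above pairs each TX queue with both its hardware FIFO and the mac80211 access category it serves, so unused queues and queues with no AC (the command queue) are skipped explicitly. A reduced standalone model of how such a table is walked; the FIFO numbers and AC values here are placeholders:

#include <stdio.h>

#define FIFO_UNUSED (-1)
#define AC_UNSET    (-1)

struct queue_to_fifo_ac {
        signed char fifo, ac;
};

static const struct queue_to_fifo_ac default_map[] = {
        { 0 /* VO fifo */,  0 /* AC_VO */ },
        { 1 /* VI fifo */,  1 /* AC_VI */ },
        { 2 /* BE fifo */,  2 /* AC_BE */ },
        { 3 /* BK fifo */,  3 /* AC_BK */ },
        { 7 /* cmd fifo */, AC_UNSET     },
        { FIFO_UNUSED,      AC_UNSET     },
};

int main(void)
{
        size_t i;

        for (i = 0; i < sizeof(default_map) / sizeof(default_map[0]); i++) {
                if (default_map[i].fifo == FIFO_UNUSED)
                        continue;               /* queue not wired up */
                if (default_map[i].ac != AC_UNSET)
                        printf("queue %zu serves AC %d\n", i, default_map[i].ac);
                printf("queue %zu -> FIFO %d\n", i, default_map[i].fifo);
        }
        return 0;
}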
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 9b912c0b6e59..36335b1b54d4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -90,170 +90,6 @@ MODULE_ALIAS("iwl4965");
90static int iwlagn_ant_coupling; 90static int iwlagn_ant_coupling;
91static bool iwlagn_bt_ch_announce = 1; 91static bool iwlagn_bt_ch_announce = 1;
92 92
93/**
94 * iwlagn_commit_rxon - commit staging_rxon to hardware
95 *
96 * The RXON command in staging_rxon is committed to the hardware and
97 * the active_rxon structure is updated with the new data. This
98 * function correctly transitions out of the RXON_ASSOC_MSK state if
99 * a HW tune is required based on the RXON structure changes.
100 */
101int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
102{
103 /* cast away the const for active_rxon in this function */
104 struct iwl_rxon_cmd *active_rxon = (void *)&ctx->active;
105 int ret;
106 bool new_assoc =
107 !!(ctx->staging.filter_flags & RXON_FILTER_ASSOC_MSK);
108 bool old_assoc = !!(ctx->active.filter_flags & RXON_FILTER_ASSOC_MSK);
109
110 if (!iwl_is_alive(priv))
111 return -EBUSY;
112
113 if (!ctx->is_active)
114 return 0;
115
116 /* always get timestamp with Rx frame */
117 ctx->staging.flags |= RXON_FLG_TSF2HOST_MSK;
118
119 ret = iwl_check_rxon_cmd(priv, ctx);
120 if (ret) {
121 IWL_ERR(priv, "Invalid RXON configuration. Not committing.\n");
122 return -EINVAL;
123 }
124
125 /*
126 * receive commit_rxon request
127 * abort any previous channel switch if still in process
128 */
129 if (priv->switch_rxon.switch_in_progress &&
130 (priv->switch_rxon.channel != ctx->staging.channel)) {
131 IWL_DEBUG_11H(priv, "abort channel switch on %d\n",
132 le16_to_cpu(priv->switch_rxon.channel));
133 iwl_chswitch_done(priv, false);
134 }
135
136 /* If we don't need to send a full RXON, we can use
137 * iwl_rxon_assoc_cmd which is used to reconfigure filter
138 * and other flags for the current radio configuration. */
139 if (!iwl_full_rxon_required(priv, ctx)) {
140 ret = iwl_send_rxon_assoc(priv, ctx);
141 if (ret) {
142 IWL_ERR(priv, "Error setting RXON_ASSOC (%d)\n", ret);
143 return ret;
144 }
145
146 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
147 iwl_print_rx_config_cmd(priv, ctx);
148 return 0;
149 }
150
151 /* If we are currently associated and the new config requires
152 * an RXON_ASSOC and the new config wants the associated mask enabled,
153 * we must clear the associated from the active configuration
154 * before we apply the new config */
155 if (iwl_is_associated_ctx(ctx) && new_assoc) {
156 IWL_DEBUG_INFO(priv, "Toggling associated bit on current RXON\n");
157 active_rxon->filter_flags &= ~RXON_FILTER_ASSOC_MSK;
158
159 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
160 sizeof(struct iwl_rxon_cmd),
161 active_rxon);
162
163 /* If the mask clearing failed then we set
164 * active_rxon back to what it was previously */
165 if (ret) {
166 active_rxon->filter_flags |= RXON_FILTER_ASSOC_MSK;
167 IWL_ERR(priv, "Error clearing ASSOC_MSK (%d)\n", ret);
168 return ret;
169 }
170 iwl_clear_ucode_stations(priv, ctx);
171 iwl_restore_stations(priv, ctx);
172 ret = iwl_restore_default_wep_keys(priv, ctx);
173 if (ret) {
174 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
175 return ret;
176 }
177 }
178
179 IWL_DEBUG_INFO(priv, "Sending RXON\n"
180 "* with%s RXON_FILTER_ASSOC_MSK\n"
181 "* channel = %d\n"
182 "* bssid = %pM\n",
183 (new_assoc ? "" : "out"),
184 le16_to_cpu(ctx->staging.channel),
185 ctx->staging.bssid_addr);
186
187 iwl_set_rxon_hwcrypto(priv, ctx, !priv->cfg->mod_params->sw_crypto);
188
189 if (!old_assoc) {
190 /*
191 * First of all, before setting associated, we need to
192 * send RXON timing so the device knows about the DTIM
193 * period and other timing values
194 */
195 ret = iwl_send_rxon_timing(priv, ctx);
196 if (ret) {
197 IWL_ERR(priv, "Error setting RXON timing!\n");
198 return ret;
199 }
200 }
201
202 if (priv->cfg->ops->hcmd->set_pan_params) {
203 ret = priv->cfg->ops->hcmd->set_pan_params(priv);
204 if (ret)
205 return ret;
206 }
207
208 /* Apply the new configuration
209 * RXON unassoc clears the station table in uCode so restoration of
210 * stations is needed after it (the RXON command) completes
211 */
212 if (!new_assoc) {
213 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
214 sizeof(struct iwl_rxon_cmd), &ctx->staging);
215 if (ret) {
216 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
217 return ret;
218 }
219 IWL_DEBUG_INFO(priv, "Return from !new_assoc RXON.\n");
220 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
221 iwl_clear_ucode_stations(priv, ctx);
222 iwl_restore_stations(priv, ctx);
223 ret = iwl_restore_default_wep_keys(priv, ctx);
224 if (ret) {
225 IWL_ERR(priv, "Failed to restore WEP keys (%d)\n", ret);
226 return ret;
227 }
228 }
229 if (new_assoc) {
230 priv->start_calib = 0;
231 /* Apply the new configuration
232 * RXON assoc doesn't clear the station table in uCode,
233 */
234 ret = iwl_send_cmd_pdu(priv, ctx->rxon_cmd,
235 sizeof(struct iwl_rxon_cmd), &ctx->staging);
236 if (ret) {
237 IWL_ERR(priv, "Error setting new RXON (%d)\n", ret);
238 return ret;
239 }
240 memcpy(active_rxon, &ctx->staging, sizeof(*active_rxon));
241 }
242 iwl_print_rx_config_cmd(priv, ctx);
243
244 iwl_init_sensitivity(priv);
245
246 /* If we issue a new RXON command which required a tune then we must
247 * send a new TXPOWER command or we won't be able to Tx any frames */
248 ret = iwl_set_tx_power(priv, priv->tx_power_user_lmt, true);
249 if (ret) {
250 IWL_ERR(priv, "Error sending TX power (%d)\n", ret);
251 return ret;
252 }
253
254 return 0;
255}
256
257void iwl_update_chain_flags(struct iwl_priv *priv) 93void iwl_update_chain_flags(struct iwl_priv *priv)
258{ 94{
259 struct iwl_rxon_context *ctx; 95 struct iwl_rxon_context *ctx;
@@ -261,7 +97,8 @@ void iwl_update_chain_flags(struct iwl_priv *priv)
261 if (priv->cfg->ops->hcmd->set_rxon_chain) { 97 if (priv->cfg->ops->hcmd->set_rxon_chain) {
262 for_each_context(priv, ctx) { 98 for_each_context(priv, ctx) {
263 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx); 99 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
264 iwlcore_commit_rxon(priv, ctx); 100 if (ctx->active.rx_chain != ctx->staging.rx_chain)
101 iwlcore_commit_rxon(priv, ctx);
265 } 102 }
266 } 103 }
267} 104}
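The one-line guard added above avoids re-sending RXON when the chain mask did not change. The idea in isolation, with stand-in types:

#include <stdbool.h>
#include <stdio.h>

struct ctx { unsigned char active_rx_chain, staging_rx_chain; };

static bool commit_if_changed(struct ctx *c)
{
        if (c->active_rx_chain == c->staging_rx_chain)
                return false;                   /* nothing to reprogram */
        c->active_rx_chain = c->staging_rx_chain;
        printf("commit rxon\n");
        return true;
}

int main(void)
{
        struct ctx c = { .active_rx_chain = 3, .staging_rx_chain = 7 };

        commit_if_changed(&c);                  /* commits once */
        commit_if_changed(&c);                  /* no-op */
        return 0;
}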
@@ -411,7 +248,8 @@ static unsigned int iwl_hw_get_beacon_cmd(struct iwl_priv *priv,
411 248
412 return sizeof(*tx_beacon_cmd) + frame_size; 249 return sizeof(*tx_beacon_cmd) + frame_size;
413} 250}
414static int iwl_send_beacon_cmd(struct iwl_priv *priv) 251
252int iwlagn_send_beacon_cmd(struct iwl_priv *priv)
415{ 253{
416 struct iwl_frame *frame; 254 struct iwl_frame *frame;
417 unsigned int frame_size; 255 unsigned int frame_size;
@@ -661,7 +499,7 @@ static void iwl_bg_beacon_update(struct work_struct *work)
661 499
662 priv->beacon_skb = beacon; 500 priv->beacon_skb = beacon;
663 501
664 iwl_send_beacon_cmd(priv); 502 iwlagn_send_beacon_cmd(priv);
665 out: 503 out:
666 mutex_unlock(&priv->mutex); 504 mutex_unlock(&priv->mutex);
667} 505}
@@ -2664,7 +2502,7 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
2664 return pos; 2502 return pos;
2665 } 2503 }
2666 2504
2667 /* enable/disable bt channel announcement */ 2505 /* enable/disable bt channel inhibition */
2668 priv->bt_ch_announce = iwlagn_bt_ch_announce; 2506 priv->bt_ch_announce = iwlagn_bt_ch_announce;
2669 2507
2670#ifdef CONFIG_IWLWIFI_DEBUG 2508#ifdef CONFIG_IWLWIFI_DEBUG
@@ -2816,13 +2654,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
2816 /* After the ALIVE response, we can send host commands to the uCode */ 2654 /* After the ALIVE response, we can send host commands to the uCode */
2817 set_bit(STATUS_ALIVE, &priv->status); 2655 set_bit(STATUS_ALIVE, &priv->status);
2818 2656
2819 if (priv->cfg->ops->lib->recover_from_tx_stall) { 2657 /* Enable watchdog to monitor the driver tx queues */
2820 /* Enable timer to monitor the driver queues */ 2658 iwl_setup_watchdog(priv);
2821 mod_timer(&priv->monitor_recover,
2822 jiffies +
2823 msecs_to_jiffies(
2824 priv->cfg->base_params->monitor_recover_period));
2825 }
2826 2659
2827 if (iwl_is_rfkill(priv)) 2660 if (iwl_is_rfkill(priv))
2828 return; 2661 return;
@@ -2879,6 +2712,8 @@ static void iwl_alive_start(struct iwl_priv *priv)
2879 2712
2880 iwl_reset_run_time_calib(priv); 2713 iwl_reset_run_time_calib(priv);
2881 2714
2715 set_bit(STATUS_READY, &priv->status);
2716
2882 /* Configure the adapter for unassociated operation */ 2717 /* Configure the adapter for unassociated operation */
2883 iwlcore_commit_rxon(priv, ctx); 2718 iwlcore_commit_rxon(priv, ctx);
2884 2719
@@ -2888,7 +2723,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
2888 iwl_leds_init(priv); 2723 iwl_leds_init(priv);
2889 2724
2890 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2725 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2891 set_bit(STATUS_READY, &priv->status);
2892 wake_up_interruptible(&priv->wait_command_queue); 2726 wake_up_interruptible(&priv->wait_command_queue);
2893 2727
2894 iwl_power_update_mode(priv, true); 2728 iwl_power_update_mode(priv, true);
@@ -2916,8 +2750,7 @@ static void __iwl_down(struct iwl_priv *priv)
2916 2750
2917 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set 2751 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
2918 * to prevent rearm timer */ 2752 * to prevent rearm timer */
2919 if (priv->cfg->ops->lib->recover_from_tx_stall) 2753 del_timer_sync(&priv->watchdog);
2920 del_timer_sync(&priv->monitor_recover);
2921 2754
2922 iwl_clear_ucode_stations(priv, NULL); 2755 iwl_clear_ucode_stations(priv, NULL);
2923 iwl_dealloc_bcast_stations(priv); 2756 iwl_dealloc_bcast_stations(priv);
@@ -2978,7 +2811,8 @@ static void __iwl_down(struct iwl_priv *priv)
2978 STATUS_EXIT_PENDING; 2811 STATUS_EXIT_PENDING;
2979 2812
2980 /* device going down, Stop using ICT table */ 2813 /* device going down, Stop using ICT table */
2981 iwl_disable_ict(priv); 2814 if (priv->cfg->ops->lib->isr_ops.disable)
2815 priv->cfg->ops->lib->isr_ops.disable(priv);
2982 2816
2983 iwlagn_txq_ctx_stop(priv); 2817 iwlagn_txq_ctx_stop(priv);
2984 iwlagn_rxq_stop(priv); 2818 iwlagn_rxq_stop(priv);
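The __iwl_down hunk above goes through an optional isr_ops table instead of calling iwl_disable_ict() directly, so devices without ICT simply leave the hook NULL and callers must check before invoking. A standalone sketch of that dispatch pattern (all names invented):

#include <stdio.h>

struct isr_ops {
        void (*disable)(void *priv);
        void (*reset)(void *priv);
};

static void ict_disable(void *priv) { (void)priv; printf("ICT disabled\n"); }

static const struct isr_ops legacy_ops = { NULL, NULL };        /* no ICT */
static const struct isr_ops ict_ops    = { .disable = ict_disable };

static void device_down(const struct isr_ops *ops, void *priv)
{
        if (ops->disable)               /* only devices that have ICT */
                ops->disable(priv);
}

int main(void)
{
        device_down(&legacy_ops, NULL);
        device_down(&ict_ops, NULL);
        return 0;
}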
@@ -3201,7 +3035,8 @@ static void iwl_bg_alive_start(struct work_struct *data)
3201 return; 3035 return;
3202 3036
3203 /* enable dram interrupt */ 3037 /* enable dram interrupt */
3204 iwl_reset_ict(priv); 3038 if (priv->cfg->ops->lib->isr_ops.reset)
3039 priv->cfg->ops->lib->isr_ops.reset(priv);
3205 3040
3206 mutex_lock(&priv->mutex); 3041 mutex_lock(&priv->mutex);
3207 iwl_alive_start(priv); 3042 iwl_alive_start(priv);
@@ -3309,92 +3144,6 @@ static void iwl_bg_rx_replenish(struct work_struct *data)
3309 mutex_unlock(&priv->mutex); 3144 mutex_unlock(&priv->mutex);
3310} 3145}
3311 3146
3312#define IWL_DELAY_NEXT_SCAN (HZ*2)
3313
3314void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3315{
3316 struct iwl_rxon_context *ctx;
3317 struct ieee80211_conf *conf = NULL;
3318 int ret = 0;
3319
3320 if (!vif || !priv->is_open)
3321 return;
3322
3323 ctx = iwl_rxon_ctx_from_vif(vif);
3324
3325 if (vif->type == NL80211_IFTYPE_AP) {
3326 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3327 return;
3328 }
3329
3330 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3331 return;
3332
3333 iwl_scan_cancel_timeout(priv, 200);
3334
3335 conf = ieee80211_get_hw_conf(priv->hw);
3336
3337 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3338 iwlcore_commit_rxon(priv, ctx);
3339
3340 ret = iwl_send_rxon_timing(priv, ctx);
3341 if (ret)
3342 IWL_WARN(priv, "RXON timing - "
3343 "Attempting to continue.\n");
3344
3345 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3346
3347 iwl_set_rxon_ht(priv, &priv->current_ht_config);
3348
3349 if (priv->cfg->ops->hcmd->set_rxon_chain)
3350 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
3351
3352 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid);
3353
3354 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3355 vif->bss_conf.aid, vif->bss_conf.beacon_int);
3356
3357 if (vif->bss_conf.use_short_preamble)
3358 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3359 else
3360 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3361
3362 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3363 if (vif->bss_conf.use_short_slot)
3364 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3365 else
3366 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
3367 }
3368
3369 iwlcore_commit_rxon(priv, ctx);
3370
3371 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3372 vif->bss_conf.aid, ctx->active.bssid_addr);
3373
3374 switch (vif->type) {
3375 case NL80211_IFTYPE_STATION:
3376 break;
3377 case NL80211_IFTYPE_ADHOC:
3378 iwl_send_beacon_cmd(priv);
3379 break;
3380 default:
3381 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3382 __func__, vif->type);
3383 break;
3384 }
3385
3386 /* the chain noise calibration will enabled PM upon completion
3387 * If chain noise has already been run, then we need to enable
3388 * power management here */
3389 if (priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE)
3390 iwl_power_update_mode(priv, false);
3391
3392 /* Enable Rx differential gain and sensitivity calibrations */
3393 iwl_chain_noise_reset(priv);
3394 priv->start_calib = 1;
3395
3396}
3397
3398/***************************************************************************** 3147/*****************************************************************************
3399 * 3148 *
3400 * mac80211 entry point functions 3149 * mac80211 entry point functions
@@ -3420,7 +3169,8 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3420 hw->flags = IEEE80211_HW_SIGNAL_DBM | 3169 hw->flags = IEEE80211_HW_SIGNAL_DBM |
3421 IEEE80211_HW_AMPDU_AGGREGATION | 3170 IEEE80211_HW_AMPDU_AGGREGATION |
3422 IEEE80211_HW_NEED_DTIM_PERIOD | 3171 IEEE80211_HW_NEED_DTIM_PERIOD |
3423 IEEE80211_HW_SPECTRUM_MGMT; 3172 IEEE80211_HW_SPECTRUM_MGMT |
3173 IEEE80211_HW_REPORTS_TX_ACK_STATUS;
3424 3174
3425 if (!priv->cfg->base_params->broken_powersave) 3175 if (!priv->cfg->base_params->broken_powersave)
3426 hw->flags |= IEEE80211_HW_SUPPORTS_PS | 3176 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
@@ -3474,7 +3224,7 @@ static int iwl_mac_setup_register(struct iwl_priv *priv,
3474} 3224}
3475 3225
3476 3226
3477static int iwl_mac_start(struct ieee80211_hw *hw) 3227int iwlagn_mac_start(struct ieee80211_hw *hw)
3478{ 3228{
3479 struct iwl_priv *priv = hw->priv; 3229 struct iwl_priv *priv = hw->priv;
3480 int ret; 3230 int ret;
@@ -3515,7 +3265,7 @@ out:
3515 return 0; 3265 return 0;
3516} 3266}
3517 3267
3518static void iwl_mac_stop(struct ieee80211_hw *hw) 3268void iwlagn_mac_stop(struct ieee80211_hw *hw)
3519{ 3269{
3520 struct iwl_priv *priv = hw->priv; 3270 struct iwl_priv *priv = hw->priv;
3521 3271
@@ -3530,14 +3280,15 @@ static void iwl_mac_stop(struct ieee80211_hw *hw)
3530 3280
3531 flush_workqueue(priv->workqueue); 3281 flush_workqueue(priv->workqueue);
3532 3282
3533 /* enable interrupts again in order to receive rfkill changes */ 3283 /* User space software may expect to get rfkill changes
3284 * even if the interface is down */
3534 iwl_write32(priv, CSR_INT, 0xFFFFFFFF); 3285 iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
3535 iwl_enable_interrupts(priv); 3286 iwl_enable_rfkill_int(priv);
3536 3287
3537 IWL_DEBUG_MAC80211(priv, "leave\n"); 3288 IWL_DEBUG_MAC80211(priv, "leave\n");
3538} 3289}
3539 3290
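The stop path now arms only the RF-kill interrupt rather than the full interrupt set, so user space keeps receiving rfkill state changes while the interface is down. A small sketch of that selective masking, with made-up bit names standing in for the real CSR definitions:

/* Sketch under stated assumptions: INT_* values are placeholders. */
#include <stdint.h>
#include <stdio.h>

#define INT_RX      (1u << 0)
#define INT_TX      (1u << 1)
#define INT_RF_KILL (1u << 7)

static uint32_t int_mask;

static void enable_all(void)         { int_mask = INT_RX | INT_TX | INT_RF_KILL; }
static void enable_rfkill_only(void) { int_mask = INT_RF_KILL; }

int main(void)
{
	enable_all();           /* normal operation */
	enable_rfkill_only();   /* mac80211 stop(): keep rfkill events flowing */
	printf("mask after stop: 0x%08x\n", (unsigned)int_mask);
	return 0;
}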
3540static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb) 3291int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3541{ 3292{
3542 struct iwl_priv *priv = hw->priv; 3293 struct iwl_priv *priv = hw->priv;
3543 3294
@@ -3553,73 +3304,12 @@ static int iwl_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3553 return NETDEV_TX_OK; 3304 return NETDEV_TX_OK;
3554} 3305}
3555 3306
3556void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3307void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
3557{ 3308 struct ieee80211_vif *vif,
3558 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 3309 struct ieee80211_key_conf *keyconf,
3559 int ret = 0; 3310 struct ieee80211_sta *sta,
3560 3311 u32 iv32, u16 *phase1key)
3561 lockdep_assert_held(&priv->mutex);
3562
3563 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3564 return;
3565
3566 /* The following should be done only at AP bring up */
3567 if (!iwl_is_associated_ctx(ctx)) {
3568
3569 /* RXON - unassoc (to set timing command) */
3570 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
3571 iwlcore_commit_rxon(priv, ctx);
3572
3573 /* RXON Timing */
3574 ret = iwl_send_rxon_timing(priv, ctx);
3575 if (ret)
3576 IWL_WARN(priv, "RXON timing failed - "
3577 "Attempting to continue.\n");
3578
3579 /* AP has all antennas */
3580 priv->chain_noise_data.active_chains =
3581 priv->hw_params.valid_rx_ant;
3582 iwl_set_rxon_ht(priv, &priv->current_ht_config);
3583 if (priv->cfg->ops->hcmd->set_rxon_chain)
3584 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
3585
3586 ctx->staging.assoc_id = 0;
3587
3588 if (vif->bss_conf.use_short_preamble)
3589 ctx->staging.flags |=
3590 RXON_FLG_SHORT_PREAMBLE_MSK;
3591 else
3592 ctx->staging.flags &=
3593 ~RXON_FLG_SHORT_PREAMBLE_MSK;
3594
3595 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3596 if (vif->bss_conf.use_short_slot)
3597 ctx->staging.flags |=
3598 RXON_FLG_SHORT_SLOT_MSK;
3599 else
3600 ctx->staging.flags &=
3601 ~RXON_FLG_SHORT_SLOT_MSK;
3602 }
3603 /* need to send beacon cmd before committing assoc RXON! */
3604 iwl_send_beacon_cmd(priv);
3605 /* restore RXON assoc */
3606 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3607 iwlcore_commit_rxon(priv, ctx);
3608 }
3609 iwl_send_beacon_cmd(priv);
3610
3611 /* FIXME - we need to add code here to detect a totally new
3612 * configuration, reset the AP, unassoc, rxon timing, assoc,
3613 * clear sta table, add BCAST sta... */
3614}
3615
3616static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
3617 struct ieee80211_vif *vif,
3618 struct ieee80211_key_conf *keyconf,
3619 struct ieee80211_sta *sta,
3620 u32 iv32, u16 *phase1key)
3621{ 3312{
3622
3623 struct iwl_priv *priv = hw->priv; 3313 struct iwl_priv *priv = hw->priv;
3624 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 3314 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
3625 3315
@@ -3631,10 +3321,9 @@ static void iwl_mac_update_tkip_key(struct ieee80211_hw *hw,
3631 IWL_DEBUG_MAC80211(priv, "leave\n"); 3321 IWL_DEBUG_MAC80211(priv, "leave\n");
3632} 3322}
3633 3323
3634static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 3324int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3635 struct ieee80211_vif *vif, 3325 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
3636 struct ieee80211_sta *sta, 3326 struct ieee80211_key_conf *key)
3637 struct ieee80211_key_conf *key)
3638{ 3327{
3639 struct iwl_priv *priv = hw->priv; 3328 struct iwl_priv *priv = hw->priv;
3640 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 3329 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
@@ -3701,10 +3390,10 @@ static int iwl_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
3701 return ret; 3390 return ret;
3702} 3391}
3703 3392
3704static int iwl_mac_ampdu_action(struct ieee80211_hw *hw, 3393int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
3705 struct ieee80211_vif *vif, 3394 struct ieee80211_vif *vif,
3706 enum ieee80211_ampdu_mlme_action action, 3395 enum ieee80211_ampdu_mlme_action action,
3707 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 3396 struct ieee80211_sta *sta, u16 tid, u16 *ssn)
3708{ 3397{
3709 struct iwl_priv *priv = hw->priv; 3398 struct iwl_priv *priv = hw->priv;
3710 int ret = -EINVAL; 3399 int ret = -EINVAL;
@@ -3785,39 +3474,9 @@ static int iwl_mac_ampdu_action(struct ieee80211_hw *hw,
3785 return ret; 3474 return ret;
3786} 3475}
3787 3476
3788static void iwl_mac_sta_notify(struct ieee80211_hw *hw, 3477int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3789 struct ieee80211_vif *vif, 3478 struct ieee80211_vif *vif,
3790 enum sta_notify_cmd cmd, 3479 struct ieee80211_sta *sta)
3791 struct ieee80211_sta *sta)
3792{
3793 struct iwl_priv *priv = hw->priv;
3794 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
3795 int sta_id;
3796
3797 switch (cmd) {
3798 case STA_NOTIFY_SLEEP:
3799 WARN_ON(!sta_priv->client);
3800 sta_priv->asleep = true;
3801 if (atomic_read(&sta_priv->pending_frames) > 0)
3802 ieee80211_sta_block_awake(hw, sta, true);
3803 break;
3804 case STA_NOTIFY_AWAKE:
3805 WARN_ON(!sta_priv->client);
3806 if (!sta_priv->asleep)
3807 break;
3808 sta_priv->asleep = false;
3809 sta_id = iwl_sta_id(sta);
3810 if (sta_id != IWL_INVALID_STATION)
3811 iwl_sta_modify_ps_wake(priv, sta_id);
3812 break;
3813 default:
3814 break;
3815 }
3816}
3817
3818static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3819 struct ieee80211_vif *vif,
3820 struct ieee80211_sta *sta)
3821{ 3480{
3822 struct iwl_priv *priv = hw->priv; 3481 struct iwl_priv *priv = hw->priv;
3823 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv; 3482 struct iwl_station_priv *sta_priv = (void *)sta->drv_priv;
@@ -3858,8 +3517,8 @@ static int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
3858 return 0; 3517 return 0;
3859} 3518}
3860 3519
3861static void iwl_mac_channel_switch(struct ieee80211_hw *hw, 3520void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
3862 struct ieee80211_channel_switch *ch_switch) 3521 struct ieee80211_channel_switch *ch_switch)
3863{ 3522{
3864 struct iwl_priv *priv = hw->priv; 3523 struct iwl_priv *priv = hw->priv;
3865 const struct iwl_channel_info *ch_info; 3524 const struct iwl_channel_info *ch_info;
@@ -3956,10 +3615,10 @@ out_exit:
3956 IWL_DEBUG_MAC80211(priv, "leave\n"); 3615 IWL_DEBUG_MAC80211(priv, "leave\n");
3957} 3616}
3958 3617
3959static void iwlagn_configure_filter(struct ieee80211_hw *hw, 3618void iwlagn_configure_filter(struct ieee80211_hw *hw,
3960 unsigned int changed_flags, 3619 unsigned int changed_flags,
3961 unsigned int *total_flags, 3620 unsigned int *total_flags,
3962 u64 multicast) 3621 u64 multicast)
3963{ 3622{
3964 struct iwl_priv *priv = hw->priv; 3623 struct iwl_priv *priv = hw->priv;
3965 __le32 filter_or = 0, filter_nand = 0; 3624 __le32 filter_or = 0, filter_nand = 0;
@@ -3976,7 +3635,8 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3976 changed_flags, *total_flags); 3635 changed_flags, *total_flags);
3977 3636
3978 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK); 3637 CHK(FIF_OTHER_BSS | FIF_PROMISC_IN_BSS, RXON_FILTER_PROMISC_MSK);
3979 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK); 3638 /* Setting _just_ RXON_FILTER_CTL2HOST_MSK causes FH errors */
3639 CHK(FIF_CONTROL, RXON_FILTER_CTL2HOST_MSK | RXON_FILTER_PROMISC_MSK);
3980 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK); 3640 CHK(FIF_BCN_PRBRESP_PROMISC, RXON_FILTER_BCON_AWARE_MSK);
3981 3641
3982#undef CHK 3642#undef CHK
@@ -3986,7 +3646,11 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
3986 for_each_context(priv, ctx) { 3646 for_each_context(priv, ctx) {
3987 ctx->staging.filter_flags &= ~filter_nand; 3647 ctx->staging.filter_flags &= ~filter_nand;
3988 ctx->staging.filter_flags |= filter_or; 3648 ctx->staging.filter_flags |= filter_or;
3989 iwlcore_commit_rxon(priv, ctx); 3649
3650 /*
3651 * Not committing directly because hardware can perform a scan,
3652 * but we'll eventually commit the filter flags change anyway.
3653 */
3990 } 3654 }
3991 3655
3992 mutex_unlock(&priv->mutex); 3656 mutex_unlock(&priv->mutex);
@@ -4001,7 +3665,7 @@ static void iwlagn_configure_filter(struct ieee80211_hw *hw,
4001 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL; 3665 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL;
4002} 3666}
4003 3667
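iwlagn_configure_filter() translates mac80211 FIF_* bits into RXON filter bits through the CHK() helper, accumulating a set mask and a clear mask, and now leaves the actual RXON commit for later because the hardware may be scanning. A compact model of that accumulate-then-apply pattern (flag values are invented for the example):

/* Minimal model of the CHK() pattern above: for each changed filter flag,
 * collect bits to set (filter_or) or clear (filter_nand), then apply both
 * masks to the staging flags in one pass. */
#include <stdint.h>
#include <stdio.h>

#define FIF_PROMISC   (1u << 0)
#define FIF_CONTROL   (1u << 1)
#define RXON_PROMISC  (1u << 8)
#define RXON_CTL2HOST (1u << 9)

int main(void)
{
	uint32_t changed = FIF_CONTROL, total = FIF_CONTROL;  /* CONTROL just enabled */
	uint32_t filter_or = 0, filter_nand = 0, staging = RXON_PROMISC;

#define CHK(test, flag) do { \
	if (changed & (test)) { \
		if (total & (test)) \
			filter_or |= (flag); \
		else \
			filter_nand |= (flag); \
	} \
} while (0)

	CHK(FIF_PROMISC, RXON_PROMISC);
	/* control frames request promisc too, mirroring the FH-error workaround */
	CHK(FIF_CONTROL, RXON_CTL2HOST | RXON_PROMISC);
#undef CHK

	staging = (staging & ~filter_nand) | filter_or;
	printf("staging filter flags: 0x%08x\n", (unsigned)staging);
	return 0;
}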
4004static void iwl_mac_flush(struct ieee80211_hw *hw, bool drop) 3668void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop)
4005{ 3669{
4006 struct iwl_priv *priv = hw->priv; 3670 struct iwl_priv *priv = hw->priv;
4007 3671
@@ -4074,12 +3738,9 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
4074 priv->ucode_trace.data = (unsigned long)priv; 3738 priv->ucode_trace.data = (unsigned long)priv;
4075 priv->ucode_trace.function = iwl_bg_ucode_trace; 3739 priv->ucode_trace.function = iwl_bg_ucode_trace;
4076 3740
4077 if (priv->cfg->ops->lib->recover_from_tx_stall) { 3741 init_timer(&priv->watchdog);
4078 init_timer(&priv->monitor_recover); 3742 priv->watchdog.data = (unsigned long)priv;
4079 priv->monitor_recover.data = (unsigned long)priv; 3743 priv->watchdog.function = iwl_bg_watchdog;
4080 priv->monitor_recover.function =
4081 priv->cfg->ops->lib->recover_from_tx_stall;
4082 }
4083 3744
4084 if (!priv->cfg->base_params->use_isr_legacy) 3745 if (!priv->cfg->base_params->use_isr_legacy)
4085 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3746 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
@@ -4172,13 +3833,13 @@ static int iwl_init_drv(struct iwl_priv *priv)
4172 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF; 3833 priv->bt_on_thresh = BT_ON_THRESHOLD_DEF;
4173 priv->bt_duration = BT_DURATION_LIMIT_DEF; 3834 priv->bt_duration = BT_DURATION_LIMIT_DEF;
4174 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF; 3835 priv->dynamic_frag_thresh = BT_FRAG_THRESHOLD_DEF;
4175 priv->dynamic_agg_thresh = BT_AGG_THRESHOLD_DEF;
4176 } 3836 }
4177 3837
4178 /* Set the tx_power_user_lmt to the lowest power level 3838 /* Set the tx_power_user_lmt to the lowest power level
4179 * this value will get overwritten by channel max power avg 3839 * this value will get overwritten by channel max power avg
4180 * from eeprom */ 3840 * from eeprom */
4181 priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN; 3841 priv->tx_power_user_lmt = IWLAGN_TX_POWER_TARGET_POWER_MIN;
3842 priv->tx_power_next = IWLAGN_TX_POWER_TARGET_POWER_MIN;
4182 3843
4183 ret = iwl_init_channel_map(priv); 3844 ret = iwl_init_channel_map(priv);
4184 if (ret) { 3845 if (ret) {
@@ -4209,28 +3870,30 @@ static void iwl_uninit_drv(struct iwl_priv *priv)
4209 kfree(priv->scan_cmd); 3870 kfree(priv->scan_cmd);
4210} 3871}
4211 3872
4212static struct ieee80211_ops iwl_hw_ops = { 3873#ifdef CONFIG_IWL5000
4213 .tx = iwl_mac_tx, 3874struct ieee80211_ops iwlagn_hw_ops = {
4214 .start = iwl_mac_start, 3875 .tx = iwlagn_mac_tx,
4215 .stop = iwl_mac_stop, 3876 .start = iwlagn_mac_start,
3877 .stop = iwlagn_mac_stop,
4216 .add_interface = iwl_mac_add_interface, 3878 .add_interface = iwl_mac_add_interface,
4217 .remove_interface = iwl_mac_remove_interface, 3879 .remove_interface = iwl_mac_remove_interface,
4218 .config = iwl_mac_config, 3880 .change_interface = iwl_mac_change_interface,
3881 .config = iwlagn_mac_config,
4219 .configure_filter = iwlagn_configure_filter, 3882 .configure_filter = iwlagn_configure_filter,
4220 .set_key = iwl_mac_set_key, 3883 .set_key = iwlagn_mac_set_key,
4221 .update_tkip_key = iwl_mac_update_tkip_key, 3884 .update_tkip_key = iwlagn_mac_update_tkip_key,
4222 .conf_tx = iwl_mac_conf_tx, 3885 .conf_tx = iwl_mac_conf_tx,
4223 .reset_tsf = iwl_mac_reset_tsf, 3886 .bss_info_changed = iwlagn_bss_info_changed,
4224 .bss_info_changed = iwl_bss_info_changed, 3887 .ampdu_action = iwlagn_mac_ampdu_action,
4225 .ampdu_action = iwl_mac_ampdu_action,
4226 .hw_scan = iwl_mac_hw_scan, 3888 .hw_scan = iwl_mac_hw_scan,
4227 .sta_notify = iwl_mac_sta_notify, 3889 .sta_notify = iwlagn_mac_sta_notify,
4228 .sta_add = iwlagn_mac_sta_add, 3890 .sta_add = iwlagn_mac_sta_add,
4229 .sta_remove = iwl_mac_sta_remove, 3891 .sta_remove = iwl_mac_sta_remove,
4230 .channel_switch = iwl_mac_channel_switch, 3892 .channel_switch = iwlagn_mac_channel_switch,
4231 .flush = iwl_mac_flush, 3893 .flush = iwlagn_mac_flush,
4232 .tx_last_beacon = iwl_mac_tx_last_beacon, 3894 .tx_last_beacon = iwl_mac_tx_last_beacon,
4233}; 3895};
3896#endif
4234 3897
4235static void iwl_hw_detect(struct iwl_priv *priv) 3898static void iwl_hw_detect(struct iwl_priv *priv)
4236{ 3899{
@@ -4298,10 +3961,15 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4298 if (cfg->mod_params->disable_hw_scan) { 3961 if (cfg->mod_params->disable_hw_scan) {
4299 dev_printk(KERN_DEBUG, &(pdev->dev), 3962 dev_printk(KERN_DEBUG, &(pdev->dev),
4300 "sw scan support is deprecated\n"); 3963 "sw scan support is deprecated\n");
4301 iwl_hw_ops.hw_scan = NULL; 3964#ifdef CONFIG_IWL5000
3965 iwlagn_hw_ops.hw_scan = NULL;
3966#endif
3967#ifdef CONFIG_IWL4965
3968 iwl4965_hw_ops.hw_scan = NULL;
3969#endif
4302 } 3970 }
4303 3971
4304 hw = iwl_alloc_all(cfg, &iwl_hw_ops); 3972 hw = iwl_alloc_all(cfg);
4305 if (!hw) { 3973 if (!hw) {
4306 err = -ENOMEM; 3974 err = -ENOMEM;
4307 goto out; 3975 goto out;
@@ -4333,6 +4001,7 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4333 BIT(NL80211_IFTYPE_ADHOC); 4001 BIT(NL80211_IFTYPE_ADHOC);
4334 priv->contexts[IWL_RXON_CTX_BSS].interface_modes = 4002 priv->contexts[IWL_RXON_CTX_BSS].interface_modes =
4335 BIT(NL80211_IFTYPE_STATION); 4003 BIT(NL80211_IFTYPE_STATION);
4004 priv->contexts[IWL_RXON_CTX_BSS].ap_devtype = RXON_DEV_TYPE_AP;
4336 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS; 4005 priv->contexts[IWL_RXON_CTX_BSS].ibss_devtype = RXON_DEV_TYPE_IBSS;
4337 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS; 4006 priv->contexts[IWL_RXON_CTX_BSS].station_devtype = RXON_DEV_TYPE_ESS;
4338 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS; 4007 priv->contexts[IWL_RXON_CTX_BSS].unused_devtype = RXON_DEV_TYPE_ESS;
@@ -4368,8 +4037,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4368 (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ? 4037 (iwlagn_ant_coupling > IWL_BT_ANTENNA_COUPLING_THRESHOLD) ?
4369 true : false; 4038 true : false;
4370 4039
4371 /* enable/disable bt channel announcement */ 4040 /* enable/disable bt channel inhibition */
4372 priv->bt_ch_announce = iwlagn_bt_ch_announce; 4041 priv->bt_ch_announce = iwlagn_bt_ch_announce;
4042 IWL_DEBUG_INFO(priv, "BT channel inhibition is %s\n",
4043 (priv->bt_ch_announce) ? "On" : "Off");
4373 4044
4374 if (iwl_alloc_traffic_mem(priv)) 4045 if (iwl_alloc_traffic_mem(priv))
4375 IWL_ERR(priv, "Not enough memory to generate traffic log\n"); 4046 IWL_ERR(priv, "Not enough memory to generate traffic log\n");
@@ -4461,6 +4132,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4461 if (err) 4132 if (err)
4462 goto out_free_eeprom; 4133 goto out_free_eeprom;
4463 4134
4135 err = iwl_eeprom_check_sku(priv);
4136 if (err)
4137 goto out_free_eeprom;
4138
4464 /* extract MAC Address */ 4139 /* extract MAC Address */
4465 iwl_eeprom_get_mac(priv, priv->addresses[0].addr); 4140 iwl_eeprom_get_mac(priv, priv->addresses[0].addr);
4466 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr); 4141 IWL_DEBUG_INFO(priv, "MAC address: %pM\n", priv->addresses[0].addr);
@@ -4500,8 +4175,10 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4500 4175
4501 pci_enable_msi(priv->pci_dev); 4176 pci_enable_msi(priv->pci_dev);
4502 4177
4503 iwl_alloc_isr_ict(priv); 4178 if (priv->cfg->ops->lib->isr_ops.alloc)
4504 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, 4179 priv->cfg->ops->lib->isr_ops.alloc(priv);
4180
4181 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
4505 IRQF_SHARED, DRV_NAME, priv); 4182 IRQF_SHARED, DRV_NAME, priv);
4506 if (err) { 4183 if (err) {
4507 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 4184 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4515,14 +4192,14 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4515 * 8. Enable interrupts and read RFKILL state 4192 * 8. Enable interrupts and read RFKILL state
4516 *********************************************/ 4193 *********************************************/
4517 4194
4518 /* enable interrupts if needed: hw bug w/a */ 4195 /* enable rfkill interrupt: hw bug w/a */
4519 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd); 4196 pci_read_config_word(priv->pci_dev, PCI_COMMAND, &pci_cmd);
4520 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 4197 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
4521 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE; 4198 pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
4522 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd); 4199 pci_write_config_word(priv->pci_dev, PCI_COMMAND, pci_cmd);
4523 } 4200 }
4524 4201
4525 iwl_enable_interrupts(priv); 4202 iwl_enable_rfkill_int(priv);
4526 4203
4527 /* If platform's RF_KILL switch is NOT set to KILL */ 4204 /* If platform's RF_KILL switch is NOT set to KILL */
4528 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 4205 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
@@ -4548,7 +4225,8 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
4548 destroy_workqueue(priv->workqueue); 4225 destroy_workqueue(priv->workqueue);
4549 priv->workqueue = NULL; 4226 priv->workqueue = NULL;
4550 free_irq(priv->pci_dev->irq, priv); 4227 free_irq(priv->pci_dev->irq, priv);
4551 iwl_free_isr_ict(priv); 4228 if (priv->cfg->ops->lib->isr_ops.free)
4229 priv->cfg->ops->lib->isr_ops.free(priv);
4552 out_disable_msi: 4230 out_disable_msi:
4553 pci_disable_msi(priv->pci_dev); 4231 pci_disable_msi(priv->pci_dev);
4554 iwl_uninit_drv(priv); 4232 iwl_uninit_drv(priv);
@@ -4643,7 +4321,8 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
4643 4321
4644 iwl_uninit_drv(priv); 4322 iwl_uninit_drv(priv);
4645 4323
4646 iwl_free_isr_ict(priv); 4324 if (priv->cfg->ops->lib->isr_ops.free)
4325 priv->cfg->ops->lib->isr_ops.free(priv);
4647 4326
4648 dev_kfree_skb(priv->beacon_skb); 4327 dev_kfree_skb(priv->beacon_skb);
4649 4328
@@ -4734,51 +4413,32 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4734 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)}, 4413 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
4735 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)}, 4414 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
4736 4415
4737/* 6x00 Series Gen2a */ 4416/* 6x05 Series */
4738 {IWL_PCI_DEVICE(0x0082, 0x1201, iwl6000g2a_2agn_cfg)}, 4417 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6005_2agn_cfg)},
4739 {IWL_PCI_DEVICE(0x0085, 0x1211, iwl6000g2a_2agn_cfg)}, 4418 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6005_2abg_cfg)},
4740 {IWL_PCI_DEVICE(0x0082, 0x1221, iwl6000g2a_2agn_cfg)}, 4419 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6005_2bg_cfg)},
4741 {IWL_PCI_DEVICE(0x0082, 0x1206, iwl6000g2a_2abg_cfg)}, 4420 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6005_2agn_cfg)},
4742 {IWL_PCI_DEVICE(0x0085, 0x1216, iwl6000g2a_2abg_cfg)}, 4421 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6005_2abg_cfg)},
4743 {IWL_PCI_DEVICE(0x0082, 0x1226, iwl6000g2a_2abg_cfg)}, 4422 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6005_2agn_cfg)},
4744 {IWL_PCI_DEVICE(0x0082, 0x1207, iwl6000g2a_2bg_cfg)}, 4423 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6005_2abg_cfg)},
4745 {IWL_PCI_DEVICE(0x0082, 0x1301, iwl6000g2a_2agn_cfg)}, 4424
4746 {IWL_PCI_DEVICE(0x0082, 0x1306, iwl6000g2a_2abg_cfg)}, 4425/* 6x30 Series */
4747 {IWL_PCI_DEVICE(0x0082, 0x1307, iwl6000g2a_2bg_cfg)}, 4426 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl1030_bgn_cfg)},
4748 {IWL_PCI_DEVICE(0x0082, 0x1321, iwl6000g2a_2agn_cfg)}, 4427 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl1030_bg_cfg)},
4749 {IWL_PCI_DEVICE(0x0082, 0x1326, iwl6000g2a_2abg_cfg)}, 4428 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl1030_bgn_cfg)},
4750 {IWL_PCI_DEVICE(0x0085, 0x1311, iwl6000g2a_2agn_cfg)}, 4429 {IWL_PCI_DEVICE(0x008A, 0x5327, iwl1030_bg_cfg)},
4751 {IWL_PCI_DEVICE(0x0085, 0x1316, iwl6000g2a_2abg_cfg)}, 4430 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl1030_bgn_cfg)},
4752 4431 {IWL_PCI_DEVICE(0x008B, 0x5317, iwl1030_bg_cfg)},
4753/* 6x00 Series Gen2b */ 4432 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6030_2agn_cfg)},
4754 {IWL_PCI_DEVICE(0x008F, 0x5105, iwl6000g2b_bgn_cfg)}, 4433 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6030_2bgn_cfg)},
4755 {IWL_PCI_DEVICE(0x0090, 0x5115, iwl6000g2b_bgn_cfg)}, 4434 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6030_2abg_cfg)},
4756 {IWL_PCI_DEVICE(0x008F, 0x5125, iwl6000g2b_bgn_cfg)}, 4435 {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6030_2agn_cfg)},
4757 {IWL_PCI_DEVICE(0x008F, 0x5107, iwl6000g2b_bg_cfg)}, 4436 {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6030_2bgn_cfg)},
4758 {IWL_PCI_DEVICE(0x008F, 0x5201, iwl6000g2b_2agn_cfg)}, 4437 {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6030_2abg_cfg)},
4759 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)}, 4438 {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6030_2bg_cfg)},
4760 {IWL_PCI_DEVICE(0x008F, 0x5221, iwl6000g2b_2agn_cfg)}, 4439 {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6030_2agn_cfg)},
4761 {IWL_PCI_DEVICE(0x008F, 0x5206, iwl6000g2b_2abg_cfg)}, 4440 {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6030_2bgn_cfg)},
4762 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)}, 4441 {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6030_2abg_cfg)},
4763 {IWL_PCI_DEVICE(0x008F, 0x5226, iwl6000g2b_2abg_cfg)},
4764 {IWL_PCI_DEVICE(0x008F, 0x5207, iwl6000g2b_2bg_cfg)},
4765 {IWL_PCI_DEVICE(0x008A, 0x5301, iwl6000g2b_bgn_cfg)},
4766 {IWL_PCI_DEVICE(0x008A, 0x5305, iwl6000g2b_bgn_cfg)},
4767 {IWL_PCI_DEVICE(0x008A, 0x5307, iwl6000g2b_bg_cfg)},
4768 {IWL_PCI_DEVICE(0x008A, 0x5321, iwl6000g2b_bgn_cfg)},
4769 {IWL_PCI_DEVICE(0x008A, 0x5325, iwl6000g2b_bgn_cfg)},
4770 {IWL_PCI_DEVICE(0x008B, 0x5311, iwl6000g2b_bgn_cfg)},
4771 {IWL_PCI_DEVICE(0x008B, 0x5315, iwl6000g2b_bgn_cfg)},
4772 {IWL_PCI_DEVICE(0x0090, 0x5211, iwl6000g2b_2agn_cfg)},
4773 {IWL_PCI_DEVICE(0x0090, 0x5215, iwl6000g2b_2bgn_cfg)},
4774 {IWL_PCI_DEVICE(0x0090, 0x5216, iwl6000g2b_2abg_cfg)},
4775 {IWL_PCI_DEVICE(0x0091, 0x5201, iwl6000g2b_2agn_cfg)},
4776 {IWL_PCI_DEVICE(0x0091, 0x5205, iwl6000g2b_2bgn_cfg)},
4777 {IWL_PCI_DEVICE(0x0091, 0x5206, iwl6000g2b_2abg_cfg)},
4778 {IWL_PCI_DEVICE(0x0091, 0x5207, iwl6000g2b_2bg_cfg)},
4779 {IWL_PCI_DEVICE(0x0091, 0x5221, iwl6000g2b_2agn_cfg)},
4780 {IWL_PCI_DEVICE(0x0091, 0x5225, iwl6000g2b_2bgn_cfg)},
4781 {IWL_PCI_DEVICE(0x0091, 0x5226, iwl6000g2b_2abg_cfg)},
4782 4442
4783/* 6x50 WiFi/WiMax Series */ 4443/* 6x50 WiFi/WiMax Series */
4784 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)}, 4444 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
@@ -4788,13 +4448,13 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4788 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)}, 4448 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
4789 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)}, 4449 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
4790 4450
4791/* 6x50 WiFi/WiMax Series Gen2 */ 4451/* 6150 WiFi/WiMax Series */
4792 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6050g2_bgn_cfg)}, 4452 {IWL_PCI_DEVICE(0x0885, 0x1305, iwl6150_bgn_cfg)},
4793 {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6050g2_bgn_cfg)}, 4453 {IWL_PCI_DEVICE(0x0885, 0x1306, iwl6150_bgn_cfg)},
4794 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6050g2_bgn_cfg)}, 4454 {IWL_PCI_DEVICE(0x0885, 0x1325, iwl6150_bgn_cfg)},
4795 {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6050g2_bgn_cfg)}, 4455 {IWL_PCI_DEVICE(0x0885, 0x1326, iwl6150_bgn_cfg)},
4796 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6050g2_bgn_cfg)}, 4456 {IWL_PCI_DEVICE(0x0886, 0x1315, iwl6150_bgn_cfg)},
4797 {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6050g2_bgn_cfg)}, 4457 {IWL_PCI_DEVICE(0x0886, 0x1316, iwl6150_bgn_cfg)},
4798 4458
4799/* 1000 Series WiFi */ 4459/* 1000 Series WiFi */
4800 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)}, 4460 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
@@ -4812,10 +4472,11 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
4812 4472
4813/* 100 Series WiFi */ 4473/* 100 Series WiFi */
4814 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)}, 4474 {IWL_PCI_DEVICE(0x08AE, 0x1005, iwl100_bgn_cfg)},
4475 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)},
4815 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)}, 4476 {IWL_PCI_DEVICE(0x08AF, 0x1015, iwl100_bgn_cfg)},
4477 {IWL_PCI_DEVICE(0x08AF, 0x1017, iwl100_bg_cfg)},
4816 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)}, 4478 {IWL_PCI_DEVICE(0x08AE, 0x1025, iwl100_bgn_cfg)},
4817 {IWL_PCI_DEVICE(0x08AE, 0x1007, iwl100_bg_cfg)}, 4479 {IWL_PCI_DEVICE(0x08AE, 0x1027, iwl100_bg_cfg)},
4818 {IWL_PCI_DEVICE(0x08AE, 0x1017, iwl100_bg_cfg)},
4819 4480
4820/* 130 Series WiFi */ 4481/* 130 Series WiFi */
4821 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)}, 4482 {IWL_PCI_DEVICE(0x0896, 0x5005, iwl130_bgn_cfg)},
@@ -4836,10 +4497,7 @@ static struct pci_driver iwl_driver = {
4836 .id_table = iwl_hw_card_ids, 4497 .id_table = iwl_hw_card_ids,
4837 .probe = iwl_pci_probe, 4498 .probe = iwl_pci_probe,
4838 .remove = __devexit_p(iwl_pci_remove), 4499 .remove = __devexit_p(iwl_pci_remove),
4839#ifdef CONFIG_PM 4500 .driver.pm = IWL_PM_OPS,
4840 .suspend = iwl_pci_suspend,
4841 .resume = iwl_pci_resume,
4842#endif
4843}; 4501};
4844 4502
4845static int __init iwl_init(void) 4503static int __init iwl_init(void)
@@ -4925,6 +4583,6 @@ module_param_named(antenna_coupling, iwlagn_ant_coupling, int, S_IRUGO);
4925MODULE_PARM_DESC(antenna_coupling, 4583MODULE_PARM_DESC(antenna_coupling,
4926 "specify antenna coupling in dB (default: 0 dB)"); 4584 "specify antenna coupling in dB (default: 0 dB)");
4927 4585
4928module_param_named(bt_ch_announce, iwlagn_bt_ch_announce, bool, S_IRUGO); 4586module_param_named(bt_ch_inhibition, iwlagn_bt_ch_announce, bool, S_IRUGO);
4929MODULE_PARM_DESC(bt_ch_announce, 4587MODULE_PARM_DESC(bt_ch_inhibition,
4930 "Enable BT channel announcement mode (default: enable)"); 4588 "Disable BT channel inhibition (default: enable)");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.h b/drivers/net/wireless/iwlwifi/iwl-agn.h
index f525d55f2c0f..da303585f801 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.h
@@ -74,22 +74,22 @@ extern struct iwl_cfg iwl5100_bgn_cfg;
74extern struct iwl_cfg iwl5100_abg_cfg; 74extern struct iwl_cfg iwl5100_abg_cfg;
75extern struct iwl_cfg iwl5150_agn_cfg; 75extern struct iwl_cfg iwl5150_agn_cfg;
76extern struct iwl_cfg iwl5150_abg_cfg; 76extern struct iwl_cfg iwl5150_abg_cfg;
77extern struct iwl_cfg iwl6000g2a_2agn_cfg; 77extern struct iwl_cfg iwl6005_2agn_cfg;
78extern struct iwl_cfg iwl6000g2a_2abg_cfg; 78extern struct iwl_cfg iwl6005_2abg_cfg;
79extern struct iwl_cfg iwl6000g2a_2bg_cfg; 79extern struct iwl_cfg iwl6005_2bg_cfg;
80extern struct iwl_cfg iwl6000g2b_bgn_cfg; 80extern struct iwl_cfg iwl1030_bgn_cfg;
81extern struct iwl_cfg iwl6000g2b_bg_cfg; 81extern struct iwl_cfg iwl1030_bg_cfg;
82extern struct iwl_cfg iwl6000g2b_2agn_cfg; 82extern struct iwl_cfg iwl6030_2agn_cfg;
83extern struct iwl_cfg iwl6000g2b_2abg_cfg; 83extern struct iwl_cfg iwl6030_2abg_cfg;
84extern struct iwl_cfg iwl6000g2b_2bgn_cfg; 84extern struct iwl_cfg iwl6030_2bgn_cfg;
85extern struct iwl_cfg iwl6000g2b_2bg_cfg; 85extern struct iwl_cfg iwl6030_2bg_cfg;
86extern struct iwl_cfg iwl6000i_2agn_cfg; 86extern struct iwl_cfg iwl6000i_2agn_cfg;
87extern struct iwl_cfg iwl6000i_2abg_cfg; 87extern struct iwl_cfg iwl6000i_2abg_cfg;
88extern struct iwl_cfg iwl6000i_2bg_cfg; 88extern struct iwl_cfg iwl6000i_2bg_cfg;
89extern struct iwl_cfg iwl6000_3agn_cfg; 89extern struct iwl_cfg iwl6000_3agn_cfg;
90extern struct iwl_cfg iwl6050_2agn_cfg; 90extern struct iwl_cfg iwl6050_2agn_cfg;
91extern struct iwl_cfg iwl6050_2abg_cfg; 91extern struct iwl_cfg iwl6050_2abg_cfg;
92extern struct iwl_cfg iwl6050g2_bgn_cfg; 92extern struct iwl_cfg iwl6150_bgn_cfg;
93extern struct iwl_cfg iwl1000_bgn_cfg; 93extern struct iwl_cfg iwl1000_bgn_cfg;
94extern struct iwl_cfg iwl1000_bg_cfg; 94extern struct iwl_cfg iwl1000_bg_cfg;
95extern struct iwl_cfg iwl100_bgn_cfg; 95extern struct iwl_cfg iwl100_bgn_cfg;
@@ -102,6 +102,9 @@ extern struct iwl_hcmd_ops iwlagn_hcmd;
102extern struct iwl_hcmd_ops iwlagn_bt_hcmd; 102extern struct iwl_hcmd_ops iwlagn_bt_hcmd;
103extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils; 103extern struct iwl_hcmd_utils_ops iwlagn_hcmd_utils;
104 104
105extern struct ieee80211_ops iwlagn_hw_ops;
106extern struct ieee80211_ops iwl4965_hw_ops;
107
105int iwl_reset_ict(struct iwl_priv *priv); 108int iwl_reset_ict(struct iwl_priv *priv);
106void iwl_disable_ict(struct iwl_priv *priv); 109void iwl_disable_ict(struct iwl_priv *priv);
107int iwl_alloc_isr_ict(struct iwl_priv *priv); 110int iwl_alloc_isr_ict(struct iwl_priv *priv);
@@ -132,6 +135,11 @@ void iwl_free_tfds_in_queue(struct iwl_priv *priv,
132/* RXON */ 135/* RXON */
133int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 136int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
134void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx); 137void iwlagn_set_rxon_chain(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
138int iwlagn_mac_config(struct ieee80211_hw *hw, u32 changed);
139void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
140 struct ieee80211_vif *vif,
141 struct ieee80211_bss_conf *bss_conf,
142 u32 changes);
135 143
136/* uCode */ 144/* uCode */
137int iwlagn_load_ucode(struct iwl_priv *priv); 145int iwlagn_load_ucode(struct iwl_priv *priv);
@@ -249,6 +257,7 @@ int iwlagn_manage_ibss_station(struct iwl_priv *priv,
249int iwlagn_send_rxon_assoc(struct iwl_priv *priv, 257int iwlagn_send_rxon_assoc(struct iwl_priv *priv,
250 struct iwl_rxon_context *ctx); 258 struct iwl_rxon_context *ctx);
251int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant); 259int iwlagn_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant);
260int iwlagn_send_beacon_cmd(struct iwl_priv *priv);
252 261
253/* bt coex */ 262/* bt coex */
254void iwlagn_send_advance_bt_config(struct iwl_priv *priv); 263void iwlagn_send_advance_bt_config(struct iwl_priv *priv);
@@ -292,9 +301,12 @@ int iwl_sta_rx_agg_start(struct iwl_priv *priv, struct ieee80211_sta *sta,
292 int tid, u16 ssn); 301 int tid, u16 ssn);
293int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta, 302int iwl_sta_rx_agg_stop(struct iwl_priv *priv, struct ieee80211_sta *sta,
294 int tid); 303 int tid);
295void iwl_sta_modify_ps_wake(struct iwl_priv *priv, int sta_id);
296void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt); 304void iwl_sta_modify_sleep_tx_count(struct iwl_priv *priv, int sta_id, int cnt);
297int iwl_update_bcast_stations(struct iwl_priv *priv); 305int iwl_update_bcast_stations(struct iwl_priv *priv);
306void iwlagn_mac_sta_notify(struct ieee80211_hw *hw,
307 struct ieee80211_vif *vif,
308 enum sta_notify_cmd cmd,
309 struct ieee80211_sta *sta);
298 310
299/* rate */ 311/* rate */
300static inline u32 iwl_ant_idx_to_flags(u8 ant_idx) 312static inline u32 iwl_ant_idx_to_flags(u8 ant_idx)
@@ -318,4 +330,31 @@ void iwl_eeprom_get_mac(const struct iwl_priv *priv, u8 *mac);
318int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv); 330int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv);
319void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv); 331void iwlcore_eeprom_release_semaphore(struct iwl_priv *priv);
320 332
333/* mac80211 handlers (for 4965) */
334int iwlagn_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb);
335int iwlagn_mac_start(struct ieee80211_hw *hw);
336void iwlagn_mac_stop(struct ieee80211_hw *hw);
337void iwlagn_configure_filter(struct ieee80211_hw *hw,
338 unsigned int changed_flags,
339 unsigned int *total_flags,
340 u64 multicast);
341int iwlagn_mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
342 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
343 struct ieee80211_key_conf *key);
344void iwlagn_mac_update_tkip_key(struct ieee80211_hw *hw,
345 struct ieee80211_vif *vif,
346 struct ieee80211_key_conf *keyconf,
347 struct ieee80211_sta *sta,
348 u32 iv32, u16 *phase1key);
349int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
350 struct ieee80211_vif *vif,
351 enum ieee80211_ampdu_mlme_action action,
352 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
353int iwlagn_mac_sta_add(struct ieee80211_hw *hw,
354 struct ieee80211_vif *vif,
355 struct ieee80211_sta *sta);
356void iwlagn_mac_channel_switch(struct ieee80211_hw *hw,
357 struct ieee80211_channel_switch *ch_switch);
358void iwlagn_mac_flush(struct ieee80211_hw *hw, bool drop);
359
321#endif /* __iwl_agn_h__ */ 360#endif /* __iwl_agn_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 424801abc80e..f893d4a6aa87 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -2022,6 +2022,9 @@ struct iwl_compressed_ba_resp {
2022 __le64 bitmap; 2022 __le64 bitmap;
2023 __le16 scd_flow; 2023 __le16 scd_flow;
2024 __le16 scd_ssn; 2024 __le16 scd_ssn;
2025 /* following only for 5000 series and up */
2026 u8 txed; /* number of frames sent */
2027 u8 txed_2_done; /* number of frames acked */
2025} __packed; 2028} __packed;
2026 2029
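With the two fields appended to the compressed block-ack response on 5000-series and newer devices, the driver can relate frames transmitted to frames acknowledged. A hedged sketch of how such a ratio might be computed; the struct below is a simplified stand-in, not the full iwl_compressed_ba_resp layout:

/* Illustrative only: derive an aggregation success ratio from the two
 * new counters. */
#include <stdint.h>
#include <stdio.h>

struct ba_resp { uint8_t txed; uint8_t txed_2_done; };

static unsigned int ba_success_pct(const struct ba_resp *ba)
{
	if (!ba->txed)
		return 0;
	return (100u * ba->txed_2_done) / ba->txed;
}

int main(void)
{
	struct ba_resp ba = { .txed = 64, .txed_2_done = 60 };
	printf("BA success: %u%%\n", ba_success_pct(&ba));
	return 0;
}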
2027/* 2030/*
@@ -2407,9 +2410,9 @@ struct iwl_link_quality_cmd {
2407#define BT_FRAG_THRESHOLD_MAX 0 2410#define BT_FRAG_THRESHOLD_MAX 0
2408#define BT_FRAG_THRESHOLD_MIN 0 2411#define BT_FRAG_THRESHOLD_MIN 0
2409 2412
2410#define BT_AGG_THRESHOLD_DEF 0 2413#define BT_AGG_THRESHOLD_DEF 1200
2411#define BT_AGG_THRESHOLD_MAX 0 2414#define BT_AGG_THRESHOLD_MAX 8000
2412#define BT_AGG_THRESHOLD_MIN 0 2415#define BT_AGG_THRESHOLD_MIN 400
2413 2416
2414/* 2417/*
2415 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2418 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
@@ -2436,8 +2439,9 @@ struct iwl_bt_cmd {
2436#define IWLAGN_BT_FLAG_COEX_MODE_3W 2 2439#define IWLAGN_BT_FLAG_COEX_MODE_3W 2
2437#define IWLAGN_BT_FLAG_COEX_MODE_4W 3 2440#define IWLAGN_BT_FLAG_COEX_MODE_4W 3
2438 2441
2439#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6) 2442#define IWLAGN_BT_FLAG_UCODE_DEFAULT BIT(6)
2440#define IWLAGN_BT_FLAG_NOCOEX_NOTIF BIT(7) 2443/* Disable Sync PSPoll on SCO/eSCO */
2444#define IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE BIT(7)
2441 2445
2442#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF 2446#define IWLAGN_BT_PRIO_BOOST_MAX 0xFF
2443#define IWLAGN_BT_PRIO_BOOST_MIN 0x00 2447#define IWLAGN_BT_PRIO_BOOST_MIN 0x00
@@ -2447,8 +2451,9 @@ struct iwl_bt_cmd {
2447 2451
2448#define IWLAGN_BT3_T7_DEFAULT 1 2452#define IWLAGN_BT3_T7_DEFAULT 1
2449 2453
2450#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffffffff) 2454#define IWLAGN_BT_KILL_ACK_MASK_DEFAULT cpu_to_le32(0xffff0000)
2451#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffffffff) 2455#define IWLAGN_BT_KILL_CTS_MASK_DEFAULT cpu_to_le32(0xffff0000)
2456#define IWLAGN_BT_KILL_ACK_CTS_MASK_SCO cpu_to_le32(0xffffffff)
2452 2457
2453#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2 2458#define IWLAGN_BT3_PRIO_SAMPLE_DEFAULT 2
2454 2459
@@ -2664,9 +2669,16 @@ struct iwl_spectrum_notification {
2664#define IWL_POWER_VEC_SIZE 5 2669#define IWL_POWER_VEC_SIZE 5
2665 2670
2666#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0)) 2671#define IWL_POWER_DRIVER_ALLOW_SLEEP_MSK cpu_to_le16(BIT(0))
2672#define IWL_POWER_POWER_SAVE_ENA_MSK cpu_to_le16(BIT(0))
2673#define IWL_POWER_POWER_MANAGEMENT_ENA_MSK cpu_to_le16(BIT(1))
2667#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2)) 2674#define IWL_POWER_SLEEP_OVER_DTIM_MSK cpu_to_le16(BIT(2))
2668#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3)) 2675#define IWL_POWER_PCI_PM_MSK cpu_to_le16(BIT(3))
2669#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4)) 2676#define IWL_POWER_FAST_PD cpu_to_le16(BIT(4))
2677#define IWL_POWER_BEACON_FILTERING cpu_to_le16(BIT(5))
2678#define IWL_POWER_SHADOW_REG_ENA cpu_to_le16(BIT(6))
2679#define IWL_POWER_CT_KILL_SET cpu_to_le16(BIT(7))
2680#define IWL_POWER_BT_SCO_ENA cpu_to_le16(BIT(8))
2681#define IWL_POWER_ADVANCE_PM_ENA_MSK cpu_to_le16(BIT(9))
2670 2682
2671struct iwl3945_powertable_cmd { 2683struct iwl3945_powertable_cmd {
2672 __le16 flags; 2684 __le16 flags;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 8700ab38d599..efbde1f1a8bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -77,15 +77,15 @@ EXPORT_SYMBOL(iwl_bcast_addr);
77 77
78 78
79/* This function both allocates and initializes hw and priv. */ 79/* This function both allocates and initializes hw and priv. */
80struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 80struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg)
81 struct ieee80211_ops *hw_ops)
82{ 81{
83 struct iwl_priv *priv; 82 struct iwl_priv *priv;
84
85 /* mac80211 allocates memory for this device instance, including 83 /* mac80211 allocates memory for this device instance, including
86 * space for this driver's private structure */ 84 * space for this driver's private structure */
87 struct ieee80211_hw *hw = 85 struct ieee80211_hw *hw;
88 ieee80211_alloc_hw(sizeof(struct iwl_priv), hw_ops); 86
87 hw = ieee80211_alloc_hw(sizeof(struct iwl_priv),
88 cfg->ops->ieee80211_ops);
89 if (hw == NULL) { 89 if (hw == NULL) {
90 pr_err("%s: Can not allocate network device\n", 90 pr_err("%s: Can not allocate network device\n",
91 cfg->name); 91 cfg->name);
@@ -100,35 +100,6 @@ out:
100} 100}
101EXPORT_SYMBOL(iwl_alloc_all); 101EXPORT_SYMBOL(iwl_alloc_all);
102 102
103/*
104 * QoS support
105*/
106static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
107{
108 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
109 return;
110
111 if (!ctx->is_active)
112 return;
113
114 ctx->qos_data.def_qos_parm.qos_flags = 0;
115
116 if (ctx->qos_data.qos_active)
117 ctx->qos_data.def_qos_parm.qos_flags |=
118 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
119
120 if (ctx->ht.enabled)
121 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
122
123 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
124 ctx->qos_data.qos_active,
125 ctx->qos_data.def_qos_parm.qos_flags);
126
127 iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
128 sizeof(struct iwl_qosparam_cmd),
129 &ctx->qos_data.def_qos_parm, NULL);
130}
131
132#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */ 103#define MAX_BIT_RATE_40_MHZ 150 /* Mbps */
133#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */ 104#define MAX_BIT_RATE_20_MHZ 72 /* Mbps */
134static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv, 105static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
@@ -317,40 +288,6 @@ void iwlcore_free_geos(struct iwl_priv *priv)
317} 288}
318EXPORT_SYMBOL(iwlcore_free_geos); 289EXPORT_SYMBOL(iwlcore_free_geos);
319 290
320/*
321 * iwlcore_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
322 * function.
323 */
324void iwlcore_tx_cmd_protection(struct iwl_priv *priv,
325 struct ieee80211_tx_info *info,
326 __le16 fc, __le32 *tx_flags)
327{
328 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
329 *tx_flags |= TX_CMD_FLG_RTS_MSK;
330 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
331 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
332
333 if (!ieee80211_is_mgmt(fc))
334 return;
335
336 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
337 case cpu_to_le16(IEEE80211_STYPE_AUTH):
338 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
339 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
340 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
341 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
342 *tx_flags |= TX_CMD_FLG_CTS_MSK;
343 break;
344 }
345 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
346 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
347 *tx_flags |= TX_CMD_FLG_CTS_MSK;
348 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
349 }
350}
351EXPORT_SYMBOL(iwlcore_tx_cmd_protection);
352
353
354static bool iwl_is_channel_extension(struct iwl_priv *priv, 291static bool iwl_is_channel_extension(struct iwl_priv *priv,
355 enum ieee80211_band band, 292 enum ieee80211_band band,
356 u16 channel, u8 extension_chan_offset) 293 u16 channel, u8 extension_chan_offset)
@@ -1020,6 +957,22 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1020 /* Cancel currently queued command. */ 957 /* Cancel currently queued command. */
1021 clear_bit(STATUS_HCMD_ACTIVE, &priv->status); 958 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1022 959
960 /* W/A for WiFi/WiMAX coex and WiMAX own the RF */
961 if (priv->cfg->internal_wimax_coex &&
962 (!(iwl_read_prph(priv, APMG_CLK_CTRL_REG) &
963 APMS_CLK_VAL_MRB_FUNC_MODE) ||
964 (iwl_read_prph(priv, APMG_PS_CTRL_REG) &
965 APMG_PS_CTRL_VAL_RESET_REQ))) {
966 wake_up_interruptible(&priv->wait_command_queue);
967 /*
968 * Keep the restart process from trying to send host
969 * commands by clearing the INIT status bit
970 */
971 clear_bit(STATUS_READY, &priv->status);
972 IWL_ERR(priv, "RF is used by WiMAX\n");
973 return;
974 }
975
1023 IWL_ERR(priv, "Loaded firmware version: %s\n", 976 IWL_ERR(priv, "Loaded firmware version: %s\n",
1024 priv->hw->wiphy->fw_version); 977 priv->hw->wiphy->fw_version);
1025 978
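The added workaround skips firmware restart when an internal WiFi/WiMAX coexistence scheme is active and the APMG registers indicate WiMAX currently owns the radio. A rough model of that check; the bit masks below are placeholders for the real APMG definitions:

/* Sketch, assumptions noted: register values would come from PRPH reads. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLK_MRB_FUNC_MODE (1u << 3)
#define PS_RESET_REQ      (1u << 26)

static bool wimax_owns_rf(bool internal_coex, uint32_t clk_reg, uint32_t ps_reg)
{
	return internal_coex &&
	       (!(clk_reg & CLK_MRB_FUNC_MODE) || (ps_reg & PS_RESET_REQ));
}

int main(void)
{
	if (wimax_owns_rf(true, 0, PS_RESET_REQ))
		printf("RF is used by WiMAX: skip restart\n");
	return 0;
}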
@@ -1206,8 +1159,16 @@ EXPORT_SYMBOL(iwl_apm_init);
1206 1159
1207int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 1160int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1208{ 1161{
1209 int ret = 0; 1162 int ret;
1210 s8 prev_tx_power = priv->tx_power_user_lmt; 1163 s8 prev_tx_power;
1164
1165 lockdep_assert_held(&priv->mutex);
1166
1167 if (priv->tx_power_user_lmt == tx_power && !force)
1168 return 0;
1169
1170 if (!priv->cfg->ops->lib->send_tx_power)
1171 return -EOPNOTSUPP;
1211 1172
1212 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) { 1173 if (tx_power < IWLAGN_TX_POWER_TARGET_POWER_MIN) {
1213 IWL_WARN(priv, 1174 IWL_WARN(priv,
@@ -1224,93 +1185,29 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1224 return -EINVAL; 1185 return -EINVAL;
1225 } 1186 }
1226 1187
1227 if (priv->tx_power_user_lmt != tx_power) 1188 if (!iwl_is_ready_rf(priv))
1228 force = true; 1189 return -EIO;
1229 1190
1230 /* if nic is not up don't send command */ 1191 /* scan completion uses tx_power_next, so keep it updated */
1231 if (iwl_is_ready_rf(priv)) { 1192 priv->tx_power_next = tx_power;
1232 priv->tx_power_user_lmt = tx_power; 1193 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
1233 if (force && priv->cfg->ops->lib->send_tx_power) 1194 IWL_DEBUG_INFO(priv, "Deferring tx power set while scanning\n");
1234 ret = priv->cfg->ops->lib->send_tx_power(priv); 1195 return 0;
1235 else if (!priv->cfg->ops->lib->send_tx_power)
1236 ret = -EOPNOTSUPP;
1237 /*
1238 * if fail to set tx_power, restore the orig. tx power
1239 */
1240 if (ret)
1241 priv->tx_power_user_lmt = prev_tx_power;
1242 } 1196 }
1243 1197
1244 /* 1198 prev_tx_power = priv->tx_power_user_lmt;
1245 * Even this is an async host command, the command 1199 priv->tx_power_user_lmt = tx_power;
1246 * will always report success from uCode
1247 * So once driver can placing the command into the queue
1248 * successfully, driver can use priv->tx_power_user_lmt
1249 * to reflect the current tx power
1250 */
1251 return ret;
1252}
1253EXPORT_SYMBOL(iwl_set_tx_power);
1254 1200
1255irqreturn_t iwl_isr_legacy(int irq, void *data) 1201 ret = priv->cfg->ops->lib->send_tx_power(priv);
1256{
1257 struct iwl_priv *priv = data;
1258 u32 inta, inta_mask;
1259 u32 inta_fh;
1260 unsigned long flags;
1261 if (!priv)
1262 return IRQ_NONE;
1263
1264 spin_lock_irqsave(&priv->lock, flags);
1265 1202
1266 /* Disable (but don't clear!) interrupts here to avoid 1203 /* if fail to set tx_power, restore the orig. tx power */
1267 * back-to-back ISRs and sporadic interrupts from our NIC. 1204 if (ret) {
1268 * If we have something to service, the tasklet will re-enable ints. 1205 priv->tx_power_user_lmt = prev_tx_power;
1269 * If we *don't* have something, we'll re-enable before leaving here. */ 1206 priv->tx_power_next = prev_tx_power;
1270 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
1271 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
1272
1273 /* Discover which interrupts are active/pending */
1274 inta = iwl_read32(priv, CSR_INT);
1275 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
1276
1277 /* Ignore interrupt if there's nothing in NIC to service.
1278 * This may be due to IRQ shared with another device,
1279 * or due to sporadic interrupts thrown from our NIC. */
1280 if (!inta && !inta_fh) {
1281 IWL_DEBUG_ISR(priv, "Ignore interrupt, inta == 0, inta_fh == 0\n");
1282 goto none;
1283 } 1207 }
1284 1208 return ret;
1285 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1286 /* Hardware disappeared. It might have already raised
1287 * an interrupt */
1288 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1289 goto unplugged;
1290 }
1291
1292 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
1293 inta, inta_mask, inta_fh);
1294
1295 inta &= ~CSR_INT_BIT_SCD;
1296
1297 /* iwl_irq_tasklet() will service interrupts and re-enable them */
1298 if (likely(inta || inta_fh))
1299 tasklet_schedule(&priv->irq_tasklet);
1300
1301 unplugged:
1302 spin_unlock_irqrestore(&priv->lock, flags);
1303 return IRQ_HANDLED;
1304
1305 none:
1306 /* re-enable interrupts here since we don't have anything to service. */
1307 /* only Re-enable if disabled by irq */
1308 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1309 iwl_enable_interrupts(priv);
1310 spin_unlock_irqrestore(&priv->lock, flags);
1311 return IRQ_NONE;
1312} 1209}
1313EXPORT_SYMBOL(iwl_isr_legacy); 1210EXPORT_SYMBOL(iwl_set_tx_power);
1314 1211
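The rewritten iwl_set_tx_power() parks the requested value in tx_power_next and defers the hardware command while a scan is running, applying it immediately otherwise and rolling both limits back if the command fails. A self-contained sketch of that defer-and-rollback flow (send_tx_power() below is a stand-in for the driver callback):

/* Illustrative model of the defer-and-rollback logic, not driver code. */
#include <stdbool.h>
#include <stdio.h>

struct dev { int tx_power_user_lmt, tx_power_next; bool scanning; };

static int send_tx_power(const struct dev *d)
{
	(void)d;
	return 0; /* pretend the uCode command succeeded */
}

static int set_tx_power(struct dev *d, int tx_power, bool force)
{
	int prev, ret;

	if (d->tx_power_user_lmt == tx_power && !force)
		return 0;

	d->tx_power_next = tx_power;             /* picked up after scan completes */
	if (d->scanning && !force)
		return 0;                        /* defer while scanning */

	prev = d->tx_power_user_lmt;
	d->tx_power_user_lmt = tx_power;
	ret = send_tx_power(d);
	if (ret) {                               /* restore both limits on failure */
		d->tx_power_user_lmt = prev;
		d->tx_power_next = prev;
	}
	return ret;
}

int main(void)
{
	struct dev d = { 14, 14, true };
	printf("deferred: %d\n", set_tx_power(&d, 10, false));
	d.scanning = false;
	printf("applied:  %d (lmt=%d)\n", set_tx_power(&d, 10, false), d.tx_power_user_lmt);
	return 0;
}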
1315void iwl_send_bt_config(struct iwl_priv *priv) 1212void iwl_send_bt_config(struct iwl_priv *priv)
1316{ 1213{
@@ -1326,6 +1223,7 @@ void iwl_send_bt_config(struct iwl_priv *priv)
1326 else 1223 else
1327 bt_cmd.flags = BT_COEX_ENABLE; 1224 bt_cmd.flags = BT_COEX_ENABLE;
1328 1225
1226 priv->bt_enable_flag = bt_cmd.flags;
1329 IWL_DEBUG_INFO(priv, "BT coex %s\n", 1227 IWL_DEBUG_INFO(priv, "BT coex %s\n",
1330 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active"); 1228 (bt_cmd.flags == BT_COEX_DISABLE) ? "disable" : "active");
1331 1229
@@ -1452,318 +1350,51 @@ int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw)
1452} 1350}
1453EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon); 1351EXPORT_SYMBOL_GPL(iwl_mac_tx_last_beacon);
1454 1352
1455static void iwl_ht_conf(struct iwl_priv *priv, 1353static int iwl_set_mode(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1456 struct ieee80211_vif *vif)
1457{ 1354{
1458 struct iwl_ht_config *ht_conf = &priv->current_ht_config; 1355 iwl_connection_init_rx_config(priv, ctx);
1459 struct ieee80211_sta *sta;
1460 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
1461 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1462
1463 IWL_DEBUG_MAC80211(priv, "enter:\n");
1464
1465 if (!ctx->ht.enabled)
1466 return;
1467
1468 ctx->ht.protection =
1469 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
1470 ctx->ht.non_gf_sta_present =
1471 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
1472
1473 ht_conf->single_chain_sufficient = false;
1474
1475 switch (vif->type) {
1476 case NL80211_IFTYPE_STATION:
1477 rcu_read_lock();
1478 sta = ieee80211_find_sta(vif, bss_conf->bssid);
1479 if (sta) {
1480 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
1481 int maxstreams;
1482
1483 maxstreams = (ht_cap->mcs.tx_params &
1484 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
1485 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
1486 maxstreams += 1;
1487
1488 if ((ht_cap->mcs.rx_mask[1] == 0) &&
1489 (ht_cap->mcs.rx_mask[2] == 0))
1490 ht_conf->single_chain_sufficient = true;
1491 if (maxstreams <= 1)
1492 ht_conf->single_chain_sufficient = true;
1493 } else {
1494 /*
1495 * If at all, this can only happen through a race
1496 * when the AP disconnects us while we're still
1497 * setting up the connection, in that case mac80211
1498 * will soon tell us about that.
1499 */
1500 ht_conf->single_chain_sufficient = true;
1501 }
1502 rcu_read_unlock();
1503 break;
1504 case NL80211_IFTYPE_ADHOC:
1505 ht_conf->single_chain_sufficient = true;
1506 break;
1507 default:
1508 break;
1509 }
1510
1511 IWL_DEBUG_MAC80211(priv, "leave\n");
1512}
1513 1356
1514static inline void iwl_set_no_assoc(struct iwl_priv *priv, 1357 if (priv->cfg->ops->hcmd->set_rxon_chain)
1515 struct ieee80211_vif *vif) 1358 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1516{
1517 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1518 1359
1519 iwl_led_disassociate(priv); 1360 return iwlcore_commit_rxon(priv, ctx);
1520 /*
1521 * inform the ucode that there is no longer an
1522 * association and that no more packets should be
1523 * sent
1524 */
1525 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
1526 ctx->staging.assoc_id = 0;
1527 iwlcore_commit_rxon(priv, ctx);
1528} 1361}
1529 1362
1530static void iwlcore_beacon_update(struct ieee80211_hw *hw, 1363static int iwl_setup_interface(struct iwl_priv *priv,
1531 struct ieee80211_vif *vif) 1364 struct iwl_rxon_context *ctx)
1532{ 1365{
1533 struct iwl_priv *priv = hw->priv; 1366 struct ieee80211_vif *vif = ctx->vif;
1534 unsigned long flags; 1367 int err;
1535 __le64 timestamp;
1536 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
1537
1538 if (!skb)
1539 return;
1540
1541 IWL_DEBUG_ASSOC(priv, "enter\n");
1542 1368
1543 lockdep_assert_held(&priv->mutex); 1369 lockdep_assert_held(&priv->mutex);
1544 1370
1545 if (!priv->beacon_ctx) {
1546 IWL_ERR(priv, "update beacon but no beacon context!\n");
1547 dev_kfree_skb(skb);
1548 return;
1549 }
1550
1551 spin_lock_irqsave(&priv->lock, flags);
1552
1553 if (priv->beacon_skb)
1554 dev_kfree_skb(priv->beacon_skb);
1555
1556 priv->beacon_skb = skb;
1557
1558 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
1559 priv->timestamp = le64_to_cpu(timestamp);
1560
1561 IWL_DEBUG_ASSOC(priv, "leave\n");
1562
1563 spin_unlock_irqrestore(&priv->lock, flags);
1564
1565 if (!iwl_is_ready_rf(priv)) {
1566 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
1567 return;
1568 }
1569
1570 priv->cfg->ops->lib->post_associate(priv, priv->beacon_ctx->vif);
1571}
1572
1573void iwl_bss_info_changed(struct ieee80211_hw *hw,
1574 struct ieee80211_vif *vif,
1575 struct ieee80211_bss_conf *bss_conf,
1576 u32 changes)
1577{
1578 struct iwl_priv *priv = hw->priv;
1579 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1580 int ret;
1581
1582 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
1583
1584 if (!iwl_is_alive(priv))
1585 return;
1586
1587 mutex_lock(&priv->mutex);
1588
1589 if (changes & BSS_CHANGED_QOS) {
1590 unsigned long flags;
1591
1592 spin_lock_irqsave(&priv->lock, flags);
1593 ctx->qos_data.qos_active = bss_conf->qos;
1594 iwl_update_qos(priv, ctx);
1595 spin_unlock_irqrestore(&priv->lock, flags);
1596 }
1597
1598 if (changes & BSS_CHANGED_BEACON_ENABLED) {
1599 /*
1600 * the add_interface code must make sure we only ever
1601 * have a single interface that could be beaconing at
1602 * any time.
1603 */
1604 if (vif->bss_conf.enable_beacon)
1605 priv->beacon_ctx = ctx;
1606 else
1607 priv->beacon_ctx = NULL;
1608 }
1609
1610 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
1611 dev_kfree_skb(priv->beacon_skb);
1612 priv->beacon_skb = ieee80211_beacon_get(hw, vif);
1613 }
1614
1615 if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
1616 iwl_send_rxon_timing(priv, ctx);
1617
1618 if (changes & BSS_CHANGED_BSSID) {
1619 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
1620
1621 /*
1622 * If there is currently a HW scan going on in the
1623 * background then we need to cancel it else the RXON
1624 * below/in post_associate will fail.
1625 */
1626 if (iwl_scan_cancel_timeout(priv, 100)) {
1627 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
1628 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
1629 mutex_unlock(&priv->mutex);
1630 return;
1631 }
1632
1633 /* mac80211 only sets assoc when in STATION mode */
1634 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
1635 memcpy(ctx->staging.bssid_addr,
1636 bss_conf->bssid, ETH_ALEN);
1637
1638 /* currently needed in a few places */
1639 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
1640 } else {
1641 ctx->staging.filter_flags &=
1642 ~RXON_FILTER_ASSOC_MSK;
1643 }
1644
1645 }
1646
1647 /* 1371 /*
1648 * This needs to be after setting the BSSID in case 1372 * This variable will be correct only when there's just
1649 * mac80211 decides to do both changes at once because 1373 * a single context, but all code using it is for hardware
1650 * it will invoke post_associate. 1374 * that supports only one context.
1651 */ 1375 */
1652 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON) 1376 priv->iw_mode = vif->type;
1653 iwlcore_beacon_update(hw, vif);
1654
1655 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
1656 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
1657 bss_conf->use_short_preamble);
1658 if (bss_conf->use_short_preamble)
1659 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
1660 else
1661 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
1662 }
1663
1664 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
1665 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
1666 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
1667 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
1668 else
1669 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1670 if (bss_conf->use_cts_prot)
1671 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1672 else
1673 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
1674 }
1675
1676 if (changes & BSS_CHANGED_BASIC_RATES) {
1677 /* XXX use this information
1678 *
1679 * To do that, remove code from iwl_set_rate() and put something
1680 * like this here:
1681 *
1682 if (A-band)
1683 ctx->staging.ofdm_basic_rates =
1684 bss_conf->basic_rates;
1685 else
1686 ctx->staging.ofdm_basic_rates =
1687 bss_conf->basic_rates >> 4;
1688 ctx->staging.cck_basic_rates =
1689 bss_conf->basic_rates & 0xF;
1690 */
1691 }
1692
1693 if (changes & BSS_CHANGED_HT) {
1694 iwl_ht_conf(priv, vif);
1695
1696 if (priv->cfg->ops->hcmd->set_rxon_chain)
1697 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1698 }
1699
1700 if (changes & BSS_CHANGED_ASSOC) {
1701 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
1702 if (bss_conf->assoc) {
1703 priv->timestamp = bss_conf->timestamp;
1704
1705 iwl_led_associate(priv);
1706
1707 if (!iwl_is_rfkill(priv))
1708 priv->cfg->ops->lib->post_associate(priv, vif);
1709 } else
1710 iwl_set_no_assoc(priv, vif);
1711 }
1712
1713 if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
1714 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
1715 changes);
1716 ret = iwl_send_rxon_assoc(priv, ctx);
1717 if (!ret) {
1718 /* Sync active_rxon with latest change. */
1719 memcpy((void *)&ctx->active,
1720 &ctx->staging,
1721 sizeof(struct iwl_rxon_cmd));
1722 }
1723 }
1724 1377
1725 if (changes & BSS_CHANGED_BEACON_ENABLED) { 1378 ctx->is_active = true;
1726 if (vif->bss_conf.enable_beacon) {
1727 memcpy(ctx->staging.bssid_addr,
1728 bss_conf->bssid, ETH_ALEN);
1729 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
1730 iwl_led_associate(priv);
1731 iwlcore_config_ap(priv, vif);
1732 } else
1733 iwl_set_no_assoc(priv, vif);
1734 }
1735 1379
1736 if (changes & BSS_CHANGED_IBSS) { 1380 err = iwl_set_mode(priv, ctx);
1737 ret = priv->cfg->ops->lib->manage_ibss_station(priv, vif, 1381 if (err) {
1738 bss_conf->ibss_joined); 1382 if (!ctx->always_active)
1739 if (ret) 1383 ctx->is_active = false;
1740 IWL_ERR(priv, "failed to %s IBSS station %pM\n", 1384 return err;
1741 bss_conf->ibss_joined ? "add" : "remove",
1742 bss_conf->bssid);
1743 } 1385 }
1744 1386
1745 if (changes & BSS_CHANGED_IDLE && 1387 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist &&
1746 priv->cfg->ops->hcmd->set_pan_params) { 1388 vif->type == NL80211_IFTYPE_ADHOC) {
1747 if (priv->cfg->ops->hcmd->set_pan_params(priv)) 1389 /*
1748 IWL_ERR(priv, "failed to update PAN params\n"); 1390 * pretend to have high BT traffic as long as we
1391 * are operating in IBSS mode, as this will cause
1392 * the rate scaling etc. to behave as intended.
1393 */
1394 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1749 } 1395 }
1750 1396
1751 mutex_unlock(&priv->mutex); 1397 return 0;
1752
1753 IWL_DEBUG_MAC80211(priv, "leave\n");
1754}
1755EXPORT_SYMBOL(iwl_bss_info_changed);
1756
1757static int iwl_set_mode(struct iwl_priv *priv, struct ieee80211_vif *vif)
1758{
1759 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1760
1761 iwl_connection_init_rx_config(priv, ctx);
1762
1763 if (priv->cfg->ops->hcmd->set_rxon_chain)
1764 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1765
1766 return iwlcore_commit_rxon(priv, ctx);
1767} 1398}
1768 1399
1769int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 1400int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
@@ -1771,7 +1402,7 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1771 struct iwl_priv *priv = hw->priv; 1402 struct iwl_priv *priv = hw->priv;
1772 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv; 1403 struct iwl_vif_priv *vif_priv = (void *)vif->drv_priv;
1773 struct iwl_rxon_context *tmp, *ctx = NULL; 1404 struct iwl_rxon_context *tmp, *ctx = NULL;
1774 int err = 0; 1405 int err;
1775 1406
1776 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n", 1407 IWL_DEBUG_MAC80211(priv, "enter: type %d, addr %pM\n",
1777 vif->type, vif->addr); 1408 vif->type, vif->addr);
@@ -1813,36 +1444,11 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1813 1444
1814 vif_priv->ctx = ctx; 1445 vif_priv->ctx = ctx;
1815 ctx->vif = vif; 1446 ctx->vif = vif;
1816 /*
1817 * This variable will be correct only when there's just
1818 * a single context, but all code using it is for hardware
1819 * that supports only one context.
1820 */
1821 priv->iw_mode = vif->type;
1822
1823 ctx->is_active = true;
1824 1447
1825 err = iwl_set_mode(priv, vif); 1448 err = iwl_setup_interface(priv, ctx);
1826 if (err) { 1449 if (!err)
1827 if (!ctx->always_active) 1450 goto out;
1828 ctx->is_active = false;
1829 goto out_err;
1830 }
1831
1832 if (priv->cfg->bt_params &&
1833 priv->cfg->bt_params->advanced_bt_coexist &&
1834 vif->type == NL80211_IFTYPE_ADHOC) {
1835 /*
1836 * pretend to have high BT traffic as long as we
1837 * are operating in IBSS mode, as this will cause
1838 * the rate scaling etc. to behave as intended.
1839 */
1840 priv->bt_traffic_load = IWL_BT_COEX_TRAFFIC_LOAD_HIGH;
1841 }
1842
1843 goto out;
1844 1451
1845 out_err:
1846 ctx->vif = NULL; 1452 ctx->vif = NULL;
1847 priv->iw_mode = NL80211_IFTYPE_STATION; 1453 priv->iw_mode = NL80211_IFTYPE_STATION;
1848 out: 1454 out:
@@ -1853,27 +1459,24 @@ int iwl_mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
1853} 1459}
1854EXPORT_SYMBOL(iwl_mac_add_interface); 1460EXPORT_SYMBOL(iwl_mac_add_interface);
1855 1461
1856void iwl_mac_remove_interface(struct ieee80211_hw *hw, 1462static void iwl_teardown_interface(struct iwl_priv *priv,
1857 struct ieee80211_vif *vif) 1463 struct ieee80211_vif *vif,
1464 bool mode_change)
1858{ 1465{
1859 struct iwl_priv *priv = hw->priv;
1860 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif); 1466 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
1861 1467
1862 IWL_DEBUG_MAC80211(priv, "enter\n"); 1468 lockdep_assert_held(&priv->mutex);
1863
1864 mutex_lock(&priv->mutex);
1865
1866 WARN_ON(ctx->vif != vif);
1867 ctx->vif = NULL;
1868 1469
1869 if (priv->scan_vif == vif) { 1470 if (priv->scan_vif == vif) {
1870 iwl_scan_cancel_timeout(priv, 200); 1471 iwl_scan_cancel_timeout(priv, 200);
1871 iwl_force_scan_end(priv); 1472 iwl_force_scan_end(priv);
1872 } 1473 }
1873 iwl_set_mode(priv, vif);
1874 1474
1875 if (!ctx->always_active) 1475 if (!mode_change) {
1876 ctx->is_active = false; 1476 iwl_set_mode(priv, ctx);
1477 if (!ctx->always_active)
1478 ctx->is_active = false;
1479 }
1877 1480
1878 /* 1481 /*
1879 * When removing the IBSS interface, overwrite the 1482 * When removing the IBSS interface, overwrite the
@@ -1883,211 +1486,31 @@ void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1883 * both values are the same and zero. 1486 * both values are the same and zero.
1884 */ 1487 */
1885 if (vif->type == NL80211_IFTYPE_ADHOC) 1488 if (vif->type == NL80211_IFTYPE_ADHOC)
1886 priv->bt_traffic_load = priv->notif_bt_traffic_load; 1489 priv->bt_traffic_load = priv->last_bt_traffic_load;
1887
1888 memset(priv->bssid, 0, ETH_ALEN);
1889 mutex_unlock(&priv->mutex);
1890
1891 IWL_DEBUG_MAC80211(priv, "leave\n");
1892
1893}
1894EXPORT_SYMBOL(iwl_mac_remove_interface);
1895
1896/**
1897 * iwl_mac_config - mac80211 config callback
1898 */
1899int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
1900{
1901 struct iwl_priv *priv = hw->priv;
1902 const struct iwl_channel_info *ch_info;
1903 struct ieee80211_conf *conf = &hw->conf;
1904 struct ieee80211_channel *channel = conf->channel;
1905 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
1906 struct iwl_rxon_context *ctx;
1907 unsigned long flags = 0;
1908 int ret = 0;
1909 u16 ch;
1910 int scan_active = 0;
1911
1912 mutex_lock(&priv->mutex);
1913
1914 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
1915 channel->hw_value, changed);
1916
1917 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
1918 test_bit(STATUS_SCANNING, &priv->status))) {
1919 scan_active = 1;
1920 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
1921 }
1922
1923 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
1924 IEEE80211_CONF_CHANGE_CHANNEL)) {
1925 /* mac80211 uses static for non-HT which is what we want */
1926 priv->current_ht_config.smps = conf->smps_mode;
1927
1928 /*
1929 * Recalculate chain counts.
1930 *
1931 * If monitor mode is enabled then mac80211 will
1932 * set up the SM PS mode to OFF if an HT channel is
1933 * configured.
1934 */
1935 if (priv->cfg->ops->hcmd->set_rxon_chain)
1936 for_each_context(priv, ctx)
1937 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
1938 }
1939
1940 /* during scanning mac80211 will delay channel setting until
1941 * scan finish with changed = 0
1942 */
1943 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
1944 if (scan_active)
1945 goto set_ch_out;
1946
1947 ch = channel->hw_value;
1948 ch_info = iwl_get_channel_info(priv, channel->band, ch);
1949 if (!is_channel_valid(ch_info)) {
1950 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
1951 ret = -EINVAL;
1952 goto set_ch_out;
1953 }
1954
1955 spin_lock_irqsave(&priv->lock, flags);
1956
1957 for_each_context(priv, ctx) {
1958 /* Configure HT40 channels */
1959 ctx->ht.enabled = conf_is_ht(conf);
1960 if (ctx->ht.enabled) {
1961 if (conf_is_ht40_minus(conf)) {
1962 ctx->ht.extension_chan_offset =
1963 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
1964 ctx->ht.is_40mhz = true;
1965 } else if (conf_is_ht40_plus(conf)) {
1966 ctx->ht.extension_chan_offset =
1967 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
1968 ctx->ht.is_40mhz = true;
1969 } else {
1970 ctx->ht.extension_chan_offset =
1971 IEEE80211_HT_PARAM_CHA_SEC_NONE;
1972 ctx->ht.is_40mhz = false;
1973 }
1974 } else
1975 ctx->ht.is_40mhz = false;
1976
1977 /*
1978 * Default to no protection. Protection mode will
1979 * later be set from BSS config in iwl_ht_conf
1980 */
1981 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
1982
1983 /* if we are switching from ht to 2.4 clear flags
1984 * from any ht related info since 2.4 does not
1985 * support ht */
1986 if ((le16_to_cpu(ctx->staging.channel) != ch))
1987 ctx->staging.flags = 0;
1988
1989 iwl_set_rxon_channel(priv, channel, ctx);
1990 iwl_set_rxon_ht(priv, ht_conf);
1991
1992 iwl_set_flags_for_band(priv, ctx, channel->band,
1993 ctx->vif);
1994 }
1995
1996 spin_unlock_irqrestore(&priv->lock, flags);
1997
1998 if (priv->cfg->ops->lib->update_bcast_stations)
1999 ret = priv->cfg->ops->lib->update_bcast_stations(priv);
2000
2001 set_ch_out:
2002 /* The list of supported rates and rate mask can be different
2003 * for each band; since the band may have changed, reset
2004 * the rate mask to what mac80211 lists */
2005 iwl_set_rate(priv);
2006 }
2007
2008 if (changed & (IEEE80211_CONF_CHANGE_PS |
2009 IEEE80211_CONF_CHANGE_IDLE)) {
2010 ret = iwl_power_update_mode(priv, false);
2011 if (ret)
2012 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
2013 }
2014
2015 if (changed & IEEE80211_CONF_CHANGE_POWER) {
2016 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
2017 priv->tx_power_user_lmt, conf->power_level);
2018
2019 iwl_set_tx_power(priv, conf->power_level, false);
2020 }
2021
2022 if (!iwl_is_ready(priv)) {
2023 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2024 goto out;
2025 }
2026
2027 if (scan_active)
2028 goto out;
2029
2030 for_each_context(priv, ctx) {
2031 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
2032 iwlcore_commit_rxon(priv, ctx);
2033 else
2034 IWL_DEBUG_INFO(priv,
2035 "Not re-sending same RXON configuration.\n");
2036 }
2037
2038out:
2039 IWL_DEBUG_MAC80211(priv, "leave\n");
2040 mutex_unlock(&priv->mutex);
2041 return ret;
2042} 1490}
2043EXPORT_SYMBOL(iwl_mac_config);
2044 1491
2045void iwl_mac_reset_tsf(struct ieee80211_hw *hw) 1492void iwl_mac_remove_interface(struct ieee80211_hw *hw,
1493 struct ieee80211_vif *vif)
2046{ 1494{
2047 struct iwl_priv *priv = hw->priv; 1495 struct iwl_priv *priv = hw->priv;
2048 unsigned long flags; 1496 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
2049 /* IBSS can only be the IWL_RXON_CTX_BSS context */
2050 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
2051 1497
2052 mutex_lock(&priv->mutex);
2053 IWL_DEBUG_MAC80211(priv, "enter\n"); 1498 IWL_DEBUG_MAC80211(priv, "enter\n");
2054 1499
2055 spin_lock_irqsave(&priv->lock, flags); 1500 mutex_lock(&priv->mutex);
2056 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2057 spin_unlock_irqrestore(&priv->lock, flags);
2058
2059 spin_lock_irqsave(&priv->lock, flags);
2060
2061 /* new association get rid of ibss beacon skb */
2062 if (priv->beacon_skb)
2063 dev_kfree_skb(priv->beacon_skb);
2064
2065 priv->beacon_skb = NULL;
2066
2067 priv->timestamp = 0;
2068
2069 spin_unlock_irqrestore(&priv->lock, flags);
2070
2071 iwl_scan_cancel_timeout(priv, 100);
2072 if (!iwl_is_ready_rf(priv)) {
2073 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
2074 mutex_unlock(&priv->mutex);
2075 return;
2076 }
2077 1501
2078 /* we are restarting association process 1502 WARN_ON(ctx->vif != vif);
2079 * clear RXON_FILTER_ASSOC_MSK bit 1503 ctx->vif = NULL;
2080 */
2081 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
2082 iwlcore_commit_rxon(priv, ctx);
2083 1504
2084 iwl_set_rate(priv); 1505 iwl_teardown_interface(priv, vif, false);
2085 1506
1507 memset(priv->bssid, 0, ETH_ALEN);
2086 mutex_unlock(&priv->mutex); 1508 mutex_unlock(&priv->mutex);
2087 1509
2088 IWL_DEBUG_MAC80211(priv, "leave\n"); 1510 IWL_DEBUG_MAC80211(priv, "leave\n");
1511
2089} 1512}
2090EXPORT_SYMBOL(iwl_mac_reset_tsf); 1513EXPORT_SYMBOL(iwl_mac_remove_interface);
2091 1514
2092int iwl_alloc_txq_mem(struct iwl_priv *priv) 1515int iwl_alloc_txq_mem(struct iwl_priv *priv)
2093{ 1516{
@@ -2431,77 +1854,115 @@ int iwl_force_reset(struct iwl_priv *priv, int mode, bool external)
2431 return 0; 1854 return 0;
2432} 1855}
2433 1856
2434/** 1857int iwl_mac_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
2435 * iwl_bg_monitor_recover - Timer callback to check for stuck queue and recover 1858 enum nl80211_iftype newtype, bool newp2p)
2436 * 1859{
2437 * During normal condition (no queue is stuck), the timer is continually set to 1860 struct iwl_priv *priv = hw->priv;
2438 * execute every monitor_recover_period milliseconds after the last timer 1861 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
2439 * expired. When the queue read_ptr is at the same place, the timer is 1862 struct iwl_rxon_context *tmp;
2440 * shorten to 100mSecs. This is 1863 u32 interface_modes;
2441 * 1) to reduce the chance that the read_ptr may wrap around (not stuck) 1864 int err;
2442 * 2) to detect the stuck queues quicker before the station and AP can 1865
2443 * disassociate each other. 1866 newtype = ieee80211_iftype_p2p(newtype, newp2p);
2444 * 1867
2445 * This function monitors all the tx queues and recover from it if any 1868 mutex_lock(&priv->mutex);
2446 * of the queues are stuck. 1869
2447 * 1. It first check the cmd queue for stuck conditions. If it is stuck, 1870 interface_modes = ctx->interface_modes | ctx->exclusive_interface_modes;
2448 * it will recover by resetting the firmware and return. 1871
2449 * 2. Then, it checks for station association. If it associates it will check 1872 if (!(interface_modes & BIT(newtype))) {
2450 * other queues. If any queue is stuck, it will recover by resetting 1873 err = -EBUSY;
2451 * the firmware. 1874 goto out;
2452 * Note: It the number of times the queue read_ptr to be at the same place to 1875 }
2453 * be MAX_REPEAT+1 in order to consider to be stuck. 1876
2454 */ 1877 if (ctx->exclusive_interface_modes & BIT(newtype)) {
1878 for_each_context(priv, tmp) {
1879 if (ctx == tmp)
1880 continue;
1881
1882 if (!tmp->vif)
1883 continue;
1884
1885 /*
1886 * The current mode switch would be exclusive, but
1887 * another context is active ... refuse the switch.
1888 */
1889 err = -EBUSY;
1890 goto out;
1891 }
1892 }
1893
1894 /* success */
1895 iwl_teardown_interface(priv, vif, true);
1896 vif->type = newtype;
1897 err = iwl_setup_interface(priv, ctx);
1898 WARN_ON(err);
1899 /*
1900 * We've switched internally, but submitting to the
1901 * device may have failed for some reason. Mask this
1902 * error, because otherwise mac80211 will not switch
1903 * (and set the interface type back) and we'll be
1904 * out of sync with it.
1905 */
1906 err = 0;
1907
1908 out:
1909 mutex_unlock(&priv->mutex);
1910 return err;
1911}
1912EXPORT_SYMBOL(iwl_mac_change_interface);
1913
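The new iwl_mac_change_interface callback only takes effect once it is wired into the driver's mac80211 callback table, which happens outside this hunk. As a hedged sketch (the ops-struct name is illustrative, not from this patch), the hookup would look roughly like this:

/* Sketch only -- struct name is illustrative; the callbacks are the
 * exported iwl-core entry points shown in this patch. */
static const struct ieee80211_ops iwl_example_hw_ops = {
        .add_interface    = iwl_mac_add_interface,
        .remove_interface = iwl_mac_remove_interface,
        .change_interface = iwl_mac_change_interface,
        /* remaining mac80211 callbacks omitted for brevity */
};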
2455/* 1914/*
2456 * The maximum number of times the read pointer of the tx queue at the 1915 * On every watchdog tick we check the (latest) time stamp. If it does not
2457 * same place without considering to be stuck. 1916 * change during the timeout period and the queue is not empty, we reset the firmware.
2458 */ 1917 */
2459#define MAX_REPEAT (2)
2460static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt) 1918static int iwl_check_stuck_queue(struct iwl_priv *priv, int cnt)
2461{ 1919{
2462 struct iwl_tx_queue *txq; 1920 struct iwl_tx_queue *txq = &priv->txq[cnt];
2463 struct iwl_queue *q; 1921 struct iwl_queue *q = &txq->q;
1922 unsigned long timeout;
1923 int ret;
2464 1924
2465 txq = &priv->txq[cnt]; 1925 if (q->read_ptr == q->write_ptr) {
2466 q = &txq->q; 1926 txq->time_stamp = jiffies;
2467 /* queue is empty, skip */
2468 if (q->read_ptr == q->write_ptr)
2469 return 0; 1927 return 0;
1928 }
2470 1929
2471 if (q->read_ptr == q->last_read_ptr) { 1930 timeout = txq->time_stamp +
2472 /* a queue has not been read from last time */ 1931 msecs_to_jiffies(priv->cfg->base_params->wd_timeout);
2473 if (q->repeat_same_read_ptr > MAX_REPEAT) { 1932
2474 IWL_ERR(priv, 1933 if (time_after(jiffies, timeout)) {
2475 "queue %d stuck %d time. Fw reload.\n", 1934 IWL_ERR(priv, "Queue %d stuck for %u ms.\n",
2476 q->id, q->repeat_same_read_ptr); 1935 q->id, priv->cfg->base_params->wd_timeout);
2477 q->repeat_same_read_ptr = 0; 1936 ret = iwl_force_reset(priv, IWL_FW_RESET, false);
2478 iwl_force_reset(priv, IWL_FW_RESET, false); 1937 return (ret == -EAGAIN) ? 0 : 1;
2479 } else {
2480 q->repeat_same_read_ptr++;
2481 IWL_DEBUG_RADIO(priv,
2482 "queue %d, not read %d time\n",
2483 q->id,
2484 q->repeat_same_read_ptr);
2485 mod_timer(&priv->monitor_recover,
2486 jiffies + msecs_to_jiffies(
2487 IWL_ONE_HUNDRED_MSECS));
2488 return 1;
2489 }
2490 } else {
2491 q->last_read_ptr = q->read_ptr;
2492 q->repeat_same_read_ptr = 0;
2493 } 1938 }
1939
2494 return 0; 1940 return 0;
2495} 1941}
2496 1942
2497void iwl_bg_monitor_recover(unsigned long data) 1943/*
1944 * Making the watchdog tick a quarter of the timeout ensures we will
1945 * discover a hung queue between timeout and 1.25*timeout
1946 */
1947#define IWL_WD_TICK(timeout) ((timeout) / 4)
1948
1949/*
1950 * Watchdog timer callback: we check each tx queue for a stuck condition; if a
1951 * queue is hung we reset the firmware. If everything is fine just rearm the timer.
1952 */
1953void iwl_bg_watchdog(unsigned long data)
2498{ 1954{
2499 struct iwl_priv *priv = (struct iwl_priv *)data; 1955 struct iwl_priv *priv = (struct iwl_priv *)data;
2500 int cnt; 1956 int cnt;
1957 unsigned long timeout;
2501 1958
2502 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 1959 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
2503 return; 1960 return;
2504 1961
1962 timeout = priv->cfg->base_params->wd_timeout;
1963 if (timeout == 0)
1964 return;
1965
2505 /* monitor and check for stuck cmd queue */ 1966 /* monitor and check for stuck cmd queue */
2506 if (iwl_check_stuck_queue(priv, priv->cmd_queue)) 1967 if (iwl_check_stuck_queue(priv, priv->cmd_queue))
2507 return; 1968 return;
@@ -2516,17 +1977,23 @@ void iwl_bg_monitor_recover(unsigned long data)
2516 return; 1977 return;
2517 } 1978 }
2518 } 1979 }
2519 if (priv->cfg->base_params->monitor_recover_period) { 1980
2520 /* 1981 mod_timer(&priv->watchdog, jiffies +
2521 * Reschedule the timer to occur in 1982 msecs_to_jiffies(IWL_WD_TICK(timeout)));
2522 * priv->cfg->base_params->monitor_recover_period
2523 */
2524 mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies(
2525 priv->cfg->base_params->monitor_recover_period));
2526 }
2527} 1983}
2528EXPORT_SYMBOL(iwl_bg_monitor_recover); 1984EXPORT_SYMBOL(iwl_bg_watchdog);
1985
1986void iwl_setup_watchdog(struct iwl_priv *priv)
1987{
1988 unsigned int timeout = priv->cfg->base_params->wd_timeout;
2529 1989
1990 if (timeout)
1991 mod_timer(&priv->watchdog,
1992 jiffies + msecs_to_jiffies(IWL_WD_TICK(timeout)));
1993 else
1994 del_timer(&priv->watchdog);
1995}
1996EXPORT_SYMBOL(iwl_setup_watchdog);
2530 1997
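The quarter-timeout tick described above bounds detection latency: a queue whose read pointer stops moving is reported no earlier than wd_timeout and no later than wd_timeout plus one tick. A minimal stand-alone check of that arithmetic, assuming the IWL_DEF_WD_TIMEOUT value defined later in this patch (iwl-dev.h):

/* Illustration only; constants copied from this patch, not driver code. */
#include <stdio.h>

#define IWL_DEF_WD_TIMEOUT   2000                /* ms */
#define IWL_WD_TICK(timeout) ((timeout) / 4)

int main(void)
{
        unsigned int timeout = IWL_DEF_WD_TIMEOUT;
        unsigned int tick = IWL_WD_TICK(timeout);

        /* Worst case: the queue stalls just after a tick fires, so the stall
         * is reported on the first tick at least 'timeout' ms after it. */
        printf("tick = %u ms, detection between %u and %u ms\n",
               tick, timeout, timeout + tick);
        return 0;
}

With the 2000 ms default this prints a 500 ms tick and a 2000-2500 ms detection window, matching the 1.25*timeout figure in the comment.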
2531/* 1998/*
2532 * extended beacon time format 1999 * extended beacon time format
@@ -2584,8 +2051,9 @@ EXPORT_SYMBOL(iwl_add_beacon_time);
2584 2051
2585#ifdef CONFIG_PM 2052#ifdef CONFIG_PM
2586 2053
2587int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state) 2054int iwl_pci_suspend(struct device *device)
2588{ 2055{
2056 struct pci_dev *pdev = to_pci_dev(device);
2589 struct iwl_priv *priv = pci_get_drvdata(pdev); 2057 struct iwl_priv *priv = pci_get_drvdata(pdev);
2590 2058
2591 /* 2059 /*
@@ -2597,18 +2065,14 @@ int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
2597 */ 2065 */
2598 iwl_apm_stop(priv); 2066 iwl_apm_stop(priv);
2599 2067
2600 pci_save_state(pdev);
2601 pci_disable_device(pdev);
2602 pci_set_power_state(pdev, PCI_D3hot);
2603
2604 return 0; 2068 return 0;
2605} 2069}
2606EXPORT_SYMBOL(iwl_pci_suspend); 2070EXPORT_SYMBOL(iwl_pci_suspend);
2607 2071
2608int iwl_pci_resume(struct pci_dev *pdev) 2072int iwl_pci_resume(struct device *device)
2609{ 2073{
2074 struct pci_dev *pdev = to_pci_dev(device);
2610 struct iwl_priv *priv = pci_get_drvdata(pdev); 2075 struct iwl_priv *priv = pci_get_drvdata(pdev);
2611 int ret;
2612 bool hw_rfkill = false; 2076 bool hw_rfkill = false;
2613 2077
2614 /* 2078 /*
@@ -2617,11 +2081,6 @@ int iwl_pci_resume(struct pci_dev *pdev)
2617 */ 2081 */
2618 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00); 2082 pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
2619 2083
2620 pci_set_power_state(pdev, PCI_D0);
2621 ret = pci_enable_device(pdev);
2622 if (ret)
2623 return ret;
2624 pci_restore_state(pdev);
2625 iwl_enable_interrupts(priv); 2084 iwl_enable_interrupts(priv);
2626 2085
2627 if (!(iwl_read32(priv, CSR_GP_CNTRL) & 2086 if (!(iwl_read32(priv, CSR_GP_CNTRL) &
@@ -2639,4 +2098,14 @@ int iwl_pci_resume(struct pci_dev *pdev)
2639} 2098}
2640EXPORT_SYMBOL(iwl_pci_resume); 2099EXPORT_SYMBOL(iwl_pci_resume);
2641 2100
2101const struct dev_pm_ops iwl_pm_ops = {
2102 .suspend = iwl_pci_suspend,
2103 .resume = iwl_pci_resume,
2104 .freeze = iwl_pci_suspend,
2105 .thaw = iwl_pci_resume,
2106 .poweroff = iwl_pci_suspend,
2107 .restore = iwl_pci_resume,
2108};
2109EXPORT_SYMBOL(iwl_pm_ops);
2110
2642#endif /* CONFIG_PM */ 2111#endif /* CONFIG_PM */
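Exporting iwl_pm_ops moves suspend/resume handling onto the generic dev_pm_ops path, which is why the explicit pci_save_state()/pci_set_power_state() calls disappear above: the PCI PM core now performs those steps. The bus-driver side of the hookup is outside this hunk; a hedged sketch, using the IWL_PM_OPS macro from iwl-core.h below and an illustrative variable name:

/* Sketch only -- .id_table/.probe/.remove omitted for brevity. */
static struct pci_driver iwl_example_pci_driver = {
        .name   = "iwlagn",
        .driver = {
                .pm = IWL_PM_OPS,   /* &iwl_pm_ops with CONFIG_PM, NULL otherwise */
        },
};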
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 64527def059f..a3474376fdbc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -120,6 +120,14 @@ struct iwl_apm_ops {
120 void (*config)(struct iwl_priv *priv); 120 void (*config)(struct iwl_priv *priv);
121}; 121};
122 122
123struct iwl_isr_ops {
124 irqreturn_t (*isr) (int irq, void *data);
125 void (*free)(struct iwl_priv *priv);
126 int (*alloc)(struct iwl_priv *priv);
127 int (*reset)(struct iwl_priv *priv);
128 void (*disable)(struct iwl_priv *priv);
129};
130
123struct iwl_debugfs_ops { 131struct iwl_debugfs_ops {
124 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf, 132 ssize_t (*rx_stats_read)(struct file *file, char __user *user_buf,
125 size_t count, loff_t *ppos); 133 size_t count, loff_t *ppos);
@@ -193,22 +201,15 @@ struct iwl_lib_ops {
193 /* power */ 201 /* power */
194 int (*send_tx_power) (struct iwl_priv *priv); 202 int (*send_tx_power) (struct iwl_priv *priv);
195 void (*update_chain_flags)(struct iwl_priv *priv); 203 void (*update_chain_flags)(struct iwl_priv *priv);
196 void (*post_associate)(struct iwl_priv *priv, 204
197 struct ieee80211_vif *vif); 205 /* isr */
198 void (*config_ap)(struct iwl_priv *priv, struct ieee80211_vif *vif); 206 struct iwl_isr_ops isr_ops;
199 irqreturn_t (*isr) (int irq, void *data);
200 207
201 /* eeprom operations (as defined in iwl-eeprom.h) */ 208 /* eeprom operations (as defined in iwl-eeprom.h) */
202 struct iwl_eeprom_ops eeprom_ops; 209 struct iwl_eeprom_ops eeprom_ops;
203 210
204 /* temperature */ 211 /* temperature */
205 struct iwl_temp_ops temp_ops; 212 struct iwl_temp_ops temp_ops;
206 /* station management */
207 int (*manage_ibss_station)(struct iwl_priv *priv,
208 struct ieee80211_vif *vif, bool add);
209 int (*update_bcast_stations)(struct iwl_priv *priv);
210 /* recover from tx queue stall */
211 void (*recover_from_tx_stall)(unsigned long data);
212 /* check for plcp health */ 213 /* check for plcp health */
213 bool (*check_plcp_health)(struct iwl_priv *priv, 214 bool (*check_plcp_health)(struct iwl_priv *priv,
214 struct iwl_rx_packet *pkt); 215 struct iwl_rx_packet *pkt);
@@ -235,12 +236,23 @@ struct iwl_nic_ops {
235 void (*additional_nic_config)(struct iwl_priv *priv); 236 void (*additional_nic_config)(struct iwl_priv *priv);
236}; 237};
237 238
239struct iwl_legacy_ops {
240 void (*post_associate)(struct iwl_priv *priv);
241 void (*config_ap)(struct iwl_priv *priv);
242 /* station management */
243 int (*update_bcast_stations)(struct iwl_priv *priv);
244 int (*manage_ibss_station)(struct iwl_priv *priv,
245 struct ieee80211_vif *vif, bool add);
246};
247
238struct iwl_ops { 248struct iwl_ops {
239 const struct iwl_lib_ops *lib; 249 const struct iwl_lib_ops *lib;
240 const struct iwl_hcmd_ops *hcmd; 250 const struct iwl_hcmd_ops *hcmd;
241 const struct iwl_hcmd_utils_ops *utils; 251 const struct iwl_hcmd_utils_ops *utils;
242 const struct iwl_led_ops *led; 252 const struct iwl_led_ops *led;
243 const struct iwl_nic_ops *nic; 253 const struct iwl_nic_ops *nic;
254 const struct iwl_legacy_ops *legacy;
255 const struct ieee80211_ops *ieee80211_ops;
244}; 256};
245 257
246struct iwl_mod_params { 258struct iwl_mod_params {
@@ -266,7 +278,7 @@ struct iwl_mod_params {
266 * @plcp_delta_threshold: plcp error rate threshold used to trigger 278 * @plcp_delta_threshold: plcp error rate threshold used to trigger
267 * radio tuning when there is a high receiving plcp error rate 279 * radio tuning when there is a high receiving plcp error rate
268 * @chain_noise_scale: default chain noise scale used for gain computation 280 * @chain_noise_scale: default chain noise scale used for gain computation
269 * @monitor_recover_period: default timer used to check stuck queues 281 * @wd_timeout: TX queues watchdog timeout
270 * @temperature_kelvin: temperature report by uCode in kelvin 282 * @temperature_kelvin: temperature report by uCode in kelvin
271 * @max_event_log_size: size of event log buffer size for ucode event logging 283 * @max_event_log_size: size of event log buffer size for ucode event logging
272 * @tx_power_by_driver: tx power calibration performed by driver 284 * @tx_power_by_driver: tx power calibration performed by driver
@@ -276,7 +288,10 @@ struct iwl_mod_params {
276 * sensitivity calibration operation 288 * sensitivity calibration operation
277 * @chain_noise_calib_by_driver: driver has the capability to perform 289 * @chain_noise_calib_by_driver: driver has the capability to perform
278 * chain noise calibration operation 290 * chain noise calibration operation
279*/ 291 * @shadow_reg_enable: HW shadow register bit
292 * @no_agg_framecnt_info: uCode does not provide aggregation frame count
293 * information
294 */
280struct iwl_base_params { 295struct iwl_base_params {
281 int eeprom_size; 296 int eeprom_size;
282 int num_of_queues; /* def: HW dependent */ 297 int num_of_queues; /* def: HW dependent */
@@ -298,14 +313,15 @@ struct iwl_base_params {
298 const bool support_wimax_coexist; 313 const bool support_wimax_coexist;
299 u8 plcp_delta_threshold; 314 u8 plcp_delta_threshold;
300 s32 chain_noise_scale; 315 s32 chain_noise_scale;
301 /* timer period for monitor the driver queues */ 316 unsigned int wd_timeout;
302 u32 monitor_recover_period;
303 bool temperature_kelvin; 317 bool temperature_kelvin;
304 u32 max_event_log_size; 318 u32 max_event_log_size;
305 const bool tx_power_by_driver; 319 const bool tx_power_by_driver;
306 const bool ucode_tracing; 320 const bool ucode_tracing;
307 const bool sensitivity_calib_by_driver; 321 const bool sensitivity_calib_by_driver;
308 const bool chain_noise_calib_by_driver; 322 const bool chain_noise_calib_by_driver;
323 const bool shadow_reg_enable;
324 const bool no_agg_framecnt_info;
309}; 325};
310/* 326/*
311 * @advanced_bt_coexist: support advanced bt coexist 327 * @advanced_bt_coexist: support advanced bt coexist
@@ -315,6 +331,7 @@ struct iwl_base_params {
315 * @agg_time_limit: maximum number of uSec in aggregation 331 * @agg_time_limit: maximum number of uSec in aggregation
316 * @ampdu_factor: Maximum A-MPDU length factor 332 * @ampdu_factor: Maximum A-MPDU length factor
317 * @ampdu_density: Minimum A-MPDU spacing 333 * @ampdu_density: Minimum A-MPDU spacing
334 * @bt_sco_disable: uCode should not respond to BT in SCO/ESCO mode
318*/ 335*/
319struct iwl_bt_params { 336struct iwl_bt_params {
320 bool advanced_bt_coexist; 337 bool advanced_bt_coexist;
@@ -324,6 +341,7 @@ struct iwl_bt_params {
324 u16 agg_time_limit; 341 u16 agg_time_limit;
325 u8 ampdu_factor; 342 u8 ampdu_factor;
326 u8 ampdu_density; 343 u8 ampdu_density;
344 bool bt_sco_disable;
327}; 345};
328/* 346/*
329 * @use_rts_for_aggregation: use rts/cts protection for HT traffic 347 * @use_rts_for_aggregation: use rts/cts protection for HT traffic
@@ -344,6 +362,10 @@ struct iwl_ht_params {
344 * @need_dc_calib: need to perform init dc calibration 362 * @need_dc_calib: need to perform init dc calibration
345 * @need_temp_offset_calib: need to perform temperature offset calibration 363 * @need_temp_offset_calib: need to perform temperature offset calibration
346 * @scan_antennas: available antenna for scan operation 364 * @scan_antennas: available antenna for scan operation
365 * @led_mode: 0=blinking, 1=On(RF On)/Off(RF Off)
366 * @adv_pm: advanced power management
367 * @rx_with_siso_diversity: 1x1 device with rx antenna diversity
368 * @internal_wimax_coex: internal wifi/wimax combo device
347 * 369 *
348 * We enable the driver to be backward compatible wrt API version. The 370 * We enable the driver to be backward compatible wrt API version. The
349 * driver specifies which APIs it supports (with @ucode_api_max being the 371 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -389,15 +411,17 @@ struct iwl_cfg {
389 const bool need_dc_calib; /* if used set to true */ 411 const bool need_dc_calib; /* if used set to true */
390 const bool need_temp_offset_calib; /* if used set to true */ 412 const bool need_temp_offset_calib; /* if used set to true */
391 u8 scan_rx_antennas[IEEE80211_NUM_BANDS]; 413 u8 scan_rx_antennas[IEEE80211_NUM_BANDS];
392 u8 scan_tx_antennas[IEEE80211_NUM_BANDS]; 414 enum iwl_led_mode led_mode;
415 const bool adv_pm;
416 const bool rx_with_siso_diversity;
417 const bool internal_wimax_coex;
393}; 418};
394 419
395/*************************** 420/***************************
396 * L i b * 421 * L i b *
397 ***************************/ 422 ***************************/
398 423
399struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg, 424struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg);
400 struct ieee80211_ops *hw_ops);
401int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 425int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
402 const struct ieee80211_tx_queue_params *params); 426 const struct ieee80211_tx_queue_params *params);
403int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw); 427int iwl_mac_tx_last_beacon(struct ieee80211_hw *hw);
@@ -425,23 +449,16 @@ int iwl_set_decrypted_flag(struct iwl_priv *priv,
425 u32 decrypt_res, 449 u32 decrypt_res,
426 struct ieee80211_rx_status *stats); 450 struct ieee80211_rx_status *stats);
427void iwl_irq_handle_error(struct iwl_priv *priv); 451void iwl_irq_handle_error(struct iwl_priv *priv);
428void iwl_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif);
429void iwl_bss_info_changed(struct ieee80211_hw *hw,
430 struct ieee80211_vif *vif,
431 struct ieee80211_bss_conf *bss_conf,
432 u32 changes);
433int iwl_mac_add_interface(struct ieee80211_hw *hw, 452int iwl_mac_add_interface(struct ieee80211_hw *hw,
434 struct ieee80211_vif *vif); 453 struct ieee80211_vif *vif);
435void iwl_mac_remove_interface(struct ieee80211_hw *hw, 454void iwl_mac_remove_interface(struct ieee80211_hw *hw,
436 struct ieee80211_vif *vif); 455 struct ieee80211_vif *vif);
437int iwl_mac_config(struct ieee80211_hw *hw, u32 changed); 456int iwl_mac_change_interface(struct ieee80211_hw *hw,
438void iwl_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif); 457 struct ieee80211_vif *vif,
439void iwl_mac_reset_tsf(struct ieee80211_hw *hw); 458 enum nl80211_iftype newtype, bool newp2p);
440int iwl_alloc_txq_mem(struct iwl_priv *priv); 459int iwl_alloc_txq_mem(struct iwl_priv *priv);
441void iwl_free_txq_mem(struct iwl_priv *priv); 460void iwl_free_txq_mem(struct iwl_priv *priv);
442void iwlcore_tx_cmd_protection(struct iwl_priv *priv, 461
443 struct ieee80211_tx_info *info,
444 __le16 fc, __le32 *tx_flags);
445#ifdef CONFIG_IWLWIFI_DEBUGFS 462#ifdef CONFIG_IWLWIFI_DEBUGFS
446int iwl_alloc_traffic_mem(struct iwl_priv *priv); 463int iwl_alloc_traffic_mem(struct iwl_priv *priv);
447void iwl_free_traffic_mem(struct iwl_priv *priv); 464void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -529,6 +546,7 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
529void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq, 546void iwl_tx_queue_reset(struct iwl_priv *priv, struct iwl_tx_queue *txq,
530 int slots_num, u32 txq_id); 547 int slots_num, u32 txq_id);
531void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id); 548void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id);
549void iwl_setup_watchdog(struct iwl_priv *priv);
532/***************************************************** 550/*****************************************************
533 * TX power 551 * TX power
534 ****************************************************/ 552 ****************************************************/
@@ -598,7 +616,6 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
598/***************************************************** 616/*****************************************************
599 * PCI * 617 * PCI *
600 *****************************************************/ 618 *****************************************************/
601irqreturn_t iwl_isr_legacy(int irq, void *data);
602 619
603static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv) 620static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
604{ 621{
@@ -609,15 +626,23 @@ static inline u16 iwl_pcie_link_ctl(struct iwl_priv *priv)
609 return pci_lnk_ctl; 626 return pci_lnk_ctl;
610} 627}
611 628
612void iwl_bg_monitor_recover(unsigned long data); 629void iwl_bg_watchdog(unsigned long data);
613u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval); 630u32 iwl_usecs_to_beacons(struct iwl_priv *priv, u32 usec, u32 beacon_interval);
614__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base, 631__le32 iwl_add_beacon_time(struct iwl_priv *priv, u32 base,
615 u32 addon, u32 beacon_interval); 632 u32 addon, u32 beacon_interval);
616 633
617#ifdef CONFIG_PM 634#ifdef CONFIG_PM
618int iwl_pci_suspend(struct pci_dev *pdev, pm_message_t state); 635int iwl_pci_suspend(struct device *device);
619int iwl_pci_resume(struct pci_dev *pdev); 636int iwl_pci_resume(struct device *device);
620#endif /* CONFIG_PM */ 637extern const struct dev_pm_ops iwl_pm_ops;
638
639#define IWL_PM_OPS (&iwl_pm_ops)
640
641#else /* !CONFIG_PM */
642
643#define IWL_PM_OPS NULL
644
645#endif /* !CONFIG_PM */
621 646
622/***************************************************** 647/*****************************************************
623* Error Handling Debugging 648* Error Handling Debugging
@@ -724,11 +749,6 @@ static inline int iwlcore_commit_rxon(struct iwl_priv *priv,
724{ 749{
725 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx); 750 return priv->cfg->ops->hcmd->commit_rxon(priv, ctx);
726} 751}
727static inline void iwlcore_config_ap(struct iwl_priv *priv,
728 struct ieee80211_vif *vif)
729{
730 priv->cfg->ops->lib->config_ap(priv, vif);
731}
732static inline const struct ieee80211_supported_band *iwl_get_hw_mode( 752static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
733 struct iwl_priv *priv, enum ieee80211_band band) 753 struct iwl_priv *priv, enum ieee80211_band band)
734{ 754{
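The iwl_base_params change above replaces monitor_recover_period with wd_timeout, so every per-device configuration table has to be updated as well; those tables are not part of this hunk. An assumption-labelled sketch of how such an initializer would look (all values are placeholders, not taken from a real device config):

/* Illustrative only; placeholder values, not a real configuration. */
static struct iwl_base_params iwl_example_base_params = {
        .eeprom_size        = 2048,
        .wd_timeout         = IWL_DEF_WD_TIMEOUT,   /* was .monitor_recover_period */
        .max_event_log_size = 512,
        .shadow_reg_enable  = true,
};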
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 2aa15ab13892..b80bf7dff55b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -132,6 +132,8 @@
132 132
133#define CSR_LED_REG (CSR_BASE+0x094) 133#define CSR_LED_REG (CSR_BASE+0x094)
134#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0) 134#define CSR_DRAM_INT_TBL_REG (CSR_BASE+0x0A0)
135#define CSR_MAC_SHADOW_REG_CTRL (CSR_BASE+0x0A8) /* 6000 and up */
136
135 137
136/* GIO Chicken Bits (PCI Express bus link power management) */ 138/* GIO Chicken Bits (PCI Express bus link power management) */
137#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100) 139#define CSR_GIO_CHICKEN_BITS (CSR_BASE+0x100)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index 0b961a353ff6..ebdea3be3ef9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -120,6 +120,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
120/* 0x000000F0 - 0x00000010 */ 120/* 0x000000F0 - 0x00000010 */
121#define IWL_DL_MACDUMP (1 << 4) 121#define IWL_DL_MACDUMP (1 << 4)
122#define IWL_DL_HCMD_DUMP (1 << 5) 122#define IWL_DL_HCMD_DUMP (1 << 5)
123#define IWL_DL_EEPROM (1 << 6)
123#define IWL_DL_RADIO (1 << 7) 124#define IWL_DL_RADIO (1 << 7)
124/* 0x00000F00 - 0x00000100 */ 125/* 0x00000F00 - 0x00000100 */
125#define IWL_DL_POWER (1 << 8) 126#define IWL_DL_POWER (1 << 8)
@@ -164,6 +165,7 @@ static inline void iwl_dbgfs_unregister(struct iwl_priv *priv)
164#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a) 165#define IWL_DEBUG_WEP(p, f, a...) IWL_DEBUG(p, IWL_DL_WEP, f, ## a)
165#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a) 166#define IWL_DEBUG_HC(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD, f, ## a)
166#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a) 167#define IWL_DEBUG_HC_DUMP(p, f, a...) IWL_DEBUG(p, IWL_DL_HCMD_DUMP, f, ## a)
168#define IWL_DEBUG_EEPROM(p, f, a...) IWL_DEBUG(p, IWL_DL_EEPROM, f, ## a)
167#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a) 169#define IWL_DEBUG_CALIB(p, f, a...) IWL_DEBUG(p, IWL_DL_CALIB, f, ## a)
168#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a) 170#define IWL_DEBUG_FW(p, f, a...) IWL_DEBUG(p, IWL_DL_FW, f, ## a)
169#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a) 171#define IWL_DEBUG_RF_KILL(p, f, a...) IWL_DEBUG(p, IWL_DL_RF_KILL, f, ## a)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index 8fdd4efdb1d3..6fe80b5e7a15 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -992,11 +992,8 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
992 " swq_id=%#.2x (ac %d/hwq %d)\n", 992 " swq_id=%#.2x (ac %d/hwq %d)\n",
993 cnt, q->read_ptr, q->write_ptr, 993 cnt, q->read_ptr, q->write_ptr,
994 !!test_bit(cnt, priv->queue_stopped), 994 !!test_bit(cnt, priv->queue_stopped),
995 txq->swq_id, 995 txq->swq_id, txq->swq_id & 3,
996 txq->swq_id & 0x80 ? txq->swq_id & 3 : 996 (txq->swq_id >> 2) & 0x1f);
997 txq->swq_id,
998 txq->swq_id & 0x80 ? (txq->swq_id >> 2) &
999 0x1f : txq->swq_id);
1000 if (cnt >= 4) 997 if (cnt >= 4)
1001 continue; 998 continue;
1002 /* for the ACs, display the stop count too */ 999 /* for the ACs, display the stop count too */
@@ -1537,32 +1534,26 @@ static ssize_t iwl_dbgfs_ucode_bt_stats_read(struct file *file,
1537 user_buf, count, ppos); 1534 user_buf, count, ppos);
1538} 1535}
1539 1536
1540static ssize_t iwl_dbgfs_monitor_period_write(struct file *file, 1537static ssize_t iwl_dbgfs_wd_timeout_write(struct file *file,
1541 const char __user *user_buf, 1538 const char __user *user_buf,
1542 size_t count, loff_t *ppos) { 1539 size_t count, loff_t *ppos) {
1543 1540
1544 struct iwl_priv *priv = file->private_data; 1541 struct iwl_priv *priv = file->private_data;
1545 char buf[8]; 1542 char buf[8];
1546 int buf_size; 1543 int buf_size;
1547 int period; 1544 int timeout;
1548 1545
1549 memset(buf, 0, sizeof(buf)); 1546 memset(buf, 0, sizeof(buf));
1550 buf_size = min(count, sizeof(buf) - 1); 1547 buf_size = min(count, sizeof(buf) - 1);
1551 if (copy_from_user(buf, user_buf, buf_size)) 1548 if (copy_from_user(buf, user_buf, buf_size))
1552 return -EFAULT; 1549 return -EFAULT;
1553 if (sscanf(buf, "%d", &period) != 1) 1550 if (sscanf(buf, "%d", &timeout) != 1)
1554 return -EINVAL; 1551 return -EINVAL;
1555 if (period < 0 || period > IWL_MAX_MONITORING_PERIOD) 1552 if (timeout < 0 || timeout > IWL_MAX_WD_TIMEOUT)
1556 priv->cfg->base_params->monitor_recover_period = 1553 timeout = IWL_DEF_WD_TIMEOUT;
1557 IWL_DEF_MONITORING_PERIOD;
1558 else
1559 priv->cfg->base_params->monitor_recover_period = period;
1560 1554
1561 if (priv->cfg->base_params->monitor_recover_period) 1555 priv->cfg->base_params->wd_timeout = timeout;
1562 mod_timer(&priv->monitor_recover, jiffies + msecs_to_jiffies( 1556 iwl_setup_watchdog(priv);
1563 priv->cfg->base_params->monitor_recover_period));
1564 else
1565 del_timer_sync(&priv->monitor_recover);
1566 return count; 1557 return count;
1567} 1558}
1568 1559
@@ -1576,11 +1567,18 @@ static ssize_t iwl_dbgfs_bt_traffic_read(struct file *file,
1576 const size_t bufsz = sizeof(buf); 1567 const size_t bufsz = sizeof(buf);
1577 ssize_t ret; 1568 ssize_t ret;
1578 1569
1570 if (!priv->bt_enable_flag) {
1571 pos += scnprintf(buf + pos, bufsz - pos, "BT coex disabled\n");
1572 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1573 return ret;
1574 }
1575 pos += scnprintf(buf + pos, bufsz - pos, "BT enable flag: 0x%x\n",
1576 priv->bt_enable_flag);
1579 pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n", 1577 pos += scnprintf(buf + pos, bufsz - pos, "BT in %s mode\n",
1580 priv->bt_full_concurrent ? "full concurrency" : "3-wire"); 1578 priv->bt_full_concurrent ? "full concurrency" : "3-wire");
1581 pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, " 1579 pos += scnprintf(buf + pos, bufsz - pos, "BT status: %s, "
1582 "last traffic notif: %d\n", 1580 "last traffic notif: %d\n",
1583 priv->bt_status ? "On" : "Off", priv->notif_bt_traffic_load); 1581 priv->bt_status ? "On" : "Off", priv->last_bt_traffic_load);
1584 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, " 1582 pos += scnprintf(buf + pos, bufsz - pos, "ch_announcement: %d, "
1585 "sco_active: %d, kill_ack_mask: %x, " 1583 "sco_active: %d, kill_ack_mask: %x, "
1586 "kill_cts_mask: %x\n", 1584 "kill_cts_mask: %x\n",
@@ -1689,7 +1687,7 @@ DEBUGFS_READ_FILE_OPS(rxon_flags);
1689DEBUGFS_READ_FILE_OPS(rxon_filter_flags); 1687DEBUGFS_READ_FILE_OPS(rxon_filter_flags);
1690DEBUGFS_WRITE_FILE_OPS(txfifo_flush); 1688DEBUGFS_WRITE_FILE_OPS(txfifo_flush);
1691DEBUGFS_READ_FILE_OPS(ucode_bt_stats); 1689DEBUGFS_READ_FILE_OPS(ucode_bt_stats);
1692DEBUGFS_WRITE_FILE_OPS(monitor_period); 1690DEBUGFS_WRITE_FILE_OPS(wd_timeout);
1693DEBUGFS_READ_FILE_OPS(bt_traffic); 1691DEBUGFS_READ_FILE_OPS(bt_traffic);
1694DEBUGFS_READ_WRITE_FILE_OPS(protection_mode); 1692DEBUGFS_READ_WRITE_FILE_OPS(protection_mode);
1695DEBUGFS_READ_FILE_OPS(reply_tx_error); 1693DEBUGFS_READ_FILE_OPS(reply_tx_error);
@@ -1766,7 +1764,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1766 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR); 1764 DEBUGFS_ADD_FILE(reply_tx_error, dir_debug, S_IRUSR);
1767 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR); 1765 DEBUGFS_ADD_FILE(rxon_flags, dir_debug, S_IWUSR);
1768 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR); 1766 DEBUGFS_ADD_FILE(rxon_filter_flags, dir_debug, S_IWUSR);
1769 DEBUGFS_ADD_FILE(monitor_period, dir_debug, S_IWUSR); 1767 DEBUGFS_ADD_FILE(wd_timeout, dir_debug, S_IWUSR);
1770 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist) 1768 if (priv->cfg->bt_params && priv->cfg->bt_params->advanced_bt_coexist)
1771 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR); 1769 DEBUGFS_ADD_FILE(bt_traffic, dir_debug, S_IRUSR);
1772 if (priv->cfg->base_params->sensitivity_calib_by_driver) 1770 if (priv->cfg->base_params->sensitivity_calib_by_driver)
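The renamed debugfs file takes the watchdog timeout in milliseconds and immediately rearms (or deletes) the timer through iwl_setup_watchdog(); writing 0 disables the watchdog, while out-of-range values fall back to the default. A stand-alone restatement of that clamping rule, using the constants this patch defines in iwl-dev.h:

/* Illustration only; mirrors the range check in iwl_dbgfs_wd_timeout_write(). */
#define IWL_DEF_WD_TIMEOUT   2000     /* ms */
#define IWL_MAX_WD_TIMEOUT   120000   /* ms */

static int example_clamp_wd_timeout(int requested_ms)
{
        if (requested_ms < 0 || requested_ms > IWL_MAX_WD_TIMEOUT)
                return IWL_DEF_WD_TIMEOUT;
        return requested_ms;   /* 0 is accepted and disables the watchdog */
}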
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 70e07fa48405..8dda67850af4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -129,9 +129,6 @@ struct iwl_queue {
129 int write_ptr; /* 1-st empty entry (index) host_w*/ 129 int write_ptr; /* 1-st empty entry (index) host_w*/
130 int read_ptr; /* last used entry (index) host_r*/ 130 int read_ptr; /* last used entry (index) host_r*/
131 /* use for monitoring and recovering the stuck queue */ 131 /* use for monitoring and recovering the stuck queue */
132 int last_read_ptr; /* storing the last read_ptr */
133 /* number of time read_ptr and last_read_ptr are the same */
134 u8 repeat_same_read_ptr;
135 dma_addr_t dma_addr; /* physical addr for BD's */ 132 dma_addr_t dma_addr; /* physical addr for BD's */
136 int n_window; /* safe queue window */ 133 int n_window; /* safe queue window */
137 u32 id; 134 u32 id;
@@ -155,6 +152,7 @@ struct iwl_tx_info {
155 * @meta: array of meta data for each command/tx buffer 152 * @meta: array of meta data for each command/tx buffer
156 * @dma_addr_cmd: physical address of cmd/tx buffer array 153 * @dma_addr_cmd: physical address of cmd/tx buffer array
157 * @txb: array of per-TFD driver data 154 * @txb: array of per-TFD driver data
155 * @time_stamp: time (in jiffies) of last read_ptr change
158 * @need_update: indicates need to update read/write index 156 * @need_update: indicates need to update read/write index
159 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled 157 * @sched_retry: indicates queue is high-throughput aggregation (HT AGG) enabled
160 * 158 *
@@ -170,6 +168,7 @@ struct iwl_tx_queue {
170 struct iwl_device_cmd **cmd; 168 struct iwl_device_cmd **cmd;
171 struct iwl_cmd_meta *meta; 169 struct iwl_cmd_meta *meta;
172 struct iwl_tx_info *txb; 170 struct iwl_tx_info *txb;
171 unsigned long time_stamp;
173 u8 need_update; 172 u8 need_update;
174 u8 sched_retry; 173 u8 sched_retry;
175 u8 active; 174 u8 active;
@@ -1104,11 +1103,10 @@ struct iwl_event_log {
1104#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3) 1103#define IWL_DELAY_NEXT_FORCE_RF_RESET (HZ*3)
1105#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5) 1104#define IWL_DELAY_NEXT_FORCE_FW_RELOAD (HZ*5)
1106 1105
1107/* timer constants use to monitor and recover stuck tx queues in mSecs */ 1106/* TX queue watchdog timeouts in mSecs */
1108#define IWL_DEF_MONITORING_PERIOD (1000) 1107#define IWL_DEF_WD_TIMEOUT (2000)
1109#define IWL_LONG_MONITORING_PERIOD (5000) 1108#define IWL_LONG_WD_TIMEOUT (10000)
1110#define IWL_ONE_HUNDRED_MSECS (100) 1109#define IWL_MAX_WD_TIMEOUT (120000)
1111#define IWL_MAX_MONITORING_PERIOD (60000)
1112 1110
1113/* BT Antenna Coupling Threshold (dB) */ 1111/* BT Antenna Coupling Threshold (dB) */
1114#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35) 1112#define IWL_BT_ANTENNA_COUPLING_THRESHOLD (35)
@@ -1162,6 +1160,8 @@ struct iwl_rxon_context {
1162 */ 1160 */
1163 bool always_active, is_active; 1161 bool always_active, is_active;
1164 1162
1163 bool ht_need_multiple_chains;
1164
1165 enum iwl_rxon_context_id ctxid; 1165 enum iwl_rxon_context_id ctxid;
1166 1166
1167 u32 interface_modes, exclusive_interface_modes; 1167 u32 interface_modes, exclusive_interface_modes;
@@ -1468,8 +1468,9 @@ struct iwl_priv {
1468 }; 1468 };
1469 1469
1470 /* bt coex */ 1470 /* bt coex */
1471 u8 bt_enable_flag;
1471 u8 bt_status; 1472 u8 bt_status;
1472 u8 bt_traffic_load, notif_bt_traffic_load; 1473 u8 bt_traffic_load, last_bt_traffic_load;
1473 bool bt_ch_announce; 1474 bool bt_ch_announce;
1474 bool bt_sco_active; 1475 bool bt_sco_active;
1475 bool bt_full_concurrent; 1476 bool bt_full_concurrent;
@@ -1480,7 +1481,6 @@ struct iwl_priv {
1480 u16 bt_on_thresh; 1481 u16 bt_on_thresh;
1481 u16 bt_duration; 1482 u16 bt_duration;
1482 u16 dynamic_frag_thresh; 1483 u16 dynamic_frag_thresh;
1483 u16 dynamic_agg_thresh;
1484 u8 bt_ci_compliance; 1484 u8 bt_ci_compliance;
1485 struct work_struct bt_traffic_change_work; 1485 struct work_struct bt_traffic_change_work;
1486 1486
@@ -1517,6 +1517,7 @@ struct iwl_priv {
1517 s8 tx_power_user_lmt; 1517 s8 tx_power_user_lmt;
1518 s8 tx_power_device_lmt; 1518 s8 tx_power_device_lmt;
1519 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */ 1519 s8 tx_power_lmt_in_half_dbm; /* max tx power in half-dBm format */
1520 s8 tx_power_next;
1520 1521
1521 1522
1522#ifdef CONFIG_IWLWIFI_DEBUG 1523#ifdef CONFIG_IWLWIFI_DEBUG
@@ -1542,7 +1543,7 @@ struct iwl_priv {
1542 struct work_struct run_time_calib_work; 1543 struct work_struct run_time_calib_work;
1543 struct timer_list statistics_periodic; 1544 struct timer_list statistics_periodic;
1544 struct timer_list ucode_trace; 1545 struct timer_list ucode_trace;
1545 struct timer_list monitor_recover; 1546 struct timer_list watchdog;
1546 bool hw_ready; 1547 bool hw_ready;
1547 1548
1548 struct iwl_event_log event_log; 1549 struct iwl_event_log event_log;
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 87cd10ff285d..358cfd7e5af1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -147,7 +147,7 @@ static int iwl_eeprom_verify_signature(struct iwl_priv *priv)
147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK; 147 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
148 int ret = 0; 148 int ret = 0;
149 149
150 IWL_DEBUG_INFO(priv, "EEPROM signature=0x%08x\n", gp); 150 IWL_DEBUG_EEPROM(priv, "EEPROM signature=0x%08x\n", gp);
151 switch (gp) { 151 switch (gp) {
152 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP: 152 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
153 if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) { 153 if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
@@ -354,7 +354,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
354 */ 354 */
355 valid_addr = next_link_addr; 355 valid_addr = next_link_addr;
356 next_link_addr = le16_to_cpu(link_value) * sizeof(u16); 356 next_link_addr = le16_to_cpu(link_value) * sizeof(u16);
357 IWL_DEBUG_INFO(priv, "OTP blocks %d addr 0x%x\n", 357 IWL_DEBUG_EEPROM(priv, "OTP blocks %d addr 0x%x\n",
358 usedblocks, next_link_addr); 358 usedblocks, next_link_addr);
359 if (iwl_read_otp_word(priv, next_link_addr, &link_value)) 359 if (iwl_read_otp_word(priv, next_link_addr, &link_value))
360 return -EINVAL; 360 return -EINVAL;
@@ -374,7 +374,7 @@ static int iwl_find_otp_image(struct iwl_priv *priv,
374 } while (usedblocks <= priv->cfg->base_params->max_ll_items); 374 } while (usedblocks <= priv->cfg->base_params->max_ll_items);
375 375
376 /* OTP has no valid blocks */ 376 /* OTP has no valid blocks */
377 IWL_DEBUG_INFO(priv, "OTP has no valid blocks\n"); 377 IWL_DEBUG_EEPROM(priv, "OTP has no valid blocks\n");
378 return -EINVAL; 378 return -EINVAL;
379} 379}
380 380
@@ -414,7 +414,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
414 return -ENOENT; 414 return -ENOENT;
415 /* allocate eeprom */ 415 /* allocate eeprom */
416 sz = priv->cfg->base_params->eeprom_size; 416 sz = priv->cfg->base_params->eeprom_size;
417 IWL_DEBUG_INFO(priv, "NVM size = %d\n", sz); 417 IWL_DEBUG_EEPROM(priv, "NVM size = %d\n", sz);
418 priv->eeprom = kzalloc(sz, GFP_KERNEL); 418 priv->eeprom = kzalloc(sz, GFP_KERNEL);
419 if (!priv->eeprom) { 419 if (!priv->eeprom) {
420 ret = -ENOMEM; 420 ret = -ENOMEM;
@@ -492,7 +492,7 @@ int iwl_eeprom_init(struct iwl_priv *priv)
492 } 492 }
493 } 493 }
494 494
495 IWL_DEBUG_INFO(priv, "NVM Type: %s, version: 0x%x\n", 495 IWL_DEBUG_EEPROM(priv, "NVM Type: %s, version: 0x%x\n",
496 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) 496 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
497 ? "OTP" : "EEPROM", 497 ? "OTP" : "EEPROM",
498 iwl_eeprom_query16(priv, EEPROM_VERSION)); 498 iwl_eeprom_query16(priv, EEPROM_VERSION));
@@ -594,7 +594,7 @@ static int iwl_mod_ht40_chan_info(struct iwl_priv *priv,
594 if (!is_channel_valid(ch_info)) 594 if (!is_channel_valid(ch_info))
595 return -1; 595 return -1;
596 596
597 IWL_DEBUG_INFO(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):" 597 IWL_DEBUG_EEPROM(priv, "HT40 Ch. %d [%sGHz] %s%s%s%s%s(0x%02x %ddBm):"
598 " Ad-Hoc %ssupported\n", 598 " Ad-Hoc %ssupported\n",
599 ch_info->channel, 599 ch_info->channel,
600 is_channel_a_band(ch_info) ? 600 is_channel_a_band(ch_info) ?
@@ -634,11 +634,11 @@ int iwl_init_channel_map(struct iwl_priv *priv)
634 struct iwl_channel_info *ch_info; 634 struct iwl_channel_info *ch_info;
635 635
636 if (priv->channel_count) { 636 if (priv->channel_count) {
637 IWL_DEBUG_INFO(priv, "Channel map already initialized.\n"); 637 IWL_DEBUG_EEPROM(priv, "Channel map already initialized.\n");
638 return 0; 638 return 0;
639 } 639 }
640 640
641 IWL_DEBUG_INFO(priv, "Initializing regulatory info from EEPROM\n"); 641 IWL_DEBUG_EEPROM(priv, "Initializing regulatory info from EEPROM\n");
642 642
643 priv->channel_count = 643 priv->channel_count =
644 ARRAY_SIZE(iwl_eeprom_band_1) + 644 ARRAY_SIZE(iwl_eeprom_band_1) +
@@ -647,7 +647,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
647 ARRAY_SIZE(iwl_eeprom_band_4) + 647 ARRAY_SIZE(iwl_eeprom_band_4) +
648 ARRAY_SIZE(iwl_eeprom_band_5); 648 ARRAY_SIZE(iwl_eeprom_band_5);
649 649
650 IWL_DEBUG_INFO(priv, "Parsing data for %d channels.\n", priv->channel_count); 650 IWL_DEBUG_EEPROM(priv, "Parsing data for %d channels.\n",
651 priv->channel_count);
651 652
652 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) * 653 priv->channel_info = kzalloc(sizeof(struct iwl_channel_info) *
653 priv->channel_count, GFP_KERNEL); 654 priv->channel_count, GFP_KERNEL);
@@ -686,7 +687,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
686 IEEE80211_CHAN_NO_HT40; 687 IEEE80211_CHAN_NO_HT40;
687 688
688 if (!(is_channel_valid(ch_info))) { 689 if (!(is_channel_valid(ch_info))) {
689 IWL_DEBUG_INFO(priv, "Ch. %d Flags %x [%sGHz] - " 690 IWL_DEBUG_EEPROM(priv,
691 "Ch. %d Flags %x [%sGHz] - "
690 "No traffic\n", 692 "No traffic\n",
691 ch_info->channel, 693 ch_info->channel,
692 ch_info->flags, 694 ch_info->flags,
@@ -702,7 +704,8 @@ int iwl_init_channel_map(struct iwl_priv *priv)
702 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg; 704 ch_info->scan_power = eeprom_ch_info[ch].max_power_avg;
703 ch_info->min_power = 0; 705 ch_info->min_power = 0;
704 706
705 IWL_DEBUG_INFO(priv, "Ch. %d [%sGHz] %s%s%s%s%s%s(0x%02x %ddBm):" 707 IWL_DEBUG_EEPROM(priv, "Ch. %d [%sGHz] "
708 "%s%s%s%s%s%s(0x%02x %ddBm):"
706 " Ad-Hoc %ssupported\n", 709 " Ad-Hoc %ssupported\n",
707 ch_info->channel, 710 ch_info->channel,
708 is_channel_a_band(ch_info) ? 711 is_channel_a_band(ch_info) ?
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index d9b590625ae4..9e6f31355eee 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -110,9 +110,18 @@ enum {
110}; 110};
111 111
112/* SKU Capabilities */ 112/* SKU Capabilities */
113/* 3945 only */
113#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0) 114#define EEPROM_SKU_CAP_SW_RF_KILL_ENABLE (1 << 0)
114#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1) 115#define EEPROM_SKU_CAP_HW_RF_KILL_ENABLE (1 << 1)
115 116
117/* 5000 and up */
118#define EEPROM_SKU_CAP_BAND_POS (4)
119#define EEPROM_SKU_CAP_BAND_SELECTION \
120 (3 << EEPROM_SKU_CAP_BAND_POS)
121#define EEPROM_SKU_CAP_11N_ENABLE (1 << 6)
122#define EEPROM_SKU_CAP_AMT_ENABLE (1 << 7)
123#define EEPROM_SKU_CAP_IPAN_ENABLE (1 << 8)
124
116/* *regulatory* channel data format in eeprom, one for each channel. 125/* *regulatory* channel data format in eeprom, one for each channel.
117 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */ 126 * There are separate entries for HT40 (40 MHz) vs. normal (20 MHz) channels. */
118struct iwl_eeprom_channel { 127struct iwl_eeprom_channel {
@@ -120,6 +129,17 @@ struct iwl_eeprom_channel {
120 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */ 129 s8 max_power_avg; /* max power (dBm) on this chnl, limit 31 */
121} __packed; 130} __packed;
122 131
132enum iwl_eeprom_enhanced_txpwr_flags {
133 IWL_EEPROM_ENH_TXP_FL_VALID = BIT(0),
134 IWL_EEPROM_ENH_TXP_FL_BAND_52G = BIT(1),
135 IWL_EEPROM_ENH_TXP_FL_OFDM = BIT(2),
136 IWL_EEPROM_ENH_TXP_FL_40MHZ = BIT(3),
137 IWL_EEPROM_ENH_TXP_FL_HT_AP = BIT(4),
138 IWL_EEPROM_ENH_TXP_FL_RES1 = BIT(5),
139 IWL_EEPROM_ENH_TXP_FL_RES2 = BIT(6),
140 IWL_EEPROM_ENH_TXP_FL_COMMON_TYPE = BIT(7),
141};
142
123/** 143/**
124 * iwl_eeprom_enhanced_txpwr structure 144 * iwl_eeprom_enhanced_txpwr structure
125 * This structure presents the enhanced regulatory tx power limit layout 145 * This structure presents the enhanced regulatory tx power limit layout
@@ -127,21 +147,23 @@ struct iwl_eeprom_channel {
127 * Enhanced regulatory tx power portion of eeprom image can be broken down 147 * Enhanced regulatory tx power portion of eeprom image can be broken down
128 * into individual structures; each one is 8 bytes in size and contain the 148 * into individual structures; each one is 8 bytes in size and contain the
129 * following information 149 * following information
130 * @common: (desc + channel) not used by driver, should _NOT_ be "zero" 150 * @flags: entry flags
151 * @channel: channel number
131 * @chain_a_max_pwr: chain a max power in 1/2 dBm 152 * @chain_a_max_pwr: chain a max power in 1/2 dBm
132 * @chain_b_max_pwr: chain b max power in 1/2 dBm 153 * @chain_b_max_pwr: chain b max power in 1/2 dBm
133 * @chain_c_max_pwr: chain c max power in 1/2 dBm 154 * @chain_c_max_pwr: chain c max power in 1/2 dBm
134 * @reserved: not used, should be "zero" 155 * @delta_20_in_40: 20-in-40 deltas (hi/lo)
135 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm 156 * @mimo2_max_pwr: mimo2 max power in 1/2 dBm
136 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm 157 * @mimo3_max_pwr: mimo3 max power in 1/2 dBm
137 * 158 *
138 */ 159 */
139struct iwl_eeprom_enhanced_txpwr { 160struct iwl_eeprom_enhanced_txpwr {
140 __le16 common; 161 u8 flags;
162 u8 channel;
141 s8 chain_a_max; 163 s8 chain_a_max;
142 s8 chain_b_max; 164 s8 chain_b_max;
143 s8 chain_c_max; 165 s8 chain_c_max;
144 s8 reserved; 166 u8 delta_20_in_40;
145 s8 mimo2_max; 167 s8 mimo2_max;
146 s8 mimo3_max; 168 s8 mimo3_max;
147} __packed; 169} __packed;
@@ -186,6 +208,8 @@ struct iwl_eeprom_enhanced_txpwr {
186#define EEPROM_LINK_CALIBRATION (2*0x67) 208#define EEPROM_LINK_CALIBRATION (2*0x67)
187#define EEPROM_LINK_PROCESS_ADJST (2*0x68) 209#define EEPROM_LINK_PROCESS_ADJST (2*0x68)
188#define EEPROM_LINK_OTHERS (2*0x69) 210#define EEPROM_LINK_OTHERS (2*0x69)
211#define EEPROM_LINK_TXP_LIMIT (2*0x6a)
212#define EEPROM_LINK_TXP_LIMIT_SIZE (2*0x6b)
189 213
190/* agn regulatory - indirect access */ 214/* agn regulatory - indirect access */
191#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\ 215#define EEPROM_REG_BAND_1_CHANNELS ((0x08)\
@@ -207,59 +231,6 @@ struct iwl_eeprom_enhanced_txpwr {
207#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\ 231#define EEPROM_6000_REG_BAND_24_HT40_CHANNELS ((0x80)\
208 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */ 232 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 14 bytes */
209 233
210/* 6000 and up regulatory tx power - indirect access */
211/* max. elements per section */
212#define EEPROM_MAX_TXPOWER_SECTION_ELEMENTS (8)
213#define EEPROM_TXPOWER_COMMON_HT40_INDEX (2)
214
215/**
216 * Partition the enhanced tx power portion of eeprom image into
217 * 10 sections based on band, modulation, frequency and channel
218 *
219 * Section 1: all CCK channels
220 * Section 2: all 2.4 GHz OFDM (Legacy, HT and HT40 ) channels
221 * Section 3: all 5.2 GHz OFDM (Legacy, HT and HT40) channels
222 * Section 4: 2.4 GHz 20MHz channels: 1, 2, 10, 11. Both Legacy and HT
223 * Section 5: 2.4 GHz 40MHz channels: 1, 2, 6, 7, 9, (_above_)
224 * Section 6: 5.2 GHz 20MHz channels: 36, 64, 100, both Legacy and HT
225 * Section 7: 5.2 GHz 40MHz channels: 36, 60, 100 (_above_)
226 * Section 8: 2.4 GHz channel 13, Both Legacy and HT
227 * Section 9: 2.4 GHz channel 140, Both Legacy and HT
228 * Section 10: 2.4 GHz 40MHz channels: 132, 44 (_above_)
229 */
230/* 2.4 GHz band: CCK */
231#define EEPROM_LB_CCK_20_COMMON ((0xA8)\
232 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 8 bytes */
233/* 2.4 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */
234#define EEPROM_LB_OFDM_COMMON ((0xB0)\
235 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
236/* 5.2 GHz band: 20MHz-Legacy, 20MHz-HT, 40MHz-HT */
237#define EEPROM_HB_OFDM_COMMON ((0xC8)\
238 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
239/* 2.4GHz band channels:
240 * 1Legacy, 1HT, 2Legacy, 2HT, 10Legacy, 10HT, 11Legacy, 11HT */
241#define EEPROM_LB_OFDM_20_BAND ((0xE0)\
242 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 64 bytes */
243/* 2.4 GHz band HT40 channels: (1,+1) (2,+1) (6,+1) (7,+1) (9,+1) */
244#define EEPROM_LB_OFDM_HT40_BAND ((0x120)\
245 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 40 bytes */
246/* 5.2GHz band channels: 36Legacy, 36HT, 64Legacy, 64HT, 100Legacy, 100HT */
247#define EEPROM_HB_OFDM_20_BAND ((0x148)\
248 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 48 bytes */
249/* 5.2 GHz band HT40 channels: (36,+1) (60,+1) (100,+1) */
250#define EEPROM_HB_OFDM_HT40_BAND ((0x178)\
251 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 24 bytes */
252/* 2.4 GHz band, channnel 13: Legacy, HT */
253#define EEPROM_LB_OFDM_20_CHANNEL_13 ((0x190)\
254 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
255/* 5.2 GHz band, channnel 140: Legacy, HT */
256#define EEPROM_HB_OFDM_20_CHANNEL_140 ((0x1A0)\
257 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
258/* 5.2 GHz band, HT40 channnels (132,+1) (44,+1) */
259#define EEPROM_HB_OFDM_HT40_BAND_1 ((0x1B0)\
260 | INDIRECT_ADDRESS | INDIRECT_REGULATORY) /* 16 bytes */
261
262
263/* 5050 Specific */ 234/* 5050 Specific */
264#define EEPROM_5050_TX_POWER_VERSION (4) 235#define EEPROM_5050_TX_POWER_VERSION (4)
265#define EEPROM_5050_EEPROM_VERSION (0x21E) 236#define EEPROM_5050_EEPROM_VERSION (0x21E)
@@ -389,6 +360,8 @@ struct iwl_eeprom_calib_info {
389#define INDIRECT_CALIBRATION 0x00040000 360#define INDIRECT_CALIBRATION 0x00040000
390#define INDIRECT_PROCESS_ADJST 0x00050000 361#define INDIRECT_PROCESS_ADJST 0x00050000
391#define INDIRECT_OTHERS 0x00060000 362#define INDIRECT_OTHERS 0x00060000
363#define INDIRECT_TXP_LIMIT 0x00070000
364#define INDIRECT_TXP_LIMIT_SIZE 0x00080000
392#define INDIRECT_ADDRESS 0x00100000 365#define INDIRECT_ADDRESS 0x00100000
393 366
394/* General */ 367/* General */
@@ -397,11 +370,10 @@ struct iwl_eeprom_calib_info {
397#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */ 370#define EEPROM_BOARD_REVISION (2*0x35) /* 2 bytes */
398#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ 371#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
399#define EEPROM_VERSION (2*0x44) /* 2 bytes */ 372#define EEPROM_VERSION (2*0x44) /* 2 bytes */
400#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */ 373#define EEPROM_SKU_CAP (2*0x45) /* 2 bytes */
401#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ 374#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
402#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ 375#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
403#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ 376#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
404#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */
405#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */ 377#define EEPROM_NUM_MAC_ADDRESS (2*0x4C) /* 2 bytes */
406 378
407/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ 379/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
@@ -504,6 +476,7 @@ struct iwl_eeprom_ops {
504int iwl_eeprom_init(struct iwl_priv *priv); 476int iwl_eeprom_init(struct iwl_priv *priv);
505void iwl_eeprom_free(struct iwl_priv *priv); 477void iwl_eeprom_free(struct iwl_priv *priv);
506int iwl_eeprom_check_version(struct iwl_priv *priv); 478int iwl_eeprom_check_version(struct iwl_priv *priv);
479int iwl_eeprom_check_sku(struct iwl_priv *priv);
507const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset); 480const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset);
508int iwlcore_eeprom_verify_signature(struct iwl_priv *priv); 481int iwlcore_eeprom_verify_signature(struct iwl_priv *priv);
509u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset); 482u16 iwl_eeprom_query16(const struct iwl_priv *priv, size_t offset);
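
Note on the iwl-eeprom.h changes above: the SKU capability word grows to 16 bits with band-selection, 11n, AMT and IPAN bits, and the enhanced tx power entry replaces its opaque "common" field with explicit flags plus a channel number. The stand-alone C sketch below is illustrative only; the macro and struct names are local copies of the layout, not the driver's own symbols.

/* Illustrative decode of the reworked SKU word and one flags-based
 * enhanced tx power entry (8 bytes), in plain user-space C. */
#include <stdint.h>
#include <stdio.h>

#define SKU_CAP_BAND_POS        4
#define SKU_CAP_BAND_SELECTION  (3 << SKU_CAP_BAND_POS)
#define SKU_CAP_11N_ENABLE      (1 << 6)

#define ENH_TXP_FL_VALID        (1 << 0)
#define ENH_TXP_FL_BAND_52G     (1 << 1)

struct enhanced_txpwr {                 /* mirrors the new 8-byte layout */
	uint8_t flags;
	uint8_t channel;
	int8_t  chain_a_max, chain_b_max, chain_c_max;
	uint8_t delta_20_in_40;
	int8_t  mimo2_max, mimo3_max;
};

int main(void)
{
	uint16_t sku = 0x0070;          /* hypothetical EEPROM word */
	struct enhanced_txpwr e = {
		.flags = ENH_TXP_FL_VALID | ENH_TXP_FL_BAND_52G,
		.channel = 36,
		.chain_a_max = 30,      /* 30 * 1/2 dBm = 15 dBm */
	};

	printf("band code %d, 11n %s\n",
	       (sku & SKU_CAP_BAND_SELECTION) >> SKU_CAP_BAND_POS,
	       (sku & SKU_CAP_11N_ENABLE) ? "yes" : "no");
	if (e.flags & ENH_TXP_FL_VALID)
		printf("ch %d (%s): chain A limit %d half-dBm\n",
		       e.channel,
		       (e.flags & ENH_TXP_FL_BAND_52G) ? "5.2GHz" : "2.4GHz",
		       e.chain_a_max);
	return 0;
}
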
diff --git a/drivers/net/wireless/iwlwifi/iwl-helpers.h b/drivers/net/wireless/iwlwifi/iwl-helpers.h
index 1aaef70deaec..8821f088ba7f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-helpers.h
+++ b/drivers/net/wireless/iwlwifi/iwl-helpers.h
@@ -44,15 +44,6 @@ static inline struct ieee80211_conf *ieee80211_get_hw_conf(
44 return &hw->conf; 44 return &hw->conf;
45} 45}
46 46
47static inline unsigned long elapsed_jiffies(unsigned long start,
48 unsigned long end)
49{
50 if (end >= start)
51 return end - start;
52
53 return end + (MAX_JIFFY_OFFSET - start) + 1;
54}
55
56/** 47/**
57 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning 48 * iwl_queue_inc_wrap - increment queue index, wrap back to beginning
58 * @index -- current index 49 * @index -- current index
@@ -104,42 +95,36 @@ static inline int iwl_alloc_fw_desc(struct pci_dev *pci_dev,
104 * | | | | | | | | 95 * | | | | | | | |
105 * | | | | | | +-+-------- AC queue (0-3) 96 * | | | | | | +-+-------- AC queue (0-3)
106 * | | | | | | 97 * | | | | | |
107 * | +-+-+-+-+------------ HW A-MPDU queue 98 * | +-+-+-+-+------------ HW queue ID
108 * | 99 * |
109 * +---------------------- indicates agg queue 100 * +---------------------- unused
110 */ 101 */
111static inline u8 iwl_virtual_agg_queue_num(u8 ac, u8 hwq) 102static inline void iwl_set_swq_id(struct iwl_tx_queue *txq, u8 ac, u8 hwq)
112{ 103{
113 BUG_ON(ac > 3); /* only have 2 bits */ 104 BUG_ON(ac > 3); /* only have 2 bits */
114 BUG_ON(hwq > 31); /* only have 5 bits */ 105 BUG_ON(hwq > 31); /* only use 5 bits */
115 106
116 return 0x80 | (hwq << 2) | ac; 107 txq->swq_id = (hwq << 2) | ac;
117} 108}
118 109
119static inline void iwl_wake_queue(struct iwl_priv *priv, u8 queue) 110static inline void iwl_wake_queue(struct iwl_priv *priv,
111 struct iwl_tx_queue *txq)
120{ 112{
121 u8 ac = queue; 113 u8 queue = txq->swq_id;
122 u8 hwq = queue; 114 u8 ac = queue & 3;
123 115 u8 hwq = (queue >> 2) & 0x1f;
124 if (queue & 0x80) {
125 ac = queue & 3;
126 hwq = (queue >> 2) & 0x1f;
127 }
128 116
129 if (test_and_clear_bit(hwq, priv->queue_stopped)) 117 if (test_and_clear_bit(hwq, priv->queue_stopped))
130 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0) 118 if (atomic_dec_return(&priv->queue_stop_count[ac]) <= 0)
131 ieee80211_wake_queue(priv->hw, ac); 119 ieee80211_wake_queue(priv->hw, ac);
132} 120}
133 121
134static inline void iwl_stop_queue(struct iwl_priv *priv, u8 queue) 122static inline void iwl_stop_queue(struct iwl_priv *priv,
123 struct iwl_tx_queue *txq)
135{ 124{
136 u8 ac = queue; 125 u8 queue = txq->swq_id;
137 u8 hwq = queue; 126 u8 ac = queue & 3;
138 127 u8 hwq = (queue >> 2) & 0x1f;
139 if (queue & 0x80) {
140 ac = queue & 3;
141 hwq = (queue >> 2) & 0x1f;
142 }
143 128
144 if (!test_and_set_bit(hwq, priv->queue_stopped)) 129 if (!test_and_set_bit(hwq, priv->queue_stopped))
145 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0) 130 if (atomic_inc_return(&priv->queue_stop_count[ac]) > 0)
@@ -163,6 +148,12 @@ static inline void iwl_disable_interrupts(struct iwl_priv *priv)
163 IWL_DEBUG_ISR(priv, "Disabled interrupts\n"); 148 IWL_DEBUG_ISR(priv, "Disabled interrupts\n");
164} 149}
165 150
151static inline void iwl_enable_rfkill_int(struct iwl_priv *priv)
152{
153 IWL_DEBUG_ISR(priv, "Enabling rfkill interrupt\n");
154 iwl_write32(priv, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
155}
156
166static inline void iwl_enable_interrupts(struct iwl_priv *priv) 157static inline void iwl_enable_interrupts(struct iwl_priv *priv)
167{ 158{
168 IWL_DEBUG_ISR(priv, "Enabling interrupts\n"); 159 IWL_DEBUG_ISR(priv, "Enabling interrupts\n");
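
Note on the iwl-helpers.h changes above: the 0x80-tagged virtual aggregation queue number is replaced by an swq_id stored in the tx queue itself, with the two low bits carrying the mac80211 AC and the next five bits the hardware queue. A minimal user-space sketch of that packing (names local to the example):

#include <assert.h>
#include <stdint.h>

static uint8_t pack_swq_id(uint8_t ac, uint8_t hwq)
{
	return (uint8_t)((hwq << 2) | ac);   /* ac: 0..3, hwq: 0..31 */
}

static void unpack_swq_id(uint8_t swq_id, uint8_t *ac, uint8_t *hwq)
{
	*ac  = swq_id & 3;
	*hwq = (swq_id >> 2) & 0x1f;
}

int main(void)
{
	uint8_t ac, hwq;

	unpack_swq_id(pack_swq_id(2, 17), &ac, &hwq);
	assert(ac == 2 && hwq == 17);        /* round-trips losslessly */
	return 0;
}
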
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index 86c2b6fed0c6..46ccdf406e8e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -45,9 +45,8 @@
45/* default: IWL_LED_BLINK(0) using blinking index table */ 45/* default: IWL_LED_BLINK(0) using blinking index table */
46static int led_mode; 46static int led_mode;
47module_param(led_mode, int, S_IRUGO); 47module_param(led_mode, int, S_IRUGO);
48MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), " 48MODULE_PARM_DESC(led_mode, "0=system default, "
49 "(default 0)"); 49 "1=On(RF On)/Off(RF Off), 2=blinking");
50
51 50
52static const struct { 51static const struct {
53 u16 tpt; /* Mb/s */ 52 u16 tpt; /* Mb/s */
@@ -128,12 +127,13 @@ EXPORT_SYMBOL(iwl_led_start);
128int iwl_led_associate(struct iwl_priv *priv) 127int iwl_led_associate(struct iwl_priv *priv)
129{ 128{
130 IWL_DEBUG_LED(priv, "Associated\n"); 129 IWL_DEBUG_LED(priv, "Associated\n");
131 if (led_mode == IWL_LED_BLINK) 130 if (priv->cfg->led_mode == IWL_LED_BLINK)
132 priv->allow_blinking = 1; 131 priv->allow_blinking = 1;
133 priv->last_blink_time = jiffies; 132 priv->last_blink_time = jiffies;
134 133
135 return 0; 134 return 0;
136} 135}
136EXPORT_SYMBOL(iwl_led_associate);
137 137
138int iwl_led_disassociate(struct iwl_priv *priv) 138int iwl_led_disassociate(struct iwl_priv *priv)
139{ 139{
@@ -141,6 +141,7 @@ int iwl_led_disassociate(struct iwl_priv *priv)
141 141
142 return 0; 142 return 0;
143} 143}
144EXPORT_SYMBOL(iwl_led_disassociate);
144 145
145/* 146/*
146 * calculate blink rate according to last second Tx/Rx activities 147 * calculate blink rate according to last second Tx/Rx activities
@@ -221,5 +222,8 @@ void iwl_leds_init(struct iwl_priv *priv)
221 priv->last_blink_rate = 0; 222 priv->last_blink_rate = 0;
222 priv->last_blink_time = 0; 223 priv->last_blink_time = 0;
223 priv->allow_blinking = 0; 224 priv->allow_blinking = 0;
225 if (led_mode != IWL_LED_DEFAULT &&
226 led_mode != priv->cfg->led_mode)
227 priv->cfg->led_mode = led_mode;
224} 228}
225EXPORT_SYMBOL(iwl_leds_init); 229EXPORT_SYMBOL(iwl_leds_init);
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index 49a70baa3fb6..9079b33486ef 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -47,14 +47,16 @@ enum led_type {
47 47
48/* 48/*
49 * LED mode 49 * LED mode
50 * IWL_LED_BLINK: adjust led blink rate based on blink table 50 * IWL_LED_DEFAULT: use system default
51 * IWL_LED_RF_STATE: turn LED on/off based on RF state 51 * IWL_LED_RF_STATE: turn LED on/off based on RF state
52 * LED ON = RF ON 52 * LED ON = RF ON
53 * LED OFF = RF OFF 53 * LED OFF = RF OFF
54 * IWL_LED_BLINK: adjust led blink rate based on blink table
54 */ 55 */
55enum iwl_led_mode { 56enum iwl_led_mode {
56 IWL_LED_BLINK, 57 IWL_LED_DEFAULT,
57 IWL_LED_RF_STATE, 58 IWL_LED_RF_STATE,
59 IWL_LED_BLINK,
58}; 60};
59 61
60void iwl_leds_init(struct iwl_priv *priv); 62void iwl_leds_init(struct iwl_priv *priv);
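
Note on the LED changes above: with IWL_LED_DEFAULT added as value 0, the led_mode module parameter only overrides the per-device default taken from priv->cfg->led_mode when the user sets it to something else. A small sketch of that selection, using enum names local to the example rather than the driver's:

#include <stdio.h>

enum led_mode { LED_DEFAULT, LED_RF_STATE, LED_BLINK };

static enum led_mode effective_led_mode(enum led_mode module_param,
					enum led_mode cfg_default)
{
	if (module_param != LED_DEFAULT && module_param != cfg_default)
		return module_param;     /* explicit user override */
	return cfg_default;              /* otherwise keep device default */
}

int main(void)
{
	printf("%d\n", effective_led_mode(LED_DEFAULT, LED_BLINK));  /* 2 */
	printf("%d\n", effective_led_mode(LED_RF_STATE, LED_BLINK)); /* 1 */
	return 0;
}
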
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.c b/drivers/net/wireless/iwlwifi/iwl-legacy.c
new file mode 100644
index 000000000000..bb1a742a98a0
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.c
@@ -0,0 +1,662 @@
1/******************************************************************************
2 *
3 * GPL LICENSE SUMMARY
4 *
5 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of version 2 of the GNU General Public License as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful, but
12 * WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
19 * USA
20 *
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * Contact Information:
25 * Intel Linux Wireless <ilw@linux.intel.com>
26 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
27 *****************************************************************************/
28
29#include <linux/kernel.h>
30#include <net/mac80211.h>
31
32#include "iwl-dev.h"
33#include "iwl-core.h"
34#include "iwl-helpers.h"
35#include "iwl-legacy.h"
36
37static void iwl_update_qos(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
38{
39 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
40 return;
41
42 if (!ctx->is_active)
43 return;
44
45 ctx->qos_data.def_qos_parm.qos_flags = 0;
46
47 if (ctx->qos_data.qos_active)
48 ctx->qos_data.def_qos_parm.qos_flags |=
49 QOS_PARAM_FLG_UPDATE_EDCA_MSK;
50
51 if (ctx->ht.enabled)
52 ctx->qos_data.def_qos_parm.qos_flags |= QOS_PARAM_FLG_TGN_MSK;
53
54 IWL_DEBUG_QOS(priv, "send QoS cmd with Qos active=%d FLAGS=0x%X\n",
55 ctx->qos_data.qos_active,
56 ctx->qos_data.def_qos_parm.qos_flags);
57
58 iwl_send_cmd_pdu_async(priv, ctx->qos_cmd,
59 sizeof(struct iwl_qosparam_cmd),
60 &ctx->qos_data.def_qos_parm, NULL);
61}
62
63/**
64 * iwl_legacy_mac_config - mac80211 config callback
65 */
66int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed)
67{
68 struct iwl_priv *priv = hw->priv;
69 const struct iwl_channel_info *ch_info;
70 struct ieee80211_conf *conf = &hw->conf;
71 struct ieee80211_channel *channel = conf->channel;
72 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
73 struct iwl_rxon_context *ctx;
74 unsigned long flags = 0;
75 int ret = 0;
76 u16 ch;
77 int scan_active = 0;
78 bool ht_changed[NUM_IWL_RXON_CTX] = {};
79
80 if (WARN_ON(!priv->cfg->ops->legacy))
81 return -EOPNOTSUPP;
82
83 mutex_lock(&priv->mutex);
84
85 IWL_DEBUG_MAC80211(priv, "enter to channel %d changed 0x%X\n",
86 channel->hw_value, changed);
87
88 if (unlikely(!priv->cfg->mod_params->disable_hw_scan &&
89 test_bit(STATUS_SCANNING, &priv->status))) {
90 scan_active = 1;
91 IWL_DEBUG_MAC80211(priv, "leave - scanning\n");
92 }
93
94 if (changed & (IEEE80211_CONF_CHANGE_SMPS |
95 IEEE80211_CONF_CHANGE_CHANNEL)) {
96 /* mac80211 uses static for non-HT which is what we want */
97 priv->current_ht_config.smps = conf->smps_mode;
98
99 /*
100 * Recalculate chain counts.
101 *
102 * If monitor mode is enabled then mac80211 will
103 * set up the SM PS mode to OFF if an HT channel is
104 * configured.
105 */
106 if (priv->cfg->ops->hcmd->set_rxon_chain)
107 for_each_context(priv, ctx)
108 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
109 }
110
111 /* during scanning mac80211 will delay channel setting until
112 * scan finish with changed = 0
113 */
114 if (!changed || (changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
115 if (scan_active)
116 goto set_ch_out;
117
118 ch = channel->hw_value;
119 ch_info = iwl_get_channel_info(priv, channel->band, ch);
120 if (!is_channel_valid(ch_info)) {
121 IWL_DEBUG_MAC80211(priv, "leave - invalid channel\n");
122 ret = -EINVAL;
123 goto set_ch_out;
124 }
125
126 spin_lock_irqsave(&priv->lock, flags);
127
128 for_each_context(priv, ctx) {
129 /* Configure HT40 channels */
130 if (ctx->ht.enabled != conf_is_ht(conf)) {
131 ctx->ht.enabled = conf_is_ht(conf);
132 ht_changed[ctx->ctxid] = true;
133 }
134 if (ctx->ht.enabled) {
135 if (conf_is_ht40_minus(conf)) {
136 ctx->ht.extension_chan_offset =
137 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
138 ctx->ht.is_40mhz = true;
139 } else if (conf_is_ht40_plus(conf)) {
140 ctx->ht.extension_chan_offset =
141 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
142 ctx->ht.is_40mhz = true;
143 } else {
144 ctx->ht.extension_chan_offset =
145 IEEE80211_HT_PARAM_CHA_SEC_NONE;
146 ctx->ht.is_40mhz = false;
147 }
148 } else
149 ctx->ht.is_40mhz = false;
150
151 /*
152 * Default to no protection. Protection mode will
153 * later be set from BSS config in iwl_ht_conf
154 */
155 ctx->ht.protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
156
157 /* if we are switching from ht to 2.4 clear flags
158 * from any ht related info since 2.4 does not
159 * support ht */
160 if ((le16_to_cpu(ctx->staging.channel) != ch))
161 ctx->staging.flags = 0;
162
163 iwl_set_rxon_channel(priv, channel, ctx);
164 iwl_set_rxon_ht(priv, ht_conf);
165
166 iwl_set_flags_for_band(priv, ctx, channel->band,
167 ctx->vif);
168 }
169
170 spin_unlock_irqrestore(&priv->lock, flags);
171
172 if (priv->cfg->ops->legacy->update_bcast_stations)
173 ret = priv->cfg->ops->legacy->update_bcast_stations(priv);
174
175 set_ch_out:
176 /* The list of supported rates and rate mask can be different
177 * for each band; since the band may have changed, reset
178 * the rate mask to what mac80211 lists */
179 iwl_set_rate(priv);
180 }
181
182 if (changed & (IEEE80211_CONF_CHANGE_PS |
183 IEEE80211_CONF_CHANGE_IDLE)) {
184 ret = iwl_power_update_mode(priv, false);
185 if (ret)
186 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
187 }
188
189 if (changed & IEEE80211_CONF_CHANGE_POWER) {
190 IWL_DEBUG_MAC80211(priv, "TX Power old=%d new=%d\n",
191 priv->tx_power_user_lmt, conf->power_level);
192
193 iwl_set_tx_power(priv, conf->power_level, false);
194 }
195
196 if (!iwl_is_ready(priv)) {
197 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
198 goto out;
199 }
200
201 if (scan_active)
202 goto out;
203
204 for_each_context(priv, ctx) {
205 if (memcmp(&ctx->active, &ctx->staging, sizeof(ctx->staging)))
206 iwlcore_commit_rxon(priv, ctx);
207 else
208 IWL_DEBUG_INFO(priv,
209 "Not re-sending same RXON configuration.\n");
210 if (ht_changed[ctx->ctxid])
211 iwl_update_qos(priv, ctx);
212 }
213
214out:
215 IWL_DEBUG_MAC80211(priv, "leave\n");
216 mutex_unlock(&priv->mutex);
217 return ret;
218}
219EXPORT_SYMBOL(iwl_legacy_mac_config);
220
221void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw)
222{
223 struct iwl_priv *priv = hw->priv;
224 unsigned long flags;
225 /* IBSS can only be the IWL_RXON_CTX_BSS context */
226 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
227
228 if (WARN_ON(!priv->cfg->ops->legacy))
229 return;
230
231 mutex_lock(&priv->mutex);
232 IWL_DEBUG_MAC80211(priv, "enter\n");
233
234 spin_lock_irqsave(&priv->lock, flags);
235 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
236 spin_unlock_irqrestore(&priv->lock, flags);
237
238 spin_lock_irqsave(&priv->lock, flags);
239
240 /* new association get rid of ibss beacon skb */
241 if (priv->beacon_skb)
242 dev_kfree_skb(priv->beacon_skb);
243
244 priv->beacon_skb = NULL;
245
246 priv->timestamp = 0;
247
248 spin_unlock_irqrestore(&priv->lock, flags);
249
250 iwl_scan_cancel_timeout(priv, 100);
251 if (!iwl_is_ready_rf(priv)) {
252 IWL_DEBUG_MAC80211(priv, "leave - not ready\n");
253 mutex_unlock(&priv->mutex);
254 return;
255 }
256
257 /* we are restarting association process
258 * clear RXON_FILTER_ASSOC_MSK bit
259 */
260 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
261 iwlcore_commit_rxon(priv, ctx);
262
263 iwl_set_rate(priv);
264
265 mutex_unlock(&priv->mutex);
266
267 IWL_DEBUG_MAC80211(priv, "leave\n");
268}
269EXPORT_SYMBOL(iwl_legacy_mac_reset_tsf);
270
271static void iwl_ht_conf(struct iwl_priv *priv,
272 struct ieee80211_vif *vif)
273{
274 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
275 struct ieee80211_sta *sta;
276 struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
277 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
278
279 IWL_DEBUG_ASSOC(priv, "enter:\n");
280
281 if (!ctx->ht.enabled)
282 return;
283
284 ctx->ht.protection =
285 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
286 ctx->ht.non_gf_sta_present =
287 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
288
289 ht_conf->single_chain_sufficient = false;
290
291 switch (vif->type) {
292 case NL80211_IFTYPE_STATION:
293 rcu_read_lock();
294 sta = ieee80211_find_sta(vif, bss_conf->bssid);
295 if (sta) {
296 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
297 int maxstreams;
298
299 maxstreams = (ht_cap->mcs.tx_params &
300 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
301 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
302 maxstreams += 1;
303
304 if ((ht_cap->mcs.rx_mask[1] == 0) &&
305 (ht_cap->mcs.rx_mask[2] == 0))
306 ht_conf->single_chain_sufficient = true;
307 if (maxstreams <= 1)
308 ht_conf->single_chain_sufficient = true;
309 } else {
310 /*
311 * If at all, this can only happen through a race
312 * when the AP disconnects us while we're still
313 * setting up the connection, in that case mac80211
314 * will soon tell us about that.
315 */
316 ht_conf->single_chain_sufficient = true;
317 }
318 rcu_read_unlock();
319 break;
320 case NL80211_IFTYPE_ADHOC:
321 ht_conf->single_chain_sufficient = true;
322 break;
323 default:
324 break;
325 }
326
327 IWL_DEBUG_ASSOC(priv, "leave\n");
328}
329
330static inline void iwl_set_no_assoc(struct iwl_priv *priv,
331 struct ieee80211_vif *vif)
332{
333 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
334
335 iwl_led_disassociate(priv);
336 /*
337 * inform the ucode that there is no longer an
338 * association and that no more packets should be
339 * sent
340 */
341 ctx->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK;
342 ctx->staging.assoc_id = 0;
343 iwlcore_commit_rxon(priv, ctx);
344}
345
346static void iwlcore_beacon_update(struct ieee80211_hw *hw,
347 struct ieee80211_vif *vif)
348{
349 struct iwl_priv *priv = hw->priv;
350 unsigned long flags;
351 __le64 timestamp;
352 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
353
354 if (!skb)
355 return;
356
357 IWL_DEBUG_MAC80211(priv, "enter\n");
358
359 lockdep_assert_held(&priv->mutex);
360
361 if (!priv->beacon_ctx) {
362 IWL_ERR(priv, "update beacon but no beacon context!\n");
363 dev_kfree_skb(skb);
364 return;
365 }
366
367 spin_lock_irqsave(&priv->lock, flags);
368
369 if (priv->beacon_skb)
370 dev_kfree_skb(priv->beacon_skb);
371
372 priv->beacon_skb = skb;
373
374 timestamp = ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp;
375 priv->timestamp = le64_to_cpu(timestamp);
376
377 IWL_DEBUG_MAC80211(priv, "leave\n");
378 spin_unlock_irqrestore(&priv->lock, flags);
379
380 if (!iwl_is_ready_rf(priv)) {
381 IWL_DEBUG_MAC80211(priv, "leave - RF not ready\n");
382 return;
383 }
384
385 priv->cfg->ops->legacy->post_associate(priv);
386}
387
388void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
389 struct ieee80211_vif *vif,
390 struct ieee80211_bss_conf *bss_conf,
391 u32 changes)
392{
393 struct iwl_priv *priv = hw->priv;
394 struct iwl_rxon_context *ctx = iwl_rxon_ctx_from_vif(vif);
395 int ret;
396
397 if (WARN_ON(!priv->cfg->ops->legacy))
398 return;
399
400 IWL_DEBUG_MAC80211(priv, "changes = 0x%X\n", changes);
401
402 if (!iwl_is_alive(priv))
403 return;
404
405 mutex_lock(&priv->mutex);
406
407 if (changes & BSS_CHANGED_QOS) {
408 unsigned long flags;
409
410 spin_lock_irqsave(&priv->lock, flags);
411 ctx->qos_data.qos_active = bss_conf->qos;
412 iwl_update_qos(priv, ctx);
413 spin_unlock_irqrestore(&priv->lock, flags);
414 }
415
416 if (changes & BSS_CHANGED_BEACON_ENABLED) {
417 /*
418 * the add_interface code must make sure we only ever
419 * have a single interface that could be beaconing at
420 * any time.
421 */
422 if (vif->bss_conf.enable_beacon)
423 priv->beacon_ctx = ctx;
424 else
425 priv->beacon_ctx = NULL;
426 }
427
428 if (changes & BSS_CHANGED_BEACON && vif->type == NL80211_IFTYPE_AP) {
429 dev_kfree_skb(priv->beacon_skb);
430 priv->beacon_skb = ieee80211_beacon_get(hw, vif);
431 }
432
433 if (changes & BSS_CHANGED_BEACON_INT && vif->type == NL80211_IFTYPE_AP)
434 iwl_send_rxon_timing(priv, ctx);
435
436 if (changes & BSS_CHANGED_BSSID) {
437 IWL_DEBUG_MAC80211(priv, "BSSID %pM\n", bss_conf->bssid);
438
439 /*
440 * If there is currently a HW scan going on in the
441 * background then we need to cancel it else the RXON
442 * below/in post_associate will fail.
443 */
444 if (iwl_scan_cancel_timeout(priv, 100)) {
445 IWL_WARN(priv, "Aborted scan still in progress after 100ms\n");
446 IWL_DEBUG_MAC80211(priv, "leaving - scan abort failed.\n");
447 mutex_unlock(&priv->mutex);
448 return;
449 }
450
451 /* mac80211 only sets assoc when in STATION mode */
452 if (vif->type == NL80211_IFTYPE_ADHOC || bss_conf->assoc) {
453 memcpy(ctx->staging.bssid_addr,
454 bss_conf->bssid, ETH_ALEN);
455
456 /* currently needed in a few places */
457 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
458 } else {
459 ctx->staging.filter_flags &=
460 ~RXON_FILTER_ASSOC_MSK;
461 }
462
463 }
464
465 /*
466 * This needs to be after setting the BSSID in case
467 * mac80211 decides to do both changes at once because
468 * it will invoke post_associate.
469 */
470 if (vif->type == NL80211_IFTYPE_ADHOC && changes & BSS_CHANGED_BEACON)
471 iwlcore_beacon_update(hw, vif);
472
473 if (changes & BSS_CHANGED_ERP_PREAMBLE) {
474 IWL_DEBUG_MAC80211(priv, "ERP_PREAMBLE %d\n",
475 bss_conf->use_short_preamble);
476 if (bss_conf->use_short_preamble)
477 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
478 else
479 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
480 }
481
482 if (changes & BSS_CHANGED_ERP_CTS_PROT) {
483 IWL_DEBUG_MAC80211(priv, "ERP_CTS %d\n", bss_conf->use_cts_prot);
484 if (bss_conf->use_cts_prot && (priv->band != IEEE80211_BAND_5GHZ))
485 ctx->staging.flags |= RXON_FLG_TGG_PROTECT_MSK;
486 else
487 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
488 if (bss_conf->use_cts_prot)
489 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
490 else
491 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
492 }
493
494 if (changes & BSS_CHANGED_BASIC_RATES) {
495 /* XXX use this information
496 *
497 * To do that, remove code from iwl_set_rate() and put something
498 * like this here:
499 *
500 if (A-band)
501 ctx->staging.ofdm_basic_rates =
502 bss_conf->basic_rates;
503 else
504 ctx->staging.ofdm_basic_rates =
505 bss_conf->basic_rates >> 4;
506 ctx->staging.cck_basic_rates =
507 bss_conf->basic_rates & 0xF;
508 */
509 }
510
511 if (changes & BSS_CHANGED_HT) {
512 iwl_ht_conf(priv, vif);
513
514 if (priv->cfg->ops->hcmd->set_rxon_chain)
515 priv->cfg->ops->hcmd->set_rxon_chain(priv, ctx);
516 }
517
518 if (changes & BSS_CHANGED_ASSOC) {
519 IWL_DEBUG_MAC80211(priv, "ASSOC %d\n", bss_conf->assoc);
520 if (bss_conf->assoc) {
521 priv->timestamp = bss_conf->timestamp;
522
523 iwl_led_associate(priv);
524
525 if (!iwl_is_rfkill(priv))
526 priv->cfg->ops->legacy->post_associate(priv);
527 } else
528 iwl_set_no_assoc(priv, vif);
529 }
530
531 if (changes && iwl_is_associated_ctx(ctx) && bss_conf->aid) {
532 IWL_DEBUG_MAC80211(priv, "Changes (%#x) while associated\n",
533 changes);
534 ret = iwl_send_rxon_assoc(priv, ctx);
535 if (!ret) {
536 /* Sync active_rxon with latest change. */
537 memcpy((void *)&ctx->active,
538 &ctx->staging,
539 sizeof(struct iwl_rxon_cmd));
540 }
541 }
542
543 if (changes & BSS_CHANGED_BEACON_ENABLED) {
544 if (vif->bss_conf.enable_beacon) {
545 memcpy(ctx->staging.bssid_addr,
546 bss_conf->bssid, ETH_ALEN);
547 memcpy(priv->bssid, bss_conf->bssid, ETH_ALEN);
548 iwl_led_associate(priv);
549 priv->cfg->ops->legacy->config_ap(priv);
550 } else
551 iwl_set_no_assoc(priv, vif);
552 }
553
554 if (changes & BSS_CHANGED_IBSS) {
555 ret = priv->cfg->ops->legacy->manage_ibss_station(priv, vif,
556 bss_conf->ibss_joined);
557 if (ret)
558 IWL_ERR(priv, "failed to %s IBSS station %pM\n",
559 bss_conf->ibss_joined ? "add" : "remove",
560 bss_conf->bssid);
561 }
562
563 mutex_unlock(&priv->mutex);
564
565 IWL_DEBUG_MAC80211(priv, "leave\n");
566}
567EXPORT_SYMBOL(iwl_legacy_mac_bss_info_changed);
568
569irqreturn_t iwl_isr_legacy(int irq, void *data)
570{
571 struct iwl_priv *priv = data;
572 u32 inta, inta_mask;
573 u32 inta_fh;
574 unsigned long flags;
575 if (!priv)
576 return IRQ_NONE;
577
578 spin_lock_irqsave(&priv->lock, flags);
579
580 /* Disable (but don't clear!) interrupts here to avoid
581 * back-to-back ISRs and sporadic interrupts from our NIC.
582 * If we have something to service, the tasklet will re-enable ints.
583 * If we *don't* have something, we'll re-enable before leaving here. */
584 inta_mask = iwl_read32(priv, CSR_INT_MASK); /* just for debug */
585 iwl_write32(priv, CSR_INT_MASK, 0x00000000);
586
587 /* Discover which interrupts are active/pending */
588 inta = iwl_read32(priv, CSR_INT);
589 inta_fh = iwl_read32(priv, CSR_FH_INT_STATUS);
590
591 /* Ignore interrupt if there's nothing in NIC to service.
592 * This may be due to IRQ shared with another device,
593 * or due to sporadic interrupts thrown from our NIC. */
594 if (!inta && !inta_fh) {
595 IWL_DEBUG_ISR(priv,
596 "Ignore interrupt, inta == 0, inta_fh == 0\n");
597 goto none;
598 }
599
600 if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
601 /* Hardware disappeared. It might have already raised
602 * an interrupt */
603 IWL_WARN(priv, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
604 goto unplugged;
605 }
606
607 IWL_DEBUG_ISR(priv, "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
608 inta, inta_mask, inta_fh);
609
610 inta &= ~CSR_INT_BIT_SCD;
611
612 /* iwl_irq_tasklet() will service interrupts and re-enable them */
613 if (likely(inta || inta_fh))
614 tasklet_schedule(&priv->irq_tasklet);
615
616unplugged:
617 spin_unlock_irqrestore(&priv->lock, flags);
618 return IRQ_HANDLED;
619
620none:
621 /* re-enable interrupts here since we don't have anything to service. */
622 /* only Re-enable if disabled by irq */
623 if (test_bit(STATUS_INT_ENABLED, &priv->status))
624 iwl_enable_interrupts(priv);
625 spin_unlock_irqrestore(&priv->lock, flags);
626 return IRQ_NONE;
627}
628EXPORT_SYMBOL(iwl_isr_legacy);
629
630/*
631 * iwl_legacy_tx_cmd_protection: Set rts/cts. 3945 and 4965 only share this
632 * function.
633 */
634void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
635 struct ieee80211_tx_info *info,
636 __le16 fc, __le32 *tx_flags)
637{
638 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
639 *tx_flags |= TX_CMD_FLG_RTS_MSK;
640 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
641 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
642
643 if (!ieee80211_is_mgmt(fc))
644 return;
645
646 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
647 case cpu_to_le16(IEEE80211_STYPE_AUTH):
648 case cpu_to_le16(IEEE80211_STYPE_DEAUTH):
649 case cpu_to_le16(IEEE80211_STYPE_ASSOC_REQ):
650 case cpu_to_le16(IEEE80211_STYPE_REASSOC_REQ):
651 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
652 *tx_flags |= TX_CMD_FLG_CTS_MSK;
653 break;
654 }
655 } else if (info->control.rates[0].flags &
656 IEEE80211_TX_RC_USE_CTS_PROTECT) {
657 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
658 *tx_flags |= TX_CMD_FLG_CTS_MSK;
659 *tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
660 }
661}
662EXPORT_SYMBOL(iwl_legacy_tx_cmd_protection);
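
Note: the new iwl-legacy.c collects mac80211 handlers shared by the older 3945/4965-style devices. Purely as a hypothetical illustration (not part of this patch), a legacy device's ops table could reference the exported handlers roughly like this, with the remaining callbacks assumed to exist elsewhere in that driver:

#include <net/mac80211.h>
#include "iwl-legacy.h"

/* hypothetical ops table for a legacy device */
static const struct ieee80211_ops iwl_legacy_example_ops = {
	/* ... tx, start, stop, add_interface and other handlers ... */
	.config           = iwl_legacy_mac_config,
	.reset_tsf        = iwl_legacy_mac_reset_tsf,
	.bss_info_changed = iwl_legacy_mac_bss_info_changed,
};
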
diff --git a/drivers/net/wireless/iwlwifi/iwl-legacy.h b/drivers/net/wireless/iwlwifi/iwl-legacy.h
new file mode 100644
index 000000000000..9f7b2f935964
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-legacy.h
@@ -0,0 +1,79 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2008 - 2010 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2010 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *****************************************************************************/
62
63#ifndef __iwl_legacy_h__
64#define __iwl_legacy_h__
65
66/* mac80211 handlers */
67int iwl_legacy_mac_config(struct ieee80211_hw *hw, u32 changed);
68void iwl_legacy_mac_reset_tsf(struct ieee80211_hw *hw);
69void iwl_legacy_mac_bss_info_changed(struct ieee80211_hw *hw,
70 struct ieee80211_vif *vif,
71 struct ieee80211_bss_conf *bss_conf,
72 u32 changes);
73void iwl_legacy_tx_cmd_protection(struct iwl_priv *priv,
74 struct ieee80211_tx_info *info,
75 __le16 fc, __le32 *tx_flags);
76
77irqreturn_t iwl_isr_legacy(int irq, void *data);
78
79#endif /* __iwl_legacy_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 49d7788937a9..1eec18d909d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -75,6 +75,10 @@ struct iwl_power_vec_entry {
75 75
76#define NOSLP cpu_to_le16(0), 0, 0 76#define NOSLP cpu_to_le16(0), 0, 0
77#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0 77#define SLP IWL_POWER_DRIVER_ALLOW_SLEEP_MSK, 0, 0
78#define ASLP (IWL_POWER_POWER_SAVE_ENA_MSK | \
79 IWL_POWER_POWER_MANAGEMENT_ENA_MSK | \
80 IWL_POWER_ADVANCE_PM_ENA_MSK)
81#define ASLP_TOUT(T) cpu_to_le32(T)
78#define TU_TO_USEC 1024 82#define TU_TO_USEC 1024
79#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC) 83#define SLP_TOUT(T) cpu_to_le32((T) * TU_TO_USEC)
80#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \ 84#define SLP_VEC(X0, X1, X2, X3, X4) {cpu_to_le32(X0), \
@@ -114,6 +118,52 @@ static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
114 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0} 118 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(4, 7, 10, 10, 0xFF)}, 0}
115}; 119};
116 120
 121/* advanced power management */
122/* DTIM 0 - 2 */
123static const struct iwl_power_vec_entry apm_range_0[IWL_POWER_NUM] = {
124 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
125 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
126 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
127 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
128 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
129 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
130 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
131 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
132 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
133 SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
134};
135
136
137/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
138/* DTIM 3 - 10 */
139static const struct iwl_power_vec_entry apm_range_1[IWL_POWER_NUM] = {
140 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
141 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
142 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
143 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
144 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
145 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
146 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
147 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
148 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
149 SLP_VEC(1, 2, 6, 8, 0xFF), 0}, 2}
150};
151
152/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
153/* DTIM 11 - */
154static const struct iwl_power_vec_entry apm_range_2[IWL_POWER_NUM] = {
155 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
156 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
157 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
158 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
159 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
160 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
161 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
162 SLP_VEC(1, 2, 4, 6, 0xFF), 0}, 0},
163 {{ASLP, 0, 0, ASLP_TOUT(50), ASLP_TOUT(50),
164 SLP_VEC(1, 2, 6, 8, 0xFF), ASLP_TOUT(2)}, 2}
165};
166
117static void iwl_static_sleep_cmd(struct iwl_priv *priv, 167static void iwl_static_sleep_cmd(struct iwl_priv *priv,
118 struct iwl_powertable_cmd *cmd, 168 struct iwl_powertable_cmd *cmd,
119 enum iwl_power_level lvl, int period) 169 enum iwl_power_level lvl, int period)
@@ -124,11 +174,19 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
124 u8 skip; 174 u8 skip;
125 u32 slp_itrvl; 175 u32 slp_itrvl;
126 176
127 table = range_2; 177 if (priv->cfg->adv_pm) {
128 if (period <= IWL_DTIM_RANGE_1_MAX) 178 table = apm_range_2;
129 table = range_1; 179 if (period <= IWL_DTIM_RANGE_1_MAX)
130 if (period <= IWL_DTIM_RANGE_0_MAX) 180 table = apm_range_1;
131 table = range_0; 181 if (period <= IWL_DTIM_RANGE_0_MAX)
182 table = apm_range_0;
183 } else {
184 table = range_2;
185 if (period <= IWL_DTIM_RANGE_1_MAX)
186 table = range_1;
187 if (period <= IWL_DTIM_RANGE_0_MAX)
188 table = range_0;
189 }
132 190
133 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM); 191 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
134 192
@@ -163,6 +221,20 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
163 else 221 else
164 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; 222 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
165 223
224 if (priv->cfg->base_params->shadow_reg_enable)
225 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
226 else
227 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
228
229 if (priv->cfg->bt_params &&
230 priv->cfg->bt_params->advanced_bt_coexist) {
231 if (!priv->cfg->bt_params->bt_sco_disable)
232 cmd->flags |= IWL_POWER_BT_SCO_ENA;
233 else
234 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
235 }
236
237
166 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]); 238 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
167 if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL) 239 if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
168 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] = 240 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
@@ -236,6 +308,19 @@ static void iwl_power_fill_sleep_cmd(struct iwl_priv *priv,
236 if (priv->power_data.pci_pm) 308 if (priv->power_data.pci_pm)
237 cmd->flags |= IWL_POWER_PCI_PM_MSK; 309 cmd->flags |= IWL_POWER_PCI_PM_MSK;
238 310
311 if (priv->cfg->base_params->shadow_reg_enable)
312 cmd->flags |= IWL_POWER_SHADOW_REG_ENA;
313 else
314 cmd->flags &= ~IWL_POWER_SHADOW_REG_ENA;
315
316 if (priv->cfg->bt_params &&
317 priv->cfg->bt_params->advanced_bt_coexist) {
318 if (!priv->cfg->bt_params->bt_sco_disable)
319 cmd->flags |= IWL_POWER_BT_SCO_ENA;
320 else
321 cmd->flags &= ~IWL_POWER_BT_SCO_ENA;
322 }
323
239 cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms); 324 cmd->rx_data_timeout = cpu_to_le32(1000 * dynps_ms);
240 cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms); 325 cmd->tx_data_timeout = cpu_to_le32(1000 * dynps_ms);
241 326
@@ -263,70 +348,95 @@ static int iwl_set_power(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd)
263 sizeof(struct iwl_powertable_cmd), cmd); 348 sizeof(struct iwl_powertable_cmd), cmd);
264} 349}
265 350
266/* priv->mutex must be held */ 351static void iwl_power_build_cmd(struct iwl_priv *priv,
267int iwl_power_update_mode(struct iwl_priv *priv, bool force) 352 struct iwl_powertable_cmd *cmd)
268{ 353{
269 int ret = 0;
270 bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS; 354 bool enabled = priv->hw->conf.flags & IEEE80211_CONF_PS;
271 bool update_chains;
272 struct iwl_powertable_cmd cmd;
273 int dtimper; 355 int dtimper;
274 356
275 /* Don't update the RX chain when chain noise calibration is running */
276 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
277 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
278
279 dtimper = priv->hw->conf.ps_dtim_period ?: 1; 357 dtimper = priv->hw->conf.ps_dtim_period ?: 1;
280 358
281 if (priv->cfg->base_params->broken_powersave) 359 if (priv->cfg->base_params->broken_powersave)
282 iwl_power_sleep_cam_cmd(priv, &cmd); 360 iwl_power_sleep_cam_cmd(priv, cmd);
283 else if (priv->cfg->base_params->supports_idle && 361 else if (priv->cfg->base_params->supports_idle &&
284 priv->hw->conf.flags & IEEE80211_CONF_IDLE) 362 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
285 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20); 363 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_5, 20);
286 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection && 364 else if (priv->cfg->ops->lib->tt_ops.lower_power_detection &&
287 priv->cfg->ops->lib->tt_ops.tt_power_mode && 365 priv->cfg->ops->lib->tt_ops.tt_power_mode &&
288 priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) { 366 priv->cfg->ops->lib->tt_ops.lower_power_detection(priv)) {
289 /* in thermal throttling low power state */ 367 /* in thermal throttling low power state */
290 iwl_static_sleep_cmd(priv, &cmd, 368 iwl_static_sleep_cmd(priv, cmd,
291 priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper); 369 priv->cfg->ops->lib->tt_ops.tt_power_mode(priv), dtimper);
292 } else if (!enabled) 370 } else if (!enabled)
293 iwl_power_sleep_cam_cmd(priv, &cmd); 371 iwl_power_sleep_cam_cmd(priv, cmd);
294 else if (priv->power_data.debug_sleep_level_override >= 0) 372 else if (priv->power_data.debug_sleep_level_override >= 0)
295 iwl_static_sleep_cmd(priv, &cmd, 373 iwl_static_sleep_cmd(priv, cmd,
296 priv->power_data.debug_sleep_level_override, 374 priv->power_data.debug_sleep_level_override,
297 dtimper); 375 dtimper);
298 else if (no_sleep_autoadjust) 376 else if (no_sleep_autoadjust)
299 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_1, dtimper); 377 iwl_static_sleep_cmd(priv, cmd, IWL_POWER_INDEX_1, dtimper);
300 else 378 else
301 iwl_power_fill_sleep_cmd(priv, &cmd, 379 iwl_power_fill_sleep_cmd(priv, cmd,
302 priv->hw->conf.dynamic_ps_timeout, 380 priv->hw->conf.dynamic_ps_timeout,
303 priv->hw->conf.max_sleep_period); 381 priv->hw->conf.max_sleep_period);
382}
383
384int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
385 bool force)
386{
387 int ret;
388 bool update_chains;
389
390 lockdep_assert_held(&priv->mutex);
391
392 /* Don't update the RX chain when chain noise calibration is running */
393 update_chains = priv->chain_noise_data.state == IWL_CHAIN_NOISE_DONE ||
394 priv->chain_noise_data.state == IWL_CHAIN_NOISE_ALIVE;
395
396 if (!memcmp(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd)) && !force)
397 return 0;
398
399 if (!iwl_is_ready_rf(priv))
400 return -EIO;
304 401
305 if (iwl_is_ready_rf(priv) && 402 /* scan complete use sleep_power_next, need to be updated */
306 (memcmp(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)) || force)) { 403 memcpy(&priv->power_data.sleep_cmd_next, cmd, sizeof(*cmd));
307 if (cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK) 404 if (test_bit(STATUS_SCANNING, &priv->status) && !force) {
308 set_bit(STATUS_POWER_PMI, &priv->status); 405 IWL_DEBUG_INFO(priv, "Defer power set mode while scanning\n");
309 406 return 0;
310 ret = iwl_set_power(priv, &cmd); 407 }
311 if (!ret) { 408
312 if (!(cmd.flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)) 409 if (cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK)
313 clear_bit(STATUS_POWER_PMI, &priv->status); 410 set_bit(STATUS_POWER_PMI, &priv->status);
314 411
315 if (priv->cfg->ops->lib->update_chain_flags && 412 ret = iwl_set_power(priv, cmd);
316 update_chains) 413 if (!ret) {
317 priv->cfg->ops->lib->update_chain_flags(priv); 414 if (!(cmd->flags & IWL_POWER_DRIVER_ALLOW_SLEEP_MSK))
318 else if (priv->cfg->ops->lib->update_chain_flags) 415 clear_bit(STATUS_POWER_PMI, &priv->status);
319 IWL_DEBUG_POWER(priv, 416
417 if (priv->cfg->ops->lib->update_chain_flags && update_chains)
418 priv->cfg->ops->lib->update_chain_flags(priv);
419 else if (priv->cfg->ops->lib->update_chain_flags)
420 IWL_DEBUG_POWER(priv,
320 "Cannot update the power, chain noise " 421 "Cannot update the power, chain noise "
321 "calibration running: %d\n", 422 "calibration running: %d\n",
322 priv->chain_noise_data.state); 423 priv->chain_noise_data.state);
323 memcpy(&priv->power_data.sleep_cmd, &cmd, sizeof(cmd)); 424
324 } else 425 memcpy(&priv->power_data.sleep_cmd, cmd, sizeof(*cmd));
325 IWL_ERR(priv, "set power fail, ret = %d", ret); 426 } else
326 } 427 IWL_ERR(priv, "set power fail, ret = %d", ret);
327 428
328 return ret; 429 return ret;
329} 430}
431EXPORT_SYMBOL(iwl_power_set_mode);
432
433int iwl_power_update_mode(struct iwl_priv *priv, bool force)
434{
435 struct iwl_powertable_cmd cmd;
436
437 iwl_power_build_cmd(priv, &cmd);
438 return iwl_power_set_mode(priv, &cmd, force);
439}
330EXPORT_SYMBOL(iwl_power_update_mode); 440EXPORT_SYMBOL(iwl_power_update_mode);
331 441
 332/* initialize to default */ 442/* initialize to default */
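
Note on the iwl-power.c changes above: iwl_power_update_mode() is split into a command builder plus iwl_power_set_mode(), which skips redundant updates, always records the latest request in sleep_cmd_next, and defers the actual uCode command while a scan is in flight. A self-contained sketch of that defer-and-replay pattern (all names local to the example):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct power_cmd { int level; int timeout_ms; };

static struct power_cmd current_cmd, next_cmd;
static bool scanning;

static void commit(const struct power_cmd *cmd)
{
	current_cmd = *cmd;
	printf("committed level %d\n", cmd->level);
}

static int set_mode(const struct power_cmd *cmd, bool force)
{
	if (!force && !memcmp(&current_cmd, cmd, sizeof(*cmd)))
		return 0;                /* nothing changed, skip the command */
	next_cmd = *cmd;                 /* always remember the latest request */
	if (scanning && !force)
		return 0;                /* defer until the scan completes */
	commit(cmd);
	return 0;
}

int main(void)
{
	struct power_cmd c = { .level = 3, .timeout_ms = 25 };

	scanning = true;
	set_mode(&c, false);             /* deferred */
	scanning = false;
	set_mode(&next_cmd, false);      /* scan-complete path replays it */
	return 0;
}
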
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index df81565a7cc4..fe012032c28c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -41,10 +41,13 @@ enum iwl_power_level {
41 41
42struct iwl_power_mgr { 42struct iwl_power_mgr {
43 struct iwl_powertable_cmd sleep_cmd; 43 struct iwl_powertable_cmd sleep_cmd;
44 struct iwl_powertable_cmd sleep_cmd_next;
44 int debug_sleep_level_override; 45 int debug_sleep_level_override;
45 bool pci_pm; 46 bool pci_pm;
46}; 47};
47 48
49int iwl_power_set_mode(struct iwl_priv *priv, struct iwl_powertable_cmd *cmd,
50 bool force);
48int iwl_power_update_mode(struct iwl_priv *priv, bool force); 51int iwl_power_update_mode(struct iwl_priv *priv, bool force);
49void iwl_power_initialize(struct iwl_priv *priv); 52void iwl_power_initialize(struct iwl_priv *priv);
50 53
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 5469655646ae..86f5123bccda 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -83,10 +83,10 @@
83#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058) 83#define APMG_DIGITAL_SVR_REG (APMG_BASE + 0x0058)
84#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C) 84#define APMG_ANALOG_SVR_REG (APMG_BASE + 0x006C)
85 85
86#define APMS_CLK_VAL_MRB_FUNC_MODE (0x00000001)
86#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200) 87#define APMG_CLK_VAL_DMA_CLK_RQT (0x00000200)
87#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800) 88#define APMG_CLK_VAL_BSM_CLK_RQT (0x00000800)
88 89
89
90#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000) 90#define APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS (0x00400000)
91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000) 91#define APMG_PS_CTRL_VAL_RESET_REQ (0x04000000)
92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000) 92#define APMG_PS_CTRL_MSK_PWR_SRC (0x03000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index f436270ca39a..87a6fd84d4d2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -134,28 +134,37 @@ void iwl_rx_queue_update_write_ptr(struct iwl_priv *priv, struct iwl_rx_queue *q
134 if (q->need_update == 0) 134 if (q->need_update == 0)
135 goto exit_unlock; 135 goto exit_unlock;
136 136
137 /* If power-saving is in use, make sure device is awake */ 137 if (priv->cfg->base_params->shadow_reg_enable) {
138 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 138 /* shadow register enabled */
139 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1); 139 /* Device expects a multiple of 8 */
140 q->write_actual = (q->write & ~0x7);
141 iwl_write32(priv, rx_wrt_ptr_reg, q->write_actual);
142 } else {
143 /* If power-saving is in use, make sure device is awake */
144 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
145 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
140 146
141 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 147 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
142 IWL_DEBUG_INFO(priv, "Rx queue requesting wakeup, GP1 = 0x%x\n", 148 IWL_DEBUG_INFO(priv,
143 reg); 149 "Rx queue requesting wakeup,"
144 iwl_set_bit(priv, CSR_GP_CNTRL, 150 " GP1 = 0x%x\n", reg);
145 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 151 iwl_set_bit(priv, CSR_GP_CNTRL,
146 goto exit_unlock; 152 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
147 } 153 goto exit_unlock;
154 }
148 155
149 q->write_actual = (q->write & ~0x7); 156 q->write_actual = (q->write & ~0x7);
150 iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); 157 iwl_write_direct32(priv, rx_wrt_ptr_reg,
158 q->write_actual);
151 159
152 /* Else device is assumed to be awake */ 160 /* Else device is assumed to be awake */
153 } else { 161 } else {
154 /* Device expects a multiple of 8 */ 162 /* Device expects a multiple of 8 */
155 q->write_actual = (q->write & ~0x7); 163 q->write_actual = (q->write & ~0x7);
156 iwl_write_direct32(priv, rx_wrt_ptr_reg, q->write_actual); 164 iwl_write_direct32(priv, rx_wrt_ptr_reg,
165 q->write_actual);
166 }
157 } 167 }
158
159 q->need_update = 0; 168 q->need_update = 0;
160 169
161 exit_unlock: 170 exit_unlock:
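
Note on the iwl-rx.c change above: the RX write-pointer update gains a shadow-register fast path that writes the 8-aligned pointer directly, while the legacy path still has to wake the NIC when power management is active. A tiny illustrative sketch of the two decisions involved (plain C, local names):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* device expects the RX write pointer rounded down to a multiple of 8 */
static uint32_t rx_write_actual(uint32_t write)
{
	return write & ~0x7u;
}

/* with shadow registers the pointer is written directly; only the legacy
 * path needs the wake-the-NIC handshake when power management is on */
static bool need_wakeup_before_write(bool shadow_reg, bool power_pmi)
{
	return !shadow_reg && power_pmi;
}

int main(void)
{
	assert(rx_write_actual(13) == 8);
	assert(!need_wakeup_before_write(true, true));
	assert(need_wakeup_before_write(false, true));
	return 0;
}
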
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 67da31295781..12d9363d0afe 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -252,8 +252,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
252 252
253 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n", 253 IWL_DEBUG_SCAN(priv, "Scan on %sGHz took %dms\n",
254 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2", 254 (priv->scan_band == IEEE80211_BAND_2GHZ) ? "2.4" : "5.2",
255 jiffies_to_msecs(elapsed_jiffies 255 jiffies_to_msecs(jiffies - priv->scan_start));
256 (priv->scan_start, jiffies)));
257 256
258 queue_work(priv->workqueue, &priv->scan_completed); 257 queue_work(priv->workqueue, &priv->scan_completed);
259 258
@@ -603,13 +602,16 @@ out_settings:
603 if (!iwl_is_ready_rf(priv)) 602 if (!iwl_is_ready_rf(priv))
604 goto out; 603 goto out;
605 604
606 /* Since setting the TXPOWER may have been deferred while 605 /*
607 * performing the scan, fire one off */ 606 * We do not commit power settings while scan is pending,
608 iwl_set_tx_power(priv, priv->tx_power_user_lmt, true); 607 * do it now if the settings changed.
608 */
609 iwl_power_set_mode(priv, &priv->power_data.sleep_cmd_next, false);
610 iwl_set_tx_power(priv, priv->tx_power_next, false);
609 611
610 priv->cfg->ops->utils->post_scan(priv); 612 priv->cfg->ops->utils->post_scan(priv);
611 613
612 out: 614out:
613 mutex_unlock(&priv->mutex); 615 mutex_unlock(&priv->mutex);
614} 616}
615 617
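
Note on the iwl-scan.c change above: replacing elapsed_jiffies() with a plain jiffies - priv->scan_start is safe because unsigned subtraction wraps modulo 2^BITS, so the elapsed count stays correct even across a counter overflow. A quick demonstration with a 32-bit counter:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t start = 0xFFFFFFF0u;             /* just before the wrap */
	uint32_t now   = 0x00000010u;             /* shortly after the wrap */

	assert((uint32_t)(now - start) == 0x20);  /* 32 ticks elapsed */
	return 0;
}
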
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index 972b738c0e4a..49493d176515 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -400,7 +400,8 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, u8 sta_id)
400} 400}
401 401
402static int iwl_send_remove_station(struct iwl_priv *priv, 402static int iwl_send_remove_station(struct iwl_priv *priv,
403 const u8 *addr, int sta_id) 403 const u8 *addr, int sta_id,
404 bool temporary)
404{ 405{
405 struct iwl_rx_packet *pkt; 406 struct iwl_rx_packet *pkt;
406 int ret; 407 int ret;
@@ -436,9 +437,11 @@ static int iwl_send_remove_station(struct iwl_priv *priv,
436 if (!ret) { 437 if (!ret) {
437 switch (pkt->u.rem_sta.status) { 438 switch (pkt->u.rem_sta.status) {
438 case REM_STA_SUCCESS_MSK: 439 case REM_STA_SUCCESS_MSK:
439 spin_lock_irqsave(&priv->sta_lock, flags_spin); 440 if (!temporary) {
440 iwl_sta_ucode_deactivate(priv, sta_id); 441 spin_lock_irqsave(&priv->sta_lock, flags_spin);
441 spin_unlock_irqrestore(&priv->sta_lock, flags_spin); 442 iwl_sta_ucode_deactivate(priv, sta_id);
443 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
444 }
442 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 445 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
443 break; 446 break;
444 default: 447 default:
@@ -505,7 +508,7 @@ int iwl_remove_station(struct iwl_priv *priv, const u8 sta_id,
505 508
506 spin_unlock_irqrestore(&priv->sta_lock, flags); 509 spin_unlock_irqrestore(&priv->sta_lock, flags);
507 510
508 return iwl_send_remove_station(priv, addr, sta_id); 511 return iwl_send_remove_station(priv, addr, sta_id, false);
509out_err: 512out_err:
510 spin_unlock_irqrestore(&priv->sta_lock, flags); 513 spin_unlock_irqrestore(&priv->sta_lock, flags);
511 return -EINVAL; 514 return -EINVAL;
@@ -624,6 +627,49 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
624} 627}
625EXPORT_SYMBOL(iwl_restore_stations); 628EXPORT_SYMBOL(iwl_restore_stations);
626 629
630void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
631{
632 unsigned long flags;
633 int sta_id = ctx->ap_sta_id;
634 int ret;
635 struct iwl_addsta_cmd sta_cmd;
636 struct iwl_link_quality_cmd lq;
637 bool active;
638
639 spin_lock_irqsave(&priv->sta_lock, flags);
640 if (!(priv->stations[sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
641 spin_unlock_irqrestore(&priv->sta_lock, flags);
642 return;
643 }
644
645 memcpy(&sta_cmd, &priv->stations[sta_id].sta, sizeof(sta_cmd));
646 sta_cmd.mode = 0;
647 memcpy(&lq, priv->stations[sta_id].lq, sizeof(lq));
648
649 active = priv->stations[sta_id].used & IWL_STA_UCODE_ACTIVE;
650 priv->stations[sta_id].used &= ~IWL_STA_DRIVER_ACTIVE;
651 spin_unlock_irqrestore(&priv->sta_lock, flags);
652
653 if (active) {
654 ret = iwl_send_remove_station(
655 priv, priv->stations[sta_id].sta.sta.addr,
656 sta_id, true);
657 if (ret)
658 IWL_ERR(priv, "failed to remove STA %pM (%d)\n",
659 priv->stations[sta_id].sta.sta.addr, ret);
660 }
661 spin_lock_irqsave(&priv->sta_lock, flags);
662 priv->stations[sta_id].used |= IWL_STA_DRIVER_ACTIVE;
663 spin_unlock_irqrestore(&priv->sta_lock, flags);
664
665 ret = iwl_send_add_sta(priv, &sta_cmd, CMD_SYNC);
666 if (ret)
667 IWL_ERR(priv, "failed to re-add STA %pM (%d)\n",
668 priv->stations[sta_id].sta.sta.addr, ret);
669 iwl_send_lq_cmd(priv, ctx, &lq, CMD_SYNC, true);
670}
671EXPORT_SYMBOL(iwl_reprogram_ap_sta);
672
627int iwl_get_free_ucode_key_index(struct iwl_priv *priv) 673int iwl_get_free_ucode_key_index(struct iwl_priv *priv)
628{ 674{
629 int i; 675 int i;
@@ -736,6 +782,14 @@ int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
736 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION)) 782 if (WARN_ON(lq->sta_id == IWL_INVALID_STATION))
737 return -EINVAL; 783 return -EINVAL;
738 784
785
786 spin_lock_irqsave(&priv->sta_lock, flags_spin);
787 if (!(priv->stations[lq->sta_id].used & IWL_STA_DRIVER_ACTIVE)) {
788 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
789 return -EINVAL;
790 }
791 spin_unlock_irqrestore(&priv->sta_lock, flags_spin);
792
739 iwl_dump_lq_cmd(priv, lq); 793 iwl_dump_lq_cmd(priv, lq);
740 BUG_ON(init && (cmd.flags & CMD_ASYNC)); 794 BUG_ON(init && (cmd.flags & CMD_ASYNC));
741 795
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 06475872eee4..206f1e1a0caf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -63,6 +63,7 @@ u8 iwl_prep_station(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
63 63
64int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx, 64int iwl_send_lq_cmd(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
65 struct iwl_link_quality_cmd *lq, u8 flags, bool init); 65 struct iwl_link_quality_cmd *lq, u8 flags, bool init);
66void iwl_reprogram_ap_sta(struct iwl_priv *priv, struct iwl_rxon_context *ctx);
66 67
67/** 68/**
68 * iwl_clear_driver_stations - clear knowledge of all stations from driver 69 * iwl_clear_driver_stations - clear knowledge of all stations from driver
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index 7261ee49f282..073b6ce6141c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -49,30 +49,39 @@ void iwl_txq_update_write_ptr(struct iwl_priv *priv, struct iwl_tx_queue *txq)
49 if (txq->need_update == 0) 49 if (txq->need_update == 0)
50 return; 50 return;
51 51
52 /* if we're trying to save power */ 52 if (priv->cfg->base_params->shadow_reg_enable) {
53 if (test_bit(STATUS_POWER_PMI, &priv->status)) { 53 /* shadow register enabled */
54 /* wake up nic if it's powered down ...
55 * uCode will wake up, and interrupt us again, so next
56 * time we'll skip this part. */
57 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
58
59 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
60 IWL_DEBUG_INFO(priv, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
61 txq_id, reg);
62 iwl_set_bit(priv, CSR_GP_CNTRL,
63 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
64 return;
65 }
66
67 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
68 txq->q.write_ptr | (txq_id << 8));
69
70 /* else not in power-save mode, uCode will never sleep when we're
71 * trying to tx (during RFKILL, we're not trying to tx). */
72 } else
73 iwl_write32(priv, HBUS_TARG_WRPTR, 54 iwl_write32(priv, HBUS_TARG_WRPTR,
74 txq->q.write_ptr | (txq_id << 8)); 55 txq->q.write_ptr | (txq_id << 8));
56 } else {
57 /* if we're trying to save power */
58 if (test_bit(STATUS_POWER_PMI, &priv->status)) {
59 /* wake up nic if it's powered down ...
60 * uCode will wake up, and interrupt us again, so next
61 * time we'll skip this part. */
62 reg = iwl_read32(priv, CSR_UCODE_DRV_GP1);
63
64 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
65 IWL_DEBUG_INFO(priv,
66 "Tx queue %d requesting wakeup,"
67 " GP1 = 0x%x\n", txq_id, reg);
68 iwl_set_bit(priv, CSR_GP_CNTRL,
69 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
70 return;
71 }
72
73 iwl_write_direct32(priv, HBUS_TARG_WRPTR,
74 txq->q.write_ptr | (txq_id << 8));
75 75
76 /*
77 * else not in power-save mode,
78 * uCode will never sleep when we're
79 * trying to tx (during RFKILL, we're not trying to tx).
80 */
81 } else
82 iwl_write32(priv, HBUS_TARG_WRPTR,
83 txq->q.write_ptr | (txq_id << 8));
84 }
76 txq->need_update = 0; 85 txq->need_update = 0;
77} 86}
78EXPORT_SYMBOL(iwl_txq_update_write_ptr); 87EXPORT_SYMBOL(iwl_txq_update_write_ptr);
@@ -254,8 +263,6 @@ static int iwl_queue_init(struct iwl_priv *priv, struct iwl_queue *q,
254 q->high_mark = 2; 263 q->high_mark = 2;
255 264
256 q->write_ptr = q->read_ptr = 0; 265 q->write_ptr = q->read_ptr = 0;
257 q->last_read_ptr = 0;
258 q->repeat_same_read_ptr = 0;
259 266
260 return 0; 267 return 0;
261} 268}
@@ -350,13 +357,12 @@ int iwl_tx_queue_init(struct iwl_priv *priv, struct iwl_tx_queue *txq,
350 txq->need_update = 0; 357 txq->need_update = 0;
351 358
352 /* 359 /*
353 * Aggregation TX queues will get their ID when aggregation begins; 360 * For the default queues 0-3, set up the swq_id
354 * they overwrite the setting done here. The command FIFO doesn't 361 * already -- all others need to get one later
355 * need an swq_id so don't set one to catch errors, all others can 362 * (if they need one at all).
356 * be set up to the identity mapping.
357 */ 363 */
358 if (txq_id != priv->cmd_queue) 364 if (txq_id < 4)
359 txq->swq_id = txq_id; 365 iwl_set_swq_id(txq, txq_id, txq_id);
360 366
361 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise 367 /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
362 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */ 368 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 7edf8c2fb8c7..371abbf60eac 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -61,6 +61,7 @@
61#include "iwl-helpers.h" 61#include "iwl-helpers.h"
62#include "iwl-dev.h" 62#include "iwl-dev.h"
63#include "iwl-spectrum.h" 63#include "iwl-spectrum.h"
64#include "iwl-legacy.h"
64 65
65/* 66/*
66 * module name, copyright, version, etc. 67 * module name, copyright, version, etc.
@@ -474,7 +475,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
474 dma_addr_t phys_addr; 475 dma_addr_t phys_addr;
475 dma_addr_t txcmd_phys; 476 dma_addr_t txcmd_phys;
476 int txq_id = skb_get_queue_mapping(skb); 477 int txq_id = skb_get_queue_mapping(skb);
477 u16 len, idx, len_org, hdr_len; /* TODO: len_org is not used */ 478 u16 len, idx, hdr_len;
478 u8 id; 479 u8 id;
479 u8 unicast; 480 u8 unicast;
480 u8 sta_id; 481 u8 sta_id;
@@ -611,15 +612,8 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
611 */ 612 */
612 len = sizeof(struct iwl3945_tx_cmd) + 613 len = sizeof(struct iwl3945_tx_cmd) +
613 sizeof(struct iwl_cmd_header) + hdr_len; 614 sizeof(struct iwl_cmd_header) + hdr_len;
614
615 len_org = len;
616 len = (len + 3) & ~3; 615 len = (len + 3) & ~3;
617 616
618 if (len_org != len)
619 len_org = 1;
620 else
621 len_org = 0;
622
623 /* Physical address of this Tx command's header (not MAC header!), 617 /* Physical address of this Tx command's header (not MAC header!),
624 * within command buffer array. */ 618 * within command buffer array. */
625 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr, 619 txcmd_phys = pci_map_single(priv->pci_dev, &out_cmd->hdr,
@@ -661,7 +655,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
661 spin_unlock_irqrestore(&priv->lock, flags); 655 spin_unlock_irqrestore(&priv->lock, flags);
662 } 656 }
663 657
664 iwl_stop_queue(priv, skb_get_queue_mapping(skb)); 658 iwl_stop_queue(priv, txq);
665 } 659 }
666 660
667 return 0; 661 return 0;
@@ -2515,13 +2509,8 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2515 /* After the ALIVE response, we can send commands to 3945 uCode */ 2509 /* After the ALIVE response, we can send commands to 3945 uCode */
2516 set_bit(STATUS_ALIVE, &priv->status); 2510 set_bit(STATUS_ALIVE, &priv->status);
2517 2511
2518 if (priv->cfg->ops->lib->recover_from_tx_stall) { 2512 /* Enable watchdog to monitor the driver tx queues */
2519 /* Enable timer to monitor the driver queues */ 2513 iwl_setup_watchdog(priv);
2520 mod_timer(&priv->monitor_recover,
2521 jiffies +
2522 msecs_to_jiffies(
2523 priv->cfg->base_params->monitor_recover_period));
2524 }
2525 2514
2526 if (iwl_is_rfkill(priv)) 2515 if (iwl_is_rfkill(priv))
2527 return; 2516 return;
@@ -2578,8 +2567,7 @@ static void __iwl3945_down(struct iwl_priv *priv)
2578 2567
2579 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set 2568 /* Stop TX queues watchdog. We need to have STATUS_EXIT_PENDING bit set
2580 * to prevent rearm timer */ 2569 * to prevent rearm timer */
2581 if (priv->cfg->ops->lib->recover_from_tx_stall) 2570 del_timer_sync(&priv->watchdog);
2582 del_timer_sync(&priv->monitor_recover);
2583 2571
2584 /* Station information will now be cleared in device */ 2572 /* Station information will now be cleared in device */
2585 iwl_clear_ucode_stations(priv, NULL); 2573 iwl_clear_ucode_stations(priv, NULL);
@@ -3057,22 +3045,22 @@ static void iwl3945_bg_rx_replenish(struct work_struct *data)
3057 mutex_unlock(&priv->mutex); 3045 mutex_unlock(&priv->mutex);
3058} 3046}
3059 3047
3060void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif) 3048void iwl3945_post_associate(struct iwl_priv *priv)
3061{ 3049{
3062 int rc = 0; 3050 int rc = 0;
3063 struct ieee80211_conf *conf = NULL; 3051 struct ieee80211_conf *conf = NULL;
3064 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 3052 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3065 3053
3066 if (!vif || !priv->is_open) 3054 if (!ctx->vif || !priv->is_open)
3067 return; 3055 return;
3068 3056
3069 if (vif->type == NL80211_IFTYPE_AP) { 3057 if (ctx->vif->type == NL80211_IFTYPE_AP) {
3070 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__); 3058 IWL_ERR(priv, "%s Should not be called in AP mode\n", __func__);
3071 return; 3059 return;
3072 } 3060 }
3073 3061
3074 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n", 3062 IWL_DEBUG_ASSOC(priv, "Associated as %d to: %pM\n",
3075 vif->bss_conf.aid, ctx->active.bssid_addr); 3063 ctx->vif->bss_conf.aid, ctx->active.bssid_addr);
3076 3064
3077 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3065 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
3078 return; 3066 return;
@@ -3091,18 +3079,18 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3091 3079
3092 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK; 3080 ctx->staging.filter_flags |= RXON_FILTER_ASSOC_MSK;
3093 3081
3094 ctx->staging.assoc_id = cpu_to_le16(vif->bss_conf.aid); 3082 ctx->staging.assoc_id = cpu_to_le16(ctx->vif->bss_conf.aid);
3095 3083
3096 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n", 3084 IWL_DEBUG_ASSOC(priv, "assoc id %d beacon interval %d\n",
3097 vif->bss_conf.aid, vif->bss_conf.beacon_int); 3085 ctx->vif->bss_conf.aid, ctx->vif->bss_conf.beacon_int);
3098 3086
3099 if (vif->bss_conf.use_short_preamble) 3087 if (ctx->vif->bss_conf.use_short_preamble)
3100 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK; 3088 ctx->staging.flags |= RXON_FLG_SHORT_PREAMBLE_MSK;
3101 else 3089 else
3102 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK; 3090 ctx->staging.flags &= ~RXON_FLG_SHORT_PREAMBLE_MSK;
3103 3091
3104 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) { 3092 if (ctx->staging.flags & RXON_FLG_BAND_24G_MSK) {
3105 if (vif->bss_conf.use_short_slot) 3093 if (ctx->vif->bss_conf.use_short_slot)
3106 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 3094 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
3107 else 3095 else
3108 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK; 3096 ctx->staging.flags &= ~RXON_FLG_SHORT_SLOT_MSK;
@@ -3110,7 +3098,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3110 3098
3111 iwl3945_commit_rxon(priv, ctx); 3099 iwl3945_commit_rxon(priv, ctx);
3112 3100
3113 switch (vif->type) { 3101 switch (ctx->vif->type) {
3114 case NL80211_IFTYPE_STATION: 3102 case NL80211_IFTYPE_STATION:
3115 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID); 3103 iwl3945_rate_scale_init(priv->hw, IWL_AP_ID);
3116 break; 3104 break;
@@ -3119,7 +3107,7 @@ void iwl3945_post_associate(struct iwl_priv *priv, struct ieee80211_vif *vif)
3119 break; 3107 break;
3120 default: 3108 default:
3121 IWL_ERR(priv, "%s Should not be called in %d mode\n", 3109 IWL_ERR(priv, "%s Should not be called in %d mode\n",
3122 __func__, vif->type); 3110 __func__, ctx->vif->type);
3123 break; 3111 break;
3124 } 3112 }
3125} 3113}
@@ -3234,9 +3222,10 @@ static int iwl3945_mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
3234 return NETDEV_TX_OK; 3222 return NETDEV_TX_OK;
3235} 3223}
3236 3224
3237void iwl3945_config_ap(struct iwl_priv *priv, struct ieee80211_vif *vif) 3225void iwl3945_config_ap(struct iwl_priv *priv)
3238{ 3226{
3239 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS]; 3227 struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
3228 struct ieee80211_vif *vif = ctx->vif;
3240 int rc = 0; 3229 int rc = 0;
3241 3230
3242 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) 3231 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
@@ -3407,9 +3396,9 @@ static void iwl3945_configure_filter(struct ieee80211_hw *hw,
3407 ctx->staging.filter_flags |= filter_or; 3396 ctx->staging.filter_flags |= filter_or;
3408 3397
3409 /* 3398 /*
3410 * Committing directly here breaks for some reason, 3399 * Not committing directly because hardware can perform a scan,
3411 * but we'll eventually commit the filter flags 3400 * but even if hw is ready, committing here breaks for some reason,
3412 * change anyway. 3401 * we'll eventually commit the filter flags change anyway.
3413 */ 3402 */
3414 3403
3415 mutex_unlock(&priv->mutex); 3404 mutex_unlock(&priv->mutex);
@@ -3780,12 +3769,9 @@ static void iwl3945_setup_deferred_work(struct iwl_priv *priv)
3780 3769
3781 iwl3945_hw_setup_deferred_work(priv); 3770 iwl3945_hw_setup_deferred_work(priv);
3782 3771
3783 if (priv->cfg->ops->lib->recover_from_tx_stall) { 3772 init_timer(&priv->watchdog);
3784 init_timer(&priv->monitor_recover); 3773 priv->watchdog.data = (unsigned long)priv;
3785 priv->monitor_recover.data = (unsigned long)priv; 3774 priv->watchdog.function = iwl_bg_watchdog;
3786 priv->monitor_recover.function =
3787 priv->cfg->ops->lib->recover_from_tx_stall;
3788 }
3789 3775
3790 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) 3776 tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long))
3791 iwl3945_irq_tasklet, (unsigned long)priv); 3777 iwl3945_irq_tasklet, (unsigned long)priv);
@@ -3824,18 +3810,19 @@ static struct attribute_group iwl3945_attribute_group = {
3824 .attrs = iwl3945_sysfs_entries, 3810 .attrs = iwl3945_sysfs_entries,
3825}; 3811};
3826 3812
3827static struct ieee80211_ops iwl3945_hw_ops = { 3813struct ieee80211_ops iwl3945_hw_ops = {
3828 .tx = iwl3945_mac_tx, 3814 .tx = iwl3945_mac_tx,
3829 .start = iwl3945_mac_start, 3815 .start = iwl3945_mac_start,
3830 .stop = iwl3945_mac_stop, 3816 .stop = iwl3945_mac_stop,
3831 .add_interface = iwl_mac_add_interface, 3817 .add_interface = iwl_mac_add_interface,
3832 .remove_interface = iwl_mac_remove_interface, 3818 .remove_interface = iwl_mac_remove_interface,
3833 .config = iwl_mac_config, 3819 .change_interface = iwl_mac_change_interface,
3820 .config = iwl_legacy_mac_config,
3834 .configure_filter = iwl3945_configure_filter, 3821 .configure_filter = iwl3945_configure_filter,
3835 .set_key = iwl3945_mac_set_key, 3822 .set_key = iwl3945_mac_set_key,
3836 .conf_tx = iwl_mac_conf_tx, 3823 .conf_tx = iwl_mac_conf_tx,
3837 .reset_tsf = iwl_mac_reset_tsf, 3824 .reset_tsf = iwl_legacy_mac_reset_tsf,
3838 .bss_info_changed = iwl_bss_info_changed, 3825 .bss_info_changed = iwl_legacy_mac_bss_info_changed,
3839 .hw_scan = iwl_mac_hw_scan, 3826 .hw_scan = iwl_mac_hw_scan,
3840 .sta_add = iwl3945_mac_sta_add, 3827 .sta_add = iwl3945_mac_sta_add,
3841 .sta_remove = iwl_mac_sta_remove, 3828 .sta_remove = iwl_mac_sta_remove,
@@ -3865,7 +3852,15 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3865 priv->iw_mode = NL80211_IFTYPE_STATION; 3852 priv->iw_mode = NL80211_IFTYPE_STATION;
3866 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF; 3853 priv->missed_beacon_threshold = IWL_MISSED_BEACON_THRESHOLD_DEF;
3867 3854
3855 /* initialize force reset */
3856 priv->force_reset[IWL_RF_RESET].reset_duration =
3857 IWL_DELAY_NEXT_FORCE_RF_RESET;
3858 priv->force_reset[IWL_FW_RESET].reset_duration =
3859 IWL_DELAY_NEXT_FORCE_FW_RELOAD;
3860
3861
3868 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER; 3862 priv->tx_power_user_lmt = IWL_DEFAULT_TX_POWER;
3863 priv->tx_power_next = IWL_DEFAULT_TX_POWER;
3869 3864
3870 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) { 3865 if (eeprom->version < EEPROM_3945_EEPROM_VERSION) {
3871 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n", 3866 IWL_WARN(priv, "Unsupported EEPROM version: 0x%04X\n",
@@ -3965,7 +3960,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3965 3960
3966 /* mac80211 allocates memory for this device instance, including 3961 /* mac80211 allocates memory for this device instance, including
3967 * space for this driver's private structure */ 3962 * space for this driver's private structure */
3968 hw = iwl_alloc_all(cfg, &iwl3945_hw_ops); 3963 hw = iwl_alloc_all(cfg);
3969 if (hw == NULL) { 3964 if (hw == NULL) {
3970 pr_err("Can not allocate network device\n"); 3965 pr_err("Can not allocate network device\n");
3971 err = -ENOMEM; 3966 err = -ENOMEM;
@@ -4117,7 +4112,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4117 4112
4118 pci_enable_msi(priv->pci_dev); 4113 pci_enable_msi(priv->pci_dev);
4119 4114
4120 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr, 4115 err = request_irq(priv->pci_dev->irq, priv->cfg->ops->lib->isr_ops.isr,
4121 IRQF_SHARED, DRV_NAME, priv); 4116 IRQF_SHARED, DRV_NAME, priv);
4122 if (err) { 4117 if (err) {
4123 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq); 4118 IWL_ERR(priv, "Error allocating IRQ %d\n", priv->pci_dev->irq);
@@ -4275,10 +4270,7 @@ static struct pci_driver iwl3945_driver = {
4275 .id_table = iwl3945_hw_card_ids, 4270 .id_table = iwl3945_hw_card_ids,
4276 .probe = iwl3945_pci_probe, 4271 .probe = iwl3945_pci_probe,
4277 .remove = __devexit_p(iwl3945_pci_remove), 4272 .remove = __devexit_p(iwl3945_pci_remove),
4278#ifdef CONFIG_PM 4273 .driver.pm = IWL_PM_OPS,
4279 .suspend = iwl_pci_suspend,
4280 .resume = iwl_pci_resume,
4281#endif
4282}; 4274};
4283 4275
4284static int __init iwl3945_init(void) 4276static int __init iwl3945_init(void)
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index c6c0eff9b5ed..5a4982271e96 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -225,7 +225,8 @@ static int iwm_cfg80211_del_key(struct wiphy *wiphy, struct net_device *ndev,
225 225
226static int iwm_cfg80211_set_default_key(struct wiphy *wiphy, 226static int iwm_cfg80211_set_default_key(struct wiphy *wiphy,
227 struct net_device *ndev, 227 struct net_device *ndev,
228 u8 key_index) 228 u8 key_index, bool unicast,
229 bool multicast)
229{ 230{
230 struct iwm_priv *iwm = ndev_to_iwm(ndev); 231 struct iwm_priv *iwm = ndev_to_iwm(ndev);
231 232
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 330c7d9cf101..50dee6a0a5ca 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -908,7 +908,7 @@ int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
908 return ret; 908 return ret;
909 } 909 }
910 910
911 iwm->scan_id = iwm->scan_id++ % IWM_SCAN_ID_MAX; 911 iwm->scan_id = (iwm->scan_id + 1) % IWM_SCAN_ID_MAX;
912 912
913 return 0; 913 return 0;
914} 914}
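[Editor's note] The hunk above replaces "iwm->scan_id = iwm->scan_id++ % IWM_SCAN_ID_MAX;", which modifies scan_id twice in one expression with no intervening sequence point and is therefore undefined behaviour in C. A minimal sketch of the corrected wrap-around pattern, using a hypothetical limit rather than the driver's own constant:

    #include <stdio.h>

    #define SCAN_ID_MAX 8                    /* stand-in for IWM_SCAN_ID_MAX */

    /* Compute the next value first, then assign: well-defined, wraps to 0. */
    static unsigned int next_scan_id(unsigned int id)
    {
        return (id + 1) % SCAN_ID_MAX;
    }

    int main(void)
    {
        unsigned int id = 6;

        for (int i = 0; i < 4; i++) {
            id = next_scan_id(id);
            printf("%u\n", id);              /* prints 7, 0, 1, 2 */
        }
        return 0;
    }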
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 373930afc26b..698a1f7694ed 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -9,8 +9,6 @@
9#include <linux/sched.h> 9#include <linux/sched.h>
10#include <linux/wait.h> 10#include <linux/wait.h>
11#include <linux/slab.h> 11#include <linux/slab.h>
12#include <linux/sched.h>
13#include <linux/wait.h>
14#include <linux/ieee80211.h> 12#include <linux/ieee80211.h>
15#include <net/cfg80211.h> 13#include <net/cfg80211.h>
16#include <asm/unaligned.h> 14#include <asm/unaligned.h>
@@ -619,7 +617,7 @@ static int lbs_ret_scan(struct lbs_private *priv, unsigned long dummy,
619 print_ssid(ssid_buf, ssid, ssid_len), 617 print_ssid(ssid_buf, ssid, ssid_len),
620 LBS_SCAN_RSSI_TO_MBM(rssi)/100); 618 LBS_SCAN_RSSI_TO_MBM(rssi)/100);
621 619
622 if (channel || 620 if (channel &&
623 !(channel->flags & IEEE80211_CHAN_DISABLED)) 621 !(channel->flags & IEEE80211_CHAN_DISABLED))
624 cfg80211_inform_bss(wiphy, channel, 622 cfg80211_inform_bss(wiphy, channel,
625 bssid, le64_to_cpu(*(__le64 *)tsfdesc), 623 bssid, le64_to_cpu(*(__le64 *)tsfdesc),
@@ -1424,7 +1422,8 @@ static int lbs_cfg_disconnect(struct wiphy *wiphy, struct net_device *dev,
1424 1422
1425static int lbs_cfg_set_default_key(struct wiphy *wiphy, 1423static int lbs_cfg_set_default_key(struct wiphy *wiphy,
1426 struct net_device *netdev, 1424 struct net_device *netdev,
1427 u8 key_index) 1425 u8 key_index, bool unicast,
1426 bool multicast)
1428{ 1427{
1429 struct lbs_private *priv = wiphy_priv(wiphy); 1428 struct lbs_private *priv = wiphy_priv(wiphy);
1430 1429
@@ -2062,7 +2061,7 @@ static void lbs_cfg_set_regulatory_hint(struct lbs_private *priv)
2062 }; 2061 };
2063 2062
2064 /* Section 5.17.2 */ 2063 /* Section 5.17.2 */
2065 static struct region_code_mapping regmap[] = { 2064 static const struct region_code_mapping regmap[] = {
2066 {"US ", 0x10}, /* US FCC */ 2065 {"US ", 0x10}, /* US FCC */
2067 {"CA ", 0x20}, /* Canada */ 2066 {"CA ", 0x20}, /* Canada */
2068 {"EU ", 0x30}, /* ETSI */ 2067 {"EU ", 0x30}, /* ETSI */
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 70745928f3f8..78c4da150a74 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -177,6 +177,14 @@ int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
177 struct cmd_ds_host_sleep cmd_config; 177 struct cmd_ds_host_sleep cmd_config;
178 int ret; 178 int ret;
179 179
180 /*
181 * Certain firmware versions do not support EHS_REMOVE_WAKEUP command
182 * and the card will return a failure. Since we need to be
183 * able to reset the mask, in those cases we set a 0 mask instead.
184 */
185 if (criteria == EHS_REMOVE_WAKEUP && !priv->ehs_remove_supported)
186 criteria = 0;
187
180 cmd_config.hdr.size = cpu_to_le16(sizeof(cmd_config)); 188 cmd_config.hdr.size = cpu_to_le16(sizeof(cmd_config));
181 cmd_config.criteria = cpu_to_le32(criteria); 189 cmd_config.criteria = cpu_to_le32(criteria);
182 cmd_config.gpio = priv->wol_gpio; 190 cmd_config.gpio = priv->wol_gpio;
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index cb14c38caf3a..18dd9a02c459 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -138,6 +138,7 @@ struct lbs_private {
138 uint32_t wol_criteria; 138 uint32_t wol_criteria;
139 uint8_t wol_gpio; 139 uint8_t wol_gpio;
140 uint8_t wol_gap; 140 uint8_t wol_gap;
141 bool ehs_remove_supported;
141 142
142 /* Transmitting */ 143 /* Transmitting */
143 int tx_pending_len; /* -1 while building packet */ 144 int tx_pending_len; /* -1 while building packet */
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index ecd4d04b2c3c..00600239a053 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -784,7 +784,7 @@ static int lbs_spi_thread(void *data)
784 up(&card->spi_thread_terminated); 784 up(&card->spi_thread_terminated);
785 do_exit(0); 785 do_exit(0);
786 } 786 }
787 } while (err == EINTR); 787 } while (err == -EINTR);
788 788
789 /* Read the host interrupt status register to see what we 789 /* Read the host interrupt status register to see what we
790 * can do. */ 790 * can do. */
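[Editor's note] The one-character fix above matters because kernel primitives such as down_interruptible() report interruption as the negative value -EINTR; comparing err against the positive EINTR can never match, so the retry loop previously exited on the first signal. A small user-space model of the negative-errno convention (fake_wait() is hypothetical):

    #include <errno.h>
    #include <stdio.h>

    /* Pretend blocking call: interrupted twice, then succeeds. */
    static int fake_wait(void)
    {
        static int calls;
        return (++calls < 3) ? -EINTR : 0;
    }

    int main(void)
    {
        int err;

        do {
            err = fake_wait();
        } while (err == -EINTR);             /* retry only on interruption */

        printf("final err = %d\n", err);     /* prints 0 */
        return 0;
    }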
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index efaf85032208..6524c70363d9 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -345,6 +345,13 @@ static int if_usb_probe(struct usb_interface *intf,
345 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2)) 345 if (device_create_file(&priv->dev->dev, &dev_attr_lbs_flash_boot2))
346 lbs_pr_err("cannot register lbs_flash_boot2 attribute\n"); 346 lbs_pr_err("cannot register lbs_flash_boot2 attribute\n");
347 347
348 /*
349 * EHS_REMOVE_WAKEUP is not supported on all versions of the firmware.
350 */
351 priv->wol_criteria = EHS_REMOVE_WAKEUP;
352 if (lbs_host_sleep_cfg(priv, priv->wol_criteria, NULL))
353 priv->ehs_remove_supported = false;
354
348 return 0; 355 return 0;
349 356
350err_start_card: 357err_start_card:
@@ -1090,12 +1097,6 @@ static int if_usb_suspend(struct usb_interface *intf, pm_message_t message)
1090 if (priv->psstate != PS_STATE_FULL_POWER) 1097 if (priv->psstate != PS_STATE_FULL_POWER)
1091 return -1; 1098 return -1;
1092 1099
1093 if (priv->wol_criteria == EHS_REMOVE_WAKEUP) {
1094 lbs_pr_info("Suspend attempt without "
1095 "configuring wake params!\n");
1096 return -ENOSYS;
1097 }
1098
1099 ret = lbs_suspend(priv); 1100 ret = lbs_suspend(priv);
1100 if (ret) 1101 if (ret)
1101 goto out; 1102 goto out;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index fcd1bbfc632d..6836a6dd9853 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -851,9 +851,10 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
851 priv->work_thread = create_singlethread_workqueue("lbs_worker"); 851 priv->work_thread = create_singlethread_workqueue("lbs_worker");
852 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker); 852 INIT_WORK(&priv->mcast_work, lbs_set_mcast_worker);
853 853
854 priv->wol_criteria = 0xffffffff; 854 priv->wol_criteria = EHS_REMOVE_WAKEUP;
855 priv->wol_gpio = 0xff; 855 priv->wol_gpio = 0xff;
856 priv->wol_gap = 20; 856 priv->wol_gap = 20;
857 priv->ehs_remove_supported = true;
857 858
858 goto done; 859 goto done;
859 860
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index a4d0bca9ef2c..a2b1df21d286 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -55,7 +55,9 @@ int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *skb)
55 struct rxpd *p_rx_pd; 55 struct rxpd *p_rx_pd;
56 int hdrchop; 56 int hdrchop;
57 struct ethhdr *p_ethhdr; 57 struct ethhdr *p_ethhdr;
58 const u8 rfc1042_eth_hdr[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; 58 static const u8 rfc1042_eth_hdr[] = {
59 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
60 };
59 61
60 lbs_deb_enter(LBS_DEB_RX); 62 lbs_deb_enter(LBS_DEB_RX);
61 63
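[Editor's note] The rx.c change above adds "static const" to the RFC 1042 header bytes: without it the six bytes are typically rebuilt on the stack on every received packet, with it they live once in read-only data. A small sketch of the pattern (is_rfc1042_hdr() is hypothetical, not the driver's function):

    #include <stdint.h>
    #include <string.h>

    static int is_rfc1042_hdr(const uint8_t *p)
    {
        /* Initialised once at build time, placed in read-only data. */
        static const uint8_t rfc1042_hdr[] = {
            0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00
        };

        return memcmp(p, rfc1042_hdr, sizeof(rfc1042_hdr)) == 0;
    }

    int main(void)
    {
        const uint8_t llc_snap[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00 };

        return is_rfc1042_hdr(llc_snap) ? 0 : 1;
    }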
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 7eaaa3bab547..454f045ddff3 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -309,6 +309,8 @@ struct mac80211_hwsim_data {
309 */ 309 */
310 u64 group; 310 u64 group;
311 struct dentry *debugfs_group; 311 struct dentry *debugfs_group;
312
313 int power_level;
312}; 314};
313 315
314 316
@@ -497,7 +499,7 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
497 rx_status.band = data->channel->band; 499 rx_status.band = data->channel->band;
498 rx_status.rate_idx = info->control.rates[0].idx; 500 rx_status.rate_idx = info->control.rates[0].idx;
499 /* TODO: simulate real signal strength (and optional packet loss) */ 501 /* TODO: simulate real signal strength (and optional packet loss) */
500 rx_status.signal = -50; 502 rx_status.signal = data->power_level - 50;
501 503
502 if (data->ps != PS_DISABLED) 504 if (data->ps != PS_DISABLED)
503 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 505 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
@@ -698,6 +700,7 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
698 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); 700 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
699 701
700 data->channel = conf->channel; 702 data->channel = conf->channel;
703 data->power_level = conf->power_level;
701 if (!data->started || !data->beacon_int) 704 if (!data->started || !data->beacon_int)
702 del_timer(&data->beacon_timer); 705 del_timer(&data->beacon_timer);
703 else 706 else
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index f152a25be59f..9ecf8407cb1b 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -29,6 +29,12 @@
29#define MWL8K_NAME KBUILD_MODNAME 29#define MWL8K_NAME KBUILD_MODNAME
30#define MWL8K_VERSION "0.12" 30#define MWL8K_VERSION "0.12"
31 31
32/* Module parameters */
33static unsigned ap_mode_default;
34module_param(ap_mode_default, bool, 0);
35MODULE_PARM_DESC(ap_mode_default,
36 "Set to 1 to make ap mode the default instead of sta mode");
37
32/* Register definitions */ 38/* Register definitions */
33#define MWL8K_HIU_GEN_PTR 0x00000c10 39#define MWL8K_HIU_GEN_PTR 0x00000c10
34#define MWL8K_MODE_STA 0x0000005a 40#define MWL8K_MODE_STA 0x0000005a
@@ -92,8 +98,10 @@ struct rxd_ops {
92struct mwl8k_device_info { 98struct mwl8k_device_info {
93 char *part_name; 99 char *part_name;
94 char *helper_image; 100 char *helper_image;
95 char *fw_image; 101 char *fw_image_sta;
102 char *fw_image_ap;
96 struct rxd_ops *ap_rxd_ops; 103 struct rxd_ops *ap_rxd_ops;
104 u32 fw_api_ap;
97}; 105};
98 106
99struct mwl8k_rx_queue { 107struct mwl8k_rx_queue {
@@ -136,8 +144,8 @@ struct mwl8k_priv {
136 void __iomem *regs; 144 void __iomem *regs;
137 145
138 /* firmware */ 146 /* firmware */
139 struct firmware *fw_helper; 147 const struct firmware *fw_helper;
140 struct firmware *fw_ucode; 148 const struct firmware *fw_ucode;
141 149
142 /* hardware/firmware parameters */ 150 /* hardware/firmware parameters */
143 bool ap_fw; 151 bool ap_fw;
@@ -210,6 +218,18 @@ struct mwl8k_priv {
210 218
211 /* Most recently reported noise in dBm */ 219 /* Most recently reported noise in dBm */
212 s8 noise; 220 s8 noise;
221
222 /*
223 * preserve the queue configurations so they can be restored if/when
224 * the firmware image is swapped.
225 */
226 struct ieee80211_tx_queue_params wmm_params[MWL8K_TX_QUEUES];
227
228 /* async firmware loading state */
229 unsigned fw_state;
230 char *fw_pref;
231 char *fw_alt;
232 struct completion firmware_loading_complete;
213}; 233};
214 234
215/* Per interface specific private data */ 235/* Per interface specific private data */
@@ -285,8 +305,9 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
285}; 305};
286 306
287/* Set or get info from Firmware */ 307/* Set or get info from Firmware */
288#define MWL8K_CMD_SET 0x0001
289#define MWL8K_CMD_GET 0x0000 308#define MWL8K_CMD_GET 0x0000
309#define MWL8K_CMD_SET 0x0001
310#define MWL8K_CMD_SET_LIST 0x0002
290 311
291/* Firmware command codes */ 312/* Firmware command codes */
292#define MWL8K_CMD_CODE_DNLD 0x0001 313#define MWL8K_CMD_CODE_DNLD 0x0001
@@ -296,6 +317,7 @@ static const struct ieee80211_rate mwl8k_rates_50[] = {
296#define MWL8K_CMD_GET_STAT 0x0014 317#define MWL8K_CMD_GET_STAT 0x0014
297#define MWL8K_CMD_RADIO_CONTROL 0x001c 318#define MWL8K_CMD_RADIO_CONTROL 0x001c
298#define MWL8K_CMD_RF_TX_POWER 0x001e 319#define MWL8K_CMD_RF_TX_POWER 0x001e
320#define MWL8K_CMD_TX_POWER 0x001f
299#define MWL8K_CMD_RF_ANTENNA 0x0020 321#define MWL8K_CMD_RF_ANTENNA 0x0020
300#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */ 322#define MWL8K_CMD_SET_BEACON 0x0100 /* per-vif */
301#define MWL8K_CMD_SET_PRE_SCAN 0x0107 323#define MWL8K_CMD_SET_PRE_SCAN 0x0107
@@ -333,6 +355,7 @@ static const char *mwl8k_cmd_name(__le16 cmd, char *buf, int bufsize)
333 MWL8K_CMDNAME(GET_STAT); 355 MWL8K_CMDNAME(GET_STAT);
334 MWL8K_CMDNAME(RADIO_CONTROL); 356 MWL8K_CMDNAME(RADIO_CONTROL);
335 MWL8K_CMDNAME(RF_TX_POWER); 357 MWL8K_CMDNAME(RF_TX_POWER);
358 MWL8K_CMDNAME(TX_POWER);
336 MWL8K_CMDNAME(RF_ANTENNA); 359 MWL8K_CMDNAME(RF_ANTENNA);
337 MWL8K_CMDNAME(SET_BEACON); 360 MWL8K_CMDNAME(SET_BEACON);
338 MWL8K_CMDNAME(SET_PRE_SCAN); 361 MWL8K_CMDNAME(SET_PRE_SCAN);
@@ -372,7 +395,7 @@ static void mwl8k_hw_reset(struct mwl8k_priv *priv)
372} 395}
373 396
374/* Release fw image */ 397/* Release fw image */
375static void mwl8k_release_fw(struct firmware **fw) 398static void mwl8k_release_fw(const struct firmware **fw)
376{ 399{
377 if (*fw == NULL) 400 if (*fw == NULL)
378 return; 401 return;
@@ -386,37 +409,68 @@ static void mwl8k_release_firmware(struct mwl8k_priv *priv)
386 mwl8k_release_fw(&priv->fw_helper); 409 mwl8k_release_fw(&priv->fw_helper);
387} 410}
388 411
412/* states for asynchronous f/w loading */
413static void mwl8k_fw_state_machine(const struct firmware *fw, void *context);
414enum {
415 FW_STATE_INIT = 0,
416 FW_STATE_LOADING_PREF,
417 FW_STATE_LOADING_ALT,
418 FW_STATE_ERROR,
419};
420
389/* Request fw image */ 421/* Request fw image */
390static int mwl8k_request_fw(struct mwl8k_priv *priv, 422static int mwl8k_request_fw(struct mwl8k_priv *priv,
391 const char *fname, struct firmware **fw) 423 const char *fname, const struct firmware **fw,
424 bool nowait)
392{ 425{
393 /* release current image */ 426 /* release current image */
394 if (*fw != NULL) 427 if (*fw != NULL)
395 mwl8k_release_fw(fw); 428 mwl8k_release_fw(fw);
396 429
397 return request_firmware((const struct firmware **)fw, 430 if (nowait)
398 fname, &priv->pdev->dev); 431 return request_firmware_nowait(THIS_MODULE, 1, fname,
432 &priv->pdev->dev, GFP_KERNEL,
433 priv, mwl8k_fw_state_machine);
434 else
435 return request_firmware(fw, fname, &priv->pdev->dev);
399} 436}
400 437
401static int mwl8k_request_firmware(struct mwl8k_priv *priv) 438static int mwl8k_request_firmware(struct mwl8k_priv *priv, char *fw_image,
439 bool nowait)
402{ 440{
403 struct mwl8k_device_info *di = priv->device_info; 441 struct mwl8k_device_info *di = priv->device_info;
404 int rc; 442 int rc;
405 443
406 if (di->helper_image != NULL) { 444 if (di->helper_image != NULL) {
407 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw_helper); 445 if (nowait)
408 if (rc) { 446 rc = mwl8k_request_fw(priv, di->helper_image,
409 printk(KERN_ERR "%s: Error requesting helper " 447 &priv->fw_helper, true);
410 "firmware file %s\n", pci_name(priv->pdev), 448 else
411 di->helper_image); 449 rc = mwl8k_request_fw(priv, di->helper_image,
450 &priv->fw_helper, false);
451 if (rc)
452 printk(KERN_ERR "%s: Error requesting helper fw %s\n",
453 pci_name(priv->pdev), di->helper_image);
454
455 if (rc || nowait)
412 return rc; 456 return rc;
413 }
414 } 457 }
415 458
416 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw_ucode); 459 if (nowait) {
460 /*
461 * if we get here, no helper image is needed. Skip the
462 * FW_STATE_INIT state.
463 */
464 priv->fw_state = FW_STATE_LOADING_PREF;
465 rc = mwl8k_request_fw(priv, fw_image,
466 &priv->fw_ucode,
467 true);
468 } else
469 rc = mwl8k_request_fw(priv, fw_image,
470 &priv->fw_ucode, false);
417 if (rc) { 471 if (rc) {
418 printk(KERN_ERR "%s: Error requesting firmware file %s\n", 472 printk(KERN_ERR "%s: Error requesting firmware file %s\n",
419 pci_name(priv->pdev), di->fw_image); 473 pci_name(priv->pdev), fw_image);
420 mwl8k_release_fw(&priv->fw_helper); 474 mwl8k_release_fw(&priv->fw_helper);
421 return rc; 475 return rc;
422 } 476 }
@@ -577,12 +631,12 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
577static int mwl8k_load_firmware(struct ieee80211_hw *hw) 631static int mwl8k_load_firmware(struct ieee80211_hw *hw)
578{ 632{
579 struct mwl8k_priv *priv = hw->priv; 633 struct mwl8k_priv *priv = hw->priv;
580 struct firmware *fw = priv->fw_ucode; 634 const struct firmware *fw = priv->fw_ucode;
581 int rc; 635 int rc;
582 int loops; 636 int loops;
583 637
584 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) { 638 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
585 struct firmware *helper = priv->fw_helper; 639 const struct firmware *helper = priv->fw_helper;
586 640
587 if (helper == NULL) { 641 if (helper == NULL) {
588 printk(KERN_ERR "%s: helper image needed but none " 642 printk(KERN_ERR "%s: helper image needed but none "
@@ -1811,6 +1865,7 @@ struct mwl8k_cmd_get_hw_spec_ap {
1811 __le32 wcbbase1; 1865 __le32 wcbbase1;
1812 __le32 wcbbase2; 1866 __le32 wcbbase2;
1813 __le32 wcbbase3; 1867 __le32 wcbbase3;
1868 __le32 fw_api_version;
1814} __packed; 1869} __packed;
1815 1870
1816static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw) 1871static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
@@ -1818,6 +1873,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1818 struct mwl8k_priv *priv = hw->priv; 1873 struct mwl8k_priv *priv = hw->priv;
1819 struct mwl8k_cmd_get_hw_spec_ap *cmd; 1874 struct mwl8k_cmd_get_hw_spec_ap *cmd;
1820 int rc; 1875 int rc;
1876 u32 api_version;
1821 1877
1822 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); 1878 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1823 if (cmd == NULL) 1879 if (cmd == NULL)
@@ -1834,6 +1890,16 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1834 if (!rc) { 1890 if (!rc) {
1835 int off; 1891 int off;
1836 1892
1893 api_version = le32_to_cpu(cmd->fw_api_version);
1894 if (priv->device_info->fw_api_ap != api_version) {
1895 printk(KERN_ERR "%s: Unsupported fw API version for %s."
1896 " Expected %d got %d.\n", MWL8K_NAME,
1897 priv->device_info->part_name,
1898 priv->device_info->fw_api_ap,
1899 api_version);
1900 rc = -EINVAL;
1901 goto done;
1902 }
1837 SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr); 1903 SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
1838 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs); 1904 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1839 priv->fw_rev = le32_to_cpu(cmd->fw_rev); 1905 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
@@ -1861,6 +1927,7 @@ static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1861 iowrite32(priv->txq[3].txd_dma, priv->sram + off); 1927 iowrite32(priv->txq[3].txd_dma, priv->sram + off);
1862 } 1928 }
1863 1929
1930done:
1864 kfree(cmd); 1931 kfree(cmd);
1865 return rc; 1932 return rc;
1866} 1933}
@@ -2084,7 +2151,7 @@ mwl8k_set_radio_preamble(struct ieee80211_hw *hw, bool short_preamble)
2084/* 2151/*
2085 * CMD_RF_TX_POWER. 2152 * CMD_RF_TX_POWER.
2086 */ 2153 */
2087#define MWL8K_TX_POWER_LEVEL_TOTAL 8 2154#define MWL8K_RF_TX_POWER_LEVEL_TOTAL 8
2088 2155
2089struct mwl8k_cmd_rf_tx_power { 2156struct mwl8k_cmd_rf_tx_power {
2090 struct mwl8k_cmd_pkt header; 2157 struct mwl8k_cmd_pkt header;
@@ -2092,7 +2159,7 @@ struct mwl8k_cmd_rf_tx_power {
2092 __le16 support_level; 2159 __le16 support_level;
2093 __le16 current_level; 2160 __le16 current_level;
2094 __le16 reserved; 2161 __le16 reserved;
2095 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL]; 2162 __le16 power_level_list[MWL8K_RF_TX_POWER_LEVEL_TOTAL];
2096} __packed; 2163} __packed;
2097 2164
2098static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm) 2165static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
@@ -2116,6 +2183,65 @@ static int mwl8k_cmd_rf_tx_power(struct ieee80211_hw *hw, int dBm)
2116} 2183}
2117 2184
2118/* 2185/*
2186 * CMD_TX_POWER.
2187 */
2188#define MWL8K_TX_POWER_LEVEL_TOTAL 12
2189
2190struct mwl8k_cmd_tx_power {
2191 struct mwl8k_cmd_pkt header;
2192 __le16 action;
2193 __le16 band;
2194 __le16 channel;
2195 __le16 bw;
2196 __le16 sub_ch;
2197 __le16 power_level_list[MWL8K_TX_POWER_LEVEL_TOTAL];
2198} __packed;
2199
2200static int mwl8k_cmd_tx_power(struct ieee80211_hw *hw,
2201 struct ieee80211_conf *conf,
2202 unsigned short pwr)
2203{
2204 struct ieee80211_channel *channel = conf->channel;
2205 struct mwl8k_cmd_tx_power *cmd;
2206 int rc;
2207 int i;
2208
2209 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2210 if (cmd == NULL)
2211 return -ENOMEM;
2212
2213 cmd->header.code = cpu_to_le16(MWL8K_CMD_TX_POWER);
2214 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2215 cmd->action = cpu_to_le16(MWL8K_CMD_SET_LIST);
2216
2217 if (channel->band == IEEE80211_BAND_2GHZ)
2218 cmd->band = cpu_to_le16(0x1);
2219 else if (channel->band == IEEE80211_BAND_5GHZ)
2220 cmd->band = cpu_to_le16(0x4);
2221
2222 cmd->channel = channel->hw_value;
2223
2224 if (conf->channel_type == NL80211_CHAN_NO_HT ||
2225 conf->channel_type == NL80211_CHAN_HT20) {
2226 cmd->bw = cpu_to_le16(0x2);
2227 } else {
2228 cmd->bw = cpu_to_le16(0x4);
2229 if (conf->channel_type == NL80211_CHAN_HT40MINUS)
2230 cmd->sub_ch = cpu_to_le16(0x3);
2231 else if (conf->channel_type == NL80211_CHAN_HT40PLUS)
2232 cmd->sub_ch = cpu_to_le16(0x1);
2233 }
2234
2235 for (i = 0; i < MWL8K_TX_POWER_LEVEL_TOTAL; i++)
2236 cmd->power_level_list[i] = cpu_to_le16(pwr);
2237
2238 rc = mwl8k_post_cmd(hw, &cmd->header);
2239 kfree(cmd);
2240
2241 return rc;
2242}
2243
2244/*
2119 * CMD_RF_ANTENNA. 2245 * CMD_RF_ANTENNA.
2120 */ 2246 */
2121struct mwl8k_cmd_rf_antenna { 2247struct mwl8k_cmd_rf_antenna {
@@ -3283,13 +3409,16 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
3283 mwl8k_txq_reclaim(hw, i, INT_MAX, 1); 3409 mwl8k_txq_reclaim(hw, i, INT_MAX, 1);
3284} 3410}
3285 3411
3412static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image);
3413
3286static int mwl8k_add_interface(struct ieee80211_hw *hw, 3414static int mwl8k_add_interface(struct ieee80211_hw *hw,
3287 struct ieee80211_vif *vif) 3415 struct ieee80211_vif *vif)
3288{ 3416{
3289 struct mwl8k_priv *priv = hw->priv; 3417 struct mwl8k_priv *priv = hw->priv;
3290 struct mwl8k_vif *mwl8k_vif; 3418 struct mwl8k_vif *mwl8k_vif;
3291 u32 macids_supported; 3419 u32 macids_supported;
3292 int macid; 3420 int macid, rc;
3421 struct mwl8k_device_info *di;
3293 3422
3294 /* 3423 /*
3295 * Reject interface creation if sniffer mode is active, as 3424 * Reject interface creation if sniffer mode is active, as
@@ -3302,12 +3431,28 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
3302 return -EINVAL; 3431 return -EINVAL;
3303 } 3432 }
3304 3433
3305 3434 di = priv->device_info;
3306 switch (vif->type) { 3435 switch (vif->type) {
3307 case NL80211_IFTYPE_AP: 3436 case NL80211_IFTYPE_AP:
3437 if (!priv->ap_fw && di->fw_image_ap) {
3438 /* we must load the ap fw to meet this request */
3439 if (!list_empty(&priv->vif_list))
3440 return -EBUSY;
3441 rc = mwl8k_reload_firmware(hw, di->fw_image_ap);
3442 if (rc)
3443 return rc;
3444 }
3308 macids_supported = priv->ap_macids_supported; 3445 macids_supported = priv->ap_macids_supported;
3309 break; 3446 break;
3310 case NL80211_IFTYPE_STATION: 3447 case NL80211_IFTYPE_STATION:
3448 if (priv->ap_fw && di->fw_image_sta) {
3449 /* we must load the sta fw to meet this request */
3450 if (!list_empty(&priv->vif_list))
3451 return -EBUSY;
3452 rc = mwl8k_reload_firmware(hw, di->fw_image_sta);
3453 if (rc)
3454 return rc;
3455 }
3311 macids_supported = priv->sta_macids_supported; 3456 macids_supported = priv->sta_macids_supported;
3312 break; 3457 break;
3313 default: 3458 default:
@@ -3377,15 +3522,19 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
3377 3522
3378 if (conf->power_level > 18) 3523 if (conf->power_level > 18)
3379 conf->power_level = 18; 3524 conf->power_level = 18;
3380 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
3381 if (rc)
3382 goto out;
3383 3525
3384 if (priv->ap_fw) { 3526 if (priv->ap_fw) {
3527 rc = mwl8k_cmd_tx_power(hw, conf, conf->power_level);
3528 if (rc)
3529 goto out;
3530
3385 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7); 3531 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
3386 if (!rc) 3532 if (!rc)
3387 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7); 3533 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
3388 } else { 3534 } else {
3535 rc = mwl8k_cmd_rf_tx_power(hw, conf->power_level);
3536 if (rc)
3537 goto out;
3389 rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7); 3538 rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7);
3390 } 3539 }
3391 3540
@@ -3739,6 +3888,9 @@ static int mwl8k_conf_tx(struct ieee80211_hw *hw, u16 queue,
3739 3888
3740 rc = mwl8k_fw_lock(hw); 3889 rc = mwl8k_fw_lock(hw);
3741 if (!rc) { 3890 if (!rc) {
3891 BUG_ON(queue > MWL8K_TX_QUEUES - 1);
3892 memcpy(&priv->wmm_params[queue], params, sizeof(*params));
3893
3742 if (!priv->wmm_enabled) 3894 if (!priv->wmm_enabled)
3743 rc = mwl8k_cmd_set_wmm_mode(hw, 1); 3895 rc = mwl8k_cmd_set_wmm_mode(hw, 1);
3744 3896
@@ -3838,21 +3990,27 @@ enum {
3838 MWL8366, 3990 MWL8366,
3839}; 3991};
3840 3992
3993#define MWL8K_8366_AP_FW_API 1
3994#define _MWL8K_8366_AP_FW(api) "mwl8k/fmimage_8366_ap-" #api ".fw"
3995#define MWL8K_8366_AP_FW(api) _MWL8K_8366_AP_FW(api)
3996
3841static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = { 3997static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
3842 [MWL8363] = { 3998 [MWL8363] = {
3843 .part_name = "88w8363", 3999 .part_name = "88w8363",
3844 .helper_image = "mwl8k/helper_8363.fw", 4000 .helper_image = "mwl8k/helper_8363.fw",
3845 .fw_image = "mwl8k/fmimage_8363.fw", 4001 .fw_image_sta = "mwl8k/fmimage_8363.fw",
3846 }, 4002 },
3847 [MWL8687] = { 4003 [MWL8687] = {
3848 .part_name = "88w8687", 4004 .part_name = "88w8687",
3849 .helper_image = "mwl8k/helper_8687.fw", 4005 .helper_image = "mwl8k/helper_8687.fw",
3850 .fw_image = "mwl8k/fmimage_8687.fw", 4006 .fw_image_sta = "mwl8k/fmimage_8687.fw",
3851 }, 4007 },
3852 [MWL8366] = { 4008 [MWL8366] = {
3853 .part_name = "88w8366", 4009 .part_name = "88w8366",
3854 .helper_image = "mwl8k/helper_8366.fw", 4010 .helper_image = "mwl8k/helper_8366.fw",
3855 .fw_image = "mwl8k/fmimage_8366.fw", 4011 .fw_image_sta = "mwl8k/fmimage_8366.fw",
4012 .fw_image_ap = MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API),
4013 .fw_api_ap = MWL8K_8366_AP_FW_API,
3856 .ap_rxd_ops = &rxd_8366_ap_ops, 4014 .ap_rxd_ops = &rxd_8366_ap_ops,
3857 }, 4015 },
3858}; 4016};
@@ -3863,6 +4021,7 @@ MODULE_FIRMWARE("mwl8k/helper_8687.fw");
3863MODULE_FIRMWARE("mwl8k/fmimage_8687.fw"); 4021MODULE_FIRMWARE("mwl8k/fmimage_8687.fw");
3864MODULE_FIRMWARE("mwl8k/helper_8366.fw"); 4022MODULE_FIRMWARE("mwl8k/helper_8366.fw");
3865MODULE_FIRMWARE("mwl8k/fmimage_8366.fw"); 4023MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
4024MODULE_FIRMWARE(MWL8K_8366_AP_FW(MWL8K_8366_AP_FW_API));
3866 4025
3867static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = { 4026static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3868 { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, }, 4027 { PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, },
@@ -3876,94 +4035,133 @@ static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3876}; 4035};
3877MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table); 4036MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
3878 4037
3879static int __devinit mwl8k_probe(struct pci_dev *pdev, 4038static int mwl8k_request_alt_fw(struct mwl8k_priv *priv)
3880 const struct pci_device_id *id)
3881{ 4039{
3882 static int printed_version = 0;
3883 struct ieee80211_hw *hw;
3884 struct mwl8k_priv *priv;
3885 int rc; 4040 int rc;
3886 int i; 4041 printk(KERN_ERR "%s: Error requesting preferred fw %s.\n"
3887 4042 "Trying alternative firmware %s\n", pci_name(priv->pdev),
3888 if (!printed_version) { 4043 priv->fw_pref, priv->fw_alt);
3889 printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION); 4044 rc = mwl8k_request_fw(priv, priv->fw_alt, &priv->fw_ucode, true);
3890 printed_version = 1;
3891 }
3892
3893
3894 rc = pci_enable_device(pdev);
3895 if (rc) { 4045 if (rc) {
3896 printk(KERN_ERR "%s: Cannot enable new PCI device\n", 4046 printk(KERN_ERR "%s: Error requesting alt fw %s\n",
3897 MWL8K_NAME); 4047 pci_name(priv->pdev), priv->fw_alt);
3898 return rc; 4048 return rc;
3899 } 4049 }
4050 return 0;
4051}
3900 4052
3901 rc = pci_request_regions(pdev, MWL8K_NAME); 4053static int mwl8k_firmware_load_success(struct mwl8k_priv *priv);
3902 if (rc) { 4054static void mwl8k_fw_state_machine(const struct firmware *fw, void *context)
3903 printk(KERN_ERR "%s: Cannot obtain PCI resources\n", 4055{
3904 MWL8K_NAME); 4056 struct mwl8k_priv *priv = context;
3905 goto err_disable_device; 4057 struct mwl8k_device_info *di = priv->device_info;
3906 } 4058 int rc;
3907
3908 pci_set_master(pdev);
3909
3910
3911 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
3912 if (hw == NULL) {
3913 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
3914 rc = -ENOMEM;
3915 goto err_free_reg;
3916 }
3917 4059
3918 SET_IEEE80211_DEV(hw, &pdev->dev); 4060 switch (priv->fw_state) {
3919 pci_set_drvdata(pdev, hw); 4061 case FW_STATE_INIT:
4062 if (!fw) {
4063 printk(KERN_ERR "%s: Error requesting helper fw %s\n",
4064 pci_name(priv->pdev), di->helper_image);
4065 goto fail;
4066 }
4067 priv->fw_helper = fw;
4068 rc = mwl8k_request_fw(priv, priv->fw_pref, &priv->fw_ucode,
4069 true);
4070 if (rc && priv->fw_alt) {
4071 rc = mwl8k_request_alt_fw(priv);
4072 if (rc)
4073 goto fail;
4074 priv->fw_state = FW_STATE_LOADING_ALT;
4075 } else if (rc)
4076 goto fail;
4077 else
4078 priv->fw_state = FW_STATE_LOADING_PREF;
4079 break;
3920 4080
3921 priv = hw->priv; 4081 case FW_STATE_LOADING_PREF:
3922 priv->hw = hw; 4082 if (!fw) {
3923 priv->pdev = pdev; 4083 if (priv->fw_alt) {
3924 priv->device_info = &mwl8k_info_tbl[id->driver_data]; 4084 rc = mwl8k_request_alt_fw(priv);
4085 if (rc)
4086 goto fail;
4087 priv->fw_state = FW_STATE_LOADING_ALT;
4088 } else
4089 goto fail;
4090 } else {
4091 priv->fw_ucode = fw;
4092 rc = mwl8k_firmware_load_success(priv);
4093 if (rc)
4094 goto fail;
4095 else
4096 complete(&priv->firmware_loading_complete);
4097 }
4098 break;
3925 4099
4100 case FW_STATE_LOADING_ALT:
4101 if (!fw) {
4102 printk(KERN_ERR "%s: Error requesting alt fw %s\n",
4103 pci_name(priv->pdev), di->helper_image);
4104 goto fail;
4105 }
4106 priv->fw_ucode = fw;
4107 rc = mwl8k_firmware_load_success(priv);
4108 if (rc)
4109 goto fail;
4110 else
4111 complete(&priv->firmware_loading_complete);
4112 break;
3926 4113
3927 priv->sram = pci_iomap(pdev, 0, 0x10000); 4114 default:
3928 if (priv->sram == NULL) { 4115 printk(KERN_ERR "%s: Unexpected firmware loading state: %d\n",
3929 wiphy_err(hw->wiphy, "Cannot map device SRAM\n"); 4116 MWL8K_NAME, priv->fw_state);
3930 goto err_iounmap; 4117 BUG_ON(1);
3931 } 4118 }
3932 4119
3933 /* 4120 return;
3934 * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
3935 * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
3936 */
3937 priv->regs = pci_iomap(pdev, 1, 0x10000);
3938 if (priv->regs == NULL) {
3939 priv->regs = pci_iomap(pdev, 2, 0x10000);
3940 if (priv->regs == NULL) {
3941 wiphy_err(hw->wiphy, "Cannot map device registers\n");
3942 goto err_iounmap;
3943 }
3944 }
3945 4121
4122fail:
4123 priv->fw_state = FW_STATE_ERROR;
4124 complete(&priv->firmware_loading_complete);
4125 device_release_driver(&priv->pdev->dev);
4126 mwl8k_release_firmware(priv);
4127}
4128
4129static int mwl8k_init_firmware(struct ieee80211_hw *hw, char *fw_image,
4130 bool nowait)
4131{
4132 struct mwl8k_priv *priv = hw->priv;
4133 int rc;
3946 4134
3947 /* Reset firmware and hardware */ 4135 /* Reset firmware and hardware */
3948 mwl8k_hw_reset(priv); 4136 mwl8k_hw_reset(priv);
3949 4137
3950 /* Ask userland hotplug daemon for the device firmware */ 4138 /* Ask userland hotplug daemon for the device firmware */
3951 rc = mwl8k_request_firmware(priv); 4139 rc = mwl8k_request_firmware(priv, fw_image, nowait);
3952 if (rc) { 4140 if (rc) {
3953 wiphy_err(hw->wiphy, "Firmware files not found\n"); 4141 wiphy_err(hw->wiphy, "Firmware files not found\n");
3954 goto err_stop_firmware; 4142 return rc;
3955 } 4143 }
3956 4144
4145 if (nowait)
4146 return rc;
4147
3957 /* Load firmware into hardware */ 4148 /* Load firmware into hardware */
3958 rc = mwl8k_load_firmware(hw); 4149 rc = mwl8k_load_firmware(hw);
3959 if (rc) { 4150 if (rc)
3960 wiphy_err(hw->wiphy, "Cannot start firmware\n"); 4151 wiphy_err(hw->wiphy, "Cannot start firmware\n");
3961 goto err_stop_firmware;
3962 }
3963 4152
3964 /* Reclaim memory once firmware is successfully loaded */ 4153 /* Reclaim memory once firmware is successfully loaded */
3965 mwl8k_release_firmware(priv); 4154 mwl8k_release_firmware(priv);
3966 4155
4156 return rc;
4157}
4158
4159/* initialize hw after successfully loading a firmware image */
4160static int mwl8k_probe_hw(struct ieee80211_hw *hw)
4161{
4162 struct mwl8k_priv *priv = hw->priv;
4163 int rc = 0;
4164 int i;
3967 4165
3968 if (priv->ap_fw) { 4166 if (priv->ap_fw) {
3969 priv->rxd_ops = priv->device_info->ap_rxd_ops; 4167 priv->rxd_ops = priv->device_info->ap_rxd_ops;
@@ -3980,58 +4178,11 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3980 priv->wmm_enabled = false; 4178 priv->wmm_enabled = false;
3981 priv->pending_tx_pkts = 0; 4179 priv->pending_tx_pkts = 0;
3982 4180
3983
3984 /*
3985 * Extra headroom is the size of the required DMA header
3986 * minus the size of the smallest 802.11 frame (CTS frame).
3987 */
3988 hw->extra_tx_headroom =
3989 sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
3990
3991 hw->channel_change_time = 10;
3992
3993 hw->queues = MWL8K_TX_QUEUES;
3994
3995 /* Set rssi values to dBm */
3996 hw->flags |= IEEE80211_HW_SIGNAL_DBM;
3997 hw->vif_data_size = sizeof(struct mwl8k_vif);
3998 hw->sta_data_size = sizeof(struct mwl8k_sta);
3999
4000 priv->macids_used = 0;
4001 INIT_LIST_HEAD(&priv->vif_list);
4002
4003 /* Set default radio state and preamble */
4004 priv->radio_on = 0;
4005 priv->radio_short_preamble = 0;
4006
4007 /* Finalize join worker */
4008 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
4009
4010 /* TX reclaim and RX tasklets. */
4011 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
4012 tasklet_disable(&priv->poll_tx_task);
4013 tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
4014 tasklet_disable(&priv->poll_rx_task);
4015
4016 /* Power management cookie */
4017 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
4018 if (priv->cookie == NULL)
4019 goto err_stop_firmware;
4020
4021 rc = mwl8k_rxq_init(hw, 0); 4181 rc = mwl8k_rxq_init(hw, 0);
4022 if (rc) 4182 if (rc)
4023 goto err_free_cookie; 4183 goto err_stop_firmware;
4024 rxq_refill(hw, 0, INT_MAX); 4184 rxq_refill(hw, 0, INT_MAX);
4025 4185
4026 mutex_init(&priv->fw_mutex);
4027 priv->fw_mutex_owner = NULL;
4028 priv->fw_mutex_depth = 0;
4029 priv->hostcmd_wait = NULL;
4030
4031 spin_lock_init(&priv->tx_lock);
4032
4033 priv->tx_wait = NULL;
4034
4035 for (i = 0; i < MWL8K_TX_QUEUES; i++) { 4186 for (i = 0; i < MWL8K_TX_QUEUES; i++) {
4036 rc = mwl8k_txq_init(hw, i); 4187 rc = mwl8k_txq_init(hw, i);
4037 if (rc) 4188 if (rc)
@@ -4071,13 +4222,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4071 goto err_free_irq; 4222 goto err_free_irq;
4072 } 4223 }
4073 4224
4074 hw->wiphy->interface_modes = 0;
4075 if (priv->ap_macids_supported)
4076 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
4077 if (priv->sta_macids_supported)
4078 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
4079
4080
4081 /* Turn radio off */ 4225 /* Turn radio off */
4082 rc = mwl8k_cmd_radio_disable(hw); 4226 rc = mwl8k_cmd_radio_disable(hw);
4083 if (rc) { 4227 if (rc) {
@@ -4096,12 +4240,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
4096 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 4240 iowrite32(0, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
4097 free_irq(priv->pdev->irq, hw); 4241 free_irq(priv->pdev->irq, hw);
4098 4242
4099 rc = ieee80211_register_hw(hw);
4100 if (rc) {
4101 wiphy_err(hw->wiphy, "Cannot register device\n");
4102 goto err_free_queues;
4103 }
4104
4105 wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n", 4243 wiphy_info(hw->wiphy, "%s v%d, %pm, %s firmware %u.%u.%u.%u\n",
4106 priv->device_info->part_name, 4244 priv->device_info->part_name,
4107 priv->hw_rev, hw->wiphy->perm_addr, 4245 priv->hw_rev, hw->wiphy->perm_addr,
@@ -4120,14 +4258,238 @@ err_free_queues:
4120 mwl8k_txq_deinit(hw, i); 4258 mwl8k_txq_deinit(hw, i);
4121 mwl8k_rxq_deinit(hw, 0); 4259 mwl8k_rxq_deinit(hw, 0);
4122 4260
4261err_stop_firmware:
4262 mwl8k_hw_reset(priv);
4263
4264 return rc;
4265}
4266
4267/*
4268 * invoke mwl8k_reload_firmware to change the firmware image after the device
4269 * has already been registered
4270 */
4271static int mwl8k_reload_firmware(struct ieee80211_hw *hw, char *fw_image)
4272{
4273 int i, rc = 0;
4274 struct mwl8k_priv *priv = hw->priv;
4275
4276 mwl8k_stop(hw);
4277 mwl8k_rxq_deinit(hw, 0);
4278
4279 for (i = 0; i < MWL8K_TX_QUEUES; i++)
4280 mwl8k_txq_deinit(hw, i);
4281
4282 rc = mwl8k_init_firmware(hw, fw_image, false);
4283 if (rc)
4284 goto fail;
4285
4286 rc = mwl8k_probe_hw(hw);
4287 if (rc)
4288 goto fail;
4289
4290 rc = mwl8k_start(hw);
4291 if (rc)
4292 goto fail;
4293
4294 rc = mwl8k_config(hw, ~0);
4295 if (rc)
4296 goto fail;
4297
4298 for (i = 0; i < MWL8K_TX_QUEUES; i++) {
4299 rc = mwl8k_conf_tx(hw, i, &priv->wmm_params[i]);
4300 if (rc)
4301 goto fail;
4302 }
4303
4304 return rc;
4305
4306fail:
4307 printk(KERN_WARNING "mwl8k: Failed to reload firmware image.\n");
4308 return rc;
4309}
4310
4311static int mwl8k_firmware_load_success(struct mwl8k_priv *priv)
4312{
4313 struct ieee80211_hw *hw = priv->hw;
4314 int i, rc;
4315
4316 rc = mwl8k_load_firmware(hw);
4317 mwl8k_release_firmware(priv);
4318 if (rc) {
4319 wiphy_err(hw->wiphy, "Cannot start firmware\n");
4320 return rc;
4321 }
4322
4323 /*
4324 * Extra headroom is the size of the required DMA header
4325 * minus the size of the smallest 802.11 frame (CTS frame).
4326 */
4327 hw->extra_tx_headroom =
4328 sizeof(struct mwl8k_dma_data) - sizeof(struct ieee80211_cts);
4329
4330 hw->channel_change_time = 10;
4331
4332 hw->queues = MWL8K_TX_QUEUES;
4333
4334 /* Set rssi values to dBm */
4335 hw->flags |= IEEE80211_HW_SIGNAL_DBM;
4336 hw->vif_data_size = sizeof(struct mwl8k_vif);
4337 hw->sta_data_size = sizeof(struct mwl8k_sta);
4338
4339 priv->macids_used = 0;
4340 INIT_LIST_HEAD(&priv->vif_list);
4341
4342 /* Set default radio state and preamble */
4343 priv->radio_on = 0;
4344 priv->radio_short_preamble = 0;
4345
4346 /* Finalize join worker */
4347 INIT_WORK(&priv->finalize_join_worker, mwl8k_finalize_join_worker);
4348
4349 /* TX reclaim and RX tasklets. */
4350 tasklet_init(&priv->poll_tx_task, mwl8k_tx_poll, (unsigned long)hw);
4351 tasklet_disable(&priv->poll_tx_task);
4352 tasklet_init(&priv->poll_rx_task, mwl8k_rx_poll, (unsigned long)hw);
4353 tasklet_disable(&priv->poll_rx_task);
4354
4355 /* Power management cookie */
4356 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
4357 if (priv->cookie == NULL)
4358 return -ENOMEM;
4359
4360 mutex_init(&priv->fw_mutex);
4361 priv->fw_mutex_owner = NULL;
4362 priv->fw_mutex_depth = 0;
4363 priv->hostcmd_wait = NULL;
4364
4365 spin_lock_init(&priv->tx_lock);
4366
4367 priv->tx_wait = NULL;
4368
4369 rc = mwl8k_probe_hw(hw);
4370 if (rc)
4371 goto err_free_cookie;
4372
4373 hw->wiphy->interface_modes = 0;
4374 if (priv->ap_macids_supported || priv->device_info->fw_image_ap)
4375 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP);
4376 if (priv->sta_macids_supported || priv->device_info->fw_image_sta)
4377 hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_STATION);
4378
4379 rc = ieee80211_register_hw(hw);
4380 if (rc) {
4381 wiphy_err(hw->wiphy, "Cannot register device\n");
4382 goto err_unprobe_hw;
4383 }
4384
4385 return 0;
4386
4387err_unprobe_hw:
4388 for (i = 0; i < MWL8K_TX_QUEUES; i++)
4389 mwl8k_txq_deinit(hw, i);
4390 mwl8k_rxq_deinit(hw, 0);
4391
4123err_free_cookie: 4392err_free_cookie:
4124 if (priv->cookie != NULL) 4393 if (priv->cookie != NULL)
4125 pci_free_consistent(priv->pdev, 4, 4394 pci_free_consistent(priv->pdev, 4,
4126 priv->cookie, priv->cookie_dma); 4395 priv->cookie, priv->cookie_dma);
4127 4396
4397 return rc;
4398}
4399static int __devinit mwl8k_probe(struct pci_dev *pdev,
4400 const struct pci_device_id *id)
4401{
4402 static int printed_version;
4403 struct ieee80211_hw *hw;
4404 struct mwl8k_priv *priv;
4405 struct mwl8k_device_info *di;
4406 int rc;
4407
4408 if (!printed_version) {
4409 printk(KERN_INFO "%s version %s\n", MWL8K_DESC, MWL8K_VERSION);
4410 printed_version = 1;
4411 }
4412
4413
4414 rc = pci_enable_device(pdev);
4415 if (rc) {
4416 printk(KERN_ERR "%s: Cannot enable new PCI device\n",
4417 MWL8K_NAME);
4418 return rc;
4419 }
4420
4421 rc = pci_request_regions(pdev, MWL8K_NAME);
4422 if (rc) {
4423 printk(KERN_ERR "%s: Cannot obtain PCI resources\n",
4424 MWL8K_NAME);
4425 goto err_disable_device;
4426 }
4427
4428 pci_set_master(pdev);
4429
4430
4431 hw = ieee80211_alloc_hw(sizeof(*priv), &mwl8k_ops);
4432 if (hw == NULL) {
4433 printk(KERN_ERR "%s: ieee80211 alloc failed\n", MWL8K_NAME);
4434 rc = -ENOMEM;
4435 goto err_free_reg;
4436 }
4437
4438 SET_IEEE80211_DEV(hw, &pdev->dev);
4439 pci_set_drvdata(pdev, hw);
4440
4441 priv = hw->priv;
4442 priv->hw = hw;
4443 priv->pdev = pdev;
4444 priv->device_info = &mwl8k_info_tbl[id->driver_data];
4445
4446
4447 priv->sram = pci_iomap(pdev, 0, 0x10000);
4448 if (priv->sram == NULL) {
4449 wiphy_err(hw->wiphy, "Cannot map device SRAM\n");
4450 goto err_iounmap;
4451 }
4452
4453 /*
4454 * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
4455 * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
4456 */
4457 priv->regs = pci_iomap(pdev, 1, 0x10000);
4458 if (priv->regs == NULL) {
4459 priv->regs = pci_iomap(pdev, 2, 0x10000);
4460 if (priv->regs == NULL) {
4461 wiphy_err(hw->wiphy, "Cannot map device registers\n");
4462 goto err_iounmap;
4463 }
4464 }
4465
4466 /*
4467 * Choose the initial fw image depending on user input. If a second
4468 * image is available, make it the alternative image that will be
4469 * loaded if the first one fails.
4470 */
4471 init_completion(&priv->firmware_loading_complete);
4472 di = priv->device_info;
4473 if (ap_mode_default && di->fw_image_ap) {
4474 priv->fw_pref = di->fw_image_ap;
4475 priv->fw_alt = di->fw_image_sta;
4476 } else if (!ap_mode_default && di->fw_image_sta) {
4477 priv->fw_pref = di->fw_image_sta;
4478 priv->fw_alt = di->fw_image_ap;
4479 } else if (ap_mode_default && !di->fw_image_ap && di->fw_image_sta) {
4480 printk(KERN_WARNING "AP fw is unavailable. Using STA fw.");
4481 priv->fw_pref = di->fw_image_sta;
4482 } else if (!ap_mode_default && !di->fw_image_sta && di->fw_image_ap) {
4483 printk(KERN_WARNING "STA fw is unavailable. Using AP fw.");
4484 priv->fw_pref = di->fw_image_ap;
4485 }
4486 rc = mwl8k_init_firmware(hw, priv->fw_pref, true);
4487 if (rc)
4488 goto err_stop_firmware;
4489 return rc;
4490
4128err_stop_firmware: 4491err_stop_firmware:
4129 mwl8k_hw_reset(priv); 4492 mwl8k_hw_reset(priv);
4130 mwl8k_release_firmware(priv);
4131 4493
4132err_iounmap: 4494err_iounmap:
4133 if (priv->regs != NULL) 4495 if (priv->regs != NULL)
@@ -4163,6 +4525,13 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
4163 return; 4525 return;
4164 priv = hw->priv; 4526 priv = hw->priv;
4165 4527
4528 wait_for_completion(&priv->firmware_loading_complete);
4529
4530 if (priv->fw_state == FW_STATE_ERROR) {
4531 mwl8k_hw_reset(priv);
4532 goto unmap;
4533 }
4534
4166 ieee80211_stop_queues(hw); 4535 ieee80211_stop_queues(hw);
4167 4536
4168 ieee80211_unregister_hw(hw); 4537 ieee80211_unregister_hw(hw);
@@ -4185,6 +4554,7 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
4185 4554
4186 pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma); 4555 pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma);
4187 4556
4557unmap:
4188 pci_iounmap(pdev, priv->regs); 4558 pci_iounmap(pdev, priv->regs);
4189 pci_iounmap(pdev, priv->sram); 4559 pci_iounmap(pdev, priv->sram);
4190 pci_set_drvdata(pdev, NULL); 4560 pci_set_drvdata(pdev, NULL);
diff --git a/drivers/net/wireless/orinoco/wext.c b/drivers/net/wireless/orinoco/wext.c
index e5afabee60d1..e793679e2e19 100644
--- a/drivers/net/wireless/orinoco/wext.c
+++ b/drivers/net/wireless/orinoco/wext.c
@@ -893,6 +893,14 @@ static int orinoco_ioctl_set_auth(struct net_device *dev,
893 */ 893 */
894 break; 894 break;
895 895
896 case IW_AUTH_MFP:
897 /* Management Frame Protection not supported.
898 * Only fail if set to required.
899 */
900 if (param->value == IW_AUTH_MFP_REQUIRED)
901 ret = -EINVAL;
902 break;
903
896 case IW_AUTH_KEY_MGMT: 904 case IW_AUTH_KEY_MGMT:
897 /* wl_lkm implies value 2 == PSK for Hermes I 905 /* wl_lkm implies value 2 == PSK for Hermes I
898 * which ties in with WEXT 906 * which ties in with WEXT
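
The orinoco hunk above accepts any Management Frame Protection setting except an explicit "required", because the hardware cannot provide MFP. A tiny sketch of that policy, with a local enum standing in for the IW_AUTH_MFP_* values and -1 standing in for -EINVAL:

#include <stdio.h>

/* Local stand-ins for the IW_AUTH_MFP_* values. */
enum mfp_setting { MFP_DISABLED, MFP_OPTIONAL, MFP_REQUIRED };

/*
 * Hardware without management frame protection: accept "disabled" and
 * "optional" silently, refuse only an explicit "required" request.
 */
static int set_mfp(enum mfp_setting value)
{
        if (value == MFP_REQUIRED)
                return -1;      /* stands in for -EINVAL */
        return 0;
}

int main(void)
{
        printf("optional: %d, required: %d\n",
               set_mfp(MFP_OPTIONAL), set_mfp(MFP_REQUIRED));
        return 0;
}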
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index d5bc21e5a02c..21713a7638c4 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -43,6 +43,7 @@ MODULE_FIRMWARE("isl3887usb");
43 43
44static struct usb_device_id p54u_table[] __devinitdata = { 44static struct usb_device_id p54u_table[] __devinitdata = {
45 /* Version 1 devices (pci chip + net2280) */ 45 /* Version 1 devices (pci chip + net2280) */
46 {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */
46 {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ 47 {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */
47 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ 48 {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */
48 {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ 49 {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */
@@ -56,9 +57,13 @@ static struct usb_device_id p54u_table[] __devinitdata = {
56 {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ 57 {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */
57 {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ 58 {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */
58 {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ 59 {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */
60 {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */
59 {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ 61 {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */
60 {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ 62 {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */
63 {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */
64 {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */
61 {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */ 65 {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */
66 {USB_DEVICE(0x182d, 0x096b)}, /* Sitecom WL-107 */
62 {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */ 67 {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */
63 {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ 68 {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */
64 {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ 69 {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */
@@ -94,6 +99,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
94 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ 99 {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */
95 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ 100 {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */
96 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ 101 {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */
102 {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */
97 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ 103 {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */
98 {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ 104 {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */
99 {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */ 105 {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */
@@ -183,7 +189,7 @@ static void p54u_rx_cb(struct urb *urb)
183static void p54u_tx_cb(struct urb *urb) 189static void p54u_tx_cb(struct urb *urb)
184{ 190{
185 struct sk_buff *skb = urb->context; 191 struct sk_buff *skb = urb->context;
186 struct ieee80211_hw *dev = (struct ieee80211_hw *) 192 struct ieee80211_hw *dev =
187 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); 193 usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0));
188 194
189 p54_free_skb(dev, skb); 195 p54_free_skb(dev, skb);
diff --git a/drivers/net/wireless/ray_cs.c b/drivers/net/wireless/ray_cs.c
index 97007d9e2c1f..0764d1a30d13 100644
--- a/drivers/net/wireless/ray_cs.c
+++ b/drivers/net/wireless/ray_cs.c
@@ -1776,11 +1776,8 @@ static void ray_update_multi_list(struct net_device *dev, int all)
1776 /* Copy the kernel's list of MC addresses to card */ 1776 /* Copy the kernel's list of MC addresses to card */
1777 netdev_for_each_mc_addr(ha, dev) { 1777 netdev_for_each_mc_addr(ha, dev) {
1778 memcpy_toio(p, ha->addr, ETH_ALEN); 1778 memcpy_toio(p, ha->addr, ETH_ALEN);
1779 dev_dbg(&link->dev, 1779 dev_dbg(&link->dev, "ray_update_multi add addr %pm\n",
1780 "ray_update_multi add addr %02x%02x%02x%02x%02x%02x\n", 1780 ha->addr);
1781 ha->addr[0], ha->addr[1],
1782 ha->addr[2], ha->addr[3],
1783 ha->addr[4], ha->addr[5]);
1784 p += ETH_ALEN; 1781 p += ETH_ALEN;
1785 i++; 1782 i++;
1786 } 1783 }
@@ -2015,11 +2012,8 @@ static irqreturn_t ray_interrupt(int irq, void *dev_id)
2015 memcpy_fromio(&local->bss_id, 2012 memcpy_fromio(&local->bss_id,
2016 prcs->var.rejoin_net_complete. 2013 prcs->var.rejoin_net_complete.
2017 bssid, ADDRLEN); 2014 bssid, ADDRLEN);
2018 dev_dbg(&link->dev, 2015 dev_dbg(&link->dev, "ray_cs new BSSID = %pm\n",
2019 "ray_cs new BSSID = %02x%02x%02x%02x%02x%02x\n", 2016 local->bss_id);
2020 local->bss_id[0], local->bss_id[1],
2021 local->bss_id[2], local->bss_id[3],
2022 local->bss_id[4], local->bss_id[5]);
2023 if (!sniffer) 2017 if (!sniffer)
2024 authenticate(local); 2018 authenticate(local);
2025 } 2019 }
@@ -2286,8 +2280,8 @@ static void untranslate(ray_dev_t *local, struct sk_buff *skb, int len)
2286 struct ethhdr *peth; 2280 struct ethhdr *peth;
2287 UCHAR srcaddr[ADDRLEN]; 2281 UCHAR srcaddr[ADDRLEN];
2288 UCHAR destaddr[ADDRLEN]; 2282 UCHAR destaddr[ADDRLEN];
2289 static UCHAR org_bridge[3] = { 0, 0, 0xf8 }; 2283 static const UCHAR org_bridge[3] = { 0, 0, 0xf8 };
2290 static UCHAR org_1042[3] = { 0, 0, 0 }; 2284 static const UCHAR org_1042[3] = { 0, 0, 0 };
2291 2285
2292 memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN); 2286 memcpy(destaddr, ieee80211_get_DA(pmac), ADDRLEN);
2293 memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN); 2287 memcpy(srcaddr, ieee80211_get_SA(pmac), ADDRLEN);
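
The ray_cs hunks above replace the open-coded "%02x%02x%02x%02x%02x%02x" MAC printing with the kernel's "%pm" printk extension, which emits the six bytes back to back without separators (the colon-separated form is "%pM"), so the debug output stays byte-for-byte the same. A user-space sketch that produces the equivalent separator-free output:

#include <stdio.h>

/* Print a MAC address the way the kernel's "%pm" does: no separators. */
static void print_mac_plain(const unsigned char mac[6])
{
        int i;

        for (i = 0; i < 6; i++)
                printf("%02x", mac[i]);
        putchar('\n');
}

int main(void)
{
        const unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };

        print_mac_plain(mac);   /* prints 001122334455 */
        return 0;
}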
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index 71b5971da597..848cc2cce247 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -129,6 +129,7 @@ MODULE_PARM_DESC(workaround_interval,
129#define OID_802_11_RTS_THRESHOLD cpu_to_le32(0x0d01020a) 129#define OID_802_11_RTS_THRESHOLD cpu_to_le32(0x0d01020a)
130#define OID_802_11_SUPPORTED_RATES cpu_to_le32(0x0d01020e) 130#define OID_802_11_SUPPORTED_RATES cpu_to_le32(0x0d01020e)
131#define OID_802_11_CONFIGURATION cpu_to_le32(0x0d010211) 131#define OID_802_11_CONFIGURATION cpu_to_le32(0x0d010211)
132#define OID_802_11_POWER_MODE cpu_to_le32(0x0d010216)
132#define OID_802_11_BSSID_LIST cpu_to_le32(0x0d010217) 133#define OID_802_11_BSSID_LIST cpu_to_le32(0x0d010217)
133 134
134 135
@@ -156,6 +157,12 @@ MODULE_PARM_DESC(workaround_interval,
156#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012) 157#define RNDIS_STATUS_ADAPTER_NOT_OPEN cpu_to_le32(0xc0010012)
157 158
158 159
160/* Known device types */
161#define RNDIS_UNKNOWN 0
162#define RNDIS_BCM4320A 1
163#define RNDIS_BCM4320B 2
164
165
159/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c 166/* NDIS data structures. Taken from wpa_supplicant driver_ndis.c
160 * slightly modified for datatype endianess, etc 167 * slightly modified for datatype endianess, etc
161 */ 168 */
@@ -233,6 +240,12 @@ enum ndis_80211_addwep_bits {
233 NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31) 240 NDIS_80211_ADDWEP_TRANSMIT_KEY = cpu_to_le32(1 << 31)
234}; 241};
235 242
243enum ndis_80211_power_mode {
244 NDIS_80211_POWER_MODE_CAM,
245 NDIS_80211_POWER_MODE_MAX_PSP,
246 NDIS_80211_POWER_MODE_FAST_PSP,
247};
248
236struct ndis_80211_auth_request { 249struct ndis_80211_auth_request {
237 __le32 length; 250 __le32 length;
238 u8 bssid[6]; 251 u8 bssid[6];
@@ -472,12 +485,16 @@ struct rndis_wlan_private {
472 struct mutex command_lock; 485 struct mutex command_lock;
473 unsigned long work_pending; 486 unsigned long work_pending;
474 int last_qual; 487 int last_qual;
488 s32 cqm_rssi_thold;
489 u32 cqm_rssi_hyst;
490 int last_cqm_event_rssi;
475 491
476 struct ieee80211_supported_band band; 492 struct ieee80211_supported_band band;
477 struct ieee80211_channel channels[ARRAY_SIZE(rndis_channels)]; 493 struct ieee80211_channel channels[ARRAY_SIZE(rndis_channels)];
478 struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)]; 494 struct ieee80211_rate rates[ARRAY_SIZE(rndis_rates)];
479 u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)]; 495 u32 cipher_suites[ARRAY_SIZE(rndis_cipher_suites)];
480 496
497 int device_type;
481 int caps; 498 int caps;
482 int multicast_size; 499 int multicast_size;
483 500
@@ -493,10 +510,10 @@ struct rndis_wlan_private {
493 510
494 /* hardware state */ 511 /* hardware state */
495 bool radio_on; 512 bool radio_on;
513 int power_mode;
496 int infra_mode; 514 int infra_mode;
497 bool connected; 515 bool connected;
498 u8 bssid[ETH_ALEN]; 516 u8 bssid[ETH_ALEN];
499 struct ndis_80211_ssid essid;
500 __le32 current_command_oid; 517 __le32 current_command_oid;
501 518
502 /* encryption stuff */ 519 /* encryption stuff */
@@ -547,7 +564,7 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
547 u8 key_index, bool pairwise, const u8 *mac_addr); 564 u8 key_index, bool pairwise, const u8 *mac_addr);
548 565
549static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, 566static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
550 u8 key_index); 567 u8 key_index, bool unicast, bool multicast);
551 568
552static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev, 569static int rndis_get_station(struct wiphy *wiphy, struct net_device *dev,
553 u8 *mac, struct station_info *sinfo); 570 u8 *mac, struct station_info *sinfo);
@@ -563,7 +580,14 @@ static int rndis_del_pmksa(struct wiphy *wiphy, struct net_device *netdev,
563 580
564static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev); 581static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev);
565 582
566static struct cfg80211_ops rndis_config_ops = { 583static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
584 bool enabled, int timeout);
585
586static int rndis_set_cqm_rssi_config(struct wiphy *wiphy,
587 struct net_device *dev,
588 s32 rssi_thold, u32 rssi_hyst);
589
590static const struct cfg80211_ops rndis_config_ops = {
567 .change_virtual_intf = rndis_change_virtual_intf, 591 .change_virtual_intf = rndis_change_virtual_intf,
568 .scan = rndis_scan, 592 .scan = rndis_scan,
569 .set_wiphy_params = rndis_set_wiphy_params, 593 .set_wiphy_params = rndis_set_wiphy_params,
@@ -582,6 +606,8 @@ static struct cfg80211_ops rndis_config_ops = {
582 .set_pmksa = rndis_set_pmksa, 606 .set_pmksa = rndis_set_pmksa,
583 .del_pmksa = rndis_del_pmksa, 607 .del_pmksa = rndis_del_pmksa,
584 .flush_pmksa = rndis_flush_pmksa, 608 .flush_pmksa = rndis_flush_pmksa,
609 .set_power_mgmt = rndis_set_power_mgmt,
610 .set_cqm_rssi_config = rndis_set_cqm_rssi_config,
585}; 611};
586 612
587static void *rndis_wiphy_privid = &rndis_wiphy_privid; 613static void *rndis_wiphy_privid = &rndis_wiphy_privid;
@@ -680,6 +706,7 @@ static const char *oid_to_string(__le32 oid)
680 OID_STR(OID_802_11_ADD_KEY); 706 OID_STR(OID_802_11_ADD_KEY);
681 OID_STR(OID_802_11_REMOVE_KEY); 707 OID_STR(OID_802_11_REMOVE_KEY);
682 OID_STR(OID_802_11_ASSOCIATION_INFORMATION); 708 OID_STR(OID_802_11_ASSOCIATION_INFORMATION);
709 OID_STR(OID_802_11_CAPABILITY);
683 OID_STR(OID_802_11_PMKID); 710 OID_STR(OID_802_11_PMKID);
684 OID_STR(OID_802_11_NETWORK_TYPES_SUPPORTED); 711 OID_STR(OID_802_11_NETWORK_TYPES_SUPPORTED);
685 OID_STR(OID_802_11_NETWORK_TYPE_IN_USE); 712 OID_STR(OID_802_11_NETWORK_TYPE_IN_USE);
@@ -690,6 +717,7 @@ static const char *oid_to_string(__le32 oid)
690 OID_STR(OID_802_11_RTS_THRESHOLD); 717 OID_STR(OID_802_11_RTS_THRESHOLD);
691 OID_STR(OID_802_11_SUPPORTED_RATES); 718 OID_STR(OID_802_11_SUPPORTED_RATES);
692 OID_STR(OID_802_11_CONFIGURATION); 719 OID_STR(OID_802_11_CONFIGURATION);
720 OID_STR(OID_802_11_POWER_MODE);
693 OID_STR(OID_802_11_BSSID_LIST); 721 OID_STR(OID_802_11_BSSID_LIST);
694#undef OID_STR 722#undef OID_STR
695 } 723 }
@@ -810,7 +838,8 @@ exit_unlock:
810 return ret; 838 return ret;
811} 839}
812 840
813static int rndis_set_oid(struct usbnet *dev, __le32 oid, void *data, int len) 841static int rndis_set_oid(struct usbnet *dev, __le32 oid, const void *data,
842 int len)
814{ 843{
815 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev); 844 struct rndis_wlan_private *priv = get_rndis_wlan_priv(dev);
816 union { 845 union {
@@ -994,7 +1023,18 @@ static int level_to_qual(int level)
994 */ 1023 */
995static int set_infra_mode(struct usbnet *usbdev, int mode); 1024static int set_infra_mode(struct usbnet *usbdev, int mode);
996static void restore_keys(struct usbnet *usbdev); 1025static void restore_keys(struct usbnet *usbdev);
997static int rndis_check_bssid_list(struct usbnet *usbdev); 1026static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
1027 bool *matched);
1028
1029static int rndis_start_bssid_list_scan(struct usbnet *usbdev)
1030{
1031 __le32 tmp;
1032
1033 /* Note: OID_802_11_BSSID_LIST_SCAN clears internal BSS list. */
1034 tmp = cpu_to_le32(1);
1035 return rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
1036 sizeof(tmp));
1037}
998 1038
999static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid) 1039static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
1000{ 1040{
@@ -1007,7 +1047,6 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
1007 return ret; 1047 return ret;
1008 } 1048 }
1009 if (ret == 0) { 1049 if (ret == 0) {
1010 memcpy(&priv->essid, ssid, sizeof(priv->essid));
1011 priv->radio_on = true; 1050 priv->radio_on = true;
1012 netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__); 1051 netdev_dbg(usbdev->net, "%s(): radio_on = true\n", __func__);
1013 } 1052 }
@@ -1015,7 +1054,7 @@ static int set_essid(struct usbnet *usbdev, struct ndis_80211_ssid *ssid)
1015 return ret; 1054 return ret;
1016} 1055}
1017 1056
1018static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN]) 1057static int set_bssid(struct usbnet *usbdev, const u8 *bssid)
1019{ 1058{
1020 int ret; 1059 int ret;
1021 1060
@@ -1031,7 +1070,9 @@ static int set_bssid(struct usbnet *usbdev, u8 bssid[ETH_ALEN])
1031 1070
1032static int clear_bssid(struct usbnet *usbdev) 1071static int clear_bssid(struct usbnet *usbdev)
1033{ 1072{
1034 u8 broadcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff}; 1073 static const u8 broadcast_mac[ETH_ALEN] = {
1074 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1075 };
1035 1076
1036 return set_bssid(usbdev, broadcast_mac); 1077 return set_bssid(usbdev, broadcast_mac);
1037} 1078}
@@ -1904,14 +1945,14 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
1904 struct usbnet *usbdev = netdev_priv(dev); 1945 struct usbnet *usbdev = netdev_priv(dev);
1905 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 1946 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
1906 int ret; 1947 int ret;
1907 __le32 tmp; 1948 int delay = SCAN_DELAY_JIFFIES;
1908 1949
1909 netdev_dbg(usbdev->net, "cfg80211.scan\n"); 1950 netdev_dbg(usbdev->net, "cfg80211.scan\n");
1910 1951
1911 /* Get current bssid list from device before new scan, as new scan 1952 /* Get current bssid list from device before new scan, as new scan
1912 * clears internal bssid list. 1953 * clears internal bssid list.
1913 */ 1954 */
1914 rndis_check_bssid_list(usbdev); 1955 rndis_check_bssid_list(usbdev, NULL, NULL);
1915 1956
1916 if (!request) 1957 if (!request)
1917 return -EINVAL; 1958 return -EINVAL;
@@ -1921,13 +1962,13 @@ static int rndis_scan(struct wiphy *wiphy, struct net_device *dev,
1921 1962
1922 priv->scan_request = request; 1963 priv->scan_request = request;
1923 1964
1924 tmp = cpu_to_le32(1); 1965 ret = rndis_start_bssid_list_scan(usbdev);
1925 ret = rndis_set_oid(usbdev, OID_802_11_BSSID_LIST_SCAN, &tmp,
1926 sizeof(tmp));
1927 if (ret == 0) { 1966 if (ret == 0) {
1967 if (priv->device_type == RNDIS_BCM4320A)
1968 delay = HZ;
1969
1928 /* Wait before retrieving scan results from device */ 1970 /* Wait before retrieving scan results from device */
1929 queue_delayed_work(priv->workqueue, &priv->scan_work, 1971 queue_delayed_work(priv->workqueue, &priv->scan_work, delay);
1930 SCAN_DELAY_JIFFIES);
1931 } 1972 }
1932 1973
1933 return ret; 1974 return ret;
@@ -1946,8 +1987,8 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
1946 int ie_len, bssid_len; 1987 int ie_len, bssid_len;
1947 u8 *ie; 1988 u8 *ie;
1948 1989
1949 netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM]\n", 1990 netdev_dbg(usbdev->net, " found bssid: '%.32s' [%pM], len: %d\n",
1950 bssid->ssid.essid, bssid->mac); 1991 bssid->ssid.essid, bssid->mac, le32_to_cpu(bssid->length));
1951 1992
1952 /* parse bssid structure */ 1993 /* parse bssid structure */
1953 bssid_len = le32_to_cpu(bssid->length); 1994 bssid_len = le32_to_cpu(bssid->length);
@@ -1981,49 +2022,98 @@ static struct cfg80211_bss *rndis_bss_info_update(struct usbnet *usbdev,
1981 GFP_KERNEL); 2022 GFP_KERNEL);
1982} 2023}
1983 2024
1984static int rndis_check_bssid_list(struct usbnet *usbdev) 2025static struct ndis_80211_bssid_ex *next_bssid_list_item(
2026 struct ndis_80211_bssid_ex *bssid,
2027 int *bssid_len, void *buf, int len)
2028{
2029 void *buf_end, *bssid_end;
2030
2031 buf_end = (char *)buf + len;
2032 bssid_end = (char *)bssid + *bssid_len;
2033
2034 if ((int)(buf_end - bssid_end) < sizeof(bssid->length)) {
2035 *bssid_len = 0;
2036 return NULL;
2037 } else {
2038 bssid = (void *)((char *)bssid + *bssid_len);
2039 *bssid_len = le32_to_cpu(bssid->length);
2040 return bssid;
2041 }
2042}
2043
2044static bool check_bssid_list_item(struct ndis_80211_bssid_ex *bssid,
2045 int bssid_len, void *buf, int len)
2046{
2047 void *buf_end, *bssid_end;
2048
2049 if (!bssid || bssid_len <= 0 || bssid_len > len)
2050 return false;
2051
2052 buf_end = (char *)buf + len;
2053 bssid_end = (char *)bssid + bssid_len;
2054
2055 return (int)(buf_end - bssid_end) >= 0 && (int)(bssid_end - buf) >= 0;
2056}
2057
2058static int rndis_check_bssid_list(struct usbnet *usbdev, u8 *match_bssid,
2059 bool *matched)
1985{ 2060{
1986 void *buf = NULL; 2061 void *buf = NULL;
1987 struct ndis_80211_bssid_list_ex *bssid_list; 2062 struct ndis_80211_bssid_list_ex *bssid_list;
1988 struct ndis_80211_bssid_ex *bssid; 2063 struct ndis_80211_bssid_ex *bssid;
1989 int ret = -EINVAL, len, count, bssid_len; 2064 int ret = -EINVAL, len, count, bssid_len, real_count, new_len;
1990 bool resized = false;
1991 2065
1992 netdev_dbg(usbdev->net, "check_bssid_list\n"); 2066 netdev_dbg(usbdev->net, "%s()\n", __func__);
1993 2067
1994 len = CONTROL_BUFFER_SIZE; 2068 len = CONTROL_BUFFER_SIZE;
1995resize_buf: 2069resize_buf:
1996 buf = kmalloc(len, GFP_KERNEL); 2070 buf = kzalloc(len, GFP_KERNEL);
1997 if (!buf) { 2071 if (!buf) {
1998 ret = -ENOMEM; 2072 ret = -ENOMEM;
1999 goto out; 2073 goto out;
2000 } 2074 }
2001 2075
2002 ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &len); 2076 /* BSSID-list might have got bigger last time we checked, keep
2003 if (ret != 0) 2077 * resizing until it won't get any bigger.
2078 */
2079 new_len = len;
2080 ret = rndis_query_oid(usbdev, OID_802_11_BSSID_LIST, buf, &new_len);
2081 if (ret != 0 || new_len < sizeof(struct ndis_80211_bssid_list_ex))
2004 goto out; 2082 goto out;
2005 2083
2006 if (!resized && len > CONTROL_BUFFER_SIZE) { 2084 if (new_len > len) {
2007 resized = true; 2085 len = new_len;
2008 kfree(buf); 2086 kfree(buf);
2009 goto resize_buf; 2087 goto resize_buf;
2010 } 2088 }
2011 2089
2090 len = new_len;
2091
2012 bssid_list = buf; 2092 bssid_list = buf;
2013 bssid = bssid_list->bssid;
2014 bssid_len = le32_to_cpu(bssid->length);
2015 count = le32_to_cpu(bssid_list->num_items); 2093 count = le32_to_cpu(bssid_list->num_items);
2016 netdev_dbg(usbdev->net, "check_bssid_list: %d BSSIDs found (buflen: %d)\n", 2094 real_count = 0;
2017 count, len); 2095 netdev_dbg(usbdev->net, "%s(): buflen: %d\n", __func__, len);
2096
2097 bssid_len = 0;
2098 bssid = next_bssid_list_item(bssid_list->bssid, &bssid_len, buf, len);
2018 2099
2019 while (count && ((void *)bssid + bssid_len) <= (buf + len)) { 2100 /* Device returns incorrect 'num_items'. Workaround by ignoring the
2020 rndis_bss_info_update(usbdev, bssid); 2101 * received 'num_items' and walking through full bssid buffer instead.
2102 */
2103 while (check_bssid_list_item(bssid, bssid_len, buf, len)) {
2104 if (rndis_bss_info_update(usbdev, bssid) && match_bssid &&
2105 matched) {
2106 if (compare_ether_addr(bssid->mac, match_bssid))
2107 *matched = true;
2108 }
2021 2109
2022 bssid = (void *)bssid + bssid_len; 2110 real_count++;
2023 bssid_len = le32_to_cpu(bssid->length); 2111 bssid = next_bssid_list_item(bssid, &bssid_len, buf, len);
2024 count--;
2025 } 2112 }
2026 2113
2114 netdev_dbg(usbdev->net, "%s(): num_items from device: %d, really found:"
2115 " %d\n", __func__, count, real_count);
2116
2027out: 2117out:
2028 kfree(buf); 2118 kfree(buf);
2029 return ret; 2119 return ret;
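
The reworked rndis_check_bssid_list() above no longer trusts the num_items value reported by the device; it walks the returned buffer record by record and validates every variable-length entry against the buffer bounds before using it. A self-contained sketch of that walk follows; struct record is a simplified stand-in for ndis_80211_bssid_ex and keeps only the length field that drives the iteration.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for ndis_80211_bssid_ex; only the length matters. */
struct record {
        uint32_t length;        /* total size of this record in bytes */
        uint8_t mac[6];
};

/* Step to the next record if its length field still fits in the buffer. */
static struct record *next_record(struct record *rec, size_t *rec_len,
                                  void *buf, size_t len)
{
        char *buf_end = (char *)buf + len;
        char *rec_end = (char *)rec + *rec_len;

        if (buf_end - rec_end < (ptrdiff_t)sizeof(rec->length)) {
                *rec_len = 0;
                return NULL;
        }
        rec = (struct record *)rec_end;
        *rec_len = rec->length;
        return rec;
}

/* A record may only be used if it lies entirely inside the buffer. */
static int record_ok(const struct record *rec, size_t rec_len,
                     void *buf, size_t len)
{
        const char *buf_end = (const char *)buf + len;

        if (!rec || rec_len < sizeof(*rec) || rec_len > len)
                return 0;
        return (const char *)rec >= (const char *)buf &&
               (const char *)rec + rec_len <= buf_end;
}

int main(void)
{
        /* Two back-to-back records, as a device might return them. */
        struct record recs[2] = {
                { .length = sizeof(struct record) },
                { .length = sizeof(struct record) },
        };
        void *buf = recs;
        size_t len = sizeof(recs), rec_len = 0, count = 0;
        struct record *rec;

        rec = next_record((struct record *)buf, &rec_len, buf, len);
        while (record_ok(rec, rec_len, buf, len)) {
                count++;        /* the driver calls rndis_bss_info_update() here */
                rec = next_record(rec, &rec_len, buf, len);
        }
        printf("really found: %zu records\n", count);
        return 0;
}

As in the driver, iteration stops as soon as the next record's length field would no longer fit inside the buffer, so a wrong record count from the device cannot cause an out-of-bounds read.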
@@ -2041,7 +2131,7 @@ static void rndis_get_scan_results(struct work_struct *work)
2041 if (!priv->scan_request) 2131 if (!priv->scan_request)
2042 return; 2132 return;
2043 2133
2044 ret = rndis_check_bssid_list(usbdev); 2134 ret = rndis_check_bssid_list(usbdev, NULL, NULL);
2045 2135
2046 cfg80211_scan_done(priv->scan_request, ret < 0); 2136 cfg80211_scan_done(priv->scan_request, ret < 0);
2047 2137
@@ -2355,7 +2445,7 @@ static int rndis_del_key(struct wiphy *wiphy, struct net_device *netdev,
2355} 2445}
2356 2446
2357static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev, 2447static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
2358 u8 key_index) 2448 u8 key_index, bool unicast, bool multicast)
2359{ 2449{
2360 struct rndis_wlan_private *priv = wiphy_priv(wiphy); 2450 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2361 struct usbnet *usbdev = priv->usbdev; 2451 struct usbnet *usbdev = priv->usbdev;
@@ -2365,6 +2455,9 @@ static int rndis_set_default_key(struct wiphy *wiphy, struct net_device *netdev,
2365 2455
2366 priv->encr_tx_key_index = key_index; 2456 priv->encr_tx_key_index = key_index;
2367 2457
2458 if (is_wpa_key(priv, key_index))
2459 return 0;
2460
2368 key = priv->encr_keys[key_index]; 2461 key = priv->encr_keys[key_index];
2369 2462
2370 return add_wep_key(usbdev, key.material, key.len, key_index); 2463 return add_wep_key(usbdev, key.material, key.len, key_index);
@@ -2495,6 +2588,136 @@ static int rndis_flush_pmksa(struct wiphy *wiphy, struct net_device *netdev)
2495 return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid)); 2588 return rndis_set_oid(usbdev, OID_802_11_PMKID, &pmkid, sizeof(pmkid));
2496} 2589}
2497 2590
2591static int rndis_set_power_mgmt(struct wiphy *wiphy, struct net_device *dev,
2592 bool enabled, int timeout)
2593{
2594 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2595 struct usbnet *usbdev = priv->usbdev;
2596 int power_mode;
2597 __le32 mode;
2598 int ret;
2599
2600 netdev_dbg(usbdev->net, "%s(): %s, %d\n", __func__,
2601 enabled ? "enabled" : "disabled",
2602 timeout);
2603
2604 if (enabled)
2605 power_mode = NDIS_80211_POWER_MODE_FAST_PSP;
2606 else
2607 power_mode = NDIS_80211_POWER_MODE_CAM;
2608
2609 if (power_mode == priv->power_mode)
2610 return 0;
2611
2612 priv->power_mode = power_mode;
2613
2614 mode = cpu_to_le32(power_mode);
2615 ret = rndis_set_oid(usbdev, OID_802_11_POWER_MODE, &mode, sizeof(mode));
2616
2617 netdev_dbg(usbdev->net, "%s(): OID_802_11_POWER_MODE -> %d\n",
2618 __func__, ret);
2619
2620 return ret;
2621}
2622
2623static int rndis_set_cqm_rssi_config(struct wiphy *wiphy,
2624 struct net_device *dev,
2625 s32 rssi_thold, u32 rssi_hyst)
2626{
2627 struct rndis_wlan_private *priv = wiphy_priv(wiphy);
2628
2629 priv->cqm_rssi_thold = rssi_thold;
2630 priv->cqm_rssi_hyst = rssi_hyst;
2631 priv->last_cqm_event_rssi = 0;
2632
2633 return 0;
2634}
2635
2636static void rndis_wlan_craft_connected_bss(struct usbnet *usbdev, u8 *bssid,
2637 struct ndis_80211_assoc_info *info)
2638{
2639 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2640 struct ieee80211_channel *channel;
2641 struct ndis_80211_conf config;
2642 struct ndis_80211_ssid ssid;
2643 s32 signal;
2644 u64 timestamp;
2645 u16 capability;
2646 u16 beacon_interval;
2647 __le32 rssi;
2648 u8 ie_buf[34];
2649 int len, ret, ie_len;
2650
2651 /* Get signal quality, in case of error use rssi=0 and ignore error. */
2652 len = sizeof(rssi);
2653 rssi = 0;
2654 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
2655 signal = level_to_qual(le32_to_cpu(rssi));
2656
2657 netdev_dbg(usbdev->net, "%s(): OID_802_11_RSSI -> %d, "
2658 "rssi:%d, qual: %d\n", __func__, ret, le32_to_cpu(rssi),
2659 level_to_qual(le32_to_cpu(rssi)));
2660
2661 /* Get AP capabilities */
2662 if (info) {
2663 capability = le16_to_cpu(info->resp_ie.capa);
2664 } else {
2665 /* Set atleast ESS/IBSS capability */
2666 capability = (priv->infra_mode == NDIS_80211_INFRA_INFRA) ?
2667 WLAN_CAPABILITY_ESS : WLAN_CAPABILITY_IBSS;
2668 }
2669
2670 /* Get channel and beacon interval */
2671 len = sizeof(config);
2672 ret = rndis_query_oid(usbdev, OID_802_11_CONFIGURATION, &config, &len);
2673 netdev_dbg(usbdev->net, "%s(): OID_802_11_CONFIGURATION -> %d\n",
2674 __func__, ret);
2675 if (ret >= 0) {
2676 beacon_interval = le16_to_cpu(config.beacon_period);
2677 channel = ieee80211_get_channel(priv->wdev.wiphy,
2678 KHZ_TO_MHZ(le32_to_cpu(config.ds_config)));
2679 if (!channel) {
2680 netdev_warn(usbdev->net, "%s(): could not get channel."
2681 "\n", __func__);
2682 return;
2683 }
2684 } else {
2685 netdev_warn(usbdev->net, "%s(): could not get configuration.\n",
2686 __func__);
2687 return;
2688 }
2689
2690 /* Get SSID, in case of error, use zero length SSID and ignore error. */
2691 len = sizeof(ssid);
2692 memset(&ssid, 0, sizeof(ssid));
2693 ret = rndis_query_oid(usbdev, OID_802_11_SSID, &ssid, &len);
2694 netdev_dbg(usbdev->net, "%s(): OID_802_11_SSID -> %d, len: %d, ssid: "
2695 "'%.32s'\n", __func__, ret,
2696 le32_to_cpu(ssid.length), ssid.essid);
2697
2698 if (le32_to_cpu(ssid.length) > 32)
2699 ssid.length = cpu_to_le32(32);
2700
2701 ie_buf[0] = WLAN_EID_SSID;
2702 ie_buf[1] = le32_to_cpu(ssid.length);
2703 memcpy(&ie_buf[2], ssid.essid, le32_to_cpu(ssid.length));
2704
2705 ie_len = le32_to_cpu(ssid.length) + 2;
2706
2707 /* no tsf */
2708 timestamp = 0;
2709
2710 netdev_dbg(usbdev->net, "%s(): channel:%d(freq), bssid:[%pM], tsf:%d, "
2711 "capa:%x, beacon int:%d, resp_ie(len:%d, essid:'%.32s'), "
2712 "signal:%d\n", __func__, (channel ? channel->center_freq : -1),
2713 bssid, (u32)timestamp, capability, beacon_interval, ie_len,
2714 ssid.essid, signal);
2715
2716 cfg80211_inform_bss(priv->wdev.wiphy, channel, bssid,
2717 timestamp, capability, beacon_interval, ie_buf, ie_len,
2718 signal, GFP_KERNEL);
2719}
2720
2498/* 2721/*
2499 * workers, indication handlers, device poller 2722 * workers, indication handlers, device poller
2500 */ 2723 */
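
rndis_wlan_craft_connected_bss() above hands cfg80211 a synthetic BSS entry whose only information element is the SSID: a two-byte header (element ID, length) followed by at most 32 SSID bytes. A small sketch of that IE construction; the element ID value 0 for the SSID element is the usual 802.11 value and is assumed here rather than taken from the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EID_SSID        0       /* 802.11 element ID for the SSID element */
#define MAX_SSID_LEN    32

/*
 * Build a single SSID information element: one byte element ID, one
 * byte length, then the (possibly truncated) SSID. Returns the total
 * length of the element.
 */
static size_t build_ssid_ie(uint8_t *ie_buf, const uint8_t *ssid,
                            size_t ssid_len)
{
        if (ssid_len > MAX_SSID_LEN)
                ssid_len = MAX_SSID_LEN;

        ie_buf[0] = EID_SSID;
        ie_buf[1] = (uint8_t)ssid_len;
        memcpy(&ie_buf[2], ssid, ssid_len);

        return ssid_len + 2;
}

int main(void)
{
        uint8_t ie_buf[2 + MAX_SSID_LEN];
        const char *ssid = "example-net";
        size_t ie_len;

        ie_len = build_ssid_ie(ie_buf, (const uint8_t *)ssid, strlen(ssid));
        printf("ie length: %zu (id %d, ssid length %d)\n",
               ie_len, ie_buf[0], ie_buf[1]);
        return 0;
}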
@@ -2507,6 +2730,7 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2507 u8 *req_ie, *resp_ie; 2730 u8 *req_ie, *resp_ie;
2508 int ret, offset; 2731 int ret, offset;
2509 bool roamed = false; 2732 bool roamed = false;
2733 bool match_bss;
2510 2734
2511 if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) { 2735 if (priv->infra_mode == NDIS_80211_INFRA_INFRA && priv->connected) {
2512 /* received media connect indication while connected, either 2736 /* received media connect indication while connected, either
@@ -2558,6 +2782,13 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2558 resp_ie_len = 2782 resp_ie_len =
2559 CONTROL_BUFFER_SIZE - offset; 2783 CONTROL_BUFFER_SIZE - offset;
2560 } 2784 }
2785 } else {
2786 /* Since rndis_wlan_craft_connected_bss() might use info
2787 * later and expects info to contain valid data if
2788 * non-null, free info and set NULL here.
2789 */
2790 kfree(info);
2791 info = NULL;
2561 } 2792 }
2562 } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC)) 2793 } else if (WARN_ON(priv->infra_mode != NDIS_80211_INFRA_ADHOC))
2563 return; 2794 return;
@@ -2569,13 +2800,26 @@ static void rndis_wlan_do_link_up_work(struct usbnet *usbdev)
2569 netdev_dbg(usbdev->net, "link up work: [%pM]%s\n", 2800 netdev_dbg(usbdev->net, "link up work: [%pM]%s\n",
2570 bssid, roamed ? " roamed" : ""); 2801 bssid, roamed ? " roamed" : "");
2571 2802
2572 /* Internal bss list in device always contains at least the currently 2803 /* Internal bss list in device should contain at least the currently
2573 * connected bss and we can get it to cfg80211 with 2804 * connected bss and we can get it to cfg80211 with
2574 * rndis_check_bssid_list(). 2805 * rndis_check_bssid_list().
2575 * NOTE: This is true for Broadcom chip, but not mentioned in RNDIS 2806 *
2576 * spec. 2807 * NDIS spec says: "If the device is associated, but the associated
2808 * BSSID is not in its BSSID scan list, then the driver must add an
2809 * entry for the BSSID at the end of the data that it returns in
2810 * response to query of OID_802_11_BSSID_LIST."
2811 *
2812 * NOTE: Seems to be true for BCM4320b variant, but not BCM4320a.
2577 */ 2813 */
2578 rndis_check_bssid_list(usbdev); 2814 match_bss = false;
2815 rndis_check_bssid_list(usbdev, bssid, &match_bss);
2816
2817 if (!is_zero_ether_addr(bssid) && !match_bss) {
2818 /* Couldn't get bss from device, we need to manually craft bss
2819 * for cfg80211.
2820 */
2821 rndis_wlan_craft_connected_bss(usbdev, bssid, info);
2822 }
2579 2823
2580 if (priv->infra_mode == NDIS_80211_INFRA_INFRA) { 2824 if (priv->infra_mode == NDIS_80211_INFRA_INFRA) {
2581 if (!roamed) 2825 if (!roamed)
@@ -2918,6 +3162,32 @@ static int rndis_wlan_get_caps(struct usbnet *usbdev, struct wiphy *wiphy)
2918 return retval; 3162 return retval;
2919} 3163}
2920 3164
3165static void rndis_do_cqm(struct usbnet *usbdev, s32 rssi)
3166{
3167 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
3168 enum nl80211_cqm_rssi_threshold_event event;
3169 int thold, hyst, last_event;
3170
3171 if (priv->cqm_rssi_thold >= 0 || rssi >= 0)
3172 return;
3173 if (priv->infra_mode != NDIS_80211_INFRA_INFRA)
3174 return;
3175
3176 last_event = priv->last_cqm_event_rssi;
3177 thold = priv->cqm_rssi_thold;
3178 hyst = priv->cqm_rssi_hyst;
3179
3180 if (rssi < thold && (last_event == 0 || rssi < last_event - hyst))
3181 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
3182 else if (rssi > thold && (last_event == 0 || rssi > last_event + hyst))
3183 event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
3184 else
3185 return;
3186
3187 priv->last_cqm_event_rssi = rssi;
3188 cfg80211_cqm_rssi_notify(usbdev->net, event, GFP_KERNEL);
3189}
3190
2921#define DEVICE_POLLER_JIFFIES (HZ) 3191#define DEVICE_POLLER_JIFFIES (HZ)
2922static void rndis_device_poller(struct work_struct *work) 3192static void rndis_device_poller(struct work_struct *work)
2923{ 3193{
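
rndis_do_cqm() above raises a connection-quality event only when the RSSI crosses the configured threshold by more than the hysteresis since the last reported event, and it ignores non-negative (implausible) thresholds and samples outright. The decision itself is a pure function, sketched below with a local enum standing in for the nl80211 event constants.

#include <stdio.h>

/* Local stand-ins for the NL80211_CQM_RSSI_THRESHOLD_EVENT_* values. */
enum cqm_event { CQM_NONE, CQM_RSSI_LOW, CQM_RSSI_HIGH };

/*
 * Decide whether a new RSSI sample should raise an event. last_event is
 * the RSSI at which the previous event fired, 0 meaning "none yet".
 * Valid thresholds and samples are negative dBm values.
 */
static enum cqm_event cqm_decide(int rssi, int thold, int hyst, int last_event)
{
        if (thold >= 0 || rssi >= 0)
                return CQM_NONE;

        if (rssi < thold && (last_event == 0 || rssi < last_event - hyst))
                return CQM_RSSI_LOW;
        if (rssi > thold && (last_event == 0 || rssi > last_event + hyst))
                return CQM_RSSI_HIGH;
        return CQM_NONE;
}

int main(void)
{
        /* Threshold -70 dBm with 4 dB of hysteresis. */
        printf("%d\n", cqm_decide(-75, -70, 4, 0));     /* 1: LOW fires */
        printf("%d\n", cqm_decide(-72, -70, 4, -75));   /* 0: inside hysteresis */
        printf("%d\n", cqm_decide(-65, -70, 4, -75));   /* 2: HIGH fires */
        return 0;
}

The second call stays silent because the sample is still within the hysteresis window around the previous event, which is exactly the filtering the poller relies on.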
@@ -2934,13 +3204,28 @@ static void rndis_device_poller(struct work_struct *work)
2934 * also polls device with rndis_command() and catches for media link 3204 * also polls device with rndis_command() and catches for media link
2935 * indications. 3205 * indications.
2936 */ 3206 */
2937 if (!is_associated(usbdev)) 3207 if (!is_associated(usbdev)) {
3208 /* Workaround bad scanning in BCM4320a devices with active
3209 * background scanning when not associated.
3210 */
3211 if (priv->device_type == RNDIS_BCM4320A && priv->radio_on &&
3212 !priv->scan_request) {
3213 /* Get previous scan results */
3214 rndis_check_bssid_list(usbdev, NULL, NULL);
3215
3216 /* Initiate new scan */
3217 rndis_start_bssid_list_scan(usbdev);
3218 }
3219
2938 goto end; 3220 goto end;
3221 }
2939 3222
2940 len = sizeof(rssi); 3223 len = sizeof(rssi);
2941 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len); 3224 ret = rndis_query_oid(usbdev, OID_802_11_RSSI, &rssi, &len);
2942 if (ret == 0) 3225 if (ret == 0) {
2943 priv->last_qual = level_to_qual(le32_to_cpu(rssi)); 3226 priv->last_qual = level_to_qual(le32_to_cpu(rssi));
3227 rndis_do_cqm(usbdev, le32_to_cpu(rssi));
3228 }
2944 3229
2945 netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n", 3230 netdev_dbg(usbdev->net, "dev-poller: OID_802_11_RSSI -> %d, rssi:%d, qual: %d\n",
2946 ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi))); 3231 ret, le32_to_cpu(rssi), level_to_qual(le32_to_cpu(rssi)));
@@ -2992,10 +3277,12 @@ end:
2992/* 3277/*
2993 * driver/device initialization 3278 * driver/device initialization
2994 */ 3279 */
2995static void rndis_copy_module_params(struct usbnet *usbdev) 3280static void rndis_copy_module_params(struct usbnet *usbdev, int device_type)
2996{ 3281{
2997 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 3282 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
2998 3283
3284 priv->device_type = device_type;
3285
2999 priv->param_country[0] = modparam_country[0]; 3286 priv->param_country[0] = modparam_country[0];
3000 priv->param_country[1] = modparam_country[1]; 3287 priv->param_country[1] = modparam_country[1];
3001 priv->param_country[2] = 0; 3288 priv->param_country[2] = 0;
@@ -3038,12 +3325,25 @@ static void rndis_copy_module_params(struct usbnet *usbdev)
3038 priv->param_workaround_interval = modparam_workaround_interval; 3325 priv->param_workaround_interval = modparam_workaround_interval;
3039} 3326}
3040 3327
3328static int unknown_early_init(struct usbnet *usbdev)
3329{
3330 /* copy module parameters for unknown so that iwconfig reports txpower
3331 * and workaround parameter is copied to private structure correctly.
3332 */
3333 rndis_copy_module_params(usbdev, RNDIS_UNKNOWN);
3334
3335 /* This is unknown device, so do not try set configuration parameters.
3336 */
3337
3338 return 0;
3339}
3340
3041static int bcm4320a_early_init(struct usbnet *usbdev) 3341static int bcm4320a_early_init(struct usbnet *usbdev)
3042{ 3342{
3043 /* copy module parameters for bcm4320a so that iwconfig reports txpower 3343 /* copy module parameters for bcm4320a so that iwconfig reports txpower
3044 * and workaround parameter is copied to private structure correctly. 3344 * and workaround parameter is copied to private structure correctly.
3045 */ 3345 */
3046 rndis_copy_module_params(usbdev); 3346 rndis_copy_module_params(usbdev, RNDIS_BCM4320A);
3047 3347
3048 /* bcm4320a doesn't handle configuration parameters well. Try 3348 /* bcm4320a doesn't handle configuration parameters well. Try
3049 * set any and you get partially zeroed mac and broken device. 3349 * set any and you get partially zeroed mac and broken device.
@@ -3057,7 +3357,7 @@ static int bcm4320b_early_init(struct usbnet *usbdev)
3057 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev); 3357 struct rndis_wlan_private *priv = get_rndis_wlan_priv(usbdev);
3058 char buf[8]; 3358 char buf[8];
3059 3359
3060 rndis_copy_module_params(usbdev); 3360 rndis_copy_module_params(usbdev, RNDIS_BCM4320B);
3061 3361
3062 /* Early initialization settings, setting these won't have effect 3362 /* Early initialization settings, setting these won't have effect
3063 * if called after generic_rndis_bind(). 3363 * if called after generic_rndis_bind().
@@ -3187,13 +3487,15 @@ static int rndis_wlan_bind(struct usbnet *usbdev, struct usb_interface *intf)
3187 3487
3188 set_default_iw_params(usbdev); 3488 set_default_iw_params(usbdev);
3189 3489
3490 priv->power_mode = -1;
3491
3190 /* set default rts/frag */ 3492 /* set default rts/frag */
3191 rndis_set_wiphy_params(wiphy, 3493 rndis_set_wiphy_params(wiphy,
3192 WIPHY_PARAM_FRAG_THRESHOLD | WIPHY_PARAM_RTS_THRESHOLD); 3494 WIPHY_PARAM_FRAG_THRESHOLD | WIPHY_PARAM_RTS_THRESHOLD);
3193 3495
3194 /* turn radio on */ 3496 /* turn radio off on init */
3195 priv->radio_on = true; 3497 priv->radio_on = false;
3196 disassociate(usbdev, true); 3498 disassociate(usbdev, false);
3197 netif_carrier_off(usbdev->net); 3499 netif_carrier_off(usbdev->net);
3198 3500
3199 return 0; 3501 return 0;
@@ -3320,7 +3622,7 @@ static const struct driver_info rndis_wlan_info = {
3320 .tx_fixup = rndis_tx_fixup, 3622 .tx_fixup = rndis_tx_fixup,
3321 .reset = rndis_wlan_reset, 3623 .reset = rndis_wlan_reset,
3322 .stop = rndis_wlan_stop, 3624 .stop = rndis_wlan_stop,
3323 .early_init = bcm4320a_early_init, 3625 .early_init = unknown_early_init,
3324 .indication = rndis_wlan_indication, 3626 .indication = rndis_wlan_indication,
3325}; 3627};
3326 3628
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index 4396d4b9bfb9..6f383cd684b0 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -53,51 +53,41 @@ config RT61PCI
53 53
54 When compiled as a module, this driver will be called rt61pci. 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2800PCI_PCI
57 boolean
58 depends on PCI
59 default y
60
61config RT2800PCI_SOC
62 boolean
63 depends on RALINK_RT288X || RALINK_RT305X
64 default y
65
66config RT2800PCI 56config RT2800PCI
67 tristate "Ralink rt28xx/rt30xx/rt35xx (PCI/PCIe/PCMCIA) support (EXPERIMENTAL)" 57 tristate "Ralink rt27xx/rt28xx/rt30xx (PCI/PCIe/PCMCIA) support"
68 depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL 58 depends on PCI || RALINK_RT288X || RALINK_RT305X
69 select RT2800_LIB 59 select RT2800_LIB
70 select RT2X00_LIB_PCI if RT2800PCI_PCI 60 select RT2X00_LIB_PCI if PCI
71 select RT2X00_LIB_SOC if RT2800PCI_SOC 61 select RT2X00_LIB_SOC if RALINK_RT288X || RALINK_RT305X
72 select RT2X00_LIB_HT 62 select RT2X00_LIB_HT
73 select RT2X00_LIB_FIRMWARE 63 select RT2X00_LIB_FIRMWARE
74 select RT2X00_LIB_CRYPTO 64 select RT2X00_LIB_CRYPTO
75 select CRC_CCITT 65 select CRC_CCITT
76 select EEPROM_93CX6 66 select EEPROM_93CX6
77 ---help--- 67 ---help---
78 This adds support for rt2800/rt3000/rt3500 wireless chipset family. 68 This adds support for rt27xx/rt28xx/rt30xx wireless chipset family.
79 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052 69 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890, RT3052,
80 70 RT3090, RT3091 & RT3092
81 This driver is non-functional at the moment and is intended for
82 developers.
83 71
84 When compiled as a module, this driver will be called "rt2800pci.ko". 72 When compiled as a module, this driver will be called "rt2800pci.ko".
85 73
86if RT2800PCI 74if RT2800PCI
87 75
88config RT2800PCI_RT30XX 76config RT2800PCI_RT33XX
89 bool "rt2800pci - Include support for rt30xx (PCI/PCIe/PCMCIA) devices" 77 bool "rt2800pci - Include support for rt33xx devices (EXPERIMENTAL)"
90 default y 78 depends on EXPERIMENTAL
79 default n
91 ---help--- 80 ---help---
92 This adds support for rt30xx wireless chipset family to the 81 This adds support for rt33xx wireless chipset family to the
93 rt2800pci driver. 82 rt2800pci driver.
94 Supported chips: RT3090, RT3091 & RT3092 83 Supported chips: RT3390
95 84
96 Support for these devices is non-functional at the moment and is 85 Support for these devices is non-functional at the moment and is
97 intended for testers and developers. 86 intended for testers and developers.
98 87
99config RT2800PCI_RT35XX 88config RT2800PCI_RT35XX
100 bool "rt2800pci - Include support for rt35xx (PCI/PCIe/PCMCIA) devices" 89 bool "rt2800pci - Include support for rt35xx devices (EXPERIMENTAL)"
90 depends on EXPERIMENTAL
101 default n 91 default n
102 ---help--- 92 ---help---
103 This adds support for rt35xx wireless chipset family to the 93 This adds support for rt35xx wireless chipset family to the
@@ -134,8 +124,8 @@ config RT73USB
134 When compiled as a module, this driver will be called rt73usb. 124 When compiled as a module, this driver will be called rt73usb.
135 125
136config RT2800USB 126config RT2800USB
137 tristate "Ralink rt2800 (USB) support (EXPERIMENTAL)" 127 tristate "Ralink rt27xx/rt28xx/rt30xx (USB) support"
138 depends on USB && EXPERIMENTAL 128 depends on USB
139 select RT2800_LIB 129 select RT2800_LIB
140 select RT2X00_LIB_USB 130 select RT2X00_LIB_USB
141 select RT2X00_LIB_HT 131 select RT2X00_LIB_HT
@@ -143,30 +133,28 @@ config RT2800USB
143 select RT2X00_LIB_CRYPTO 133 select RT2X00_LIB_CRYPTO
144 select CRC_CCITT 134 select CRC_CCITT
145 ---help--- 135 ---help---
146 This adds experimental support for rt2800 wireless chipset family. 136 This adds support for rt27xx/rt28xx/rt30xx wireless chipset family.
147 Supported chips: RT2770, RT2870 & RT3070. 137 Supported chips: RT2770, RT2870 & RT3070, RT3071 & RT3072
148
149 Known issues:
150 - support for RT2870 chips doesn't work with 802.11n APs yet
151 - support for RT3070 chips is non-functional at the moment
152 138
153 When compiled as a module, this driver will be called "rt2800usb.ko". 139 When compiled as a module, this driver will be called "rt2800usb.ko".
154 140
155if RT2800USB 141if RT2800USB
156 142
157config RT2800USB_RT30XX 143config RT2800USB_RT33XX
158 bool "rt2800usb - Include support for rt30xx (USB) devices" 144 bool "rt2800usb - Include support for rt33xx devices (EXPERIMENTAL)"
159 default y 145 depends on EXPERIMENTAL
146 default n
160 ---help--- 147 ---help---
161 This adds support for rt30xx wireless chipset family to the 148 This adds support for rt33xx wireless chipset family to the
162 rt2800usb driver. 149 rt2800usb driver.
163 Supported chips: RT3070, RT3071 & RT3072 150 Supported chips: RT3370
164 151
165 Support for these devices is non-functional at the moment and is 152 Support for these devices is non-functional at the moment and is
166 intended for testers and developers. 153 intended for testers and developers.
167 154
168config RT2800USB_RT35XX 155config RT2800USB_RT35XX
169 bool "rt2800usb - Include support for rt35xx (USB) devices" 156 bool "rt2800usb - Include support for rt35xx devices (EXPERIMENTAL)"
157 depends on EXPERIMENTAL
170 default n 158 default n
171 ---help--- 159 ---help---
172 This adds support for rt35xx wireless chipset family to the 160 This adds support for rt35xx wireless chipset family to the
@@ -180,9 +168,9 @@ config RT2800USB_UNKNOWN
180 bool "rt2800usb - Include support for unknown (USB) devices" 168 bool "rt2800usb - Include support for unknown (USB) devices"
181 default n 169 default n
182 ---help--- 170 ---help---
183 This adds support for rt2800 family devices that are known to 171 This adds support for rt2800usb devices that are known to
184 have a rt2800 family chipset, but for which the exact chipset 172 have a rt28xx family compatible chipset, but for which the exact
185 is unknown. 173 chipset is unknown.
186 174
187 Support status for these devices is unknown, and enabling these 175 Support status for these devices is unknown, and enabling these
188 devices may or may not work. 176 devices may or may not work.
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.c b/drivers/net/wireless/rt2x00/rt2400pci.c
index 4f420a9ec5dc..54ca49ad3472 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.c
+++ b/drivers/net/wireless/rt2x00/rt2400pci.c
@@ -633,6 +633,88 @@ static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev,
633} 633}
634 634
635/* 635/*
636 * Queue handlers.
637 */
638static void rt2400pci_start_queue(struct data_queue *queue)
639{
640 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
641 u32 reg;
642
643 switch (queue->qid) {
644 case QID_RX:
645 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
646 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
647 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
648 break;
649 case QID_BEACON:
650 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
651 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
652 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
653 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
654 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
655 break;
656 default:
657 break;
658 }
659}
660
661static void rt2400pci_kick_queue(struct data_queue *queue)
662{
663 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
664 u32 reg;
665
666 switch (queue->qid) {
667 case QID_AC_VO:
668 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
669 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1);
670 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
671 break;
672 case QID_AC_VI:
673 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
674 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
675 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
676 break;
677 case QID_ATIM:
678 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
679 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1);
680 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
681 break;
682 default:
683 break;
684 }
685}
686
687static void rt2400pci_stop_queue(struct data_queue *queue)
688{
689 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
690 u32 reg;
691
692 switch (queue->qid) {
693 case QID_AC_VO:
694 case QID_AC_VI:
695 case QID_ATIM:
696 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
697 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
698 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
699 break;
700 case QID_RX:
701 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
702 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
703 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
704 break;
705 case QID_BEACON:
706 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
707 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
708 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
709 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
710 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
711 break;
712 default:
713 break;
714 }
715}
716
717/*
636 * Initialization functions. 718 * Initialization functions.
637 */ 719 */
638static bool rt2400pci_get_entry_state(struct queue_entry *entry) 720static bool rt2400pci_get_entry_state(struct queue_entry *entry)
@@ -878,18 +960,6 @@ static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev)
878/* 960/*
879 * Device state switch handlers. 961 * Device state switch handlers.
880 */ 962 */
881static void rt2400pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
882 enum dev_state state)
883{
884 u32 reg;
885
886 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
887 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX,
888 (state == STATE_RADIO_RX_OFF) ||
889 (state == STATE_RADIO_RX_OFF_LINK));
890 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
891}
892
893static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 963static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
894 enum dev_state state) 964 enum dev_state state)
895{ 965{
@@ -988,12 +1058,6 @@ static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev,
988 case STATE_RADIO_OFF: 1058 case STATE_RADIO_OFF:
989 rt2400pci_disable_radio(rt2x00dev); 1059 rt2400pci_disable_radio(rt2x00dev);
990 break; 1060 break;
991 case STATE_RADIO_RX_ON:
992 case STATE_RADIO_RX_ON_LINK:
993 case STATE_RADIO_RX_OFF:
994 case STATE_RADIO_RX_OFF_LINK:
995 rt2400pci_toggle_rx(rt2x00dev, state);
996 break;
997 case STATE_RADIO_IRQ_ON: 1061 case STATE_RADIO_IRQ_ON:
998 case STATE_RADIO_IRQ_ON_ISR: 1062 case STATE_RADIO_IRQ_ON_ISR:
999 case STATE_RADIO_IRQ_OFF: 1063 case STATE_RADIO_IRQ_OFF:
@@ -1125,32 +1189,6 @@ static void rt2400pci_write_beacon(struct queue_entry *entry,
1125 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1189 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1126} 1190}
1127 1191
1128static void rt2400pci_kick_tx_queue(struct data_queue *queue)
1129{
1130 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1131 u32 reg;
1132
1133 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1134 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
1135 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
1136 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
1137 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1138}
1139
1140static void rt2400pci_kill_tx_queue(struct data_queue *queue)
1141{
1142 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1143 u32 reg;
1144
1145 if (queue->qid == QID_BEACON) {
1146 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1147 } else {
1148 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1149 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
1150 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1151 }
1152}
1153
1154/* 1192/*
1155 * RX control handlers 1193 * RX control handlers
1156 */ 1194 */
@@ -1284,13 +1322,13 @@ static irqreturn_t rt2400pci_interrupt_thread(int irq, void *dev_instance)
1284 * 4 - Priority ring transmit done interrupt. 1322 * 4 - Priority ring transmit done interrupt.
1285 */ 1323 */
1286 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1324 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
1287 rt2400pci_txdone(rt2x00dev, QID_AC_BE); 1325 rt2400pci_txdone(rt2x00dev, QID_AC_VO);
1288 1326
1289 /* 1327 /*
1290 * 5 - Tx ring transmit done interrupt. 1328 * 5 - Tx ring transmit done interrupt.
1291 */ 1329 */
1292 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1330 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
1293 rt2400pci_txdone(rt2x00dev, QID_AC_BK); 1331 rt2400pci_txdone(rt2x00dev, QID_AC_VI);
1294 1332
1295 /* Enable interrupts again. */ 1333 /* Enable interrupts again. */
1296 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1334 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
@@ -1612,6 +1650,7 @@ static const struct ieee80211_ops rt2400pci_mac80211_ops = {
1612 .get_tsf = rt2400pci_get_tsf, 1650 .get_tsf = rt2400pci_get_tsf,
1613 .tx_last_beacon = rt2400pci_tx_last_beacon, 1651 .tx_last_beacon = rt2400pci_tx_last_beacon,
1614 .rfkill_poll = rt2x00mac_rfkill_poll, 1652 .rfkill_poll = rt2x00mac_rfkill_poll,
1653 .flush = rt2x00mac_flush,
1615}; 1654};
1616 1655
1617static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { 1656static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
@@ -1627,10 +1666,11 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1627 .link_stats = rt2400pci_link_stats, 1666 .link_stats = rt2400pci_link_stats,
1628 .reset_tuner = rt2400pci_reset_tuner, 1667 .reset_tuner = rt2400pci_reset_tuner,
1629 .link_tuner = rt2400pci_link_tuner, 1668 .link_tuner = rt2400pci_link_tuner,
1669 .start_queue = rt2400pci_start_queue,
1670 .kick_queue = rt2400pci_kick_queue,
1671 .stop_queue = rt2400pci_stop_queue,
1630 .write_tx_desc = rt2400pci_write_tx_desc, 1672 .write_tx_desc = rt2400pci_write_tx_desc,
1631 .write_beacon = rt2400pci_write_beacon, 1673 .write_beacon = rt2400pci_write_beacon,
1632 .kick_tx_queue = rt2400pci_kick_tx_queue,
1633 .kill_tx_queue = rt2400pci_kill_tx_queue,
1634 .fill_rxdone = rt2400pci_fill_rxdone, 1674 .fill_rxdone = rt2400pci_fill_rxdone,
1635 .config_filter = rt2400pci_config_filter, 1675 .config_filter = rt2400pci_config_filter,
1636 .config_intf = rt2400pci_config_intf, 1676 .config_intf = rt2400pci_config_intf,
@@ -1640,28 +1680,28 @@ static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = {
1640}; 1680};
1641 1681
1642static const struct data_queue_desc rt2400pci_queue_rx = { 1682static const struct data_queue_desc rt2400pci_queue_rx = {
1643 .entry_num = RX_ENTRIES, 1683 .entry_num = 24,
1644 .data_size = DATA_FRAME_SIZE, 1684 .data_size = DATA_FRAME_SIZE,
1645 .desc_size = RXD_DESC_SIZE, 1685 .desc_size = RXD_DESC_SIZE,
1646 .priv_size = sizeof(struct queue_entry_priv_pci), 1686 .priv_size = sizeof(struct queue_entry_priv_pci),
1647}; 1687};
1648 1688
1649static const struct data_queue_desc rt2400pci_queue_tx = { 1689static const struct data_queue_desc rt2400pci_queue_tx = {
1650 .entry_num = TX_ENTRIES, 1690 .entry_num = 24,
1651 .data_size = DATA_FRAME_SIZE, 1691 .data_size = DATA_FRAME_SIZE,
1652 .desc_size = TXD_DESC_SIZE, 1692 .desc_size = TXD_DESC_SIZE,
1653 .priv_size = sizeof(struct queue_entry_priv_pci), 1693 .priv_size = sizeof(struct queue_entry_priv_pci),
1654}; 1694};
1655 1695
1656static const struct data_queue_desc rt2400pci_queue_bcn = { 1696static const struct data_queue_desc rt2400pci_queue_bcn = {
1657 .entry_num = BEACON_ENTRIES, 1697 .entry_num = 1,
1658 .data_size = MGMT_FRAME_SIZE, 1698 .data_size = MGMT_FRAME_SIZE,
1659 .desc_size = TXD_DESC_SIZE, 1699 .desc_size = TXD_DESC_SIZE,
1660 .priv_size = sizeof(struct queue_entry_priv_pci), 1700 .priv_size = sizeof(struct queue_entry_priv_pci),
1661}; 1701};
1662 1702
1663static const struct data_queue_desc rt2400pci_queue_atim = { 1703static const struct data_queue_desc rt2400pci_queue_atim = {
1664 .entry_num = ATIM_ENTRIES, 1704 .entry_num = 8,
1665 .data_size = DATA_FRAME_SIZE, 1705 .data_size = DATA_FRAME_SIZE,
1666 .desc_size = TXD_DESC_SIZE, 1706 .desc_size = TXD_DESC_SIZE,
1667 .priv_size = sizeof(struct queue_entry_priv_pci), 1707 .priv_size = sizeof(struct queue_entry_priv_pci),
diff --git a/drivers/net/wireless/rt2x00/rt2400pci.h b/drivers/net/wireless/rt2x00/rt2400pci.h
index c048b18f4133..d3a4a68cc439 100644
--- a/drivers/net/wireless/rt2x00/rt2400pci.h
+++ b/drivers/net/wireless/rt2x00/rt2400pci.h
@@ -809,8 +809,8 @@
809/* 809/*
810 * DMA descriptor defines. 810 * DMA descriptor defines.
811 */ 811 */
812#define TXD_DESC_SIZE ( 8 * sizeof(__le32) ) 812#define TXD_DESC_SIZE (8 * sizeof(__le32))
813#define RXD_DESC_SIZE ( 8 * sizeof(__le32) ) 813#define RXD_DESC_SIZE (8 * sizeof(__le32))
814 814
815/* 815/*
816 * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. 816 * TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
@@ -948,6 +948,6 @@
948 ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER) 948 ((__CLAMP_TX(__txpower) - MAX_TXPOWER) + MIN_TXPOWER)
949 949
950#define TXPOWER_TO_DEV(__txpower) \ 950#define TXPOWER_TO_DEV(__txpower) \
951 MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER) 951 (MAX_TXPOWER - (__CLAMP_TX(__txpower) - MIN_TXPOWER))
952 952
953#endif /* RT2400PCI_H */ 953#endif /* RT2400PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 97feb7aef809..a9ff26a27724 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -723,6 +723,88 @@ dynamic_cca_tune:
723} 723}
724 724
725/* 725/*
726 * Queue handlers.
727 */
728static void rt2500pci_start_queue(struct data_queue *queue)
729{
730 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
731 u32 reg;
732
733 switch (queue->qid) {
734 case QID_RX:
735 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
736 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
737 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
738 break;
739 case QID_BEACON:
740 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
741 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1);
742 rt2x00_set_field32(&reg, CSR14_TBCN, 1);
743 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
744 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
745 break;
746 default:
747 break;
748 }
749}
750
751static void rt2500pci_kick_queue(struct data_queue *queue)
752{
753 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
754 u32 reg;
755
756 switch (queue->qid) {
757 case QID_AC_VO:
758 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
759 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1);
760 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
761 break;
762 case QID_AC_VI:
763 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
764 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
765 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
766 break;
767 case QID_ATIM:
768 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
769 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1);
770 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
771 break;
772 default:
773 break;
774 }
775}
776
777static void rt2500pci_stop_queue(struct data_queue *queue)
778{
779 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
780 u32 reg;
781
782 switch (queue->qid) {
783 case QID_AC_VO:
784 case QID_AC_VI:
785 case QID_ATIM:
786 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
787 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
788 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
789 break;
790 case QID_RX:
791 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
792 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
793 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
794 break;
795 case QID_BEACON:
796 rt2x00pci_register_read(rt2x00dev, CSR14, &reg);
797 rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0);
798 rt2x00_set_field32(&reg, CSR14_TBCN, 0);
799 rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
800 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
801 break;
802 default:
803 break;
804 }
805}
806
807/*
726 * Initialization functions. 808 * Initialization functions.
727 */ 809 */
728static bool rt2500pci_get_entry_state(struct queue_entry *entry) 810static bool rt2500pci_get_entry_state(struct queue_entry *entry)
@@ -1033,18 +1115,6 @@ static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev)
1033/* 1115/*
1034 * Device state switch handlers. 1116 * Device state switch handlers.
1035 */ 1117 */
1036static void rt2500pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
1037 enum dev_state state)
1038{
1039 u32 reg;
1040
1041 rt2x00pci_register_read(rt2x00dev, RXCSR0, &reg);
1042 rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX,
1043 (state == STATE_RADIO_RX_OFF) ||
1044 (state == STATE_RADIO_RX_OFF_LINK));
1045 rt2x00pci_register_write(rt2x00dev, RXCSR0, reg);
1046}
1047
1048static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 1118static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1049 enum dev_state state) 1119 enum dev_state state)
1050{ 1120{
@@ -1143,12 +1213,6 @@ static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1143 case STATE_RADIO_OFF: 1213 case STATE_RADIO_OFF:
1144 rt2500pci_disable_radio(rt2x00dev); 1214 rt2500pci_disable_radio(rt2x00dev);
1145 break; 1215 break;
1146 case STATE_RADIO_RX_ON:
1147 case STATE_RADIO_RX_ON_LINK:
1148 case STATE_RADIO_RX_OFF:
1149 case STATE_RADIO_RX_OFF_LINK:
1150 rt2500pci_toggle_rx(rt2x00dev, state);
1151 break;
1152 case STATE_RADIO_IRQ_ON: 1216 case STATE_RADIO_IRQ_ON:
1153 case STATE_RADIO_IRQ_ON_ISR: 1217 case STATE_RADIO_IRQ_ON_ISR:
1154 case STATE_RADIO_IRQ_OFF: 1218 case STATE_RADIO_IRQ_OFF:
@@ -1193,9 +1257,9 @@ static void rt2500pci_write_tx_desc(struct queue_entry *entry,
1193 1257
1194 rt2x00_desc_read(txd, 2, &word); 1258 rt2x00_desc_read(txd, 2, &word);
1195 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); 1259 rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER);
1196 rt2x00_set_field32(&word, TXD_W2_AIFS, txdesc->aifs); 1260 rt2x00_set_field32(&word, TXD_W2_AIFS, entry->queue->aifs);
1197 rt2x00_set_field32(&word, TXD_W2_CWMIN, txdesc->cw_min); 1261 rt2x00_set_field32(&word, TXD_W2_CWMIN, entry->queue->cw_min);
1198 rt2x00_set_field32(&word, TXD_W2_CWMAX, txdesc->cw_max); 1262 rt2x00_set_field32(&word, TXD_W2_CWMAX, entry->queue->cw_max);
1199 rt2x00_desc_write(txd, 2, word); 1263 rt2x00_desc_write(txd, 2, word);
1200 1264
1201 rt2x00_desc_read(txd, 3, &word); 1265 rt2x00_desc_read(txd, 3, &word);
@@ -1279,32 +1343,6 @@ static void rt2500pci_write_beacon(struct queue_entry *entry,
1279 rt2x00pci_register_write(rt2x00dev, CSR14, reg); 1343 rt2x00pci_register_write(rt2x00dev, CSR14, reg);
1280} 1344}
1281 1345
1282static void rt2500pci_kick_tx_queue(struct data_queue *queue)
1283{
1284 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1285 u32 reg;
1286
1287 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1288 rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, (queue->qid == QID_AC_BE));
1289 rt2x00_set_field32(&reg, TXCSR0_KICK_TX, (queue->qid == QID_AC_BK));
1290 rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, (queue->qid == QID_ATIM));
1291 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1292}
1293
1294static void rt2500pci_kill_tx_queue(struct data_queue *queue)
1295{
1296 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1297 u32 reg;
1298
1299 if (queue->qid == QID_BEACON) {
1300 rt2x00pci_register_write(rt2x00dev, CSR14, 0);
1301 } else {
1302 rt2x00pci_register_read(rt2x00dev, TXCSR0, &reg);
1303 rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
1304 rt2x00pci_register_write(rt2x00dev, TXCSR0, reg);
1305 }
1306}
1307
1308/* 1346/*
1309 * RX control handlers 1347 * RX control handlers
1310 */ 1348 */
@@ -1417,13 +1455,13 @@ static irqreturn_t rt2500pci_interrupt_thread(int irq, void *dev_instance)
1417 * 4 - Priority ring transmit done interrupt. 1455 * 4 - Priority ring transmit done interrupt.
1418 */ 1456 */
1419 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING)) 1457 if (rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING))
1420 rt2500pci_txdone(rt2x00dev, QID_AC_BE); 1458 rt2500pci_txdone(rt2x00dev, QID_AC_VO);
1421 1459
1422 /* 1460 /*
1423 * 5 - Tx ring transmit done interrupt. 1461 * 5 - Tx ring transmit done interrupt.
1424 */ 1462 */
1425 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) 1463 if (rt2x00_get_field32(reg, CSR7_TXDONE_TXRING))
1426 rt2500pci_txdone(rt2x00dev, QID_AC_BK); 1464 rt2500pci_txdone(rt2x00dev, QID_AC_VI);
1427 1465
1428 /* Enable interrupts again. */ 1466 /* Enable interrupts again. */
1429 rt2x00dev->ops->lib->set_device_state(rt2x00dev, 1467 rt2x00dev->ops->lib->set_device_state(rt2x00dev,
@@ -1909,6 +1947,7 @@ static const struct ieee80211_ops rt2500pci_mac80211_ops = {
1909 .get_tsf = rt2500pci_get_tsf, 1947 .get_tsf = rt2500pci_get_tsf,
1910 .tx_last_beacon = rt2500pci_tx_last_beacon, 1948 .tx_last_beacon = rt2500pci_tx_last_beacon,
1911 .rfkill_poll = rt2x00mac_rfkill_poll, 1949 .rfkill_poll = rt2x00mac_rfkill_poll,
1950 .flush = rt2x00mac_flush,
1912}; 1951};
1913 1952
1914static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { 1953static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
@@ -1924,10 +1963,11 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1924 .link_stats = rt2500pci_link_stats, 1963 .link_stats = rt2500pci_link_stats,
1925 .reset_tuner = rt2500pci_reset_tuner, 1964 .reset_tuner = rt2500pci_reset_tuner,
1926 .link_tuner = rt2500pci_link_tuner, 1965 .link_tuner = rt2500pci_link_tuner,
1966 .start_queue = rt2500pci_start_queue,
1967 .kick_queue = rt2500pci_kick_queue,
1968 .stop_queue = rt2500pci_stop_queue,
1927 .write_tx_desc = rt2500pci_write_tx_desc, 1969 .write_tx_desc = rt2500pci_write_tx_desc,
1928 .write_beacon = rt2500pci_write_beacon, 1970 .write_beacon = rt2500pci_write_beacon,
1929 .kick_tx_queue = rt2500pci_kick_tx_queue,
1930 .kill_tx_queue = rt2500pci_kill_tx_queue,
1931 .fill_rxdone = rt2500pci_fill_rxdone, 1971 .fill_rxdone = rt2500pci_fill_rxdone,
1932 .config_filter = rt2500pci_config_filter, 1972 .config_filter = rt2500pci_config_filter,
1933 .config_intf = rt2500pci_config_intf, 1973 .config_intf = rt2500pci_config_intf,
@@ -1937,28 +1977,28 @@ static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = {
1937}; 1977};
1938 1978
1939static const struct data_queue_desc rt2500pci_queue_rx = { 1979static const struct data_queue_desc rt2500pci_queue_rx = {
1940 .entry_num = RX_ENTRIES, 1980 .entry_num = 32,
1941 .data_size = DATA_FRAME_SIZE, 1981 .data_size = DATA_FRAME_SIZE,
1942 .desc_size = RXD_DESC_SIZE, 1982 .desc_size = RXD_DESC_SIZE,
1943 .priv_size = sizeof(struct queue_entry_priv_pci), 1983 .priv_size = sizeof(struct queue_entry_priv_pci),
1944}; 1984};
1945 1985
1946static const struct data_queue_desc rt2500pci_queue_tx = { 1986static const struct data_queue_desc rt2500pci_queue_tx = {
1947 .entry_num = TX_ENTRIES, 1987 .entry_num = 32,
1948 .data_size = DATA_FRAME_SIZE, 1988 .data_size = DATA_FRAME_SIZE,
1949 .desc_size = TXD_DESC_SIZE, 1989 .desc_size = TXD_DESC_SIZE,
1950 .priv_size = sizeof(struct queue_entry_priv_pci), 1990 .priv_size = sizeof(struct queue_entry_priv_pci),
1951}; 1991};
1952 1992
1953static const struct data_queue_desc rt2500pci_queue_bcn = { 1993static const struct data_queue_desc rt2500pci_queue_bcn = {
1954 .entry_num = BEACON_ENTRIES, 1994 .entry_num = 1,
1955 .data_size = MGMT_FRAME_SIZE, 1995 .data_size = MGMT_FRAME_SIZE,
1956 .desc_size = TXD_DESC_SIZE, 1996 .desc_size = TXD_DESC_SIZE,
1957 .priv_size = sizeof(struct queue_entry_priv_pci), 1997 .priv_size = sizeof(struct queue_entry_priv_pci),
1958}; 1998};
1959 1999
1960static const struct data_queue_desc rt2500pci_queue_atim = { 2000static const struct data_queue_desc rt2500pci_queue_atim = {
1961 .entry_num = ATIM_ENTRIES, 2001 .entry_num = 8,
1962 .data_size = DATA_FRAME_SIZE, 2002 .data_size = DATA_FRAME_SIZE,
1963 .desc_size = TXD_DESC_SIZE, 2003 .desc_size = TXD_DESC_SIZE,
1964 .priv_size = sizeof(struct queue_entry_priv_pci), 2004 .priv_size = sizeof(struct queue_entry_priv_pci),
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.h b/drivers/net/wireless/rt2x00/rt2500pci.h
index d708031361ac..2aad7ba8a100 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.h
+++ b/drivers/net/wireless/rt2x00/rt2500pci.h
@@ -1088,8 +1088,8 @@
1088/* 1088/*
1089 * DMA descriptor defines. 1089 * DMA descriptor defines.
1090 */ 1090 */
1091#define TXD_DESC_SIZE ( 11 * sizeof(__le32) ) 1091#define TXD_DESC_SIZE (11 * sizeof(__le32))
1092#define RXD_DESC_SIZE ( 11 * sizeof(__le32) ) 1092#define RXD_DESC_SIZE (11 * sizeof(__le32))
1093 1093
1094/* 1094/*
1095 * TX descriptor format for TX, PRIO, ATIM and Beacon Ring. 1095 * TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 93e44c7f3a74..6b3b1de46792 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -39,7 +39,7 @@
39/* 39/*
40 * Allow hardware encryption to be disabled. 40 * Allow hardware encryption to be disabled.
41 */ 41 */
42static int modparam_nohwcrypt = 0; 42static int modparam_nohwcrypt;
43module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 43module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
44MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 44MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
45 45
@@ -739,6 +739,55 @@ static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
739} 739}
740 740
741/* 741/*
742 * Queue handlers.
743 */
744static void rt2500usb_start_queue(struct data_queue *queue)
745{
746 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
747 u16 reg;
748
749 switch (queue->qid) {
750 case QID_RX:
751 rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
752 rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 0);
753 rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
754 break;
755 case QID_BEACON:
756 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
757 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1);
758 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1);
759 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1);
760 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
761 break;
762 default:
763 break;
764 }
765}
766
767static void rt2500usb_stop_queue(struct data_queue *queue)
768{
769 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
770 u16 reg;
771
772 switch (queue->qid) {
773 case QID_RX:
774 rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
775 rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1);
776 rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
777 break;
778 case QID_BEACON:
779 rt2500usb_register_read(rt2x00dev, TXRX_CSR19, &reg);
780 rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0);
781 rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0);
782 rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0);
783 rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg);
784 break;
785 default:
786 break;
787 }
788}
789
790/*
742 * Initialization functions. 791 * Initialization functions.
743 */ 792 */
744static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) 793static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev)
@@ -931,18 +980,6 @@ static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev)
931/* 980/*
932 * Device state switch handlers. 981 * Device state switch handlers.
933 */ 982 */
934static void rt2500usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
935 enum dev_state state)
936{
937 u16 reg;
938
939 rt2500usb_register_read(rt2x00dev, TXRX_CSR2, &reg);
940 rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX,
941 (state == STATE_RADIO_RX_OFF) ||
942 (state == STATE_RADIO_RX_OFF_LINK));
943 rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg);
944}
945
946static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev) 983static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev)
947{ 984{
948 /* 985 /*
@@ -1018,12 +1055,6 @@ static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1018 case STATE_RADIO_OFF: 1055 case STATE_RADIO_OFF:
1019 rt2500usb_disable_radio(rt2x00dev); 1056 rt2500usb_disable_radio(rt2x00dev);
1020 break; 1057 break;
1021 case STATE_RADIO_RX_ON:
1022 case STATE_RADIO_RX_ON_LINK:
1023 case STATE_RADIO_RX_OFF:
1024 case STATE_RADIO_RX_OFF_LINK:
1025 rt2500usb_toggle_rx(rt2x00dev, state);
1026 break;
1027 case STATE_RADIO_IRQ_ON: 1058 case STATE_RADIO_IRQ_ON:
1028 case STATE_RADIO_IRQ_ON_ISR: 1059 case STATE_RADIO_IRQ_ON_ISR:
1029 case STATE_RADIO_IRQ_OFF: 1060 case STATE_RADIO_IRQ_OFF:
@@ -1081,9 +1112,9 @@ static void rt2500usb_write_tx_desc(struct queue_entry *entry,
1081 1112
1082 rt2x00_desc_read(txd, 1, &word); 1113 rt2x00_desc_read(txd, 1, &word);
1083 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); 1114 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1084 rt2x00_set_field32(&word, TXD_W1_AIFS, txdesc->aifs); 1115 rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs);
1085 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1116 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
1086 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1117 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
1087 rt2x00_desc_write(txd, 1, word); 1118 rt2x00_desc_write(txd, 1, word);
1088 1119
1089 rt2x00_desc_read(txd, 2, &word); 1120 rt2x00_desc_read(txd, 2, &word);
@@ -1206,14 +1237,6 @@ static int rt2500usb_get_tx_data_len(struct queue_entry *entry)
1206 return length; 1237 return length;
1207} 1238}
1208 1239
1209static void rt2500usb_kill_tx_queue(struct data_queue *queue)
1210{
1211 if (queue->qid == QID_BEACON)
1212 rt2500usb_register_write(queue->rt2x00dev, TXRX_CSR19, 0);
1213
1214 rt2x00usb_kill_tx_queue(queue);
1215}
1216
1217/* 1240/*
1218 * RX control handlers 1241 * RX control handlers
1219 */ 1242 */
@@ -1801,6 +1824,7 @@ static const struct ieee80211_ops rt2500usb_mac80211_ops = {
1801 .bss_info_changed = rt2x00mac_bss_info_changed, 1824 .bss_info_changed = rt2x00mac_bss_info_changed,
1802 .conf_tx = rt2x00mac_conf_tx, 1825 .conf_tx = rt2x00mac_conf_tx,
1803 .rfkill_poll = rt2x00mac_rfkill_poll, 1826 .rfkill_poll = rt2x00mac_rfkill_poll,
1827 .flush = rt2x00mac_flush,
1804}; 1828};
1805 1829
1806static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { 1830static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
@@ -1813,11 +1837,13 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1813 .link_stats = rt2500usb_link_stats, 1837 .link_stats = rt2500usb_link_stats,
1814 .reset_tuner = rt2500usb_reset_tuner, 1838 .reset_tuner = rt2500usb_reset_tuner,
1815 .watchdog = rt2x00usb_watchdog, 1839 .watchdog = rt2x00usb_watchdog,
1840 .start_queue = rt2500usb_start_queue,
1841 .kick_queue = rt2x00usb_kick_queue,
1842 .stop_queue = rt2500usb_stop_queue,
1843 .flush_queue = rt2x00usb_flush_queue,
1816 .write_tx_desc = rt2500usb_write_tx_desc, 1844 .write_tx_desc = rt2500usb_write_tx_desc,
1817 .write_beacon = rt2500usb_write_beacon, 1845 .write_beacon = rt2500usb_write_beacon,
1818 .get_tx_data_len = rt2500usb_get_tx_data_len, 1846 .get_tx_data_len = rt2500usb_get_tx_data_len,
1819 .kick_tx_queue = rt2x00usb_kick_tx_queue,
1820 .kill_tx_queue = rt2500usb_kill_tx_queue,
1821 .fill_rxdone = rt2500usb_fill_rxdone, 1847 .fill_rxdone = rt2500usb_fill_rxdone,
1822 .config_shared_key = rt2500usb_config_key, 1848 .config_shared_key = rt2500usb_config_key,
1823 .config_pairwise_key = rt2500usb_config_key, 1849 .config_pairwise_key = rt2500usb_config_key,
@@ -1829,28 +1855,28 @@ static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = {
1829}; 1855};
1830 1856
1831static const struct data_queue_desc rt2500usb_queue_rx = { 1857static const struct data_queue_desc rt2500usb_queue_rx = {
1832 .entry_num = RX_ENTRIES, 1858 .entry_num = 32,
1833 .data_size = DATA_FRAME_SIZE, 1859 .data_size = DATA_FRAME_SIZE,
1834 .desc_size = RXD_DESC_SIZE, 1860 .desc_size = RXD_DESC_SIZE,
1835 .priv_size = sizeof(struct queue_entry_priv_usb), 1861 .priv_size = sizeof(struct queue_entry_priv_usb),
1836}; 1862};
1837 1863
1838static const struct data_queue_desc rt2500usb_queue_tx = { 1864static const struct data_queue_desc rt2500usb_queue_tx = {
1839 .entry_num = TX_ENTRIES, 1865 .entry_num = 32,
1840 .data_size = DATA_FRAME_SIZE, 1866 .data_size = DATA_FRAME_SIZE,
1841 .desc_size = TXD_DESC_SIZE, 1867 .desc_size = TXD_DESC_SIZE,
1842 .priv_size = sizeof(struct queue_entry_priv_usb), 1868 .priv_size = sizeof(struct queue_entry_priv_usb),
1843}; 1869};
1844 1870
1845static const struct data_queue_desc rt2500usb_queue_bcn = { 1871static const struct data_queue_desc rt2500usb_queue_bcn = {
1846 .entry_num = BEACON_ENTRIES, 1872 .entry_num = 1,
1847 .data_size = MGMT_FRAME_SIZE, 1873 .data_size = MGMT_FRAME_SIZE,
1848 .desc_size = TXD_DESC_SIZE, 1874 .desc_size = TXD_DESC_SIZE,
1849 .priv_size = sizeof(struct queue_entry_priv_usb_bcn), 1875 .priv_size = sizeof(struct queue_entry_priv_usb_bcn),
1850}; 1876};
1851 1877
1852static const struct data_queue_desc rt2500usb_queue_atim = { 1878static const struct data_queue_desc rt2500usb_queue_atim = {
1853 .entry_num = ATIM_ENTRIES, 1879 .entry_num = 8,
1854 .data_size = DATA_FRAME_SIZE, 1880 .data_size = DATA_FRAME_SIZE,
1855 .desc_size = TXD_DESC_SIZE, 1881 .desc_size = TXD_DESC_SIZE,
1856 .priv_size = sizeof(struct queue_entry_priv_usb), 1882 .priv_size = sizeof(struct queue_entry_priv_usb),
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
index eb8b6cab9925..4c55e8525cad 100644
--- a/drivers/net/wireless/rt2x00/rt2800.h
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -46,7 +46,11 @@
46 * RF2020 2.4G B/G 46 * RF2020 2.4G B/G
47 * RF3021 2.4G 1T2R 47 * RF3021 2.4G 1T2R
48 * RF3022 2.4G 2T2R 48 * RF3022 2.4G 2T2R
49 * RF3052 2.4G 2T2R 49 * RF3052 2.4G/5G 2T2R
50 * RF2853 2.4G/5G 3T3R
51 * RF3320 2.4G 1T1R(RT3350/RT3370/RT3390)
52 * RF3322 2.4G 2T2R(RT3352/RT3371/RT3372/RT3391/RT3392)
53 * RF3853 2.4G/5G 3T3R(RT3883/RT3563/RT3573/RT3593/RT3662)
50 */ 54 */
51#define RF2820 0x0001 55#define RF2820 0x0001
52#define RF2850 0x0002 56#define RF2850 0x0002
@@ -57,7 +61,10 @@
57#define RF3021 0x0007 61#define RF3021 0x0007
58#define RF3022 0x0008 62#define RF3022 0x0008
59#define RF3052 0x0009 63#define RF3052 0x0009
64#define RF2853 0x000a
60#define RF3320 0x000b 65#define RF3320 0x000b
66#define RF3322 0x000c
67#define RF3853 0x000d
61 68
62/* 69/*
63 * Chipset revisions. 70 * Chipset revisions.
@@ -206,10 +213,10 @@
206 213
207/* 214/*
208 * WMM_AIFSN_CFG: Aifsn for each EDCA AC 215 * WMM_AIFSN_CFG: Aifsn for each EDCA AC
209 * AIFSN0: AC_BE 216 * AIFSN0: AC_VO
210 * AIFSN1: AC_BK 217 * AIFSN1: AC_VI
211 * AIFSN2: AC_VI 218 * AIFSN2: AC_BE
212 * AIFSN3: AC_VO 219 * AIFSN3: AC_BK
213 */ 220 */
214#define WMM_AIFSN_CFG 0x0214 221#define WMM_AIFSN_CFG 0x0214
215#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f) 222#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f)
@@ -219,10 +226,10 @@
219 226
220/* 227/*
221 * WMM_CWMIN_CSR: CWmin for each EDCA AC 228 * WMM_CWMIN_CSR: CWmin for each EDCA AC
222 * CWMIN0: AC_BE 229 * CWMIN0: AC_VO
223 * CWMIN1: AC_BK 230 * CWMIN1: AC_VI
224 * CWMIN2: AC_VI 231 * CWMIN2: AC_BE
225 * CWMIN3: AC_VO 232 * CWMIN3: AC_BK
226 */ 233 */
227#define WMM_CWMIN_CFG 0x0218 234#define WMM_CWMIN_CFG 0x0218
228#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f) 235#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f)
@@ -232,10 +239,10 @@
232 239
233/* 240/*
234 * WMM_CWMAX_CSR: CWmax for each EDCA AC 241 * WMM_CWMAX_CSR: CWmax for each EDCA AC
235 * CWMAX0: AC_BE 242 * CWMAX0: AC_VO
236 * CWMAX1: AC_BK 243 * CWMAX1: AC_VI
237 * CWMAX2: AC_VI 244 * CWMAX2: AC_BE
238 * CWMAX3: AC_VO 245 * CWMAX3: AC_BK
239 */ 246 */
240#define WMM_CWMAX_CFG 0x021c 247#define WMM_CWMAX_CFG 0x021c
241#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f) 248#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f)
@@ -244,18 +251,18 @@
244#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000) 251#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000)
245 252
246/* 253/*
247 * AC_TXOP0: AC_BK/AC_BE TXOP register 254 * AC_TXOP0: AC_VO/AC_VI TXOP register
248 * AC0TXOP: AC_BK in unit of 32us 255 * AC0TXOP: AC_VO in unit of 32us
249 * AC1TXOP: AC_BE in unit of 32us 256 * AC1TXOP: AC_VI in unit of 32us
250 */ 257 */
251#define WMM_TXOP0_CFG 0x0220 258#define WMM_TXOP0_CFG 0x0220
252#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff) 259#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff)
253#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000) 260#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000)
254 261
255/* 262/*
256 * AC_TXOP1: AC_VO/AC_VI TXOP register 263 * AC_TXOP1: AC_BE/AC_BK TXOP register
257 * AC2TXOP: AC_VI in unit of 32us 264 * AC2TXOP: AC_BE in unit of 32us
258 * AC3TXOP: AC_VO in unit of 32us 265 * AC3TXOP: AC_BK in unit of 32us
259 */ 266 */
260#define WMM_TXOP1_CFG 0x0224 267#define WMM_TXOP1_CFG 0x0224
261#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff) 268#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff)
@@ -281,7 +288,7 @@
281#define MCU_CMD_CFG 0x022c 288#define MCU_CMD_CFG 0x022c
282 289
283/* 290/*
284 * AC_BK register offsets 291 * AC_VO register offsets
285 */ 292 */
286#define TX_BASE_PTR0 0x0230 293#define TX_BASE_PTR0 0x0230
287#define TX_MAX_CNT0 0x0234 294#define TX_MAX_CNT0 0x0234
@@ -289,7 +296,7 @@
289#define TX_DTX_IDX0 0x023c 296#define TX_DTX_IDX0 0x023c
290 297
291/* 298/*
292 * AC_BE register offsets 299 * AC_VI register offsets
293 */ 300 */
294#define TX_BASE_PTR1 0x0240 301#define TX_BASE_PTR1 0x0240
295#define TX_MAX_CNT1 0x0244 302#define TX_MAX_CNT1 0x0244
@@ -297,7 +304,7 @@
297#define TX_DTX_IDX1 0x024c 304#define TX_DTX_IDX1 0x024c
298 305
299/* 306/*
300 * AC_VI register offsets 307 * AC_BE register offsets
301 */ 308 */
302#define TX_BASE_PTR2 0x0250 309#define TX_BASE_PTR2 0x0250
303#define TX_MAX_CNT2 0x0254 310#define TX_MAX_CNT2 0x0254
@@ -305,7 +312,7 @@
305#define TX_DTX_IDX2 0x025c 312#define TX_DTX_IDX2 0x025c
306 313
307/* 314/*
308 * AC_VO register offsets 315 * AC_BK register offsets
309 */ 316 */
310#define TX_BASE_PTR3 0x0260 317#define TX_BASE_PTR3 0x0260
311#define TX_MAX_CNT3 0x0264 318#define TX_MAX_CNT3 0x0264
@@ -412,10 +419,22 @@
412#define BCN_OFFSET1_BCN7 FIELD32(0xff000000) 419#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
413 420
414/* 421/*
415 * PBF registers 422 * TXRXQ_PCNT: PBF register
416 * Most are for debug. Driver doesn't touch PBF register. 423 * PCNT_TX0Q: Page count for TX hardware queue 0
424 * PCNT_TX1Q: Page count for TX hardware queue 1
425 * PCNT_TX2Q: Page count for TX hardware queue 2
426 * PCNT_RX0Q: Page count for RX hardware queue
417 */ 427 */
418#define TXRXQ_PCNT 0x0438 428#define TXRXQ_PCNT 0x0438
429#define TXRXQ_PCNT_TX0Q FIELD32(0x000000ff)
430#define TXRXQ_PCNT_TX1Q FIELD32(0x0000ff00)
431#define TXRXQ_PCNT_TX2Q FIELD32(0x00ff0000)
432#define TXRXQ_PCNT_RX0Q FIELD32(0xff000000)
433
434/*
435 * PBF register
436 * Debug. Driver doesn't touch PBF register.
437 */
419#define PBF_DBG 0x043c 438#define PBF_DBG 0x043c
420 439
421/* 440/*
@@ -686,8 +705,18 @@
686 705
687/* 706/*
688 * CH_TIME_CFG: count as channel busy 707 * CH_TIME_CFG: count as channel busy
708 * EIFS_BUSY: Count EIFS as channel busy
 709 * NAV_BUSY: Count NAV as channel busy
710 * RX_BUSY: Count RX as channel busy
711 * TX_BUSY: Count TX as channel busy
712 * TMR_EN: Enable channel statistics timer
689 */ 713 */
690#define CH_TIME_CFG 0x110c 714#define CH_TIME_CFG 0x110c
715#define CH_TIME_CFG_EIFS_BUSY FIELD32(0x00000010)
716#define CH_TIME_CFG_NAV_BUSY FIELD32(0x00000008)
717#define CH_TIME_CFG_RX_BUSY FIELD32(0x00000004)
718#define CH_TIME_CFG_TX_BUSY FIELD32(0x00000002)
719#define CH_TIME_CFG_TMR_EN FIELD32(0x00000001)
691 720
692/* 721/*
693 * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us 722 * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
@@ -960,8 +989,31 @@
960 989
961/* 990/*
962 * TXOP_CTRL_CFG: 991 * TXOP_CTRL_CFG:
992 * TIMEOUT_TRUN_EN: Enable/Disable TXOP timeout truncation
993 * AC_TRUN_EN: Enable/Disable truncation for AC change
994 * TXRATEGRP_TRUN_EN: Enable/Disable truncation for TX rate group change
995 * USER_MODE_TRUN_EN: Enable/Disable truncation for user TXOP mode
996 * MIMO_PS_TRUN_EN: Enable/Disable truncation for MIMO PS RTS/CTS
997 * RESERVED_TRUN_EN: Reserved
998 * LSIG_TXOP_EN: Enable/Disable L-SIG TXOP protection
 999 * EXT_CCA_EN: Enable/Disable extension channel CCA reference (Defer 40MHz
1000 * transmissions if extension CCA is clear).
1001 * EXT_CCA_DLY: Extension CCA signal delay time (unit: us)
1002 * EXT_CWMIN: CwMin for extension channel backoff
1003 * 0: Disabled
1004 *
963 */ 1005 */
964#define TXOP_CTRL_CFG 0x1340 1006#define TXOP_CTRL_CFG 0x1340
1007#define TXOP_CTRL_CFG_TIMEOUT_TRUN_EN FIELD32(0x00000001)
1008#define TXOP_CTRL_CFG_AC_TRUN_EN FIELD32(0x00000002)
1009#define TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN FIELD32(0x00000004)
1010#define TXOP_CTRL_CFG_USER_MODE_TRUN_EN FIELD32(0x00000008)
1011#define TXOP_CTRL_CFG_MIMO_PS_TRUN_EN FIELD32(0x00000010)
1012#define TXOP_CTRL_CFG_RESERVED_TRUN_EN FIELD32(0x00000020)
1013#define TXOP_CTRL_CFG_LSIG_TXOP_EN FIELD32(0x00000040)
1014#define TXOP_CTRL_CFG_EXT_CCA_EN FIELD32(0x00000080)
1015#define TXOP_CTRL_CFG_EXT_CCA_DLY FIELD32(0x0000ff00)
1016#define TXOP_CTRL_CFG_EXT_CWMIN FIELD32(0x000f0000)
965 1017
966/* 1018/*
967 * TX_RTS_CFG: 1019 * TX_RTS_CFG:
@@ -1485,17 +1537,17 @@
1485#define SHARED_KEY_MODE_BASE 0x7000 1537#define SHARED_KEY_MODE_BASE 0x7000
1486 1538
1487#define MAC_WCID_ENTRY(__idx) \ 1539#define MAC_WCID_ENTRY(__idx) \
1488 ( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) ) 1540 (MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)))
1489#define PAIRWISE_KEY_ENTRY(__idx) \ 1541#define PAIRWISE_KEY_ENTRY(__idx) \
1490 ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) 1542 (PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))
1491#define MAC_IVEIV_ENTRY(__idx) \ 1543#define MAC_IVEIV_ENTRY(__idx) \
1492 ( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) ) 1544 (MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)))
1493#define MAC_WCID_ATTR_ENTRY(__idx) \ 1545#define MAC_WCID_ATTR_ENTRY(__idx) \
1494 ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) ) 1546 (MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)))
1495#define SHARED_KEY_ENTRY(__idx) \ 1547#define SHARED_KEY_ENTRY(__idx) \
1496 ( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) ) 1548 (SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)))
1497#define SHARED_KEY_MODE_ENTRY(__idx) \ 1549#define SHARED_KEY_MODE_ENTRY(__idx) \
1498 ( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) ) 1550 (SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)))
1499 1551
1500struct mac_wcid_entry { 1552struct mac_wcid_entry {
1501 u8 mac[6]; 1553 u8 mac[6];
@@ -1635,9 +1687,9 @@ struct mac_iveiv_entry {
1635#define HW_BEACON_BASE7 0x5bc0 1687#define HW_BEACON_BASE7 0x5bc0
1636 1688
1637#define HW_BEACON_OFFSET(__index) \ 1689#define HW_BEACON_OFFSET(__index) \
1638 ( ((__index) < 4) ? ( HW_BEACON_BASE0 + (__index * 0x0200) ) : \ 1690 (((__index) < 4) ? (HW_BEACON_BASE0 + (__index * 0x0200)) : \
1639 (((__index) < 6) ? ( HW_BEACON_BASE4 + ((__index - 4) * 0x0200) ) : \ 1691 (((__index) < 6) ? (HW_BEACON_BASE4 + ((__index - 4) * 0x0200)) : \
1640 (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))) ) 1692 (HW_BEACON_BASE6 - ((__index - 6) * 0x0200))))
1641 1693
1642/* 1694/*
1643 * BBP registers. 1695 * BBP registers.
@@ -1805,32 +1857,51 @@ struct mac_iveiv_entry {
1805#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00) 1857#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
1806 1858
1807/* 1859/*
1808 * EEPROM ANTENNA config 1860 * EEPROM NIC Configuration 0
1809 * RXPATH: 1: 1R, 2: 2R, 3: 3R 1861 * RXPATH: 1: 1R, 2: 2R, 3: 3R
1810 * TXPATH: 1: 1T, 2: 2T 1862 * TXPATH: 1: 1T, 2: 2T, 3: 3T
1811 */ 1863 * RF_TYPE: RFIC type
1812#define EEPROM_ANTENNA 0x001a 1864 */
1813#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f) 1865#define EEPROM_NIC_CONF0 0x001a
1814#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0) 1866#define EEPROM_NIC_CONF0_RXPATH FIELD16(0x000f)
1815#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00) 1867#define EEPROM_NIC_CONF0_TXPATH FIELD16(0x00f0)
1816 1868#define EEPROM_NIC_CONF0_RF_TYPE FIELD16(0x0f00)
1817/* 1869
1818 * EEPROM NIC config 1870/*
1819 * CARDBUS_ACCEL: 0 - enable, 1 - disable 1871 * EEPROM NIC Configuration 1
1820 */ 1872 * HW_RADIO: 0: disable, 1: enable
1821#define EEPROM_NIC 0x001b 1873 * EXTERNAL_TX_ALC: 0: disable, 1: enable
1822#define EEPROM_NIC_HW_RADIO FIELD16(0x0001) 1874 * EXTERNAL_LNA_2G: 0: disable, 1: enable
1823#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002) 1875 * EXTERNAL_LNA_5G: 0: disable, 1: enable
1824#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004) 1876 * CARDBUS_ACCEL: 0: enable, 1: disable
1825#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008) 1877 * BW40M_SB_2G: 0: disable, 1: enable
1826#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010) 1878 * BW40M_SB_5G: 0: disable, 1: enable
1827#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020) 1879 * WPS_PBC: 0: disable, 1: enable
1828#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040) 1880 * BW40M_2G: 0: enable, 1: disable
1829#define EEPROM_NIC_WPS_PBC FIELD16(0x0080) 1881 * BW40M_5G: 0: enable, 1: disable
1830#define EEPROM_NIC_BW40M_BG FIELD16(0x0100) 1882 * BROADBAND_EXT_LNA: 0: disable, 1: enable
1831#define EEPROM_NIC_BW40M_A FIELD16(0x0200) 1883 * ANT_DIVERSITY: 00: Disable, 01: Diversity,
1832#define EEPROM_NIC_ANT_DIVERSITY FIELD16(0x0800) 1884 * 10: Main antenna, 11: Aux antenna
1833#define EEPROM_NIC_DAC_TEST FIELD16(0x8000) 1885 * INTERNAL_TX_ALC: 0: disable, 1: enable
1886 * BT_COEXIST: 0: disable, 1: enable
1887 * DAC_TEST: 0: disable, 1: enable
1888 */
1889#define EEPROM_NIC_CONF1 0x001b
1890#define EEPROM_NIC_CONF1_HW_RADIO FIELD16(0x0001)
1891#define EEPROM_NIC_CONF1_EXTERNAL_TX_ALC FIELD16(0x0002)
1892#define EEPROM_NIC_CONF1_EXTERNAL_LNA_2G FIELD16(0x0004)
1893#define EEPROM_NIC_CONF1_EXTERNAL_LNA_5G FIELD16(0x0008)
1894#define EEPROM_NIC_CONF1_CARDBUS_ACCEL FIELD16(0x0010)
1895#define EEPROM_NIC_CONF1_BW40M_SB_2G FIELD16(0x0020)
1896#define EEPROM_NIC_CONF1_BW40M_SB_5G FIELD16(0x0040)
1897#define EEPROM_NIC_CONF1_WPS_PBC FIELD16(0x0080)
1898#define EEPROM_NIC_CONF1_BW40M_2G FIELD16(0x0100)
1899#define EEPROM_NIC_CONF1_BW40M_5G FIELD16(0x0200)
1900#define EEPROM_NIC_CONF1_BROADBAND_EXT_LNA FIELD16(0x400)
1901#define EEPROM_NIC_CONF1_ANT_DIVERSITY FIELD16(0x1800)
1902#define EEPROM_NIC_CONF1_INTERNAL_TX_ALC FIELD16(0x2000)
1903#define EEPROM_NIC_CONF1_BT_COEXIST FIELD16(0x4000)
1904#define EEPROM_NIC_CONF1_DAC_TEST FIELD16(0x8000)
1834 1905
1835/* 1906/*
1836 * EEPROM frequency 1907 * EEPROM frequency
@@ -1852,9 +1923,9 @@ struct mac_iveiv_entry {
1852 * POLARITY_GPIO_4: Polarity GPIO4 setting. 1923 * POLARITY_GPIO_4: Polarity GPIO4 setting.
1853 * LED_MODE: Led mode. 1924 * LED_MODE: Led mode.
1854 */ 1925 */
1855#define EEPROM_LED1 0x001e 1926#define EEPROM_LED_AG_CONF 0x001e
1856#define EEPROM_LED2 0x001f 1927#define EEPROM_LED_ACT_CONF 0x001f
1857#define EEPROM_LED3 0x0020 1928#define EEPROM_LED_POLARITY 0x0020
1858#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001) 1929#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
1859#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002) 1930#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
1860#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004) 1931#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
@@ -1866,6 +1937,17 @@ struct mac_iveiv_entry {
1866#define EEPROM_LED_LED_MODE FIELD16(0x1f00) 1937#define EEPROM_LED_LED_MODE FIELD16(0x1f00)
1867 1938
1868/* 1939/*
1940 * EEPROM NIC Configuration 2
1941 * RX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
1942 * TX_STREAM: 0: Reserved, 1: 1 Stream, 2: 2 Stream
 1943 * CRYSTAL: 00: Reserved, 01: One crystal, 10: Two crystals, 11: Reserved
1944 */
1945#define EEPROM_NIC_CONF2 0x0021
1946#define EEPROM_NIC_CONF2_RX_STREAM FIELD16(0x000f)
1947#define EEPROM_NIC_CONF2_TX_STREAM FIELD16(0x00f0)
1948#define EEPROM_NIC_CONF2_CRYSTAL FIELD16(0x0600)
1949
1950/*
1869 * EEPROM LNA 1951 * EEPROM LNA
1870 */ 1952 */
1871#define EEPROM_LNA 0x0022 1953#define EEPROM_LNA 0x0022
@@ -1915,7 +1997,7 @@ struct mac_iveiv_entry {
1915 1997
1916/* 1998/*
1917 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power. 1999 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
1918 * This is delta in 40MHZ. 2000 * This is delta in 40MHZ.
 1919 * VALUE: Tx Power delta value (MAX=4) 2001 * VALUE: Tx Power delta value (MAX=4)
1920 * TYPE: 1: Plus the delta value, 0: minus the delta value 2002 * TYPE: 1: Plus the delta value, 0: minus the delta value
1921 * TXPOWER: Enable: 2003 * TXPOWER: Enable:
@@ -1971,9 +2053,9 @@ struct mac_iveiv_entry {
1971#define MCU_CURRENT 0x36 2053#define MCU_CURRENT 0x36
1972#define MCU_LED 0x50 2054#define MCU_LED 0x50
1973#define MCU_LED_STRENGTH 0x51 2055#define MCU_LED_STRENGTH 0x51
1974#define MCU_LED_1 0x52 2056#define MCU_LED_AG_CONF 0x52
1975#define MCU_LED_2 0x53 2057#define MCU_LED_ACT_CONF 0x53
1976#define MCU_LED_3 0x54 2058#define MCU_LED_LED_POLARITY 0x54
1977#define MCU_RADAR 0x60 2059#define MCU_RADAR 0x60
1978#define MCU_BOOT_SIGNAL 0x72 2060#define MCU_BOOT_SIGNAL 0x72
1979#define MCU_BBP_SIGNAL 0x80 2061#define MCU_BBP_SIGNAL 0x80
@@ -1987,8 +2069,8 @@ struct mac_iveiv_entry {
1987/* 2069/*
1988 * DMA descriptor defines. 2070 * DMA descriptor defines.
1989 */ 2071 */
1990#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) ) 2072#define TXWI_DESC_SIZE (4 * sizeof(__le32))
1991#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) ) 2073#define RXWI_DESC_SIZE (4 * sizeof(__le32))
1992 2074
1993/* 2075/*
1994 * TX WI structure 2076 * TX WI structure
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 5f00e00789d8..54917a281398 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -277,13 +277,17 @@ int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
277 unsigned int i; 277 unsigned int i;
278 u32 reg; 278 u32 reg;
279 279
280 /*
281 * Some devices are really slow to respond here. Wait a whole second
282 * before timing out.
283 */
280 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 284 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
281 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 285 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
282 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) && 286 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
283 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY)) 287 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
284 return 0; 288 return 0;
285 289
286 msleep(1); 290 msleep(10);
287 } 291 }
288 292
289 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n"); 293 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
@@ -483,7 +487,7 @@ void rt2800_write_tx_data(struct queue_entry *entry,
483 txdesc->key_idx : 0xff); 487 txdesc->key_idx : 0xff);
484 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, 488 rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
485 txdesc->length); 489 txdesc->length);
486 rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, txdesc->qid); 490 rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid);
487 rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1); 491 rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1);
488 rt2x00_desc_write(txwi, 1, word); 492 rt2x00_desc_write(txwi, 1, word);
489 493
@@ -727,7 +731,7 @@ void rt2800_txdone(struct rt2x00_dev *rt2x00dev)
727 * that the TX_STA_FIFO stack has a size of 16. We stick to our 731 * that the TX_STA_FIFO stack has a size of 16. We stick to our
728 * tx ring size for now. 732 * tx ring size for now.
729 */ 733 */
730 for (i = 0; i < TX_ENTRIES; i++) { 734 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
731 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg); 735 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
732 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID)) 736 if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
733 break; 737 break;
@@ -768,6 +772,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
768 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 772 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
769 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 773 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
770 unsigned int beacon_base; 774 unsigned int beacon_base;
775 unsigned int padding_len;
771 u32 reg; 776 u32 reg;
772 777
773 /* 778 /*
@@ -802,11 +807,13 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
802 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); 807 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
803 808
804 /* 809 /*
805 * Write entire beacon with TXWI to register. 810 * Write entire beacon with TXWI and padding to register.
806 */ 811 */
812 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
813 skb_pad(entry->skb, padding_len);
807 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 814 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
808 rt2800_register_multiwrite(rt2x00dev, beacon_base, 815 rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
809 entry->skb->data, entry->skb->len); 816 entry->skb->len + padding_len);
810 817
811 /* 818 /*
812 * Enable beaconing again. 819 * Enable beaconing again.
@@ -824,7 +831,7 @@ void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc)
824} 831}
825EXPORT_SYMBOL_GPL(rt2800_write_beacon); 832EXPORT_SYMBOL_GPL(rt2800_write_beacon);
826 833
827static void inline rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev, 834static inline void rt2800_clear_beacon(struct rt2x00_dev *rt2x00dev,
828 unsigned int beacon_base) 835 unsigned int beacon_base)
829{ 836{
830 int i; 837 int i;
@@ -1144,6 +1151,7 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1144 struct rt2x00intf_conf *conf, const unsigned int flags) 1151 struct rt2x00intf_conf *conf, const unsigned int flags)
1145{ 1152{
1146 u32 reg; 1153 u32 reg;
1154 bool update_bssid = false;
1147 1155
1148 if (flags & CONFIG_UPDATE_TYPE) { 1156 if (flags & CONFIG_UPDATE_TYPE) {
1149 /* 1157 /*
@@ -1173,6 +1181,16 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1173 } 1181 }
1174 1182
1175 if (flags & CONFIG_UPDATE_MAC) { 1183 if (flags & CONFIG_UPDATE_MAC) {
1184 if (flags & CONFIG_UPDATE_TYPE &&
1185 conf->sync == TSF_SYNC_AP_NONE) {
1186 /*
1187 * The BSSID register has to be set to our own mac
1188 * address in AP mode.
1189 */
1190 memcpy(conf->bssid, conf->mac, sizeof(conf->mac));
1191 update_bssid = true;
1192 }
1193
1176 if (!is_zero_ether_addr((const u8 *)conf->mac)) { 1194 if (!is_zero_ether_addr((const u8 *)conf->mac)) {
1177 reg = le32_to_cpu(conf->mac[1]); 1195 reg = le32_to_cpu(conf->mac[1]);
1178 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff); 1196 rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
@@ -1183,7 +1201,7 @@ void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
1183 conf->mac, sizeof(conf->mac)); 1201 conf->mac, sizeof(conf->mac));
1184 } 1202 }
1185 1203
1186 if (flags & CONFIG_UPDATE_BSSID) { 1204 if ((flags & CONFIG_UPDATE_BSSID) || update_bssid) {
1187 if (!is_zero_ether_addr((const u8 *)conf->bssid)) { 1205 if (!is_zero_ether_addr((const u8 *)conf->bssid)) {
1188 reg = le32_to_cpu(conf->bssid[1]); 1206 reg = le32_to_cpu(conf->bssid[1]);
1189 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3); 1207 rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3);
@@ -1529,7 +1547,8 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1529 rt2x00_rf(rt2x00dev, RF3020) || 1547 rt2x00_rf(rt2x00dev, RF3020) ||
1530 rt2x00_rf(rt2x00dev, RF3021) || 1548 rt2x00_rf(rt2x00dev, RF3021) ||
1531 rt2x00_rf(rt2x00dev, RF3022) || 1549 rt2x00_rf(rt2x00dev, RF3022) ||
1532 rt2x00_rf(rt2x00dev, RF3052)) 1550 rt2x00_rf(rt2x00dev, RF3052) ||
1551 rt2x00_rf(rt2x00dev, RF3320))
1533 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); 1552 rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info);
1534 else 1553 else
1535 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); 1554 rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info);
@@ -1609,6 +1628,13 @@ static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
1609 } 1628 }
1610 1629
1611 msleep(1); 1630 msleep(1);
1631
1632 /*
1633 * Clear channel statistic counters
1634 */
1635 rt2800_register_read(rt2x00dev, CH_IDLE_STA, &reg);
1636 rt2800_register_read(rt2x00dev, CH_BUSY_STA, &reg);
1637 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &reg);
1612} 1638}
1613 1639
1614static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, 1640static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
@@ -1914,8 +1940,8 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
1914 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 1940 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
1915 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || 1941 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) ||
1916 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { 1942 rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) {
1917 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 1943 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
1918 if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST)) 1944 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
1919 rt2800_register_write(rt2x00dev, TX_SW_CFG2, 1945 rt2800_register_write(rt2x00dev, TX_SW_CFG2,
1920 0x0000002c); 1946 0x0000002c);
1921 else 1947 else
@@ -2097,7 +2123,23 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2097 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 2123 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
2098 } 2124 }
2099 2125
2100 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f); 2126 /*
2127 * The legacy driver also sets TXOP_CTRL_CFG_RESERVED_TRUN_EN to 1
2128 * although it is reserved.
2129 */
2130 rt2800_register_read(rt2x00dev, TXOP_CTRL_CFG, &reg);
2131 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TIMEOUT_TRUN_EN, 1);
2132 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_AC_TRUN_EN, 1);
2133 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN, 1);
2134 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_USER_MODE_TRUN_EN, 1);
2135 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_MIMO_PS_TRUN_EN, 1);
2136 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_RESERVED_TRUN_EN, 1);
2137 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_LSIG_TXOP_EN, 0);
2138 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_EN, 0);
2139 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_DLY, 88);
2140 rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0);
2141 rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg);
2142
2101 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002); 2143 rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);
2102 2144
2103 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg); 2145 rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
@@ -2134,7 +2176,7 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2134 SHARED_KEY_MODE_ENTRY(i), 0); 2176 SHARED_KEY_MODE_ENTRY(i), 0);
2135 2177
2136 for (i = 0; i < 256; i++) { 2178 for (i = 0; i < 256; i++) {
2137 u32 wcid[2] = { 0xffffffff, 0x00ffffff }; 2179 static const u32 wcid[2] = { 0xffffffff, 0x00ffffff };
2138 rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i), 2180 rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
2139 wcid, sizeof(wcid)); 2181 wcid, sizeof(wcid));
2140 2182
@@ -2227,6 +2269,17 @@ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
2227 rt2x00_set_field32(&reg, INT_TIMER_CFG_PRE_TBTT_TIMER, 6 << 4); 2269 rt2x00_set_field32(&reg, INT_TIMER_CFG_PRE_TBTT_TIMER, 6 << 4);
2228 rt2800_register_write(rt2x00dev, INT_TIMER_CFG, reg); 2270 rt2800_register_write(rt2x00dev, INT_TIMER_CFG, reg);
2229 2271
2272 /*
2273 * Set up channel statistics timer
2274 */
2275 rt2800_register_read(rt2x00dev, CH_TIME_CFG, &reg);
2276 rt2x00_set_field32(&reg, CH_TIME_CFG_EIFS_BUSY, 1);
2277 rt2x00_set_field32(&reg, CH_TIME_CFG_NAV_BUSY, 1);
2278 rt2x00_set_field32(&reg, CH_TIME_CFG_RX_BUSY, 1);
2279 rt2x00_set_field32(&reg, CH_TIME_CFG_TX_BUSY, 1);
2280 rt2x00_set_field32(&reg, CH_TIME_CFG_TMR_EN, 1);
2281 rt2800_register_write(rt2x00dev, CH_TIME_CFG, reg);
2282
2230 return 0; 2283 return 0;
2231} 2284}
2232 2285
@@ -2344,10 +2397,10 @@ static int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
2344 rt2x00_rt(rt2x00dev, RT3390)) { 2397 rt2x00_rt(rt2x00dev, RT3390)) {
2345 rt2800_bbp_read(rt2x00dev, 138, &value); 2398 rt2800_bbp_read(rt2x00dev, 138, &value);
2346 2399
2347 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); 2400 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
2348 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1) 2401 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
2349 value |= 0x20; 2402 value |= 0x20;
2350 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1) 2403 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
2351 value &= ~0x02; 2404 value &= ~0x02;
2352 2405
2353 rt2800_bbp_write(rt2x00dev, 138, value); 2406 rt2800_bbp_write(rt2x00dev, 138, value);
@@ -2559,8 +2612,8 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2559 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); 2612 rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1);
2560 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || 2613 if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) ||
2561 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) { 2614 rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) {
2562 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 2615 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
2563 if (rt2x00_get_field16(eeprom, EEPROM_NIC_DAC_TEST)) 2616 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST))
2564 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3); 2617 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3);
2565 else 2618 else
2566 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0); 2619 rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0);
@@ -2633,10 +2686,10 @@ static int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
2633 if (rt2x00_rt(rt2x00dev, RT3090)) { 2686 if (rt2x00_rt(rt2x00dev, RT3090)) {
2634 rt2800_bbp_read(rt2x00dev, 138, &bbp); 2687 rt2800_bbp_read(rt2x00dev, 138, &bbp);
2635 2688
2636 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); 2689 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
2637 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) == 1) 2690 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1)
2638 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0); 2691 rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0);
2639 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) == 1) 2692 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1)
2640 rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1); 2693 rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1);
2641 2694
2642 rt2800_bbp_write(rt2x00dev, 138, bbp); 2695 rt2800_bbp_write(rt2x00dev, 138, bbp);
@@ -2735,16 +2788,16 @@ int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev)
2735 /* 2788 /*
2736 * Initialize LED control 2789 * Initialize LED control
2737 */ 2790 */
2738 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word); 2791 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF, &word);
2739 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff, 2792 rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff,
2740 word & 0xff, (word >> 8) & 0xff); 2793 word & 0xff, (word >> 8) & 0xff);
2741 2794
2742 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word); 2795 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF, &word);
2743 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff, 2796 rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff,
2744 word & 0xff, (word >> 8) & 0xff); 2797 word & 0xff, (word >> 8) & 0xff);
2745 2798
2746 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word); 2799 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY, &word);
2747 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff, 2800 rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff,
2748 word & 0xff, (word >> 8) & 0xff); 2801 word & 0xff, (word >> 8) & 0xff);
2749 2802
2750 return 0; 2803 return 0;
@@ -2838,38 +2891,41 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2838 EEPROM(rt2x00dev, "MAC: %pM\n", mac); 2891 EEPROM(rt2x00dev, "MAC: %pM\n", mac);
2839 } 2892 }
2840 2893
2841 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word); 2894 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &word);
2842 if (word == 0xffff) { 2895 if (word == 0xffff) {
2843 rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2); 2896 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
2844 rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1); 2897 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1);
2845 rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820); 2898 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820);
2846 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 2899 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
2847 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word); 2900 EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
2848 } else if (rt2x00_rt(rt2x00dev, RT2860) || 2901 } else if (rt2x00_rt(rt2x00dev, RT2860) ||
2849 rt2x00_rt(rt2x00dev, RT2872)) { 2902 rt2x00_rt(rt2x00dev, RT2872)) {
2850 /* 2903 /*
2851 * There is a max of 2 RX streams for RT28x0 series 2904 * There is a max of 2 RX streams for RT28x0 series
2852 */ 2905 */
2853 if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2) 2906 if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2)
2854 rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2); 2907 rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2);
2855 rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); 2908 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word);
2856 } 2909 }
2857 2910
2858 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word); 2911 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &word);
2859 if (word == 0xffff) { 2912 if (word == 0xffff) {
2860 rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0); 2913 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0);
2861 rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0); 2914 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0);
2862 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0); 2915 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G, 0);
2863 rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0); 2916 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G, 0);
2864 rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); 2917 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_CARDBUS_ACCEL, 0);
2865 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0); 2918 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_SB_2G, 0);
2866 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0); 2919 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_SB_5G, 0);
2867 rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0); 2920 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_WPS_PBC, 0);
2868 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0); 2921 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_2G, 0);
2869 rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0); 2922 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_5G, 0);
2870 rt2x00_set_field16(&word, EEPROM_NIC_ANT_DIVERSITY, 0); 2923 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BROADBAND_EXT_LNA, 0);
2871 rt2x00_set_field16(&word, EEPROM_NIC_DAC_TEST, 0); 2924 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_ANT_DIVERSITY, 0);
2872 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); 2925 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0);
2926 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0);
2927 rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0);
2928 rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word);
2873 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word); 2929 EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
2874 } 2930 }
2875 2931
@@ -2884,9 +2940,9 @@ int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev)
2884 LED_MODE_TXRX_ACTIVITY); 2940 LED_MODE_TXRX_ACTIVITY);
2885 rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0); 2941 rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
2886 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); 2942 rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
2887 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555); 2943 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555);
2888 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221); 2944 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221);
2889 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8); 2945 rt2x00_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8);
2890 EEPROM(rt2x00dev, "Led Mode: 0x%04x\n", word); 2946 EEPROM(rt2x00dev, "Led Mode: 0x%04x\n", word);
2891 } 2947 }
2892 2948
@@ -2950,12 +3006,12 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
2950 /* 3006 /*
2951 * Read EEPROM word for configuration. 3007 * Read EEPROM word for configuration.
2952 */ 3008 */
2953 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); 3009 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
2954 3010
2955 /* 3011 /*
2956 * Identify RF chipset. 3012 * Identify RF chipset.
2957 */ 3013 */
2958 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 3014 value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE);
2959 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg); 3015 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
2960 3016
2961 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), 3017 rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET),
@@ -2981,7 +3037,8 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
2981 !rt2x00_rf(rt2x00dev, RF2020) && 3037 !rt2x00_rf(rt2x00dev, RF2020) &&
2982 !rt2x00_rf(rt2x00dev, RF3021) && 3038 !rt2x00_rf(rt2x00dev, RF3021) &&
2983 !rt2x00_rf(rt2x00dev, RF3022) && 3039 !rt2x00_rf(rt2x00dev, RF3022) &&
2984 !rt2x00_rf(rt2x00dev, RF3052)) { 3040 !rt2x00_rf(rt2x00dev, RF3052) &&
3041 !rt2x00_rf(rt2x00dev, RF3320)) {
2985 ERROR(rt2x00dev, "Invalid RF chipset detected.\n"); 3042 ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
2986 return -ENODEV; 3043 return -ENODEV;
2987 } 3044 }
@@ -2990,9 +3047,9 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
2990 * Identify default antenna configuration. 3047 * Identify default antenna configuration.
2991 */ 3048 */
2992 rt2x00dev->default_ant.tx = 3049 rt2x00dev->default_ant.tx =
2993 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH); 3050 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH);
2994 rt2x00dev->default_ant.rx = 3051 rt2x00dev->default_ant.rx =
2995 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH); 3052 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH);
2996 3053
2997 /* 3054 /*
2998 * Read frequency offset and RF programming sequence. 3055 * Read frequency offset and RF programming sequence.
@@ -3003,17 +3060,17 @@ int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev)
3003 /* 3060 /*
3004 * Read external LNA information. 3061 * Read external LNA information.
3005 */ 3062 */
3006 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom); 3063 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom);
3007 3064
3008 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A)) 3065 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G))
3009 __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags); 3066 __set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
3010 if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG)) 3067 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G))
3011 __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags); 3068 __set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);
3012 3069
3013 /* 3070 /*
3014 * Detect if this device has a hardware controlled radio. 3071 * Detect if this device has a hardware controlled radio.
3015 */ 3072 */
3016 if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO)) 3073 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_HW_RADIO))
3017 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags); 3074 __set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);
3018 3075
3019 /* 3076 /*
@@ -3225,7 +3282,7 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3225 rt2x00dev->hw->max_report_rates = 7; 3282 rt2x00dev->hw->max_report_rates = 7;
3226 rt2x00dev->hw->max_rate_tries = 1; 3283 rt2x00dev->hw->max_rate_tries = 1;
3227 3284
3228 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom); 3285 rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0, &eeprom);
3229 3286
3230 /* 3287 /*
3231 * Initialize hw_mode information. 3288 * Initialize hw_mode information.
@@ -3245,7 +3302,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3245 } else if (rt2x00_rf(rt2x00dev, RF3020) || 3302 } else if (rt2x00_rf(rt2x00dev, RF3020) ||
3246 rt2x00_rf(rt2x00dev, RF2020) || 3303 rt2x00_rf(rt2x00dev, RF2020) ||
3247 rt2x00_rf(rt2x00dev, RF3021) || 3304 rt2x00_rf(rt2x00dev, RF3021) ||
3248 rt2x00_rf(rt2x00dev, RF3022)) { 3305 rt2x00_rf(rt2x00dev, RF3022) ||
3306 rt2x00_rf(rt2x00dev, RF3320)) {
3249 spec->num_channels = 14; 3307 spec->num_channels = 14;
3250 spec->channels = rf_vals_3x; 3308 spec->channels = rf_vals_3x;
3251 } else if (rt2x00_rf(rt2x00dev, RF3052)) { 3309 } else if (rt2x00_rf(rt2x00dev, RF3052)) {
@@ -3268,11 +3326,11 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3268 IEEE80211_HT_CAP_SGI_20 | 3326 IEEE80211_HT_CAP_SGI_20 |
3269 IEEE80211_HT_CAP_SGI_40; 3327 IEEE80211_HT_CAP_SGI_40;
3270 3328
3271 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) >= 2) 3329 if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) >= 2)
3272 spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC; 3330 spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC;
3273 3331
3274 spec->ht.cap |= 3332 spec->ht.cap |=
3275 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH) << 3333 rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) <<
3276 IEEE80211_HT_CAP_RX_STBC_SHIFT; 3334 IEEE80211_HT_CAP_RX_STBC_SHIFT;
3277 3335
3278 spec->ht.ampdu_factor = 3; 3336 spec->ht.ampdu_factor = 3;
@@ -3280,10 +3338,10 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
3280 spec->ht.mcs.tx_params = 3338 spec->ht.mcs.tx_params =
3281 IEEE80211_HT_MCS_TX_DEFINED | 3339 IEEE80211_HT_MCS_TX_DEFINED |
3282 IEEE80211_HT_MCS_TX_RX_DIFF | 3340 IEEE80211_HT_MCS_TX_RX_DIFF |
3283 ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) << 3341 ((rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) - 1) <<
3284 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 3342 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
3285 3343
3286 switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) { 3344 switch (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH)) {
3287 case 3: 3345 case 3:
3288 spec->ht.mcs.rx_mask[2] = 0xff; 3346 spec->ht.mcs.rx_mask[2] = 0xff;
3289 case 2: 3347 case 2:
@@ -3502,6 +3560,37 @@ int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
3502} 3560}
3503EXPORT_SYMBOL_GPL(rt2800_ampdu_action); 3561EXPORT_SYMBOL_GPL(rt2800_ampdu_action);
3504 3562
3563int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
3564 struct survey_info *survey)
3565{
3566 struct rt2x00_dev *rt2x00dev = hw->priv;
3567 struct ieee80211_conf *conf = &hw->conf;
3568 u32 idle, busy, busy_ext;
3569
3570 if (idx != 0)
3571 return -ENOENT;
3572
3573 survey->channel = conf->channel;
3574
3575 rt2800_register_read(rt2x00dev, CH_IDLE_STA, &idle);
3576 rt2800_register_read(rt2x00dev, CH_BUSY_STA, &busy);
3577 rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC, &busy_ext);
3578
3579 if (idle || busy) {
3580 survey->filled = SURVEY_INFO_CHANNEL_TIME |
3581 SURVEY_INFO_CHANNEL_TIME_BUSY |
3582 SURVEY_INFO_CHANNEL_TIME_EXT_BUSY;
3583
3584 survey->channel_time = (idle + busy) / 1000;
3585 survey->channel_time_busy = busy / 1000;
3586 survey->channel_time_ext_busy = busy_ext / 1000;
3587 }
3588
3589 return 0;
3590
3591}
3592EXPORT_SYMBOL_GPL(rt2800_get_survey);
3593
3505MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz"); 3594MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz");
3506MODULE_VERSION(DRV_VERSION); 3595MODULE_VERSION(DRV_VERSION);
3507MODULE_DESCRIPTION("Ralink RT2800 library"); 3596MODULE_DESCRIPTION("Ralink RT2800 library");
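The rt2800_get_survey() helper added above samples the CH_IDLE_STA, CH_BUSY_STA and CH_BUSY_STA_SEC counters and divides each by 1000, presumably converting microsecond hardware counts into the millisecond-granularity channel_time fields of struct survey_info. As a minimal sketch (not part of this patch; the helper name is hypothetical), a caller could turn the reported values into a rough channel-utilization percentage:

#include <linux/math64.h>
#include <net/cfg80211.h>

/* Hypothetical helper: derive a busy percentage from the survey data
 * filled in by rt2800_get_survey().  Assumes channel_time and
 * channel_time_busy are reported in the same unit. */
static u8 rt2800_survey_busy_percent(const struct survey_info *survey)
{
	if (!(survey->filled & SURVEY_INFO_CHANNEL_TIME) ||
	    survey->channel_time == 0)
		return 0;

	return div64_u64(survey->channel_time_busy * 100,
			 survey->channel_time);
}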
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
index 81cbc92e7857..e3c995a9dec4 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.h
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -199,5 +199,7 @@ u64 rt2800_get_tsf(struct ieee80211_hw *hw);
199int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 199int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
200 enum ieee80211_ampdu_mlme_action action, 200 enum ieee80211_ampdu_mlme_action action,
201 struct ieee80211_sta *sta, u16 tid, u16 *ssn); 201 struct ieee80211_sta *sta, u16 tid, u16 *ssn);
202int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
203 struct survey_info *survey);
202 204
203#endif /* RT2800LIB_H */ 205#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
index b26739535986..aa97971a38af 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.c
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -84,20 +84,22 @@ static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
84 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 84 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
85} 85}
86 86
87#ifdef CONFIG_RT2800PCI_SOC 87#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
88static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 88static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
89{ 89{
90 u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */ 90 void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE);
91 91
92 memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE); 92 memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
93
94 iounmap(base_addr);
93} 95}
94#else 96#else
95static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev) 97static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
96{ 98{
97} 99}
98#endif /* CONFIG_RT2800PCI_SOC */ 100#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
99 101
100#ifdef CONFIG_RT2800PCI_PCI 102#ifdef CONFIG_PCI
101static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) 103static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
102{ 104{
103 struct rt2x00_dev *rt2x00dev = eeprom->data; 105 struct rt2x00_dev *rt2x00dev = eeprom->data;
@@ -181,7 +183,78 @@ static inline int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev)
181static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) 183static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
182{ 184{
183} 185}
184#endif /* CONFIG_RT2800PCI_PCI */ 186#endif /* CONFIG_PCI */
187
188/*
189 * Queue handlers.
190 */
191static void rt2800pci_start_queue(struct data_queue *queue)
192{
193 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
194 u32 reg;
195
196 switch (queue->qid) {
197 case QID_RX:
198 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
199 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
200 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
201 break;
202 case QID_BEACON:
203 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
204 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
205 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
206 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
207 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
208 break;
209 default:
210 break;
211 }
212}
213
214static void rt2800pci_kick_queue(struct data_queue *queue)
215{
216 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
217 struct queue_entry *entry;
218
219 switch (queue->qid) {
220 case QID_AC_VO:
221 case QID_AC_VI:
222 case QID_AC_BE:
223 case QID_AC_BK:
224 entry = rt2x00queue_get_entry(queue, Q_INDEX);
225 rt2800_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), entry->entry_idx);
226 break;
227 case QID_MGMT:
228 entry = rt2x00queue_get_entry(queue, Q_INDEX);
229 rt2800_register_write(rt2x00dev, TX_CTX_IDX(5), entry->entry_idx);
230 break;
231 default:
232 break;
233 }
234}
235
236static void rt2800pci_stop_queue(struct data_queue *queue)
237{
238 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
239 u32 reg;
240
241 switch (queue->qid) {
242 case QID_RX:
243 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
244 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
245 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
246 break;
247 case QID_BEACON:
248 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
249 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
250 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
251 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
252 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
253 break;
254 default:
255 break;
256 }
257}
185 258
186/* 259/*
187 * Firmware functions 260 * Firmware functions
@@ -321,18 +394,6 @@ static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
321/* 394/*
322 * Device state switch handlers. 395 * Device state switch handlers.
323 */ 396 */
324static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
325 enum dev_state state)
326{
327 u32 reg;
328
329 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
330 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
331 (state == STATE_RADIO_RX_ON) ||
332 (state == STATE_RADIO_RX_ON_LINK));
333 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
334}
335
336static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 397static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
337 enum dev_state state) 398 enum dev_state state)
338{ 399{
@@ -442,7 +503,7 @@ static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
442 * if the device is booting and wasn't asleep it will return 503 * if the device is booting and wasn't asleep it will return
443 * failure when attempting to wakeup. 504 * failure when attempting to wakeup.
444 */ 505 */
445 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2); 506 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
446 507
447 if (state == STATE_AWAKE) { 508 if (state == STATE_AWAKE) {
448 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0); 509 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
@@ -476,12 +537,6 @@ static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
476 rt2800pci_disable_radio(rt2x00dev); 537 rt2800pci_disable_radio(rt2x00dev);
477 rt2800pci_set_state(rt2x00dev, STATE_SLEEP); 538 rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
478 break; 539 break;
479 case STATE_RADIO_RX_ON:
480 case STATE_RADIO_RX_ON_LINK:
481 case STATE_RADIO_RX_OFF:
482 case STATE_RADIO_RX_OFF_LINK:
483 rt2800pci_toggle_rx(rt2x00dev, state);
484 break;
485 case STATE_RADIO_IRQ_ON: 540 case STATE_RADIO_IRQ_ON:
486 case STATE_RADIO_IRQ_ON_ISR: 541 case STATE_RADIO_IRQ_ON_ISR:
487 case STATE_RADIO_IRQ_OFF: 542 case STATE_RADIO_IRQ_OFF:
@@ -567,41 +622,6 @@ static void rt2800pci_write_tx_desc(struct queue_entry *entry,
567} 622}
568 623
569/* 624/*
570 * TX data initialization
571 */
572static void rt2800pci_kick_tx_queue(struct data_queue *queue)
573{
574 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
575 struct queue_entry *entry = rt2x00queue_get_entry(queue, Q_INDEX);
576 unsigned int qidx;
577
578 if (queue->qid == QID_MGMT)
579 qidx = 5;
580 else
581 qidx = queue->qid;
582
583 rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), entry->entry_idx);
584}
585
586static void rt2800pci_kill_tx_queue(struct data_queue *queue)
587{
588 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
589 u32 reg;
590
591 if (queue->qid == QID_BEACON) {
592 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0);
593 return;
594 }
595
596 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
597 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (queue->qid == QID_AC_BE));
598 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (queue->qid == QID_AC_BK));
599 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (queue->qid == QID_AC_VI));
600 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (queue->qid == QID_AC_VO));
601 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
602}
603
604/*
605 * RX control handlers 625 * RX control handlers
606 */ 626 */
607static void rt2800pci_fill_rxdone(struct queue_entry *entry, 627static void rt2800pci_fill_rxdone(struct queue_entry *entry,
@@ -668,14 +688,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
668 u32 status; 688 u32 status;
669 u8 qid; 689 u8 qid;
670 690
671 while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) { 691 while (kfifo_get(&rt2x00dev->txstatus_fifo, &status)) {
672 /* Now remove the tx status from the FIFO */
673 if (kfifo_out(&rt2x00dev->txstatus_fifo, &status,
674 sizeof(status)) != sizeof(status)) {
675 WARN_ON(1);
676 break;
677 }
678
679 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE); 692 qid = rt2x00_get_field32(status, TX_STA_FIFO_PID_QUEUE);
680 if (qid >= QID_RX) { 693 if (qid >= QID_RX) {
681 /* 694 /*
@@ -683,7 +696,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
683 * this tx status. 696 * this tx status.
684 */ 697 */
685 WARNING(rt2x00dev, "Got TX status report with " 698 WARNING(rt2x00dev, "Got TX status report with "
686 "unexpected pid %u, dropping", qid); 699 "unexpected pid %u, dropping\n", qid);
687 break; 700 break;
688 } 701 }
689 702
@@ -694,7 +707,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
694 * processing here and drop the tx status 707 * processing here and drop the tx status
695 */ 708 */
696 WARNING(rt2x00dev, "Got TX status for an unavailable " 709 WARNING(rt2x00dev, "Got TX status for an unavailable "
697 "queue %u, dropping", qid); 710 "queue %u, dropping\n", qid);
698 break; 711 break;
699 } 712 }
700 713
@@ -704,7 +717,7 @@ static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
704 * and drop the tx status. 717 * and drop the tx status.
705 */ 718 */
706 WARNING(rt2x00dev, "Got TX status for an empty " 719 WARNING(rt2x00dev, "Got TX status for an empty "
707 "queue %u, dropping", qid); 720 "queue %u, dropping\n", qid);
708 break; 721 break;
709 } 722 }
710 723
@@ -777,20 +790,13 @@ static void rt2800pci_txstatus_interrupt(struct rt2x00_dev *rt2x00dev)
777 * Since we have only one producer and one consumer we don't 790 * Since we have only one producer and one consumer we don't
778 * need to lock the kfifo. 791 * need to lock the kfifo.
779 */ 792 */
780 for (i = 0; i < TX_ENTRIES; i++) { 793 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
781 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status); 794 rt2800_register_read(rt2x00dev, TX_STA_FIFO, &status);
782 795
783 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) 796 if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID))
784 break; 797 break;
785 798
786 if (kfifo_is_full(&rt2x00dev->txstatus_fifo)) { 799 if (!kfifo_put(&rt2x00dev->txstatus_fifo, &status)) {
787 WARNING(rt2x00dev, "TX status FIFO overrun,"
788 " drop tx status report.\n");
789 break;
790 }
791
792 if (kfifo_in(&rt2x00dev->txstatus_fifo, &status,
793 sizeof(status)) != sizeof(status)) {
794 WARNING(rt2x00dev, "TX status FIFO overrun," 800 WARNING(rt2x00dev, "TX status FIFO overrun,"
795 "drop tx status report.\n"); 801 "drop tx status report.\n");
796 break; 802 break;
@@ -912,6 +918,7 @@ static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
912 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags); 918 __set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
913 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags); 919 __set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
914 __set_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags); 920 __set_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags);
921 __set_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags);
915 if (!modparam_nohwcrypt) 922 if (!modparam_nohwcrypt)
916 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags); 923 __set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);
917 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags); 924 __set_bit(DRIVER_SUPPORT_LINK_TUNING, &rt2x00dev->flags);
@@ -943,6 +950,8 @@ static const struct ieee80211_ops rt2800pci_mac80211_ops = {
943 .get_tsf = rt2800_get_tsf, 950 .get_tsf = rt2800_get_tsf,
944 .rfkill_poll = rt2x00mac_rfkill_poll, 951 .rfkill_poll = rt2x00mac_rfkill_poll,
945 .ampdu_action = rt2800_ampdu_action, 952 .ampdu_action = rt2800_ampdu_action,
953 .flush = rt2x00mac_flush,
954 .get_survey = rt2800_get_survey,
946}; 955};
947 956
948static const struct rt2800_ops rt2800pci_rt2800_ops = { 957static const struct rt2800_ops rt2800pci_rt2800_ops = {
@@ -975,11 +984,12 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
975 .link_stats = rt2800_link_stats, 984 .link_stats = rt2800_link_stats,
976 .reset_tuner = rt2800_reset_tuner, 985 .reset_tuner = rt2800_reset_tuner,
977 .link_tuner = rt2800_link_tuner, 986 .link_tuner = rt2800_link_tuner,
987 .start_queue = rt2800pci_start_queue,
988 .kick_queue = rt2800pci_kick_queue,
989 .stop_queue = rt2800pci_stop_queue,
978 .write_tx_desc = rt2800pci_write_tx_desc, 990 .write_tx_desc = rt2800pci_write_tx_desc,
979 .write_tx_data = rt2800_write_tx_data, 991 .write_tx_data = rt2800_write_tx_data,
980 .write_beacon = rt2800_write_beacon, 992 .write_beacon = rt2800_write_beacon,
981 .kick_tx_queue = rt2800pci_kick_tx_queue,
982 .kill_tx_queue = rt2800pci_kill_tx_queue,
983 .fill_rxdone = rt2800pci_fill_rxdone, 993 .fill_rxdone = rt2800pci_fill_rxdone,
984 .config_shared_key = rt2800_config_shared_key, 994 .config_shared_key = rt2800_config_shared_key,
985 .config_pairwise_key = rt2800_config_pairwise_key, 995 .config_pairwise_key = rt2800_config_pairwise_key,
@@ -991,21 +1001,21 @@ static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
991}; 1001};
992 1002
993static const struct data_queue_desc rt2800pci_queue_rx = { 1003static const struct data_queue_desc rt2800pci_queue_rx = {
994 .entry_num = RX_ENTRIES, 1004 .entry_num = 128,
995 .data_size = AGGREGATION_SIZE, 1005 .data_size = AGGREGATION_SIZE,
996 .desc_size = RXD_DESC_SIZE, 1006 .desc_size = RXD_DESC_SIZE,
997 .priv_size = sizeof(struct queue_entry_priv_pci), 1007 .priv_size = sizeof(struct queue_entry_priv_pci),
998}; 1008};
999 1009
1000static const struct data_queue_desc rt2800pci_queue_tx = { 1010static const struct data_queue_desc rt2800pci_queue_tx = {
1001 .entry_num = TX_ENTRIES, 1011 .entry_num = 64,
1002 .data_size = AGGREGATION_SIZE, 1012 .data_size = AGGREGATION_SIZE,
1003 .desc_size = TXD_DESC_SIZE, 1013 .desc_size = TXD_DESC_SIZE,
1004 .priv_size = sizeof(struct queue_entry_priv_pci), 1014 .priv_size = sizeof(struct queue_entry_priv_pci),
1005}; 1015};
1006 1016
1007static const struct data_queue_desc rt2800pci_queue_bcn = { 1017static const struct data_queue_desc rt2800pci_queue_bcn = {
1008 .entry_num = 8 * BEACON_ENTRIES, 1018 .entry_num = 8,
1009 .data_size = 0, /* No DMA required for beacons */ 1019 .data_size = 0, /* No DMA required for beacons */
1010 .desc_size = TXWI_DESC_SIZE, 1020 .desc_size = TXWI_DESC_SIZE,
1011 .priv_size = sizeof(struct queue_entry_priv_pci), 1021 .priv_size = sizeof(struct queue_entry_priv_pci),
@@ -1033,12 +1043,15 @@ static const struct rt2x00_ops rt2800pci_ops = {
1033/* 1043/*
1034 * RT2800pci module information. 1044 * RT2800pci module information.
1035 */ 1045 */
1036#ifdef CONFIG_RT2800PCI_PCI 1046#ifdef CONFIG_PCI
1037static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = { 1047static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1038 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1048 { PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
1039 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1049 { PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
1040 { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1050 { PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
1041 { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1051 { PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
1052 { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
1053 { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
1054 { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
1042 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1055 { PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
1043 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1056 { PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
1044 { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1057 { PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1046,12 +1059,10 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1046 { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1059 { PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
1047 { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1060 { PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
1048 { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1061 { PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
1049 { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
1050#ifdef CONFIG_RT2800PCI_RT30XX
1051 { PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
1052 { PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
1053 { PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
1054 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1062 { PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
1063 { PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
1064#ifdef CONFIG_RT2800PCI_RT33XX
1065 { PCI_DEVICE(0x1814, 0x3390), PCI_DEVICE_DATA(&rt2800pci_ops) },
1055#endif 1066#endif
1056#ifdef CONFIG_RT2800PCI_RT35XX 1067#ifdef CONFIG_RT2800PCI_RT35XX
1057 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) }, 1068 { PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
@@ -1062,19 +1073,19 @@ static DEFINE_PCI_DEVICE_TABLE(rt2800pci_device_table) = {
1062#endif 1073#endif
1063 { 0, } 1074 { 0, }
1064}; 1075};
1065#endif /* CONFIG_RT2800PCI_PCI */ 1076#endif /* CONFIG_PCI */
1066 1077
1067MODULE_AUTHOR(DRV_PROJECT); 1078MODULE_AUTHOR(DRV_PROJECT);
1068MODULE_VERSION(DRV_VERSION); 1079MODULE_VERSION(DRV_VERSION);
1069MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver."); 1080MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
1070MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards"); 1081MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
1071#ifdef CONFIG_RT2800PCI_PCI 1082#ifdef CONFIG_PCI
1072MODULE_FIRMWARE(FIRMWARE_RT2860); 1083MODULE_FIRMWARE(FIRMWARE_RT2860);
1073MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); 1084MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
1074#endif /* CONFIG_RT2800PCI_PCI */ 1085#endif /* CONFIG_PCI */
1075MODULE_LICENSE("GPL"); 1086MODULE_LICENSE("GPL");
1076 1087
1077#ifdef CONFIG_RT2800PCI_SOC 1088#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
1078static int rt2800soc_probe(struct platform_device *pdev) 1089static int rt2800soc_probe(struct platform_device *pdev)
1079{ 1090{
1080 return rt2x00soc_probe(pdev, &rt2800pci_ops); 1091 return rt2x00soc_probe(pdev, &rt2800pci_ops);
@@ -1091,9 +1102,9 @@ static struct platform_driver rt2800soc_driver = {
1091 .suspend = rt2x00soc_suspend, 1102 .suspend = rt2x00soc_suspend,
1092 .resume = rt2x00soc_resume, 1103 .resume = rt2x00soc_resume,
1093}; 1104};
1094#endif /* CONFIG_RT2800PCI_SOC */ 1105#endif /* CONFIG_RALINK_RT288X || CONFIG_RALINK_RT305X */
1095 1106
1096#ifdef CONFIG_RT2800PCI_PCI 1107#ifdef CONFIG_PCI
1097static struct pci_driver rt2800pci_driver = { 1108static struct pci_driver rt2800pci_driver = {
1098 .name = KBUILD_MODNAME, 1109 .name = KBUILD_MODNAME,
1099 .id_table = rt2800pci_device_table, 1110 .id_table = rt2800pci_device_table,
@@ -1102,21 +1113,21 @@ static struct pci_driver rt2800pci_driver = {
1102 .suspend = rt2x00pci_suspend, 1113 .suspend = rt2x00pci_suspend,
1103 .resume = rt2x00pci_resume, 1114 .resume = rt2x00pci_resume,
1104}; 1115};
1105#endif /* CONFIG_RT2800PCI_PCI */ 1116#endif /* CONFIG_PCI */
1106 1117
1107static int __init rt2800pci_init(void) 1118static int __init rt2800pci_init(void)
1108{ 1119{
1109 int ret = 0; 1120 int ret = 0;
1110 1121
1111#ifdef CONFIG_RT2800PCI_SOC 1122#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
1112 ret = platform_driver_register(&rt2800soc_driver); 1123 ret = platform_driver_register(&rt2800soc_driver);
1113 if (ret) 1124 if (ret)
1114 return ret; 1125 return ret;
1115#endif 1126#endif
1116#ifdef CONFIG_RT2800PCI_PCI 1127#ifdef CONFIG_PCI
1117 ret = pci_register_driver(&rt2800pci_driver); 1128 ret = pci_register_driver(&rt2800pci_driver);
1118 if (ret) { 1129 if (ret) {
1119#ifdef CONFIG_RT2800PCI_SOC 1130#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
1120 platform_driver_unregister(&rt2800soc_driver); 1131 platform_driver_unregister(&rt2800soc_driver);
1121#endif 1132#endif
1122 return ret; 1133 return ret;
@@ -1128,10 +1139,10 @@ static int __init rt2800pci_init(void)
1128 1139
1129static void __exit rt2800pci_exit(void) 1140static void __exit rt2800pci_exit(void)
1130{ 1141{
1131#ifdef CONFIG_RT2800PCI_PCI 1142#ifdef CONFIG_PCI
1132 pci_unregister_driver(&rt2800pci_driver); 1143 pci_unregister_driver(&rt2800pci_driver);
1133#endif 1144#endif
1134#ifdef CONFIG_RT2800PCI_SOC 1145#if defined(CONFIG_RALINK_RT288X) || defined(CONFIG_RALINK_RT305X)
1135 platform_driver_unregister(&rt2800soc_driver); 1146 platform_driver_unregister(&rt2800soc_driver);
1136#endif 1147#endif
1137} 1148}
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
index 5a8dda9b5b5a..70e050d904c8 100644
--- a/drivers/net/wireless/rt2x00/rt2800pci.h
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -38,10 +38,10 @@
38 * Queue register offset macros 38 * Queue register offset macros
39 */ 39 */
40#define TX_QUEUE_REG_OFFSET 0x10 40#define TX_QUEUE_REG_OFFSET 0x10
41#define TX_BASE_PTR(__x) TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET) 41#define TX_BASE_PTR(__x) (TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
42#define TX_MAX_CNT(__x) TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET) 42#define TX_MAX_CNT(__x) (TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
43#define TX_CTX_IDX(__x) TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET) 43#define TX_CTX_IDX(__x) (TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
44#define TX_DTX_IDX(__x) TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET) 44#define TX_DTX_IDX(__x) (TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
45 45
46/* 46/*
47 * 8051 firmware image. 47 * 8051 firmware image.
@@ -52,8 +52,8 @@
52/* 52/*
53 * DMA descriptor defines. 53 * DMA descriptor defines.
54 */ 54 */
55#define TXD_DESC_SIZE ( 4 * sizeof(__le32) ) 55#define TXD_DESC_SIZE (4 * sizeof(__le32))
56#define RXD_DESC_SIZE ( 4 * sizeof(__le32) ) 56#define RXD_DESC_SIZE (4 * sizeof(__le32))
57 57
58/* 58/*
59 * TX descriptor format for TX, PRIO and Beacon Ring. 59 * TX descriptor format for TX, PRIO and Beacon Ring.
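The parentheses added to the TX_BASE_PTR()/TX_MAX_CNT()/TX_CTX_IDX()/TX_DTX_IDX() macros above make their expansions safe to embed in larger expressions; without them, C operator precedence can silently change the computed register offset. A minimal sketch using simplified, stand-in macro names and values:

/* Stand-in macros illustrating the precedence hazard fixed above. */
#define QUEUE_REG_OFFSET	0x10
#define REG_UNSAFE(__x)		0x0230 + ((__x) * QUEUE_REG_OFFSET)
#define REG_SAFE(__x)		(0x0230 + ((__x) * QUEUE_REG_OFFSET))

/*
 * REG_UNSAFE(1) * 2 expands to 0x0230 + (1 * 0x10) * 2 == 0x0250,
 * whereas REG_SAFE(1) * 2 == (0x0230 + 0x10) * 2 == 0x0480,
 * which is the intended result.
 */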
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 3dff56ec195a..b97a4a54ff4c 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -45,11 +45,60 @@
45/* 45/*
46 * Allow hardware encryption to be disabled. 46 * Allow hardware encryption to be disabled.
47 */ 47 */
48static int modparam_nohwcrypt = 0; 48static int modparam_nohwcrypt;
49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 49module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 50MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
51 51
52/* 52/*
53 * Queue handlers.
54 */
55static void rt2800usb_start_queue(struct data_queue *queue)
56{
57 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
58 u32 reg;
59
60 switch (queue->qid) {
61 case QID_RX:
62 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
63 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
64 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
65 break;
66 case QID_BEACON:
67 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
68 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
69 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
70 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
71 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
72 break;
73 default:
74 break;
75 }
76}
77
78static void rt2800usb_stop_queue(struct data_queue *queue)
79{
80 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
81 u32 reg;
82
83 switch (queue->qid) {
84 case QID_RX:
85 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
86 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
87 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
88 break;
89 case QID_BEACON:
90 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
91 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
92 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
93 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
94 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
95 break;
96 default:
97 break;
98 }
99}
100
101/*
53 * Firmware functions 102 * Firmware functions
54 */ 103 */
55static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) 104static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -107,18 +156,6 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
107/* 156/*
108 * Device state switch handlers. 157 * Device state switch handlers.
109 */ 158 */
110static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
111 enum dev_state state)
112{
113 u32 reg;
114
115 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
116 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
117 (state == STATE_RADIO_RX_ON) ||
118 (state == STATE_RADIO_RX_ON_LINK));
119 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
120}
121
122static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev) 159static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
123{ 160{
124 u32 reg; 161 u32 reg;
@@ -165,7 +202,8 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
165 * this limit so reduce the number to prevent errors. 202 * this limit so reduce the number to prevent errors.
166 */ 203 */
167 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT, 204 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT,
168 ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3); 205 ((rt2x00dev->ops->rx->entry_num * DATA_FRAME_SIZE)
206 / 1024) - 3);
169 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); 207 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
170 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); 208 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
171 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg); 209 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
@@ -183,9 +221,9 @@ static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
183 enum dev_state state) 221 enum dev_state state)
184{ 222{
185 if (state == STATE_AWAKE) 223 if (state == STATE_AWAKE)
186 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0); 224 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 2);
187 else 225 else
188 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2); 226 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2);
189 227
190 return 0; 228 return 0;
191} 229}
@@ -214,12 +252,6 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
214 rt2800usb_disable_radio(rt2x00dev); 252 rt2800usb_disable_radio(rt2x00dev);
215 rt2800usb_set_state(rt2x00dev, STATE_SLEEP); 253 rt2800usb_set_state(rt2x00dev, STATE_SLEEP);
216 break; 254 break;
217 case STATE_RADIO_RX_ON:
218 case STATE_RADIO_RX_ON_LINK:
219 case STATE_RADIO_RX_OFF:
220 case STATE_RADIO_RX_OFF_LINK:
221 rt2800usb_toggle_rx(rt2x00dev, state);
222 break;
223 case STATE_RADIO_IRQ_ON: 255 case STATE_RADIO_IRQ_ON:
224 case STATE_RADIO_IRQ_ON_ISR: 256 case STATE_RADIO_IRQ_ON_ISR:
225 case STATE_RADIO_IRQ_OFF: 257 case STATE_RADIO_IRQ_OFF:
@@ -245,6 +277,49 @@ static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev,
245} 277}
246 278
247/* 279/*
280 * Watchdog handlers
281 */
282static void rt2800usb_watchdog(struct rt2x00_dev *rt2x00dev)
283{
284 unsigned int i;
285 u32 reg;
286
287 rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
288 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q)) {
289 WARNING(rt2x00dev, "TX HW queue 0 timed out,"
290 " invoke forced kick\n");
291
292 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40012);
293
294 for (i = 0; i < 10; i++) {
295 udelay(10);
296 if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX0Q))
297 break;
298 }
299
300 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
301 }
302
303 rt2800_register_read(rt2x00dev, TXRXQ_PCNT, &reg);
304 if (rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q)) {
305 WARNING(rt2x00dev, "TX HW queue 1 timed out,"
306 " invoke forced kick\n");
307
308 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf4000a);
309
310 for (i = 0; i < 10; i++) {
311 udelay(10);
312 if (!rt2x00_get_field32(reg, TXRXQ_PCNT_TX1Q))
313 break;
314 }
315
316 rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);
317 }
318
319 rt2x00usb_watchdog(rt2x00dev);
320}
321
322/*
248 * TX descriptor initialization 323 * TX descriptor initialization
249 */ 324 */
250static __le32 *rt2800usb_get_txwi(struct queue_entry *entry) 325static __le32 *rt2800usb_get_txwi(struct queue_entry *entry)
@@ -266,8 +341,14 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
266 * Initialize TXINFO descriptor 341 * Initialize TXINFO descriptor
267 */ 342 */
268 rt2x00_desc_read(txi, 0, &word); 343 rt2x00_desc_read(txi, 0, &word);
344
345 /*
346 * The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is
347 * TXWI + 802.11 header + L2 pad + payload + pad,
348 * so need to decrease size of TXINFO and USB end pad.
349 */
269 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, 350 rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN,
270 entry->skb->len - TXINFO_DESC_SIZE); 351 entry->skb->len - TXINFO_DESC_SIZE - 4);
271 rt2x00_set_field32(&word, TXINFO_W0_WIV, 352 rt2x00_set_field32(&word, TXINFO_W0_WIV,
272 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); 353 !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
273 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); 354 rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2);
@@ -285,22 +366,37 @@ static void rt2800usb_write_tx_desc(struct queue_entry *entry,
285 skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE; 366 skbdesc->desc_len = TXINFO_DESC_SIZE + TXWI_DESC_SIZE;
286} 367}
287 368
288/* 369static void rt2800usb_write_tx_data(struct queue_entry *entry,
289 * TX data initialization 370 struct txentry_desc *txdesc)
290 */
291static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
292{ 371{
293 int length; 372 unsigned int len;
373 int err;
374
375 rt2800_write_tx_data(entry, txdesc);
294 376
295 /* 377 /*
296 * The length _must_ include 4 bytes padding, 378 * pad(1~3 bytes) is added after each 802.11 payload.
297 * it should always be multiple of 4, 379 * USB end pad(4 bytes) is added at each USB bulk out packet end.
298 * but it must _not_ be a multiple of the USB packet size. 380 * TX frame format is :
381 * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
382 * |<------------- tx_pkt_len ------------->|
299 */ 383 */
300 length = roundup(entry->skb->len + 4, 4); 384 len = roundup(entry->skb->len, 4) + 4;
301 length += (4 * !(length % entry->queue->usb_maxpacket)); 385 err = skb_padto(entry->skb, len);
386 if (unlikely(err)) {
387 WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n");
388 return;
389 }
302 390
303 return length; 391 entry->skb->len = len;
392}
393
394/*
395 * TX data initialization
396 */
397static int rt2800usb_get_tx_data_len(struct queue_entry *entry)
398{
399 return entry->skb->len;
304} 400}
305 401
306/* 402/*
@@ -335,14 +431,6 @@ static void rt2800usb_work_txdone(struct work_struct *work)
335 } 431 }
336} 432}
337 433
338static void rt2800usb_kill_tx_queue(struct data_queue *queue)
339{
340 if (queue->qid == QID_BEACON)
341 rt2x00usb_register_write(queue->rt2x00dev, BCN_TIME_CFG, 0);
342
343 rt2x00usb_kill_tx_queue(queue);
344}
345
346/* 434/*
347 * RX control handlers 435 * RX control handlers
348 */ 436 */
@@ -507,6 +595,8 @@ static const struct ieee80211_ops rt2800usb_mac80211_ops = {
507 .get_tsf = rt2800_get_tsf, 595 .get_tsf = rt2800_get_tsf,
508 .rfkill_poll = rt2x00mac_rfkill_poll, 596 .rfkill_poll = rt2x00mac_rfkill_poll,
509 .ampdu_action = rt2800_ampdu_action, 597 .ampdu_action = rt2800_ampdu_action,
598 .flush = rt2x00mac_flush,
599 .get_survey = rt2800_get_survey,
510}; 600};
511 601
512static const struct rt2800_ops rt2800usb_rt2800_ops = { 602static const struct rt2800_ops rt2800usb_rt2800_ops = {
@@ -535,13 +625,15 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
535 .link_stats = rt2800_link_stats, 625 .link_stats = rt2800_link_stats,
536 .reset_tuner = rt2800_reset_tuner, 626 .reset_tuner = rt2800_reset_tuner,
537 .link_tuner = rt2800_link_tuner, 627 .link_tuner = rt2800_link_tuner,
538 .watchdog = rt2x00usb_watchdog, 628 .watchdog = rt2800usb_watchdog,
629 .start_queue = rt2800usb_start_queue,
630 .kick_queue = rt2x00usb_kick_queue,
631 .stop_queue = rt2800usb_stop_queue,
632 .flush_queue = rt2x00usb_flush_queue,
539 .write_tx_desc = rt2800usb_write_tx_desc, 633 .write_tx_desc = rt2800usb_write_tx_desc,
540 .write_tx_data = rt2800_write_tx_data, 634 .write_tx_data = rt2800usb_write_tx_data,
541 .write_beacon = rt2800_write_beacon, 635 .write_beacon = rt2800_write_beacon,
542 .get_tx_data_len = rt2800usb_get_tx_data_len, 636 .get_tx_data_len = rt2800usb_get_tx_data_len,
543 .kick_tx_queue = rt2x00usb_kick_tx_queue,
544 .kill_tx_queue = rt2800usb_kill_tx_queue,
545 .fill_rxdone = rt2800usb_fill_rxdone, 637 .fill_rxdone = rt2800usb_fill_rxdone,
546 .config_shared_key = rt2800_config_shared_key, 638 .config_shared_key = rt2800_config_shared_key,
547 .config_pairwise_key = rt2800_config_pairwise_key, 639 .config_pairwise_key = rt2800_config_pairwise_key,
@@ -553,21 +645,21 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
553}; 645};
554 646
555static const struct data_queue_desc rt2800usb_queue_rx = { 647static const struct data_queue_desc rt2800usb_queue_rx = {
556 .entry_num = RX_ENTRIES, 648 .entry_num = 128,
557 .data_size = AGGREGATION_SIZE, 649 .data_size = AGGREGATION_SIZE,
558 .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE, 650 .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE,
559 .priv_size = sizeof(struct queue_entry_priv_usb), 651 .priv_size = sizeof(struct queue_entry_priv_usb),
560}; 652};
561 653
562static const struct data_queue_desc rt2800usb_queue_tx = { 654static const struct data_queue_desc rt2800usb_queue_tx = {
563 .entry_num = TX_ENTRIES, 655 .entry_num = 64,
564 .data_size = AGGREGATION_SIZE, 656 .data_size = AGGREGATION_SIZE,
565 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, 657 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
566 .priv_size = sizeof(struct queue_entry_priv_usb), 658 .priv_size = sizeof(struct queue_entry_priv_usb),
567}; 659};
568 660
569static const struct data_queue_desc rt2800usb_queue_bcn = { 661static const struct data_queue_desc rt2800usb_queue_bcn = {
570 .entry_num = 8 * BEACON_ENTRIES, 662 .entry_num = 8,
571 .data_size = MGMT_FRAME_SIZE, 663 .data_size = MGMT_FRAME_SIZE,
572 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE, 664 .desc_size = TXINFO_DESC_SIZE + TXWI_DESC_SIZE,
573 .priv_size = sizeof(struct queue_entry_priv_usb), 665 .priv_size = sizeof(struct queue_entry_priv_usb),
@@ -599,11 +691,19 @@ static struct usb_device_id rt2800usb_device_table[] = {
599 /* Abocom */ 691 /* Abocom */
600 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 692 { USB_DEVICE(0x07b8, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
601 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 693 { USB_DEVICE(0x07b8, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
694 { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
695 { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
696 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
602 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 697 { USB_DEVICE(0x1482, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
698 /* AirTies */
699 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
603 /* Allwin */ 700 /* Allwin */
604 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, 701 { USB_DEVICE(0x8516, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
605 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 702 { USB_DEVICE(0x8516, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
606 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 703 { USB_DEVICE(0x8516, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
704 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
705 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
706 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
607 /* Amit */ 707 /* Amit */
608 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) }, 708 { USB_DEVICE(0x15c5, 0x0008), USB_DEVICE_DATA(&rt2800usb_ops) },
609 /* Askey */ 709 /* Askey */
@@ -612,8 +712,13 @@ static struct usb_device_id rt2800usb_device_table[] = {
612 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) }, 712 { USB_DEVICE(0x0b05, 0x1731), USB_DEVICE_DATA(&rt2800usb_ops) },
613 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) }, 713 { USB_DEVICE(0x0b05, 0x1732), USB_DEVICE_DATA(&rt2800usb_ops) },
614 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) }, 714 { USB_DEVICE(0x0b05, 0x1742), USB_DEVICE_DATA(&rt2800usb_ops) },
715 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
615 /* AzureWave */ 716 /* AzureWave */
616 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) }, 717 { USB_DEVICE(0x13d3, 0x3247), USB_DEVICE_DATA(&rt2800usb_ops) },
718 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
719 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
720 { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
721 { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
617 /* Belkin */ 722 /* Belkin */
618 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) }, 723 { USB_DEVICE(0x050d, 0x8053), USB_DEVICE_DATA(&rt2800usb_ops) },
619 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) }, 724 { USB_DEVICE(0x050d, 0x805c), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -624,6 +729,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
624 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) }, 729 { USB_DEVICE(0x14b2, 0x3c06), USB_DEVICE_DATA(&rt2800usb_ops) },
625 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) }, 730 { USB_DEVICE(0x14b2, 0x3c07), USB_DEVICE_DATA(&rt2800usb_ops) },
626 { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 731 { USB_DEVICE(0x14b2, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
732 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
627 { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) }, 733 { USB_DEVICE(0x14b2, 0x3c23), USB_DEVICE_DATA(&rt2800usb_ops) },
628 { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) }, 734 { USB_DEVICE(0x14b2, 0x3c25), USB_DEVICE_DATA(&rt2800usb_ops) },
629 { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) }, 735 { USB_DEVICE(0x14b2, 0x3c27), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -632,17 +738,36 @@ static struct usb_device_id rt2800usb_device_table[] = {
632 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) }, 738 { USB_DEVICE(0x07aa, 0x002f), USB_DEVICE_DATA(&rt2800usb_ops) },
633 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) }, 739 { USB_DEVICE(0x07aa, 0x003c), USB_DEVICE_DATA(&rt2800usb_ops) },
634 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 740 { USB_DEVICE(0x07aa, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
741 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
635 /* D-Link */ 742 /* D-Link */
636 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) }, 743 { USB_DEVICE(0x07d1, 0x3c09), USB_DEVICE_DATA(&rt2800usb_ops) },
744 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
745 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
746 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
747 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
637 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) }, 748 { USB_DEVICE(0x07d1, 0x3c11), USB_DEVICE_DATA(&rt2800usb_ops) },
749 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
750 /* Draytek */
751 { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
638 /* Edimax */ 752 /* Edimax */
753 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
639 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) }, 754 { USB_DEVICE(0x7392, 0x7717), USB_DEVICE_DATA(&rt2800usb_ops) },
640 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) }, 755 { USB_DEVICE(0x7392, 0x7718), USB_DEVICE_DATA(&rt2800usb_ops) },
756 /* Encore */
757 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
758 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
641 /* EnGenius */ 759 /* EnGenius */
642 { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) }, 760 { USB_DEVICE(0x1740, 0x9701), USB_DEVICE_DATA(&rt2800usb_ops) },
643 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) }, 761 { USB_DEVICE(0x1740, 0x9702), USB_DEVICE_DATA(&rt2800usb_ops) },
762 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
763 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
764 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
765 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
766 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
767 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
644 /* Gigabyte */ 768 /* Gigabyte */
645 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) }, 769 { USB_DEVICE(0x1044, 0x800b), USB_DEVICE_DATA(&rt2800usb_ops) },
770 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
646 /* Hawking */ 771 /* Hawking */
647 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) }, 772 { USB_DEVICE(0x0e66, 0x0001), USB_DEVICE_DATA(&rt2800usb_ops) },
648 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) }, 773 { USB_DEVICE(0x0e66, 0x0003), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -651,6 +776,10 @@ static struct usb_device_id rt2800usb_device_table[] = {
651 { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) }, 776 { USB_DEVICE(0x0e66, 0x0013), USB_DEVICE_DATA(&rt2800usb_ops) },
652 { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) }, 777 { USB_DEVICE(0x0e66, 0x0017), USB_DEVICE_DATA(&rt2800usb_ops) },
653 { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) }, 778 { USB_DEVICE(0x0e66, 0x0018), USB_DEVICE_DATA(&rt2800usb_ops) },
779 /* I-O DATA */
780 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
781 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
782 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
654 /* Linksys */ 783 /* Linksys */
655 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) }, 784 { USB_DEVICE(0x1737, 0x0070), USB_DEVICE_DATA(&rt2800usb_ops) },
656 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) }, 785 { USB_DEVICE(0x1737, 0x0071), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -658,17 +787,44 @@ static struct usb_device_id rt2800usb_device_table[] = {
658 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) }, 787 { USB_DEVICE(0x0789, 0x0162), USB_DEVICE_DATA(&rt2800usb_ops) },
659 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) }, 788 { USB_DEVICE(0x0789, 0x0163), USB_DEVICE_DATA(&rt2800usb_ops) },
660 { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) }, 789 { USB_DEVICE(0x0789, 0x0164), USB_DEVICE_DATA(&rt2800usb_ops) },
790 { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
661 /* Motorola */ 791 /* Motorola */
662 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) }, 792 { USB_DEVICE(0x100d, 0x9031), USB_DEVICE_DATA(&rt2800usb_ops) },
663 /* MSI */ 793 /* MSI */
794 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
795 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
796 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
797 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
798 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
664 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) }, 799 { USB_DEVICE(0x0db0, 0x6899), USB_DEVICE_DATA(&rt2800usb_ops) },
800 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
801 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
802 { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
803 { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
804 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
805 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
806 { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
807 { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
808 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
809 /* Para */
810 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
811 /* Pegatron */
812 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
813 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
665 /* Philips */ 814 /* Philips */
666 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) }, 815 { USB_DEVICE(0x0471, 0x200f), USB_DEVICE_DATA(&rt2800usb_ops) },
667 /* Planex */ 816 /* Planex */
817 { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
668 { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) }, 818 { USB_DEVICE(0x2019, 0xed06), USB_DEVICE_DATA(&rt2800usb_ops) },
819 /* Quanta */
820 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
669 /* Ralink */ 821 /* Ralink */
822 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) },
670 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) }, 823 { USB_DEVICE(0x148f, 0x2770), USB_DEVICE_DATA(&rt2800usb_ops) },
671 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) }, 824 { USB_DEVICE(0x148f, 0x2870), USB_DEVICE_DATA(&rt2800usb_ops) },
825 { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
826 { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
827 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
672 /* Samsung */ 828 /* Samsung */
673 { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) }, 829 { USB_DEVICE(0x04e8, 0x2018), USB_DEVICE_DATA(&rt2800usb_ops) },
674 /* Siemens */ 830 /* Siemens */
@@ -681,13 +837,22 @@ static struct usb_device_id rt2800usb_device_table[] = {
681 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) }, 837 { USB_DEVICE(0x0df6, 0x0039), USB_DEVICE_DATA(&rt2800usb_ops) },
682 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) }, 838 { USB_DEVICE(0x0df6, 0x003b), USB_DEVICE_DATA(&rt2800usb_ops) },
683 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) }, 839 { USB_DEVICE(0x0df6, 0x003d), USB_DEVICE_DATA(&rt2800usb_ops) },
840 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) },
684 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) }, 841 { USB_DEVICE(0x0df6, 0x003f), USB_DEVICE_DATA(&rt2800usb_ops) },
842 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
843 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
844 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
845 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
685 /* SMC */ 846 /* SMC */
686 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) }, 847 { USB_DEVICE(0x083a, 0x6618), USB_DEVICE_DATA(&rt2800usb_ops) },
848 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
687 { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) }, 849 { USB_DEVICE(0x083a, 0x7512), USB_DEVICE_DATA(&rt2800usb_ops) },
688 { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) }, 850 { USB_DEVICE(0x083a, 0x7522), USB_DEVICE_DATA(&rt2800usb_ops) },
689 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) }, 851 { USB_DEVICE(0x083a, 0x8522), USB_DEVICE_DATA(&rt2800usb_ops) },
690 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) }, 852 { USB_DEVICE(0x083a, 0xa618), USB_DEVICE_DATA(&rt2800usb_ops) },
853 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
854 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
855 { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
691 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) }, 856 { USB_DEVICE(0x083a, 0xb522), USB_DEVICE_DATA(&rt2800usb_ops) },
692 /* Sparklan */ 857 /* Sparklan */
693 { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) }, 858 { USB_DEVICE(0x15a9, 0x0006), USB_DEVICE_DATA(&rt2800usb_ops) },
@@ -701,101 +866,16 @@ static struct usb_device_id rt2800usb_device_table[] = {
701 /* Zinwell */ 866 /* Zinwell */
702 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) }, 867 { USB_DEVICE(0x5a57, 0x0280), USB_DEVICE_DATA(&rt2800usb_ops) },
703 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) }, 868 { USB_DEVICE(0x5a57, 0x0282), USB_DEVICE_DATA(&rt2800usb_ops) },
869 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
870 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
704 /* Zyxel */ 871 /* Zyxel */
705 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) }, 872 { USB_DEVICE(0x0586, 0x3416), USB_DEVICE_DATA(&rt2800usb_ops) },
706#ifdef CONFIG_RT2800USB_RT30XX 873#ifdef CONFIG_RT2800USB_RT33XX
707 /* Abocom */
708 { USB_DEVICE(0x07b8, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
709 { USB_DEVICE(0x07b8, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
710 { USB_DEVICE(0x07b8, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
711 /* AirTies */
712 { USB_DEVICE(0x1eda, 0x2310), USB_DEVICE_DATA(&rt2800usb_ops) },
713 /* Allwin */
714 { USB_DEVICE(0x8516, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) },
715 { USB_DEVICE(0x8516, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
716 { USB_DEVICE(0x8516, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
717 /* ASUS */
718 { USB_DEVICE(0x0b05, 0x1784), USB_DEVICE_DATA(&rt2800usb_ops) },
719 /* AzureWave */
720 { USB_DEVICE(0x13d3, 0x3273), USB_DEVICE_DATA(&rt2800usb_ops) },
721 { USB_DEVICE(0x13d3, 0x3305), USB_DEVICE_DATA(&rt2800usb_ops) },
722 { USB_DEVICE(0x13d3, 0x3307), USB_DEVICE_DATA(&rt2800usb_ops) },
723 { USB_DEVICE(0x13d3, 0x3321), USB_DEVICE_DATA(&rt2800usb_ops) },
724 /* Conceptronic */
725 { USB_DEVICE(0x14b2, 0x3c12), USB_DEVICE_DATA(&rt2800usb_ops) },
726 /* Corega */
727 { USB_DEVICE(0x18c5, 0x0012), USB_DEVICE_DATA(&rt2800usb_ops) },
728 /* D-Link */
729 { USB_DEVICE(0x07d1, 0x3c0a), USB_DEVICE_DATA(&rt2800usb_ops) },
730 { USB_DEVICE(0x07d1, 0x3c0d), USB_DEVICE_DATA(&rt2800usb_ops) },
731 { USB_DEVICE(0x07d1, 0x3c0e), USB_DEVICE_DATA(&rt2800usb_ops) },
732 { USB_DEVICE(0x07d1, 0x3c0f), USB_DEVICE_DATA(&rt2800usb_ops) },
733 { USB_DEVICE(0x07d1, 0x3c16), USB_DEVICE_DATA(&rt2800usb_ops) },
734 /* Draytek */
735 { USB_DEVICE(0x07fa, 0x7712), USB_DEVICE_DATA(&rt2800usb_ops) },
736 /* Edimax */
737 { USB_DEVICE(0x7392, 0x7711), USB_DEVICE_DATA(&rt2800usb_ops) },
738 /* Encore */
739 { USB_DEVICE(0x203d, 0x1480), USB_DEVICE_DATA(&rt2800usb_ops) },
740 { USB_DEVICE(0x203d, 0x14a9), USB_DEVICE_DATA(&rt2800usb_ops) },
741 /* EnGenius */
742 { USB_DEVICE(0x1740, 0x9703), USB_DEVICE_DATA(&rt2800usb_ops) },
743 { USB_DEVICE(0x1740, 0x9705), USB_DEVICE_DATA(&rt2800usb_ops) },
744 { USB_DEVICE(0x1740, 0x9706), USB_DEVICE_DATA(&rt2800usb_ops) },
745 { USB_DEVICE(0x1740, 0x9707), USB_DEVICE_DATA(&rt2800usb_ops) },
746 { USB_DEVICE(0x1740, 0x9708), USB_DEVICE_DATA(&rt2800usb_ops) },
747 { USB_DEVICE(0x1740, 0x9709), USB_DEVICE_DATA(&rt2800usb_ops) },
748 /* Gigabyte */
749 { USB_DEVICE(0x1044, 0x800d), USB_DEVICE_DATA(&rt2800usb_ops) },
750 /* I-O DATA */
751 { USB_DEVICE(0x04bb, 0x0945), USB_DEVICE_DATA(&rt2800usb_ops) },
752 { USB_DEVICE(0x04bb, 0x0947), USB_DEVICE_DATA(&rt2800usb_ops) },
753 { USB_DEVICE(0x04bb, 0x0948), USB_DEVICE_DATA(&rt2800usb_ops) },
754 /* Logitec */
755 { USB_DEVICE(0x0789, 0x0166), USB_DEVICE_DATA(&rt2800usb_ops) },
756 /* MSI */
757 { USB_DEVICE(0x0db0, 0x3820), USB_DEVICE_DATA(&rt2800usb_ops) },
758 { USB_DEVICE(0x0db0, 0x3821), USB_DEVICE_DATA(&rt2800usb_ops) },
759 { USB_DEVICE(0x0db0, 0x3822), USB_DEVICE_DATA(&rt2800usb_ops) },
760 { USB_DEVICE(0x0db0, 0x3870), USB_DEVICE_DATA(&rt2800usb_ops) },
761 { USB_DEVICE(0x0db0, 0x3871), USB_DEVICE_DATA(&rt2800usb_ops) },
762 { USB_DEVICE(0x0db0, 0x821a), USB_DEVICE_DATA(&rt2800usb_ops) },
763 { USB_DEVICE(0x0db0, 0x822a), USB_DEVICE_DATA(&rt2800usb_ops) },
764 { USB_DEVICE(0x0db0, 0x822b), USB_DEVICE_DATA(&rt2800usb_ops) },
765 { USB_DEVICE(0x0db0, 0x822c), USB_DEVICE_DATA(&rt2800usb_ops) },
766 { USB_DEVICE(0x0db0, 0x870a), USB_DEVICE_DATA(&rt2800usb_ops) },
767 { USB_DEVICE(0x0db0, 0x871a), USB_DEVICE_DATA(&rt2800usb_ops) },
768 { USB_DEVICE(0x0db0, 0x871b), USB_DEVICE_DATA(&rt2800usb_ops) },
769 { USB_DEVICE(0x0db0, 0x871c), USB_DEVICE_DATA(&rt2800usb_ops) },
770 { USB_DEVICE(0x0db0, 0x899a), USB_DEVICE_DATA(&rt2800usb_ops) },
771 /* Para */
772 { USB_DEVICE(0x20b8, 0x8888), USB_DEVICE_DATA(&rt2800usb_ops) },
773 /* Pegatron */
774 { USB_DEVICE(0x1d4d, 0x000c), USB_DEVICE_DATA(&rt2800usb_ops) },
775 { USB_DEVICE(0x1d4d, 0x000e), USB_DEVICE_DATA(&rt2800usb_ops) },
776 /* Planex */
777 { USB_DEVICE(0x2019, 0xab25), USB_DEVICE_DATA(&rt2800usb_ops) },
778 /* Quanta */
779 { USB_DEVICE(0x1a32, 0x0304), USB_DEVICE_DATA(&rt2800usb_ops) },
780 /* Ralink */ 874 /* Ralink */
781 { USB_DEVICE(0x148f, 0x2070), USB_DEVICE_DATA(&rt2800usb_ops) }, 875 { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
782 { USB_DEVICE(0x148f, 0x3070), USB_DEVICE_DATA(&rt2800usb_ops) }, 876 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
783 { USB_DEVICE(0x148f, 0x3071), USB_DEVICE_DATA(&rt2800usb_ops) },
784 { USB_DEVICE(0x148f, 0x3072), USB_DEVICE_DATA(&rt2800usb_ops) },
785 /* Sitecom */ 877 /* Sitecom */
786 { USB_DEVICE(0x0df6, 0x003e), USB_DEVICE_DATA(&rt2800usb_ops) }, 878 { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
787 { USB_DEVICE(0x0df6, 0x0040), USB_DEVICE_DATA(&rt2800usb_ops) },
788 { USB_DEVICE(0x0df6, 0x0042), USB_DEVICE_DATA(&rt2800usb_ops) },
789 { USB_DEVICE(0x0df6, 0x0047), USB_DEVICE_DATA(&rt2800usb_ops) },
790 { USB_DEVICE(0x0df6, 0x0048), USB_DEVICE_DATA(&rt2800usb_ops) },
791 /* SMC */
792 { USB_DEVICE(0x083a, 0x7511), USB_DEVICE_DATA(&rt2800usb_ops) },
793 { USB_DEVICE(0x083a, 0xa701), USB_DEVICE_DATA(&rt2800usb_ops) },
794 { USB_DEVICE(0x083a, 0xa702), USB_DEVICE_DATA(&rt2800usb_ops) },
795 { USB_DEVICE(0x083a, 0xa703), USB_DEVICE_DATA(&rt2800usb_ops) },
796 /* Zinwell */
797 { USB_DEVICE(0x5a57, 0x0283), USB_DEVICE_DATA(&rt2800usb_ops) },
798 { USB_DEVICE(0x5a57, 0x5257), USB_DEVICE_DATA(&rt2800usb_ops) },
799#endif 879#endif
800#ifdef CONFIG_RT2800USB_RT35XX 880#ifdef CONFIG_RT2800USB_RT35XX
801 /* Allwin */ 881 /* Allwin */
@@ -809,12 +889,9 @@ static struct usb_device_id rt2800usb_device_table[] = {
809 /* I-O DATA */ 889 /* I-O DATA */
810 { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) }, 890 { USB_DEVICE(0x04bb, 0x0944), USB_DEVICE_DATA(&rt2800usb_ops) },
811 /* Ralink */ 891 /* Ralink */
812 { USB_DEVICE(0x148f, 0x3370), USB_DEVICE_DATA(&rt2800usb_ops) },
813 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) }, 892 { USB_DEVICE(0x148f, 0x3572), USB_DEVICE_DATA(&rt2800usb_ops) },
814 { USB_DEVICE(0x148f, 0x8070), USB_DEVICE_DATA(&rt2800usb_ops) },
815 /* Sitecom */ 893 /* Sitecom */
816 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) }, 894 { USB_DEVICE(0x0df6, 0x0041), USB_DEVICE_DATA(&rt2800usb_ops) },
817 { USB_DEVICE(0x0df6, 0x0050), USB_DEVICE_DATA(&rt2800usb_ops) },
818 /* Zinwell */ 895 /* Zinwell */
819 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) }, 896 { USB_DEVICE(0x5a57, 0x0284), USB_DEVICE_DATA(&rt2800usb_ops) },
820#endif 897#endif
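
For orientation, every entry added above follows one fixed pattern. The sketch below is purely illustrative and not part of the patch: the vendor/product IDs and the table name are placeholders, showing only how such a table is declared and exported to the USB core.

/* Illustrative sketch only -- placeholder IDs, not devices touched by this patch. */
static struct usb_device_id example_device_table[] = {
	/* Hypothetical vendor */
	{ USB_DEVICE(0x1234, 0xabcd), USB_DEVICE_DATA(&rt2800usb_ops) },
	{ 0, }	/* terminating entry */
};
MODULE_DEVICE_TABLE(usb, example_device_table);
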
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 0722badccf86..671ea3592610 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -40,8 +40,8 @@
40/* 40/*
41 * DMA descriptor defines. 41 * DMA descriptor defines.
42 */ 42 */
43#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 43#define TXINFO_DESC_SIZE (1 * sizeof(__le32))
44#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 44#define RXINFO_DESC_SIZE (1 * sizeof(__le32))
45 45
46/* 46/*
47 * TX Info structure 47 * TX Info structure
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 94fe589acfaa..84aaf393da43 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -66,7 +66,7 @@
66 66
67#ifdef CONFIG_RT2X00_DEBUG 67#ifdef CONFIG_RT2X00_DEBUG
68#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ 68#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \
69 DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, ##__args); 69 DEBUG_PRINTK_MSG(__dev, __kernlvl, __lvl, __msg, ##__args)
70#else 70#else
71#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \ 71#define DEBUG_PRINTK(__dev, __kernlvl, __lvl, __msg, __args...) \
72 do { } while (0) 72 do { } while (0)
@@ -347,6 +347,10 @@ struct link {
347 struct delayed_work watchdog_work; 347 struct delayed_work watchdog_work;
348}; 348};
349 349
350enum rt2x00_delayed_flags {
351 DELAYED_UPDATE_BEACON,
352};
353
350/* 354/*
351 * Interface structure 355 * Interface structure
352 * Per interface configuration details, this structure 356 * Per interface configuration details, this structure
@@ -354,22 +358,6 @@ struct link {
354 */ 358 */
355struct rt2x00_intf { 359struct rt2x00_intf {
356 /* 360 /*
357 * All fields within the rt2x00_intf structure
358 * must be protected with a spinlock.
359 */
360 spinlock_t lock;
361
362 /*
363 * MAC of the device.
364 */
365 u8 mac[ETH_ALEN];
366
367 /*
368 * BBSID of the AP to associate with.
369 */
370 u8 bssid[ETH_ALEN];
371
372 /*
373 * beacon->skb must be protected with the mutex. 361 * beacon->skb must be protected with the mutex.
374 */ 362 */
375 struct mutex beacon_skb_mutex; 363 struct mutex beacon_skb_mutex;
@@ -384,8 +372,7 @@ struct rt2x00_intf {
384 /* 372 /*
385 * Actions that needed rescheduling. 373 * Actions that needed rescheduling.
386 */ 374 */
387 unsigned int delayed_flags; 375 unsigned long delayed_flags;
388#define DELAYED_UPDATE_BEACON 0x00000001
389 376
390 /* 377 /*
391 * Software sequence counter, this is only required 378 * Software sequence counter, this is only required
@@ -567,7 +554,15 @@ struct rt2x00lib_ops {
567 struct link_qual *qual); 554 struct link_qual *qual);
568 void (*link_tuner) (struct rt2x00_dev *rt2x00dev, 555 void (*link_tuner) (struct rt2x00_dev *rt2x00dev,
569 struct link_qual *qual, const u32 count); 556 struct link_qual *qual, const u32 count);
557
558 /*
559 * Data queue handlers.
560 */
570 void (*watchdog) (struct rt2x00_dev *rt2x00dev); 561 void (*watchdog) (struct rt2x00_dev *rt2x00dev);
562 void (*start_queue) (struct data_queue *queue);
563 void (*kick_queue) (struct data_queue *queue);
564 void (*stop_queue) (struct data_queue *queue);
565 void (*flush_queue) (struct data_queue *queue);
571 566
572 /* 567 /*
573 * TX control handlers 568 * TX control handlers
@@ -579,8 +574,6 @@ struct rt2x00lib_ops {
579 void (*write_beacon) (struct queue_entry *entry, 574 void (*write_beacon) (struct queue_entry *entry,
580 struct txentry_desc *txdesc); 575 struct txentry_desc *txdesc);
581 int (*get_tx_data_len) (struct queue_entry *entry); 576 int (*get_tx_data_len) (struct queue_entry *entry);
582 void (*kick_tx_queue) (struct data_queue *queue);
583 void (*kill_tx_queue) (struct data_queue *queue);
584 577
585 /* 578 /*
586 * RX control handlers 579 * RX control handlers
@@ -664,6 +657,7 @@ enum rt2x00_flags {
664 DRIVER_REQUIRE_COPY_IV, 657 DRIVER_REQUIRE_COPY_IV,
665 DRIVER_REQUIRE_L2PAD, 658 DRIVER_REQUIRE_L2PAD,
666 DRIVER_REQUIRE_TXSTATUS_FIFO, 659 DRIVER_REQUIRE_TXSTATUS_FIFO,
660 DRIVER_REQUIRE_TASKLET_CONTEXT,
667 661
668 /* 662 /*
669 * Driver features 663 * Driver features
@@ -901,7 +895,7 @@ struct rt2x00_dev {
901 /* 895 /*
902 * FIFO for storing tx status reports between isr and tasklet. 896 * FIFO for storing tx status reports between isr and tasklet.
903 */ 897 */
904 struct kfifo txstatus_fifo; 898 DECLARE_KFIFO_PTR(txstatus_fifo, u32);
905 899
906 /* 900 /*
907 * Tasklet for processing tx status reports (rt2800pci). 901 * Tasklet for processing tx status reports (rt2800pci).
@@ -915,7 +909,7 @@ struct rt2x00_dev {
915 * in those cases REGISTER_BUSY_COUNT attempts should be 909 * in those cases REGISTER_BUSY_COUNT attempts should be
916 * taken with a REGISTER_BUSY_DELAY interval. 910 * taken with a REGISTER_BUSY_DELAY interval.
917 */ 911 */
918#define REGISTER_BUSY_COUNT 5 912#define REGISTER_BUSY_COUNT 100
919#define REGISTER_BUSY_DELAY 100 913#define REGISTER_BUSY_DELAY 100
920 914
921/* 915/*
@@ -1067,6 +1061,78 @@ struct data_queue *rt2x00queue_get_queue(struct rt2x00_dev *rt2x00dev,
1067struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, 1061struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
1068 enum queue_index index); 1062 enum queue_index index);
1069 1063
1064/**
1065 * rt2x00queue_pause_queue - Pause a data queue
1066 * @queue: Pointer to &struct data_queue.
1067 *
1068 * This function will pause the data queue locally, preventing
 1069 * new frames from being added to the queue (while the hardware is
1070 * still allowed to run).
1071 */
1072void rt2x00queue_pause_queue(struct data_queue *queue);
1073
1074/**
 1075 * rt2x00queue_unpause_queue - Unpause a data queue
1076 * @queue: Pointer to &struct data_queue.
1077 *
1078 * This function will unpause the data queue locally, allowing
1079 * new frames to be added to the queue again.
1080 */
1081void rt2x00queue_unpause_queue(struct data_queue *queue);
1082
1083/**
1084 * rt2x00queue_start_queue - Start a data queue
1085 * @queue: Pointer to &struct data_queue.
1086 *
1087 * This function will start handling all pending frames in the queue.
1088 */
1089void rt2x00queue_start_queue(struct data_queue *queue);
1090
1091/**
1092 * rt2x00queue_stop_queue - Halt a data queue
1093 * @queue: Pointer to &struct data_queue.
1094 *
1095 * This function will stop all pending frames in the queue.
1096 */
1097void rt2x00queue_stop_queue(struct data_queue *queue);
1098
1099/**
1100 * rt2x00queue_flush_queue - Flush a data queue
1101 * @queue: Pointer to &struct data_queue.
1102 * @drop: True to drop all pending frames.
1103 *
1104 * This function will flush the queue. After this call
 1105 * the queue is guaranteed to be empty.
1106 */
1107void rt2x00queue_flush_queue(struct data_queue *queue, bool drop);
1108
1109/**
1110 * rt2x00queue_start_queues - Start all data queues
1111 * @rt2x00dev: Pointer to &struct rt2x00_dev.
1112 *
1113 * This function will loop through all available queues to start them
1114 */
1115void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev);
1116
1117/**
1118 * rt2x00queue_stop_queues - Halt all data queues
1119 * @rt2x00dev: Pointer to &struct rt2x00_dev.
1120 *
1121 * This function will loop through all available queues to stop
1122 * any pending frames.
1123 */
1124void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev);
1125
1126/**
1127 * rt2x00queue_flush_queues - Flush all data queues
1128 * @rt2x00dev: Pointer to &struct rt2x00_dev.
1129 * @drop: True to drop all pending frames.
1130 *
1131 * This function will loop through all available queues to flush
1132 * any pending frames.
1133 */
1134void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop);
1135
1070/* 1136/*
1071 * Debugfs handlers. 1137 * Debugfs handlers.
1072 */ 1138 */
@@ -1092,6 +1158,7 @@ static inline void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
1092 */ 1158 */
1093void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev); 1159void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev);
1094void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev); 1160void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev);
1161void rt2x00lib_dmastart(struct queue_entry *entry);
1095void rt2x00lib_dmadone(struct queue_entry *entry); 1162void rt2x00lib_dmadone(struct queue_entry *entry);
1096void rt2x00lib_txdone(struct queue_entry *entry, 1163void rt2x00lib_txdone(struct queue_entry *entry,
1097 struct txdone_entry_desc *txdesc); 1164 struct txdone_entry_desc *txdesc);
@@ -1133,6 +1200,7 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
1133int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue, 1200int rt2x00mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
1134 const struct ieee80211_tx_queue_params *params); 1201 const struct ieee80211_tx_queue_params *params);
1135void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw); 1202void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw);
1203void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop);
1136 1204
1137/* 1205/*
1138 * Driver allocation handlers. 1206 * Driver allocation handlers.
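
To make the intent of the queue helpers declared above concrete, here is a minimal sketch, not part of the patch, of how a shutdown-style path could drive them. It mirrors the rt2x00lib_disable_radio() change further down in this diff; the function name and the include are assumptions.

#include "rt2x00.h"	/* assumed: provides struct rt2x00_dev and the queue helpers */

/* Sketch only: stop and drain every data queue, as a radio-off path would. */
static void example_queues_quiesce(struct rt2x00_dev *rt2x00dev)
{
	/* Stop handling new frames on all data queues. */
	rt2x00queue_stop_queues(rt2x00dev);

	/* Drop whatever is still pending so the queues end up empty. */
	rt2x00queue_flush_queues(rt2x00dev, true);
}
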
diff --git a/drivers/net/wireless/rt2x00/rt2x00config.c b/drivers/net/wireless/rt2x00/rt2x00config.c
index 54ffb5aeb34e..e7f67d5eda52 100644
--- a/drivers/net/wireless/rt2x00/rt2x00config.c
+++ b/drivers/net/wireless/rt2x00/rt2x00config.c
@@ -62,13 +62,13 @@ void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev,
62 * This will prevent the device being confused when it wants 62 * This will prevent the device being confused when it wants
 63 * to ACK frames or considers itself associated. 63 * to ACK frames or considers itself associated.
64 */ 64 */
65 memset(&conf.mac, 0, sizeof(conf.mac)); 65 memset(conf.mac, 0, sizeof(conf.mac));
66 if (mac) 66 if (mac)
67 memcpy(&conf.mac, mac, ETH_ALEN); 67 memcpy(conf.mac, mac, ETH_ALEN);
68 68
69 memset(&conf.bssid, 0, sizeof(conf.bssid)); 69 memset(conf.bssid, 0, sizeof(conf.bssid));
70 if (bssid) 70 if (bssid)
71 memcpy(&conf.bssid, bssid, ETH_ALEN); 71 memcpy(conf.bssid, bssid, ETH_ALEN);
72 72
73 flags |= CONFIG_UPDATE_TYPE; 73 flags |= CONFIG_UPDATE_TYPE;
74 if (mac || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)) 74 if (mac || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count))
@@ -133,7 +133,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
133 */ 133 */
134 if (!(ant->flags & ANTENNA_RX_DIVERSITY)) 134 if (!(ant->flags & ANTENNA_RX_DIVERSITY))
135 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx); 135 config.rx = rt2x00lib_config_antenna_check(config.rx, def->rx);
136 else if(config.rx == ANTENNA_SW_DIVERSITY) 136 else if (config.rx == ANTENNA_SW_DIVERSITY)
137 config.rx = active->rx; 137 config.rx = active->rx;
138 138
139 if (!(ant->flags & ANTENNA_TX_DIVERSITY)) 139 if (!(ant->flags & ANTENNA_TX_DIVERSITY))
@@ -146,7 +146,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
146 * else the changes will be ignored by the device. 146 * else the changes will be ignored by the device.
147 */ 147 */
148 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 148 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
149 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); 149 rt2x00queue_stop_queue(rt2x00dev->rx);
150 150
151 /* 151 /*
152 * Write new antenna setup to device and reset the link tuner. 152 * Write new antenna setup to device and reset the link tuner.
@@ -160,7 +160,7 @@ void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev,
160 memcpy(active, &config, sizeof(config)); 160 memcpy(active, &config, sizeof(config));
161 161
162 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 162 if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
163 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); 163 rt2x00queue_start_queue(rt2x00dev->rx);
164} 164}
165 165
166void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, 166void rt2x00lib_config(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/net/wireless/rt2x00/rt2x00debug.c b/drivers/net/wireless/rt2x00/rt2x00debug.c
index fcdb6b0dc40f..c92db3264741 100644
--- a/drivers/net/wireless/rt2x00/rt2x00debug.c
+++ b/drivers/net/wireless/rt2x00/rt2x00debug.c
@@ -162,11 +162,11 @@ void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev,
162 struct timeval timestamp; 162 struct timeval timestamp;
163 u32 data_len; 163 u32 data_len;
164 164
165 do_gettimeofday(&timestamp); 165 if (likely(!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)))
166
167 if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags))
168 return; 166 return;
169 167
168 do_gettimeofday(&timestamp);
169
170 if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) { 170 if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) {
171 DEBUG(rt2x00dev, "txrx dump queue length exceeded.\n"); 171 DEBUG(rt2x00dev, "txrx dump queue length exceeded.\n");
172 return; 172 return;
@@ -339,18 +339,19 @@ static ssize_t rt2x00debug_read_queue_stats(struct file *file,
339 return -ENOMEM; 339 return -ENOMEM;
340 340
341 temp = data + 341 temp = data +
342 sprintf(data, "qid\tcount\tlimit\tlength\tindex\tdma done\tdone\n"); 342 sprintf(data, "qid\tflags\t\tcount\tlimit\tlength\tindex\tdma done\tdone\n");
343 343
344 queue_for_each(intf->rt2x00dev, queue) { 344 queue_for_each(intf->rt2x00dev, queue) {
345 spin_lock_irqsave(&queue->lock, irqflags); 345 spin_lock_irqsave(&queue->index_lock, irqflags);
346 346
347 temp += sprintf(temp, "%d\t%d\t%d\t%d\t%d\t%d\t%d\n", queue->qid, 347 temp += sprintf(temp, "%d\t0x%.8x\t%d\t%d\t%d\t%d\t%d\t\t%d\n",
348 queue->qid, (unsigned int)queue->flags,
348 queue->count, queue->limit, queue->length, 349 queue->count, queue->limit, queue->length,
349 queue->index[Q_INDEX], 350 queue->index[Q_INDEX],
350 queue->index[Q_INDEX_DMA_DONE], 351 queue->index[Q_INDEX_DMA_DONE],
351 queue->index[Q_INDEX_DONE]); 352 queue->index[Q_INDEX_DONE]);
352 353
353 spin_unlock_irqrestore(&queue->lock, irqflags); 354 spin_unlock_irqrestore(&queue->index_lock, irqflags);
354 } 355 }
355 356
356 size = strlen(data); 357 size = strlen(data);
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 5ba79b935f09..9597a03242cc 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -66,20 +66,16 @@ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev)
66 set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags); 66 set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags);
67 67
68 /* 68 /*
69 * Enable RX. 69 * Enable queues.
70 */ 70 */
71 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON); 71 rt2x00queue_start_queues(rt2x00dev);
72 rt2x00link_start_tuner(rt2x00dev);
72 73
73 /* 74 /*
74 * Start watchdog monitoring. 75 * Start watchdog monitoring.
75 */ 76 */
76 rt2x00link_start_watchdog(rt2x00dev); 77 rt2x00link_start_watchdog(rt2x00dev);
77 78
78 /*
79 * Start the TX queues.
80 */
81 ieee80211_wake_queues(rt2x00dev->hw);
82
83 return 0; 79 return 0;
84} 80}
85 81
@@ -89,20 +85,16 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
89 return; 85 return;
90 86
91 /* 87 /*
92 * Stop the TX queues in mac80211.
93 */
94 ieee80211_stop_queues(rt2x00dev->hw);
95 rt2x00queue_stop_queues(rt2x00dev);
96
97 /*
98 * Stop watchdog monitoring. 88 * Stop watchdog monitoring.
99 */ 89 */
100 rt2x00link_stop_watchdog(rt2x00dev); 90 rt2x00link_stop_watchdog(rt2x00dev);
101 91
102 /* 92 /*
103 * Disable RX. 93 * Stop all queues
104 */ 94 */
105 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF); 95 rt2x00link_stop_tuner(rt2x00dev);
96 rt2x00queue_stop_queues(rt2x00dev);
97 rt2x00queue_flush_queues(rt2x00dev, true);
106 98
107 /* 99 /*
108 * Disable radio. 100 * Disable radio.
@@ -113,41 +105,11 @@ void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev)
113 rt2x00leds_led_radio(rt2x00dev, false); 105 rt2x00leds_led_radio(rt2x00dev, false);
114} 106}
115 107
116void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state)
117{
118 /*
119 * When we are disabling the RX, we should also stop the link tuner.
120 */
121 if (state == STATE_RADIO_RX_OFF)
122 rt2x00link_stop_tuner(rt2x00dev);
123
124 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
125
126 /*
127 * When we are enabling the RX, we should also start the link tuner.
128 */
129 if (state == STATE_RADIO_RX_ON)
130 rt2x00link_start_tuner(rt2x00dev);
131}
132
133static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac, 108static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
134 struct ieee80211_vif *vif) 109 struct ieee80211_vif *vif)
135{ 110{
136 struct rt2x00_dev *rt2x00dev = data; 111 struct rt2x00_dev *rt2x00dev = data;
137 struct rt2x00_intf *intf = vif_to_intf(vif); 112 struct rt2x00_intf *intf = vif_to_intf(vif);
138 int delayed_flags;
139
140 /*
141 * Copy all data we need during this action under the protection
142 * of a spinlock. Otherwise race conditions might occur which results
143 * into an invalid configuration.
144 */
145 spin_lock(&intf->lock);
146
147 delayed_flags = intf->delayed_flags;
148 intf->delayed_flags = 0;
149
150 spin_unlock(&intf->lock);
151 113
152 /* 114 /*
153 * It is possible the radio was disabled while the work had been 115 * It is possible the radio was disabled while the work had been
@@ -158,7 +120,7 @@ static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac,
158 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) 120 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
159 return; 121 return;
160 122
161 if (delayed_flags & DELAYED_UPDATE_BEACON) 123 if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
162 rt2x00queue_update_beacon(rt2x00dev, vif, true); 124 rt2x00queue_update_beacon(rt2x00dev, vif, true);
163} 125}
164 126
@@ -251,8 +213,16 @@ void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev)
251} 213}
252EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); 214EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt);
253 215
216void rt2x00lib_dmastart(struct queue_entry *entry)
217{
218 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
219 rt2x00queue_index_inc(entry->queue, Q_INDEX);
220}
221EXPORT_SYMBOL_GPL(rt2x00lib_dmastart);
222
254void rt2x00lib_dmadone(struct queue_entry *entry) 223void rt2x00lib_dmadone(struct queue_entry *entry)
255{ 224{
225 set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags);
256 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); 226 clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
257 rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE); 227 rt2x00queue_index_inc(entry->queue, Q_INDEX_DMA_DONE);
258} 228}
@@ -264,11 +234,9 @@ void rt2x00lib_txdone(struct queue_entry *entry,
264 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 234 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
265 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); 235 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb);
266 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); 236 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
267 enum data_queue_qid qid = skb_get_queue_mapping(entry->skb); 237 unsigned int header_length, i;
268 unsigned int header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
269 u8 rate_idx, rate_flags, retry_rates; 238 u8 rate_idx, rate_flags, retry_rates;
270 u8 skbdesc_flags = skbdesc->flags; 239 u8 skbdesc_flags = skbdesc->flags;
271 unsigned int i;
272 bool success; 240 bool success;
273 241
274 /* 242 /*
@@ -287,6 +255,11 @@ void rt2x00lib_txdone(struct queue_entry *entry,
287 skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; 255 skbdesc->flags &= ~SKBDESC_DESC_IN_SKB;
288 256
289 /* 257 /*
258 * Determine the length of 802.11 header.
259 */
260 header_length = ieee80211_get_hdrlen_from_skb(entry->skb);
261
262 /*
290 * Remove L2 padding which was added during 263 * Remove L2 padding which was added during
291 */ 264 */
292 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags)) 265 if (test_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags))
@@ -390,9 +363,12 @@ void rt2x00lib_txdone(struct queue_entry *entry,
390 * through a mac80211 library call (RTS/CTS) then we should not 363 * through a mac80211 library call (RTS/CTS) then we should not
391 * send the status report back. 364 * send the status report back.
392 */ 365 */
393 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) 366 if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) {
394 ieee80211_tx_status(rt2x00dev->hw, entry->skb); 367 if (test_bit(DRIVER_REQUIRE_TASKLET_CONTEXT, &rt2x00dev->flags))
395 else 368 ieee80211_tx_status(rt2x00dev->hw, entry->skb);
369 else
370 ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb);
371 } else
396 dev_kfree_skb_any(entry->skb); 372 dev_kfree_skb_any(entry->skb);
397 373
398 /* 374 /*
@@ -411,7 +387,7 @@ void rt2x00lib_txdone(struct queue_entry *entry,
411 * is reenabled when the txdone handler has finished. 387 * is reenabled when the txdone handler has finished.
412 */ 388 */
413 if (!rt2x00queue_threshold(entry->queue)) 389 if (!rt2x00queue_threshold(entry->queue))
414 ieee80211_wake_queue(rt2x00dev->hw, qid); 390 rt2x00queue_unpause_queue(entry->queue);
415} 391}
416EXPORT_SYMBOL_GPL(rt2x00lib_txdone); 392EXPORT_SYMBOL_GPL(rt2x00lib_txdone);
417 393
@@ -483,6 +459,10 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
483 unsigned int header_length; 459 unsigned int header_length;
484 int rate_idx; 460 int rate_idx;
485 461
462 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) ||
463 !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
464 goto submit_entry;
465
486 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) 466 if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags))
487 goto submit_entry; 467 goto submit_entry;
488 468
@@ -567,9 +547,11 @@ void rt2x00lib_rxdone(struct queue_entry *entry)
567 entry->skb = skb; 547 entry->skb = skb;
568 548
569submit_entry: 549submit_entry:
570 rt2x00dev->ops->lib->clear_entry(entry); 550 entry->flags = 0;
571 rt2x00queue_index_inc(entry->queue, Q_INDEX);
572 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE); 551 rt2x00queue_index_inc(entry->queue, Q_INDEX_DONE);
552 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) &&
553 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
554 rt2x00dev->ops->lib->clear_entry(entry);
573} 555}
574EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); 556EXPORT_SYMBOL_GPL(rt2x00lib_rxdone);
575 557
@@ -678,7 +660,7 @@ static void rt2x00lib_rate(struct ieee80211_rate *entry,
678{ 660{
679 entry->flags = 0; 661 entry->flags = 0;
680 entry->bitrate = rate->bitrate; 662 entry->bitrate = rate->bitrate;
681 entry->hw_value =index; 663 entry->hw_value = index;
682 entry->hw_value_short = index; 664 entry->hw_value_short = index;
683 665
684 if (rate->flags & DEV_RATE_SHORT_PREAMBLE) 666 if (rate->flags & DEV_RATE_SHORT_PREAMBLE)
@@ -818,8 +800,7 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
818 /* 800 /*
819 * Allocate tx status FIFO for driver use. 801 * Allocate tx status FIFO for driver use.
820 */ 802 */
821 if (test_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags) && 803 if (test_bit(DRIVER_REQUIRE_TXSTATUS_FIFO, &rt2x00dev->flags)) {
822 rt2x00dev->ops->lib->txstatus_tasklet) {
823 /* 804 /*
824 * Allocate txstatus fifo and tasklet, we use a size of 512 805 * Allocate txstatus fifo and tasklet, we use a size of 512
825 * for the kfifo which is big enough to store 512/4=128 tx 806 * for the kfifo which is big enough to store 512/4=128 tx
@@ -833,9 +814,10 @@ static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev)
833 return status; 814 return status;
834 815
835 /* tasklet for processing the tx status reports. */ 816 /* tasklet for processing the tx status reports. */
836 tasklet_init(&rt2x00dev->txstatus_tasklet, 817 if (rt2x00dev->ops->lib->txstatus_tasklet)
837 rt2x00dev->ops->lib->txstatus_tasklet, 818 tasklet_init(&rt2x00dev->txstatus_tasklet,
838 (unsigned long)rt2x00dev); 819 rt2x00dev->ops->lib->txstatus_tasklet,
820 (unsigned long)rt2x00dev);
839 821
840 } 822 }
841 823
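
The hunks above, together with the matching rt2x00mac.c hunks below, converge on one pattern for delayed_flags: the per-interface spinlock is dropped and the field becomes an unsigned long driven by atomic bitops. A minimal sketch, for illustration only (the helper names are hypothetical; the calls themselves are taken from this diff):

#include <linux/bitops.h>
#include "rt2x00.h"	/* assumed: struct rt2x00_intf with the new delayed_flags field */

/* Producer side (e.g. the set_tim iterator): no spinlock needed. */
static void example_mark_beacon_update(struct rt2x00_intf *intf)
{
	set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags);
}

/* Consumer side (the scheduled work): test-and-clear is one atomic step. */
static void example_run_delayed_work(struct rt2x00_dev *rt2x00dev,
				     struct rt2x00_intf *intf,
				     struct ieee80211_vif *vif)
{
	if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags))
		rt2x00queue_update_beacon(rt2x00dev, vif, true);
}
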
diff --git a/drivers/net/wireless/rt2x00/rt2x00ht.c b/drivers/net/wireless/rt2x00/rt2x00ht.c
index c637bcaec5f8..b7ad46ecaa1d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00ht.c
+++ b/drivers/net/wireless/rt2x00/rt2x00ht.c
@@ -40,8 +40,6 @@ void rt2x00ht_create_tx_descriptor(struct queue_entry *entry,
40 if (tx_info->control.sta) 40 if (tx_info->control.sta)
41 txdesc->mpdu_density = 41 txdesc->mpdu_density =
42 tx_info->control.sta->ht_cap.ampdu_density; 42 tx_info->control.sta->ht_cap.ampdu_density;
43 else
44 txdesc->mpdu_density = 0;
45 43
46 txdesc->ba_size = 7; /* FIXME: What value is needed? */ 44 txdesc->ba_size = 7; /* FIXME: What value is needed? */
47 45
diff --git a/drivers/net/wireless/rt2x00/rt2x00lib.h b/drivers/net/wireless/rt2x00/rt2x00lib.h
index 619da23b7b56..a105c500627b 100644
--- a/drivers/net/wireless/rt2x00/rt2x00lib.h
+++ b/drivers/net/wireless/rt2x00/rt2x00lib.h
@@ -57,7 +57,7 @@ static inline const struct rt2x00_rate *rt2x00_get_rate(const u16 hw_value)
57} 57}
58 58
59#define RATE_MCS(__mode, __mcs) \ 59#define RATE_MCS(__mode, __mcs) \
60 ( (((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff) ) 60 ((((__mode) & 0x00ff) << 8) | ((__mcs) & 0x00ff))
61 61
62static inline int rt2x00_get_rate_mcs(const u16 mcs_value) 62static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
63{ 63{
@@ -69,7 +69,6 @@ static inline int rt2x00_get_rate_mcs(const u16 mcs_value)
69 */ 69 */
70int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev); 70int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev);
71void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev); 71void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev);
72void rt2x00lib_toggle_rx(struct rt2x00_dev *rt2x00dev, enum dev_state state);
73 72
74/* 73/*
75 * Initialization handlers. 74 * Initialization handlers.
@@ -179,15 +178,6 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
179void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index); 178void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index);
180 179
181/** 180/**
182 * rt2x00queue_stop_queues - Halt all data queues
183 * @rt2x00dev: Pointer to &struct rt2x00_dev.
184 *
185 * This function will loop through all available queues to stop
186 * any pending outgoing frames.
187 */
188void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev);
189
190/**
191 * rt2x00queue_init_queues - Initialize all data queues 181 * rt2x00queue_init_queues - Initialize all data queues
192 * @rt2x00dev: Pointer to &struct rt2x00_dev. 182 * @rt2x00dev: Pointer to &struct rt2x00_dev.
193 * 183 *
diff --git a/drivers/net/wireless/rt2x00/rt2x00link.c b/drivers/net/wireless/rt2x00/rt2x00link.c
index b971d8798ebf..bfda60eaf4ef 100644
--- a/drivers/net/wireless/rt2x00/rt2x00link.c
+++ b/drivers/net/wireless/rt2x00/rt2x00link.c
@@ -67,7 +67,7 @@
67 (__avg).avg_weight ? \ 67 (__avg).avg_weight ? \
68 ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \ 68 ((((__avg).avg_weight * ((AVG_SAMPLES) - 1)) + \
69 ((__val) * (AVG_FACTOR))) / \ 69 ((__val) * (AVG_FACTOR))) / \
70 (AVG_SAMPLES) ) : \ 70 (AVG_SAMPLES)) : \
71 ((__val) * (AVG_FACTOR)); \ 71 ((__val) * (AVG_FACTOR)); \
72 __new.avg = __new.avg_weight / (AVG_FACTOR); \ 72 __new.avg = __new.avg_weight / (AVG_FACTOR); \
73 __new; \ 73 __new; \
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index a7d3f4c3ee0d..f3da051df39e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -104,7 +104,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
104 struct rt2x00_dev *rt2x00dev = hw->priv; 104 struct rt2x00_dev *rt2x00dev = hw->priv;
105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 105 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
106 enum data_queue_qid qid = skb_get_queue_mapping(skb); 106 enum data_queue_qid qid = skb_get_queue_mapping(skb);
107 struct data_queue *queue; 107 struct data_queue *queue = NULL;
108 108
109 /* 109 /*
110 * Mac80211 might be calling this function while we are trying 110 * Mac80211 might be calling this function while we are trying
@@ -153,7 +153,7 @@ int rt2x00mac_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
153 goto exit_fail; 153 goto exit_fail;
154 154
155 if (rt2x00queue_threshold(queue)) 155 if (rt2x00queue_threshold(queue))
156 ieee80211_stop_queue(rt2x00dev->hw, qid); 156 rt2x00queue_pause_queue(queue);
157 157
158 return NETDEV_TX_OK; 158 return NETDEV_TX_OK;
159 159
@@ -268,7 +268,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
268 else 268 else
269 rt2x00dev->intf_sta_count++; 269 rt2x00dev->intf_sta_count++;
270 270
271 spin_lock_init(&intf->lock);
272 spin_lock_init(&intf->seqlock); 271 spin_lock_init(&intf->seqlock);
273 mutex_init(&intf->beacon_skb_mutex); 272 mutex_init(&intf->beacon_skb_mutex);
274 intf->beacon = entry; 273 intf->beacon = entry;
@@ -282,15 +281,8 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
282 * STA interfaces at this time, since this can cause 281 * STA interfaces at this time, since this can cause
283 * invalid behavior in the device. 282 * invalid behavior in the device.
284 */ 283 */
285 memcpy(&intf->mac, vif->addr, ETH_ALEN); 284 rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
286 if (vif->type == NL80211_IFTYPE_AP) { 285 vif->addr, NULL);
287 memcpy(&intf->bssid, vif->addr, ETH_ALEN);
288 rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
289 intf->mac, intf->bssid);
290 } else {
291 rt2x00lib_config_intf(rt2x00dev, intf, vif->type,
292 intf->mac, NULL);
293 }
294 286
295 /* 287 /*
296 * Some filters depend on the current working mode. We can force 288 * Some filters depend on the current working mode. We can force
@@ -358,7 +350,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
358 * if for any reason the link tuner must be reset, this will be 350 * if for any reason the link tuner must be reset, this will be
359 * handled by rt2x00lib_config(). 351 * handled by rt2x00lib_config().
360 */ 352 */
361 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_OFF_LINK); 353 rt2x00queue_stop_queue(rt2x00dev->rx);
362 354
363 /* 355 /*
364 * When we've just turned on the radio, we want to reprogram 356 * When we've just turned on the radio, we want to reprogram
@@ -376,7 +368,7 @@ int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed)
376 rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant); 368 rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant);
377 369
378 /* Turn RX back on */ 370 /* Turn RX back on */
379 rt2x00lib_toggle_rx(rt2x00dev, STATE_RADIO_RX_ON_LINK); 371 rt2x00queue_start_queue(rt2x00dev->rx);
380 372
381 return 0; 373 return 0;
382} 374}
@@ -451,9 +443,7 @@ static void rt2x00mac_set_tim_iter(void *data, u8 *mac,
451 vif->type != NL80211_IFTYPE_WDS) 443 vif->type != NL80211_IFTYPE_WDS)
452 return; 444 return;
453 445
454 spin_lock(&intf->lock); 446 set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags);
455 intf->delayed_flags |= DELAYED_UPDATE_BEACON;
456 spin_unlock(&intf->lock);
457} 447}
458 448
459int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, 449int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta,
@@ -478,17 +468,17 @@ EXPORT_SYMBOL_GPL(rt2x00mac_set_tim);
478static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len) 468static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len)
479{ 469{
480 if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY) 470 if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY)
481 memcpy(&crypto->key, 471 memcpy(crypto->key,
482 &key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY], 472 &key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY],
483 sizeof(crypto->key)); 473 sizeof(crypto->key));
484 474
485 if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) 475 if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY)
486 memcpy(&crypto->tx_mic, 476 memcpy(crypto->tx_mic,
487 &key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 477 &key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
488 sizeof(crypto->tx_mic)); 478 sizeof(crypto->tx_mic));
489 479
490 if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY) 480 if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY)
491 memcpy(&crypto->rx_mic, 481 memcpy(crypto->rx_mic,
492 &key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 482 &key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
493 sizeof(crypto->rx_mic)); 483 sizeof(crypto->rx_mic));
494} 484}
@@ -498,7 +488,6 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
498 struct ieee80211_key_conf *key) 488 struct ieee80211_key_conf *key)
499{ 489{
500 struct rt2x00_dev *rt2x00dev = hw->priv; 490 struct rt2x00_dev *rt2x00dev = hw->priv;
501 struct rt2x00_intf *intf = vif_to_intf(vif);
502 int (*set_key) (struct rt2x00_dev *rt2x00dev, 491 int (*set_key) (struct rt2x00_dev *rt2x00dev,
503 struct rt2x00lib_crypto *crypto, 492 struct rt2x00lib_crypto *crypto,
504 struct ieee80211_key_conf *key); 493 struct ieee80211_key_conf *key);
@@ -522,7 +511,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
522 if (rt2x00dev->intf_sta_count) 511 if (rt2x00dev->intf_sta_count)
523 crypto.bssidx = 0; 512 crypto.bssidx = 0;
524 else 513 else
525 crypto.bssidx = intf->mac[5] & (rt2x00dev->ops->max_ap_intf - 1); 514 crypto.bssidx = vif->addr[5] & (rt2x00dev->ops->max_ap_intf - 1);
526 515
527 crypto.cipher = rt2x00crypto_key_to_cipher(key); 516 crypto.cipher = rt2x00crypto_key_to_cipher(key);
528 if (crypto.cipher == CIPHER_NONE) 517 if (crypto.cipher == CIPHER_NONE)
@@ -540,7 +529,7 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
540 if (crypto.cipher == CIPHER_TKIP) 529 if (crypto.cipher == CIPHER_TKIP)
541 memcpy_tkip(&crypto, &key->key[0], key->keylen); 530 memcpy_tkip(&crypto, &key->key[0], key->keylen);
542 else 531 else
543 memcpy(&crypto.key, &key->key[0], key->keylen); 532 memcpy(crypto.key, &key->key[0], key->keylen);
544 /* 533 /*
545 * Each BSS has a maximum of 4 shared keys. 534 * Each BSS has a maximum of 4 shared keys.
546 * Shared key index values: 535 * Shared key index values:
@@ -620,22 +609,8 @@ void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw,
620 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) 609 if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags))
621 return; 610 return;
622 611
623 spin_lock(&intf->lock);
624
625 /* 612 /*
626 * conf->bssid can be NULL if coming from the internal 613 * Update the BSSID.
627 * beacon update routine.
628 */
629 if (changes & BSS_CHANGED_BSSID)
630 memcpy(&intf->bssid, bss_conf->bssid, ETH_ALEN);
631
632 spin_unlock(&intf->lock);
633
634 /*
635 * Call rt2x00_config_intf() outside of the spinlock context since
636 * the call will sleep for USB drivers. By using the ieee80211_if_conf
637 * values as arguments we make keep access to rt2x00_intf thread safe
638 * even without the lock.
639 */ 614 */
640 if (changes & BSS_CHANGED_BSSID) 615 if (changes & BSS_CHANGED_BSSID)
641 rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL, 616 rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL,
@@ -719,3 +694,13 @@ void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw)
719 wiphy_rfkill_set_hw_state(hw->wiphy, !active); 694 wiphy_rfkill_set_hw_state(hw->wiphy, !active);
720} 695}
721EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll); 696EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll);
697
698void rt2x00mac_flush(struct ieee80211_hw *hw, bool drop)
699{
700 struct rt2x00_dev *rt2x00dev = hw->priv;
701 struct data_queue *queue;
702
703 tx_queue_for_each(rt2x00dev, queue)
704 rt2x00queue_flush_queue(queue, drop);
705}
706EXPORT_SYMBOL_GPL(rt2x00mac_flush);
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c
index 2449d785cf8d..73631c6fbb30 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.c
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.c
@@ -82,6 +82,13 @@ void rt2x00pci_rxdone(struct rt2x00_dev *rt2x00dev)
82 skbdesc->desc_len = entry->queue->desc_size; 82 skbdesc->desc_len = entry->queue->desc_size;
83 83
84 /* 84 /*
85 * DMA is already done, notify rt2x00lib that
86 * it finished successfully.
87 */
88 rt2x00lib_dmastart(entry);
89 rt2x00lib_dmadone(entry);
90
91 /*
85 * Send the frame to rt2x00lib for further processing. 92 * Send the frame to rt2x00lib for further processing.
86 */ 93 */
87 rt2x00lib_rxdone(entry); 94 rt2x00lib_rxdone(entry);
@@ -105,7 +112,7 @@ static int rt2x00pci_alloc_queue_dma(struct rt2x00_dev *rt2x00dev,
105 */ 112 */
106 addr = dma_alloc_coherent(rt2x00dev->dev, 113 addr = dma_alloc_coherent(rt2x00dev->dev,
107 queue->limit * queue->desc_size, 114 queue->limit * queue->desc_size,
108 &dma, GFP_KERNEL | GFP_DMA); 115 &dma, GFP_KERNEL);
109 if (!addr) 116 if (!addr)
110 return -ENOMEM; 117 return -ENOMEM;
111 118
@@ -279,7 +286,7 @@ int rt2x00pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
279 rt2x00dev->irq = pci_dev->irq; 286 rt2x00dev->irq = pci_dev->irq;
280 rt2x00dev->name = pci_name(pci_dev); 287 rt2x00dev->name = pci_name(pci_dev);
281 288
282 if (pci_dev->is_pcie) 289 if (pci_is_pcie(pci_dev))
283 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE); 290 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE);
284 else 291 else
285 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); 292 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index b854d62ff99b..746ce8fe8cf4 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -64,7 +64,7 @@ static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
64 const void *value, 64 const void *value,
65 const u32 length) 65 const u32 length)
66{ 66{
67 memcpy_toio(rt2x00dev->csr.base + offset, value, length); 67 __iowrite32_copy(rt2x00dev->csr.base + offset, value, length >> 2);
68} 68}
69 69
70/** 70/**
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index e360d287defb..ca82b3a91697 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -199,7 +199,12 @@ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length)
199 199
200void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) 200void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length)
201{ 201{
202 unsigned int l2pad = L2PAD_SIZE(header_length); 202 /*
203 * L2 padding is only present if the skb contains more than just the
204 * IEEE 802.11 header.
205 */
206 unsigned int l2pad = (skb->len > header_length) ?
207 L2PAD_SIZE(header_length) : 0;
203 208
204 if (!l2pad) 209 if (!l2pad)
205 return; 210 return;
@@ -311,14 +316,6 @@ static void rt2x00queue_create_tx_descriptor(struct queue_entry *entry,
311 memset(txdesc, 0, sizeof(*txdesc)); 316 memset(txdesc, 0, sizeof(*txdesc));
312 317
313 /* 318 /*
314 * Initialize information from queue
315 */
316 txdesc->qid = entry->queue->qid;
317 txdesc->cw_min = entry->queue->cw_min;
318 txdesc->cw_max = entry->queue->cw_max;
319 txdesc->aifs = entry->queue->aifs;
320
321 /*
322 * Header and frame information. 319 * Header and frame information.
323 */ 320 */
324 txdesc->length = entry->skb->len; 321 txdesc->length = entry->skb->len;
@@ -460,12 +457,9 @@ static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry,
460 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb); 457 rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry->skb);
461} 458}
462 459
463static void rt2x00queue_kick_tx_queue(struct queue_entry *entry, 460static void rt2x00queue_kick_tx_queue(struct data_queue *queue,
464 struct txentry_desc *txdesc) 461 struct txentry_desc *txdesc)
465{ 462{
466 struct data_queue *queue = entry->queue;
467 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
468
469 /* 463 /*
470 * Check if we need to kick the queue, there are however a few rules 464 * Check if we need to kick the queue, there are however a few rules
 471 * 1) Don't kick unless this is the last frame in a burst. 465 * 1) Don't kick unless this is the last frame in a burst.
@@ -477,7 +471,7 @@ static void rt2x00queue_kick_tx_queue(struct queue_entry *entry,
477 */ 471 */
478 if (rt2x00queue_threshold(queue) || 472 if (rt2x00queue_threshold(queue) ||
479 !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) 473 !test_bit(ENTRY_TXD_BURST, &txdesc->flags))
480 rt2x00dev->ops->lib->kick_tx_queue(queue); 474 queue->rt2x00dev->ops->lib->kick_queue(queue);
481} 475}
482 476
483int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, 477int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
@@ -567,7 +561,7 @@ int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb,
567 561
568 rt2x00queue_index_inc(queue, Q_INDEX); 562 rt2x00queue_index_inc(queue, Q_INDEX);
569 rt2x00queue_write_tx_descriptor(entry, &txdesc); 563 rt2x00queue_write_tx_descriptor(entry, &txdesc);
570 rt2x00queue_kick_tx_queue(entry, &txdesc); 564 rt2x00queue_kick_tx_queue(queue, &txdesc);
571 565
572 return 0; 566 return 0;
573} 567}
@@ -591,7 +585,7 @@ int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev,
591 rt2x00queue_free_skb(intf->beacon); 585 rt2x00queue_free_skb(intf->beacon);
592 586
593 if (!enable_beacon) { 587 if (!enable_beacon) {
594 rt2x00dev->ops->lib->kill_tx_queue(intf->beacon->queue); 588 rt2x00queue_stop_queue(intf->beacon->queue);
595 mutex_unlock(&intf->beacon_skb_mutex); 589 mutex_unlock(&intf->beacon_skb_mutex);
596 return 0; 590 return 0;
597 } 591 }
@@ -649,10 +643,10 @@ void rt2x00queue_for_each_entry(struct data_queue *queue,
649 * it should not be kicked during this run, since it 643 * it should not be kicked during this run, since it
650 * is part of another TX operation. 644 * is part of another TX operation.
651 */ 645 */
652 spin_lock_irqsave(&queue->lock, irqflags); 646 spin_lock_irqsave(&queue->index_lock, irqflags);
653 index_start = queue->index[start]; 647 index_start = queue->index[start];
654 index_end = queue->index[end]; 648 index_end = queue->index[end];
655 spin_unlock_irqrestore(&queue->lock, irqflags); 649 spin_unlock_irqrestore(&queue->index_lock, irqflags);
656 650
657 /* 651 /*
 658 * Start from the TX done pointer, this guarantees that we will 652 * Start from the TX done pointer, this guarantees that we will
@@ -706,11 +700,11 @@ struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue,
706 return NULL; 700 return NULL;
707 } 701 }
708 702
709 spin_lock_irqsave(&queue->lock, irqflags); 703 spin_lock_irqsave(&queue->index_lock, irqflags);
710 704
711 entry = &queue->entries[queue->index[index]]; 705 entry = &queue->entries[queue->index[index]];
712 706
713 spin_unlock_irqrestore(&queue->lock, irqflags); 707 spin_unlock_irqrestore(&queue->index_lock, irqflags);
714 708
715 return entry; 709 return entry;
716} 710}
@@ -726,7 +720,7 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
726 return; 720 return;
727 } 721 }
728 722
729 spin_lock_irqsave(&queue->lock, irqflags); 723 spin_lock_irqsave(&queue->index_lock, irqflags);
730 724
731 queue->index[index]++; 725 queue->index[index]++;
732 if (queue->index[index] >= queue->limit) 726 if (queue->index[index] >= queue->limit)
@@ -741,15 +735,219 @@ void rt2x00queue_index_inc(struct data_queue *queue, enum queue_index index)
741 queue->count++; 735 queue->count++;
742 } 736 }
743 737
744 spin_unlock_irqrestore(&queue->lock, irqflags); 738 spin_unlock_irqrestore(&queue->index_lock, irqflags);
745} 739}
746 740
741void rt2x00queue_pause_queue(struct data_queue *queue)
742{
743 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
744 !test_bit(QUEUE_STARTED, &queue->flags) ||
745 test_and_set_bit(QUEUE_PAUSED, &queue->flags))
746 return;
747
748 switch (queue->qid) {
749 case QID_AC_VO:
750 case QID_AC_VI:
751 case QID_AC_BE:
752 case QID_AC_BK:
753 /*
754 * For TX queues, we have to disable the queue
755 * inside mac80211.
756 */
757 ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid);
758 break;
759 default:
760 break;
761 }
762}
763EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue);
764
765void rt2x00queue_unpause_queue(struct data_queue *queue)
766{
767 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
768 !test_bit(QUEUE_STARTED, &queue->flags) ||
769 !test_and_clear_bit(QUEUE_PAUSED, &queue->flags))
770 return;
771
772 switch (queue->qid) {
773 case QID_AC_VO:
774 case QID_AC_VI:
775 case QID_AC_BE:
776 case QID_AC_BK:
777 /*
778 * For TX queues, we have to enable the queue
779 * inside mac80211.
780 */
781 ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid);
782 break;
783 case QID_RX:
784 /*
785 * For RX we need to kick the queue now in order to
786 * receive frames.
787 */
788 queue->rt2x00dev->ops->lib->kick_queue(queue);
789 default:
790 break;
791 }
792}
793EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);
794
795void rt2x00queue_start_queue(struct data_queue *queue)
796{
797 mutex_lock(&queue->status_lock);
798
799 if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
800 test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
801 mutex_unlock(&queue->status_lock);
802 return;
803 }
804
805 set_bit(QUEUE_PAUSED, &queue->flags);
806
807 queue->rt2x00dev->ops->lib->start_queue(queue);
808
809 rt2x00queue_unpause_queue(queue);
810
811 mutex_unlock(&queue->status_lock);
812}
813EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);
814
815void rt2x00queue_stop_queue(struct data_queue *queue)
816{
817 mutex_lock(&queue->status_lock);
818
819 if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
820 mutex_unlock(&queue->status_lock);
821 return;
822 }
823
824 rt2x00queue_pause_queue(queue);
825
826 queue->rt2x00dev->ops->lib->stop_queue(queue);
827
828 mutex_unlock(&queue->status_lock);
829}
830EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);
831
832void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
833{
834 unsigned int i;
835 bool started;
836 bool tx_queue =
837 (queue->qid == QID_AC_VO) ||
838 (queue->qid == QID_AC_VI) ||
839 (queue->qid == QID_AC_BE) ||
840 (queue->qid == QID_AC_BK);
841
842 mutex_lock(&queue->status_lock);
843
844 /*
845 * If the queue has been started, we must stop it temporarily
846	 * to prevent any new frames from being queued on the device. If
847 * we are not dropping the pending frames, the queue must
848 * only be stopped in the software and not the hardware,
849 * otherwise the queue will never become empty on its own.
850 */
851 started = test_bit(QUEUE_STARTED, &queue->flags);
852 if (started) {
853 /*
854 * Pause the queue
855 */
856 rt2x00queue_pause_queue(queue);
857
858 /*
859 * If we are not supposed to drop any pending
860 * frames, this means we must force a start (=kick)
861 * to the queue to make sure the hardware will
862 * start transmitting.
863 */
864 if (!drop && tx_queue)
865 queue->rt2x00dev->ops->lib->kick_queue(queue);
866 }
867
868 /*
869	 * Check if the driver supports flushing; we can only guarantee
870 * full support for flushing if the driver is able
871 * to cancel all pending frames (drop = true).
872 */
873 if (drop && queue->rt2x00dev->ops->lib->flush_queue)
874 queue->rt2x00dev->ops->lib->flush_queue(queue);
875
876 /*
877 * When we don't want to drop any frames, or when
878	 * the driver doesn't fully flush the queue correctly,
879 * we must wait for the queue to become empty.
880 */
881 for (i = 0; !rt2x00queue_empty(queue) && i < 100; i++)
882 msleep(10);
883
884 /*
885 * The queue flush has failed...
886 */
887 if (unlikely(!rt2x00queue_empty(queue)))
888 WARNING(queue->rt2x00dev, "Queue %d failed to flush", queue->qid);
889
890 /*
891 * Restore the queue to the previous status
892 */
893 if (started)
894 rt2x00queue_unpause_queue(queue);
895
896 mutex_unlock(&queue->status_lock);
897}
898EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);
899
900void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
901{
902 struct data_queue *queue;
903
904 /*
905 * rt2x00queue_start_queue will call ieee80211_wake_queue
906	 * for each queue after it has been properly initialized.
907 */
908 tx_queue_for_each(rt2x00dev, queue)
909 rt2x00queue_start_queue(queue);
910
911 rt2x00queue_start_queue(rt2x00dev->rx);
912}
913EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);
914
915void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
916{
917 struct data_queue *queue;
918
919 /*
920 * rt2x00queue_stop_queue will call ieee80211_stop_queue
921	 * as well, but we are completely shutting down everything
922 * now, so it is much safer to stop all TX queues at once,
923 * and use rt2x00queue_stop_queue for cleaning up.
924 */
925 ieee80211_stop_queues(rt2x00dev->hw);
926
927 tx_queue_for_each(rt2x00dev, queue)
928 rt2x00queue_stop_queue(queue);
929
930 rt2x00queue_stop_queue(rt2x00dev->rx);
931}
932EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues);
933
934void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop)
935{
936 struct data_queue *queue;
937
938 tx_queue_for_each(rt2x00dev, queue)
939 rt2x00queue_flush_queue(queue, drop);
940
941 rt2x00queue_flush_queue(rt2x00dev->rx, drop);
942}
943EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues);
944
747static void rt2x00queue_reset(struct data_queue *queue) 945static void rt2x00queue_reset(struct data_queue *queue)
748{ 946{
749 unsigned long irqflags; 947 unsigned long irqflags;
750 unsigned int i; 948 unsigned int i;
751 949
752 spin_lock_irqsave(&queue->lock, irqflags); 950 spin_lock_irqsave(&queue->index_lock, irqflags);
753 951
754 queue->count = 0; 952 queue->count = 0;
755 queue->length = 0; 953 queue->length = 0;
@@ -759,15 +957,7 @@ static void rt2x00queue_reset(struct data_queue *queue)
759 queue->last_action[i] = jiffies; 957 queue->last_action[i] = jiffies;
760 } 958 }
761 959
762 spin_unlock_irqrestore(&queue->lock, irqflags); 960 spin_unlock_irqrestore(&queue->index_lock, irqflags);
763}
764
765void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
766{
767 struct data_queue *queue;
768
769 txall_queue_for_each(rt2x00dev, queue)
770 rt2x00dev->ops->lib->kill_tx_queue(queue);
771} 961}
772 962
773void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) 963void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
@@ -778,11 +968,8 @@ void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev)
778 queue_for_each(rt2x00dev, queue) { 968 queue_for_each(rt2x00dev, queue) {
779 rt2x00queue_reset(queue); 969 rt2x00queue_reset(queue);
780 970
781 for (i = 0; i < queue->limit; i++) { 971 for (i = 0; i < queue->limit; i++)
782 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); 972 rt2x00dev->ops->lib->clear_entry(&queue->entries[i]);
783 if (queue->qid == QID_RX)
784 rt2x00queue_index_inc(queue, Q_INDEX);
785 }
786 } 973 }
787} 974}
788 975
@@ -809,8 +996,8 @@ static int rt2x00queue_alloc_entries(struct data_queue *queue,
809 return -ENOMEM; 996 return -ENOMEM;
810 997
811#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \ 998#define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \
812 ( ((char *)(__base)) + ((__limit) * (__esize)) + \ 999 (((char *)(__base)) + ((__limit) * (__esize)) + \
813 ((__index) * (__psize)) ) 1000 ((__index) * (__psize)))
814 1001
815 for (i = 0; i < queue->limit; i++) { 1002 for (i = 0; i < queue->limit; i++) {
816 entries[i].flags = 0; 1003 entries[i].flags = 0;
@@ -911,7 +1098,8 @@ void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev)
911static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, 1098static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev,
912 struct data_queue *queue, enum data_queue_qid qid) 1099 struct data_queue *queue, enum data_queue_qid qid)
913{ 1100{
914 spin_lock_init(&queue->lock); 1101 mutex_init(&queue->status_lock);
1102 spin_lock_init(&queue->index_lock);
915 1103
916 queue->rt2x00dev = rt2x00dev; 1104 queue->rt2x00dev = rt2x00dev;
917 queue->qid = qid; 1105 queue->qid = qid;
@@ -953,7 +1141,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
953 /* 1141 /*
954 * Initialize queue parameters. 1142 * Initialize queue parameters.
955 * RX: qid = QID_RX 1143 * RX: qid = QID_RX
956 * TX: qid = QID_AC_BE + index 1144 * TX: qid = QID_AC_VO + index
957 * TX: cw_min: 2^5 = 32. 1145 * TX: cw_min: 2^5 = 32.
958 * TX: cw_max: 2^10 = 1024. 1146 * TX: cw_max: 2^10 = 1024.
959 * BCN: qid = QID_BEACON 1147 * BCN: qid = QID_BEACON
@@ -961,7 +1149,7 @@ int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev)
961 */ 1149 */
962 rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX); 1150 rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX);
963 1151
964 qid = QID_AC_BE; 1152 qid = QID_AC_VO;
965 tx_queue_for_each(rt2x00dev, queue) 1153 tx_queue_for_each(rt2x00dev, queue)
966 rt2x00queue_init(rt2x00dev, queue, qid++); 1154 rt2x00queue_init(rt2x00dev, queue, qid++);
967 1155
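The new rt2x00queue_{start,stop,pause,unpause,flush}_queue helpers above replace the per-driver kick_tx_queue/kill_tx_queue callbacks with a generic queue lifecycle. The following is a small, standalone userspace sketch of only the flag handling; the struct, flag bits and printf placeholders are illustrative assumptions, and the real code uses atomic bitops, the status mutex and the mac80211/driver callbacks.

/*
 * Userspace model: start begins paused and then unpauses; stop pauses
 * first and then shuts the hardware queue down, mirroring the ordering
 * in rt2x00queue_start_queue()/rt2x00queue_stop_queue().
 */
#include <stdio.h>

enum { QUEUE_STARTED = 1 << 0, QUEUE_PAUSED = 1 << 1 };

struct model_queue {
        unsigned int flags;
};

static void model_pause(struct model_queue *q)
{
        if (!(q->flags & QUEUE_STARTED) || (q->flags & QUEUE_PAUSED))
                return;                          /* idempotent */
        q->flags |= QUEUE_PAUSED;
        printf("mac80211 queue stopped\n");      /* ieee80211_stop_queue() */
}

static void model_unpause(struct model_queue *q)
{
        if (!(q->flags & QUEUE_STARTED) || !(q->flags & QUEUE_PAUSED))
                return;
        q->flags &= ~QUEUE_PAUSED;
        printf("mac80211 queue woken\n");        /* ieee80211_wake_queue() */
}

static void model_start(struct model_queue *q)
{
        if (q->flags & QUEUE_STARTED)
                return;
        q->flags |= QUEUE_STARTED | QUEUE_PAUSED;       /* start paused... */
        printf("hardware queue started\n");             /* ->start_queue() */
        model_unpause(q);                               /* ...then unpause */
}

static void model_stop(struct model_queue *q)
{
        if (!(q->flags & QUEUE_STARTED))
                return;
        model_pause(q);                                 /* pause first */
        q->flags &= ~QUEUE_STARTED;
        printf("hardware queue stopped\n");             /* ->stop_queue() */
}

int main(void)
{
        struct model_queue q = { 0 };

        model_start(&q);
        model_pause(&q);
        model_stop(&q);
        return 0;
}
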
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.h b/drivers/net/wireless/rt2x00/rt2x00queue.h
index d81d85f34866..fab8e2687f29 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.h
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.h
@@ -43,28 +43,12 @@
43#define AGGREGATION_SIZE 3840 43#define AGGREGATION_SIZE 3840
44 44
45/** 45/**
46 * DOC: Number of entries per queue
47 *
48 * Under normal load without fragmentation, 12 entries are sufficient
49 * without the queue being filled up to the maximum. When using fragmentation
50 * and the queue threshold code, we need to add some additional margins to
51 * make sure the queue will never (or only under extreme load) fill up
52 * completely.
53 * Since we don't use preallocated DMA, having a large number of queue entries
54 * will have minimal impact on the memory requirements for the queue.
55 */
56#define RX_ENTRIES 24
57#define TX_ENTRIES 24
58#define BEACON_ENTRIES 1
59#define ATIM_ENTRIES 8
60
61/**
62 * enum data_queue_qid: Queue identification 46 * enum data_queue_qid: Queue identification
63 * 47 *
48 * @QID_AC_VO: AC VO queue
49 * @QID_AC_VI: AC VI queue
64 * @QID_AC_BE: AC BE queue 50 * @QID_AC_BE: AC BE queue
65 * @QID_AC_BK: AC BK queue 51 * @QID_AC_BK: AC BK queue
66 * @QID_AC_VI: AC VI queue
67 * @QID_AC_VO: AC VO queue
68 * @QID_HCCA: HCCA queue 52 * @QID_HCCA: HCCA queue
69 * @QID_MGMT: MGMT queue (prio queue) 53 * @QID_MGMT: MGMT queue (prio queue)
70 * @QID_RX: RX queue 54 * @QID_RX: RX queue
@@ -73,10 +57,10 @@
73	 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)	 57	 * @QID_ATIM: Atim queue (value unspecified, don't send it to device)
74 */ 58 */
75enum data_queue_qid { 59enum data_queue_qid {
76 QID_AC_BE = 0, 60 QID_AC_VO = 0,
77 QID_AC_BK = 1, 61 QID_AC_VI = 1,
78 QID_AC_VI = 2, 62 QID_AC_BE = 2,
79 QID_AC_VO = 3, 63 QID_AC_BK = 3,
80 QID_HCCA = 4, 64 QID_HCCA = 4,
81 QID_MGMT = 13, 65 QID_MGMT = 13,
82 QID_RX = 14, 66 QID_RX = 14,
@@ -296,7 +280,6 @@ enum txentry_desc_flags {
296 * Summary of information for the frame descriptor before sending a TX frame. 280 * Summary of information for the frame descriptor before sending a TX frame.
297 * 281 *
298 * @flags: Descriptor flags (See &enum queue_entry_flags). 282 * @flags: Descriptor flags (See &enum queue_entry_flags).
299 * @qid: Queue identification (See &enum data_queue_qid).
300 * @length: Length of the entire frame. 283 * @length: Length of the entire frame.
301 * @header_length: Length of 802.11 header. 284 * @header_length: Length of 802.11 header.
302 * @length_high: PLCP length high word. 285 * @length_high: PLCP length high word.
@@ -309,11 +292,8 @@ enum txentry_desc_flags {
309 * @rate_mode: Rate mode (See @enum rate_modulation). 292 * @rate_mode: Rate mode (See @enum rate_modulation).
310	 * @mpdu_density: MPDU density.	 293	 * @mpdu_density: MPDU density.
311 * @retry_limit: Max number of retries. 294 * @retry_limit: Max number of retries.
312 * @aifs: AIFS value.
313 * @ifs: IFS value. 295 * @ifs: IFS value.
314 * @txop: IFS value for 11n capable chips. 296 * @txop: IFS value for 11n capable chips.
315 * @cw_min: cwmin value.
316 * @cw_max: cwmax value.
317 * @cipher: Cipher type used for encryption. 297 * @cipher: Cipher type used for encryption.
318 * @key_idx: Key index used for encryption. 298 * @key_idx: Key index used for encryption.
319 * @iv_offset: Position where IV should be inserted by hardware. 299 * @iv_offset: Position where IV should be inserted by hardware.
@@ -322,8 +302,6 @@ enum txentry_desc_flags {
322struct txentry_desc { 302struct txentry_desc {
323 unsigned long flags; 303 unsigned long flags;
324 304
325 enum data_queue_qid qid;
326
327 u16 length; 305 u16 length;
328 u16 header_length; 306 u16 header_length;
329 307
@@ -339,11 +317,8 @@ struct txentry_desc {
339 u16 mpdu_density; 317 u16 mpdu_density;
340 318
341 short retry_limit; 319 short retry_limit;
342 short aifs;
343 short ifs; 320 short ifs;
344 short txop; 321 short txop;
345 short cw_min;
346 short cw_max;
347 322
348 enum cipher cipher; 323 enum cipher cipher;
349 u16 key_idx; 324 u16 key_idx;
@@ -365,12 +340,16 @@ struct txentry_desc {
365	 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred	 340	 * @ENTRY_DATA_IO_FAILED: Hardware indicated that an IO error occurred
366	 * while transferring the data to the hardware. No TX status report will	 341	 * while transferring the data to the hardware. No TX status report will
367 * be expected from the hardware. 342 * be expected from the hardware.
343	 * @ENTRY_DATA_STATUS_PENDING: The entry has been sent to the device and
344	 * returned. It is now waiting for the status report before the
345	 * entry can be reused again.
368 */ 346 */
369enum queue_entry_flags { 347enum queue_entry_flags {
370 ENTRY_BCN_ASSIGNED, 348 ENTRY_BCN_ASSIGNED,
371 ENTRY_OWNER_DEVICE_DATA, 349 ENTRY_OWNER_DEVICE_DATA,
372 ENTRY_DATA_PENDING, 350 ENTRY_DATA_PENDING,
373 ENTRY_DATA_IO_FAILED 351 ENTRY_DATA_IO_FAILED,
352 ENTRY_DATA_STATUS_PENDING,
374}; 353};
375 354
376/** 355/**
@@ -417,13 +396,33 @@ enum queue_index {
417}; 396};
418 397
419/** 398/**
399 * enum data_queue_flags: Status flags for data queues
400 *
401	 * @QUEUE_STARTED: The queue has been started. For RX queues this means the
402 * device might be DMA'ing skbuffers. TX queues will accept skbuffers to
403 * be transmitted and beacon queues will start beaconing the configured
404 * beacons.
405 * @QUEUE_PAUSED: The queue has been started but is currently paused.
406 * When this bit is set, the queue has been stopped in mac80211,
407	 * preventing new frames from being enqueued. However, a few frames
408 * might still appear shortly after the pausing...
409 */
410enum data_queue_flags {
411 QUEUE_STARTED,
412 QUEUE_PAUSED,
413};
414
415/**
420 * struct data_queue: Data queue 416 * struct data_queue: Data queue
421 * 417 *
422 * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to. 418 * @rt2x00dev: Pointer to main &struct rt2x00dev where this queue belongs to.
423 * @entries: Base address of the &struct queue_entry which are 419 * @entries: Base address of the &struct queue_entry which are
424 * part of this queue. 420 * part of this queue.
425 * @qid: The queue identification, see &enum data_queue_qid. 421 * @qid: The queue identification, see &enum data_queue_qid.
426	 * @lock: Spinlock to protect index handling. Whenever @index, @index_done or	 422	 * @flags: Queue flags, see &enum data_queue_flags.
423 * @status_lock: The mutex for protecting the start/stop/flush
424 * handling on this queue.
425 * @index_lock: Spinlock to protect index handling. Whenever @index, @index_done or
427 * @index_crypt needs to be changed this lock should be grabbed to prevent 426 * @index_crypt needs to be changed this lock should be grabbed to prevent
428 * index corruption due to concurrency. 427 * index corruption due to concurrency.
429 * @count: Number of frames handled in the queue. 428 * @count: Number of frames handled in the queue.
@@ -446,8 +445,11 @@ struct data_queue {
446 struct queue_entry *entries; 445 struct queue_entry *entries;
447 446
448 enum data_queue_qid qid; 447 enum data_queue_qid qid;
448 unsigned long flags;
449
450 struct mutex status_lock;
451 spinlock_t index_lock;
449 452
450 spinlock_t lock;
451 unsigned int count; 453 unsigned int count;
452 unsigned short limit; 454 unsigned short limit;
453 unsigned short threshold; 455 unsigned short threshold;
@@ -618,10 +620,10 @@ static inline int rt2x00queue_threshold(struct data_queue *queue)
618} 620}
619 621
620/** 622/**
621	 * rt2x00queue_timeout - Check if a timeout occured for STATUS reorts	 623	 * rt2x00queue_status_timeout - Check if a timeout occurred for STATUS reports
622 * @queue: Queue to check. 624 * @queue: Queue to check.
623 */ 625 */
624static inline int rt2x00queue_timeout(struct data_queue *queue) 626static inline int rt2x00queue_status_timeout(struct data_queue *queue)
625{ 627{
626 return time_after(queue->last_action[Q_INDEX_DMA_DONE], 628 return time_after(queue->last_action[Q_INDEX_DMA_DONE],
627 queue->last_action[Q_INDEX_DONE] + (HZ / 10)); 629 queue->last_action[Q_INDEX_DONE] + (HZ / 10));
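rt2x00queue_status_timeout() above relies on the kernel's wrap-safe jiffies comparison: a status timeout is flagged when the last DMA-done action is more than HZ/10 ahead of the last status-done action. Below is a minimal standalone model of that check; MODEL_HZ and the sample timestamps are illustrative assumptions.

#include <stdio.h>

#define MODEL_HZ 1000UL        /* illustrative tick rate */

/* Same wrap-safe idiom as the kernel's time_after(a, b). */
static int model_time_after(unsigned long a, unsigned long b)
{
        return (long)(b - a) < 0;
}

static int status_timeout(unsigned long last_dma_done, unsigned long last_done)
{
        return model_time_after(last_dma_done, last_done + MODEL_HZ / 10);
}

int main(void)
{
        /* DMA done at t=5000, last status report at t=4000: reports lag. */
        printf("timeout: %d\n", status_timeout(5000UL, 4000UL));  /* 1 */
        /* Status reports within 100 ticks of the DMA completions. */
        printf("timeout: %d\n", status_timeout(5000UL, 4950UL));  /* 0 */
        return 0;
}
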
diff --git a/drivers/net/wireless/rt2x00/rt2x00reg.h b/drivers/net/wireless/rt2x00/rt2x00reg.h
index cef94621cef7..e8259ae48ced 100644
--- a/drivers/net/wireless/rt2x00/rt2x00reg.h
+++ b/drivers/net/wireless/rt2x00/rt2x00reg.h
@@ -83,10 +83,6 @@ enum dev_state {
83 */ 83 */
84 STATE_RADIO_ON, 84 STATE_RADIO_ON,
85 STATE_RADIO_OFF, 85 STATE_RADIO_OFF,
86 STATE_RADIO_RX_ON,
87 STATE_RADIO_RX_OFF,
88 STATE_RADIO_RX_ON_LINK,
89 STATE_RADIO_RX_OFF_LINK,
90 STATE_RADIO_IRQ_ON, 86 STATE_RADIO_IRQ_ON,
91 STATE_RADIO_IRQ_OFF, 87 STATE_RADIO_IRQ_OFF,
92 STATE_RADIO_IRQ_ON_ISR, 88 STATE_RADIO_IRQ_ON_ISR,
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
index fc98063de71d..2aa5c38022f3 100644
--- a/drivers/net/wireless/rt2x00/rt2x00soc.c
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -40,6 +40,8 @@ static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
40 40
41 kfree(rt2x00dev->eeprom); 41 kfree(rt2x00dev->eeprom);
42 rt2x00dev->eeprom = NULL; 42 rt2x00dev->eeprom = NULL;
43
44 iounmap(rt2x00dev->csr.base);
43} 45}
44 46
45static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev) 47static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
@@ -51,9 +53,9 @@ static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
51 if (!res) 53 if (!res)
52 return -ENODEV; 54 return -ENODEV;
53 55
54 rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start); 56 rt2x00dev->csr.base = ioremap(res->start, resource_size(res));
55 if (!rt2x00dev->csr.base) 57 if (!rt2x00dev->csr.base)
56 goto exit; 58 return -ENOMEM;
57 59
58 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); 60 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
59 if (!rt2x00dev->eeprom) 61 if (!rt2x00dev->eeprom)
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index b3317df7a7d4..1a9937d5aff6 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -195,7 +195,8 @@ static void rt2x00usb_work_txdone(struct work_struct *work)
195 while (!rt2x00queue_empty(queue)) { 195 while (!rt2x00queue_empty(queue)) {
196 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); 196 entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
197 197
198 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 198 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
199 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
199 break; 200 break;
200 201
201 rt2x00usb_work_txdone_entry(entry); 202 rt2x00usb_work_txdone_entry(entry);
@@ -226,9 +227,7 @@ static void rt2x00usb_interrupt_txdone(struct urb *urb)
226 * Schedule the delayed work for reading the TX status 227 * Schedule the delayed work for reading the TX status
227 * from the device. 228 * from the device.
228 */ 229 */
229 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && 230 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
230 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
231 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->txdone_work);
232} 231}
233 232
234static void rt2x00usb_kick_tx_entry(struct queue_entry *entry) 233static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
@@ -237,8 +236,10 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
237 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); 236 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
238 struct queue_entry_priv_usb *entry_priv = entry->priv_data; 237 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
239 u32 length; 238 u32 length;
239 int status;
240 240
241 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags)) 241 if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
242 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
242 return; 243 return;
243 244
244 /* 245 /*
@@ -253,121 +254,15 @@ static void rt2x00usb_kick_tx_entry(struct queue_entry *entry)
253 entry->skb->data, length, 254 entry->skb->data, length,
254 rt2x00usb_interrupt_txdone, entry); 255 rt2x00usb_interrupt_txdone, entry);
255 256
256 if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) { 257 status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
258 if (status) {
259 if (status == -ENODEV)
260 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
257 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); 261 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
258 rt2x00lib_dmadone(entry); 262 rt2x00lib_dmadone(entry);
259 } 263 }
260} 264}
261 265
262void rt2x00usb_kick_tx_queue(struct data_queue *queue)
263{
264 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
265 rt2x00usb_kick_tx_entry);
266}
267EXPORT_SYMBOL_GPL(rt2x00usb_kick_tx_queue);
268
269static void rt2x00usb_kill_tx_entry(struct queue_entry *entry)
270{
271 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
272 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
273 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
274
275 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
276 return;
277
278 usb_kill_urb(entry_priv->urb);
279
280 /*
281 * Kill guardian urb (if required by driver).
282 */
283 if ((entry->queue->qid == QID_BEACON) &&
284 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
285 usb_kill_urb(bcn_priv->guardian_urb);
286}
287
288void rt2x00usb_kill_tx_queue(struct data_queue *queue)
289{
290 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
291 rt2x00usb_kill_tx_entry);
292}
293EXPORT_SYMBOL_GPL(rt2x00usb_kill_tx_queue);
294
295static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
296{
297 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
298 unsigned short threshold = queue->threshold;
299
300 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
301 " invoke forced forced reset", queue->qid);
302
303 /*
304 * Temporarily disable the TX queue, this will force mac80211
305 * to use the other queues until this queue has been restored.
306 *
307 * Set the queue threshold to the queue limit. This prevents the
308 * queue from being enabled during the txdone handler.
309 */
310 queue->threshold = queue->limit;
311 ieee80211_stop_queue(rt2x00dev->hw, queue->qid);
312
313 /*
314 * Kill all entries in the queue, afterwards we need to
315 * wait a bit for all URBs to be cancelled.
316 */
317 rt2x00usb_kill_tx_queue(queue);
318
319 /*
320 * In case that a driver has overriden the txdone_work
321 * function, we invoke the TX done through there.
322 */
323 rt2x00dev->txdone_work.func(&rt2x00dev->txdone_work);
324
325 /*
326 * Security measure: if the driver did override the
327 * txdone_work function, and the hardware did arrive
328 * in a state which causes it to malfunction, it is
329 * possible that the driver couldn't handle the txdone
330 * event correctly. So after giving the driver the
331 * chance to cleanup, we now force a cleanup of any
332 * leftovers.
333 */
334 if (!rt2x00queue_empty(queue)) {
335 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
336 " status handling failed, invoke hard reset", queue->qid);
337 rt2x00usb_work_txdone(&rt2x00dev->txdone_work);
338 }
339
340 /*
341 * The queue has been reset, and mac80211 is allowed to use the
342 * queue again.
343 */
344 queue->threshold = threshold;
345 ieee80211_wake_queue(rt2x00dev->hw, queue->qid);
346}
347
348static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
349{
350 WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
351 " invoke forced tx handler", queue->qid);
352
353 ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
354}
355
356void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
357{
358 struct data_queue *queue;
359
360 tx_queue_for_each(rt2x00dev, queue) {
361 if (!rt2x00queue_empty(queue)) {
362 if (rt2x00queue_dma_timeout(queue))
363 rt2x00usb_watchdog_tx_dma(queue);
364 if (rt2x00queue_timeout(queue))
365 rt2x00usb_watchdog_tx_status(queue);
366 }
367 }
368}
369EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
370
371/* 266/*
372 * RX data handlers. 267 * RX data handlers.
373 */ 268 */
@@ -382,7 +277,8 @@ static void rt2x00usb_work_rxdone(struct work_struct *work)
382 while (!rt2x00queue_empty(rt2x00dev->rx)) { 277 while (!rt2x00queue_empty(rt2x00dev->rx)) {
383 entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE); 278 entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE);
384 279
385 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) 280 if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
281 !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
386 break; 282 break;
387 283
388 /* 284 /*
@@ -424,11 +320,157 @@ static void rt2x00usb_interrupt_rxdone(struct urb *urb)
424 * Schedule the delayed work for reading the RX status 320 * Schedule the delayed work for reading the RX status
425 * from the device. 321 * from the device.
426 */ 322 */
427 if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && 323 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
428 test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
429 ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->rxdone_work);
430} 324}
431 325
326static void rt2x00usb_kick_rx_entry(struct queue_entry *entry)
327{
328 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
329 struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
330 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
331 int status;
332
333 if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) ||
334 test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
335 return;
336
337 rt2x00lib_dmastart(entry);
338
339 usb_fill_bulk_urb(entry_priv->urb, usb_dev,
340 usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint),
341 entry->skb->data, entry->skb->len,
342 rt2x00usb_interrupt_rxdone, entry);
343
344 status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
345 if (status) {
346 if (status == -ENODEV)
347 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
348 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
349 rt2x00lib_dmadone(entry);
350 }
351}
352
353void rt2x00usb_kick_queue(struct data_queue *queue)
354{
355 switch (queue->qid) {
356 case QID_AC_VO:
357 case QID_AC_VI:
358 case QID_AC_BE:
359 case QID_AC_BK:
360 if (!rt2x00queue_empty(queue))
361 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
362 rt2x00usb_kick_tx_entry);
363 break;
364 case QID_RX:
365 if (!rt2x00queue_full(queue))
366 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
367 rt2x00usb_kick_rx_entry);
368 break;
369 default:
370 break;
371 }
372}
373EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue);
374
375static void rt2x00usb_flush_entry(struct queue_entry *entry)
376{
377 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
378 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
379 struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data;
380
381 if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))
382 return;
383
384 usb_kill_urb(entry_priv->urb);
385
386 /*
387 * Kill guardian urb (if required by driver).
388 */
389 if ((entry->queue->qid == QID_BEACON) &&
390 (test_bit(DRIVER_REQUIRE_BEACON_GUARD, &rt2x00dev->flags)))
391 usb_kill_urb(bcn_priv->guardian_urb);
392}
393
394void rt2x00usb_flush_queue(struct data_queue *queue)
395{
396 struct work_struct *completion;
397 unsigned int i;
398
399 rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX,
400 rt2x00usb_flush_entry);
401
402 /*
403 * Obtain the queue completion handler
404 */
405 switch (queue->qid) {
406 case QID_AC_VO:
407 case QID_AC_VI:
408 case QID_AC_BE:
409 case QID_AC_BK:
410 completion = &queue->rt2x00dev->txdone_work;
411 break;
412 case QID_RX:
413 completion = &queue->rt2x00dev->rxdone_work;
414 break;
415 default:
416 return;
417 }
418
419 for (i = 0; i < 20; i++) {
420 /*
421 * Check if the driver is already done, otherwise we
422 * have to sleep a little while to give the driver/hw
423 * the oppurtunity to complete interrupt process itself.
424 */
425 if (rt2x00queue_empty(queue))
426 break;
427
428 /*
429 * Schedule the completion handler manually, when this
430 * worker function runs, it should cleanup the queue.
431 */
432 ieee80211_queue_work(queue->rt2x00dev->hw, completion);
433
434 /*
435 * Wait for a little while to give the driver
436 * the oppurtunity to recover itself.
437 */
438 msleep(10);
439 }
440}
441EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue);
442
443static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue)
444{
445 WARNING(queue->rt2x00dev, "TX queue %d DMA timed out,"
446 " invoke forced forced reset\n", queue->qid);
447
448 rt2x00queue_flush_queue(queue, true);
449}
450
451static void rt2x00usb_watchdog_tx_status(struct data_queue *queue)
452{
453 WARNING(queue->rt2x00dev, "TX queue %d status timed out,"
454 " invoke forced tx handler\n", queue->qid);
455
456 ieee80211_queue_work(queue->rt2x00dev->hw, &queue->rt2x00dev->txdone_work);
457}
458
459void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev)
460{
461 struct data_queue *queue;
462
463 tx_queue_for_each(rt2x00dev, queue) {
464 if (!rt2x00queue_empty(queue)) {
465 if (rt2x00queue_dma_timeout(queue))
466 rt2x00usb_watchdog_tx_dma(queue);
467 if (rt2x00queue_status_timeout(queue))
468 rt2x00usb_watchdog_tx_status(queue);
469 }
470 }
471}
472EXPORT_SYMBOL_GPL(rt2x00usb_watchdog);
473
432/* 474/*
433 * Radio handlers 475 * Radio handlers
434 */ 476 */
@@ -436,12 +478,6 @@ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev)
436{ 478{
437 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0, 479 rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0,
438 REGISTER_TIMEOUT); 480 REGISTER_TIMEOUT);
439
440 /*
441 * The USB version of kill_tx_queue also works
442 * on the RX queue.
443 */
444 rt2x00dev->ops->lib->kill_tx_queue(rt2x00dev->rx);
445} 481}
446EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); 482EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
447 483
@@ -450,25 +486,10 @@ EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio);
450 */ 486 */
451void rt2x00usb_clear_entry(struct queue_entry *entry) 487void rt2x00usb_clear_entry(struct queue_entry *entry)
452{ 488{
453 struct usb_device *usb_dev =
454 to_usb_device_intf(entry->queue->rt2x00dev->dev);
455 struct queue_entry_priv_usb *entry_priv = entry->priv_data;
456 int pipe;
457
458 entry->flags = 0; 489 entry->flags = 0;
459 490
460 if (entry->queue->qid == QID_RX) { 491 if (entry->queue->qid == QID_RX)
461 pipe = usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint); 492 rt2x00usb_kick_rx_entry(entry);
462 usb_fill_bulk_urb(entry_priv->urb, usb_dev, pipe,
463 entry->skb->data, entry->skb->len,
464 rt2x00usb_interrupt_rxdone, entry);
465
466 set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags);
467 if (usb_submit_urb(entry_priv->urb, GFP_ATOMIC)) {
468 set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
469 rt2x00lib_dmadone(entry);
470 }
471 }
472} 493}
473EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); 494EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry);
474 495
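rt2x00usb_flush_queue() above bounds its wait: it repeatedly queues the tx/rx completion worker and sleeps 10 ms, giving up after 20 iterations. A simplified userspace model of that bounded polling loop follows; the queue structure and the completion stand-in are assumptions for illustration, not the driver's own code.

#define _POSIX_C_SOURCE 199309L
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct model_queue {
        int pending;            /* frames still owned by the "device" */
};

static bool queue_empty(const struct model_queue *q)
{
        return q->pending == 0;
}

/* Stand-in for ieee80211_queue_work() running the txdone/rxdone worker. */
static void run_completion(struct model_queue *q)
{
        if (q->pending > 0)
                q->pending--;
}

static void flush_queue(struct model_queue *q)
{
        struct timespec ts = { .tv_sec = 0, .tv_nsec = 10 * 1000 * 1000 };
        unsigned int i;

        for (i = 0; i < 20; i++) {
                if (queue_empty(q))
                        break;
                run_completion(q);
                nanosleep(&ts, NULL);   /* msleep(10) equivalent */
        }

        if (!queue_empty(q))
                printf("flush failed, %d frames left\n", q->pending);
}

int main(void)
{
        struct model_queue q = { .pending = 5 };

        flush_queue(&q);
        printf("remaining: %d\n", q->pending);  /* 0 */
        return 0;
}
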
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index c2d997f67b3e..6aaf51fc7ad8 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -378,22 +378,22 @@ struct queue_entry_priv_usb_bcn {
378}; 378};
379 379
380/** 380/**
381 * rt2x00usb_kick_tx_queue - Kick data queue 381 * rt2x00usb_kick_queue - Kick data queue
382 * @queue: Data queue to kick 382 * @queue: Data queue to kick
383 * 383 *
384 * This will walk through all entries of the queue and push all pending 384 * This will walk through all entries of the queue and push all pending
385 * frames to the hardware as a single burst. 385 * frames to the hardware as a single burst.
386 */ 386 */
387void rt2x00usb_kick_tx_queue(struct data_queue *queue); 387void rt2x00usb_kick_queue(struct data_queue *queue);
388 388
389/** 389/**
390 * rt2x00usb_kill_tx_queue - Kill data queue 390 * rt2x00usb_flush_queue - Flush data queue
391 * @queue: Data queue to kill 391 * @queue: Data queue to stop
392 * 392 *
393 * This will walk through all entries of the queue and kill all 393 * This will walk through all entries of the queue and kill all
394	 * previously kicked frames before they can be send.	 394	 * URBs which were sent to the device.
395 */ 395 */
396void rt2x00usb_kill_tx_queue(struct data_queue *queue); 396void rt2x00usb_flush_queue(struct data_queue *queue);
397 397
398/** 398/**
399 * rt2x00usb_watchdog - Watchdog for USB communication 399 * rt2x00usb_watchdog - Watchdog for USB communication
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index af548c87f108..8de44dd401e0 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -1140,6 +1140,106 @@ dynamic_cca_tune:
1140} 1140}
1141 1141
1142/* 1142/*
1143 * Queue handlers.
1144 */
1145static void rt61pci_start_queue(struct data_queue *queue)
1146{
1147 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1148 u32 reg;
1149
1150 switch (queue->qid) {
1151 case QID_RX:
1152 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
1153 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
1154 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
1155 break;
1156 case QID_BEACON:
1157 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
1158 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1159 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1160 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1161 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1162 break;
1163 default:
1164 break;
1165 }
1166}
1167
1168static void rt61pci_kick_queue(struct data_queue *queue)
1169{
1170 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1171 u32 reg;
1172
1173 switch (queue->qid) {
1174 case QID_AC_VO:
1175 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1176 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1);
1177 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1178 break;
1179 case QID_AC_VI:
1180 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1181 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, 1);
1182 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1183 break;
1184 case QID_AC_BE:
1185 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1186 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, 1);
1187 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1188 break;
1189 case QID_AC_BK:
1190 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1191 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, 1);
1192 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1193 break;
1194 default:
1195 break;
1196 }
1197}
1198
1199static void rt61pci_stop_queue(struct data_queue *queue)
1200{
1201 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1202 u32 reg;
1203
1204 switch (queue->qid) {
1205 case QID_AC_VO:
1206 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1207 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, 1);
1208 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1209 break;
1210 case QID_AC_VI:
1211 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1212 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1);
1213 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1214 break;
1215 case QID_AC_BE:
1216 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1217 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1);
1218 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1219 break;
1220 case QID_AC_BK:
1221 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1222 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1);
1223 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1224 break;
1225 case QID_RX:
1226 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
1227 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1);
1228 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
1229 break;
1230 case QID_BEACON:
1231 rt2x00pci_register_read(rt2x00dev, TXRX_CSR9, &reg);
1232 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
1233 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
1234 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1235 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, reg);
1236 break;
1237 default:
1238 break;
1239 }
1240}
1241
1242/*
1143 * Firmware functions 1243 * Firmware functions
1144 */ 1244 */
1145static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) 1245static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -1616,18 +1716,6 @@ static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev)
1616/* 1716/*
1617 * Device state switch handlers. 1717 * Device state switch handlers.
1618 */ 1718 */
1619static void rt61pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
1620 enum dev_state state)
1621{
1622 u32 reg;
1623
1624 rt2x00pci_register_read(rt2x00dev, TXRX_CSR0, &reg);
1625 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
1626 (state == STATE_RADIO_RX_OFF) ||
1627 (state == STATE_RADIO_RX_OFF_LINK));
1628 rt2x00pci_register_write(rt2x00dev, TXRX_CSR0, reg);
1629}
1630
1631static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, 1719static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
1632 enum dev_state state) 1720 enum dev_state state)
1633{ 1721{
@@ -1744,12 +1832,6 @@ static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev,
1744 case STATE_RADIO_OFF: 1832 case STATE_RADIO_OFF:
1745 rt61pci_disable_radio(rt2x00dev); 1833 rt61pci_disable_radio(rt2x00dev);
1746 break; 1834 break;
1747 case STATE_RADIO_RX_ON:
1748 case STATE_RADIO_RX_ON_LINK:
1749 case STATE_RADIO_RX_OFF:
1750 case STATE_RADIO_RX_OFF_LINK:
1751 rt61pci_toggle_rx(rt2x00dev, state);
1752 break;
1753 case STATE_RADIO_IRQ_ON: 1835 case STATE_RADIO_IRQ_ON:
1754 case STATE_RADIO_IRQ_ON_ISR: 1836 case STATE_RADIO_IRQ_ON_ISR:
1755 case STATE_RADIO_IRQ_OFF: 1837 case STATE_RADIO_IRQ_OFF:
@@ -1789,10 +1871,10 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1789 * Start writing the descriptor words. 1871 * Start writing the descriptor words.
1790 */ 1872 */
1791 rt2x00_desc_read(txd, 1, &word); 1873 rt2x00_desc_read(txd, 1, &word);
1792 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid); 1874 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
1793 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1875 rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
1794 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1876 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
1795 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1877 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
1796 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); 1878 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1797 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1879 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1798 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 1880 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
@@ -1820,7 +1902,7 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1820 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 1902 rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1);
1821 rt2x00_desc_write(txd, 5, word); 1903 rt2x00_desc_write(txd, 5, word);
1822 1904
1823 if (txdesc->qid != QID_BEACON) { 1905 if (entry->queue->qid != QID_BEACON) {
1824 rt2x00_desc_read(txd, 6, &word); 1906 rt2x00_desc_read(txd, 6, &word);
1825 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, 1907 rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS,
1826 skbdesc->skb_dma); 1908 skbdesc->skb_dma);
@@ -1866,8 +1948,8 @@ static void rt61pci_write_tx_desc(struct queue_entry *entry,
1866 * Register descriptor details in skb frame descriptor. 1948 * Register descriptor details in skb frame descriptor.
1867 */ 1949 */
1868 skbdesc->desc = txd; 1950 skbdesc->desc = txd;
1869 skbdesc->desc_len = 1951 skbdesc->desc_len = (entry->queue->qid == QID_BEACON) ? TXINFO_SIZE :
1870 (txdesc->qid == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE; 1952 TXD_DESC_SIZE;
1871} 1953}
1872 1954
1873/* 1955/*
@@ -1879,6 +1961,7 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1879 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1961 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1880 struct queue_entry_priv_pci *entry_priv = entry->priv_data; 1962 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
1881 unsigned int beacon_base; 1963 unsigned int beacon_base;
1964 unsigned int padding_len;
1882 u32 reg; 1965 u32 reg;
1883 1966
1884 /* 1967 /*
@@ -1900,13 +1983,16 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1900 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); 1983 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
1901 1984
1902 /* 1985 /*
1903 * Write entire beacon with descriptor to register. 1986 * Write entire beacon with descriptor and padding to register.
1904 */ 1987 */
1988 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
1989 skb_pad(entry->skb, padding_len);
1905 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1990 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1906 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base, 1991 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base,
1907 entry_priv->desc, TXINFO_SIZE); 1992 entry_priv->desc, TXINFO_SIZE);
1908 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE, 1993 rt2x00pci_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE,
1909 entry->skb->data, entry->skb->len); 1994 entry->skb->data,
1995 entry->skb->len + padding_len);
1910 1996
1911 /* 1997 /*
1912 * Enable beaconing again. 1998 * Enable beaconing again.
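The beacon writers now pad the frame to a 32-bit boundary before writing it into the beacon registers, using roundup(skb->len, 4) - skb->len. A tiny standalone illustration of that padding arithmetic follows; the helper name is ours, not the kernel macro.

#include <stdio.h>

/* Round len up to a power-of-two alignment, like the kernel's roundup(). */
static unsigned int round_up_pow2(unsigned int len, unsigned int align)
{
        return (len + align - 1) & ~(align - 1);
}

int main(void)
{
        unsigned int len;

        for (len = 60; len <= 64; len++)
                printf("beacon len %u -> padding %u\n",
                       len, round_up_pow2(len, 4) - len);
        /* prints paddings 0, 3, 2, 1, 0 */
        return 0;
}
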
@@ -1928,37 +2014,6 @@ static void rt61pci_write_beacon(struct queue_entry *entry,
1928 entry->skb = NULL; 2014 entry->skb = NULL;
1929} 2015}
1930 2016
1931static void rt61pci_kick_tx_queue(struct data_queue *queue)
1932{
1933 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1934 u32 reg;
1935
1936 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1937 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, (queue->qid == QID_AC_BE));
1938 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, (queue->qid == QID_AC_BK));
1939 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, (queue->qid == QID_AC_VI));
1940 rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, (queue->qid == QID_AC_VO));
1941 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1942}
1943
1944static void rt61pci_kill_tx_queue(struct data_queue *queue)
1945{
1946 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1947 u32 reg;
1948
1949 if (queue->qid == QID_BEACON) {
1950 rt2x00pci_register_write(rt2x00dev, TXRX_CSR9, 0);
1951 return;
1952 }
1953
1954 rt2x00pci_register_read(rt2x00dev, TX_CNTL_CSR, &reg);
1955 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, (queue->qid == QID_AC_BE));
1956 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, (queue->qid == QID_AC_BK));
1957 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, (queue->qid == QID_AC_VI));
1958 rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, (queue->qid == QID_AC_VO));
1959 rt2x00pci_register_write(rt2x00dev, TX_CNTL_CSR, reg);
1960}
1961
1962/* 2017/*
1963 * RX control handlers 2018 * RX control handlers
1964 */ 2019 */
@@ -2078,7 +2133,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
2078 * that the TX_STA_FIFO stack has a size of 16. We stick to our 2133 * that the TX_STA_FIFO stack has a size of 16. We stick to our
2079 * tx ring size for now. 2134 * tx ring size for now.
2080 */ 2135 */
2081 for (i = 0; i < TX_ENTRIES; i++) { 2136 for (i = 0; i < rt2x00dev->ops->tx->entry_num; i++) {
2082 rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg); 2137 rt2x00pci_register_read(rt2x00dev, STA_CSR4, &reg);
2083 if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) 2138 if (!rt2x00_get_field32(reg, STA_CSR4_VALID))
2084 break; 2139 break;
@@ -2824,6 +2879,7 @@ static const struct ieee80211_ops rt61pci_mac80211_ops = {
2824 .conf_tx = rt61pci_conf_tx, 2879 .conf_tx = rt61pci_conf_tx,
2825 .get_tsf = rt61pci_get_tsf, 2880 .get_tsf = rt61pci_get_tsf,
2826 .rfkill_poll = rt2x00mac_rfkill_poll, 2881 .rfkill_poll = rt2x00mac_rfkill_poll,
2882 .flush = rt2x00mac_flush,
2827}; 2883};
2828 2884
2829static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { 2885static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
@@ -2842,10 +2898,11 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2842 .link_stats = rt61pci_link_stats, 2898 .link_stats = rt61pci_link_stats,
2843 .reset_tuner = rt61pci_reset_tuner, 2899 .reset_tuner = rt61pci_reset_tuner,
2844 .link_tuner = rt61pci_link_tuner, 2900 .link_tuner = rt61pci_link_tuner,
2901 .start_queue = rt61pci_start_queue,
2902 .kick_queue = rt61pci_kick_queue,
2903 .stop_queue = rt61pci_stop_queue,
2845 .write_tx_desc = rt61pci_write_tx_desc, 2904 .write_tx_desc = rt61pci_write_tx_desc,
2846 .write_beacon = rt61pci_write_beacon, 2905 .write_beacon = rt61pci_write_beacon,
2847 .kick_tx_queue = rt61pci_kick_tx_queue,
2848 .kill_tx_queue = rt61pci_kill_tx_queue,
2849 .fill_rxdone = rt61pci_fill_rxdone, 2906 .fill_rxdone = rt61pci_fill_rxdone,
2850 .config_shared_key = rt61pci_config_shared_key, 2907 .config_shared_key = rt61pci_config_shared_key,
2851 .config_pairwise_key = rt61pci_config_pairwise_key, 2908 .config_pairwise_key = rt61pci_config_pairwise_key,
@@ -2857,21 +2914,21 @@ static const struct rt2x00lib_ops rt61pci_rt2x00_ops = {
2857}; 2914};
2858 2915
2859static const struct data_queue_desc rt61pci_queue_rx = { 2916static const struct data_queue_desc rt61pci_queue_rx = {
2860 .entry_num = RX_ENTRIES, 2917 .entry_num = 32,
2861 .data_size = DATA_FRAME_SIZE, 2918 .data_size = DATA_FRAME_SIZE,
2862 .desc_size = RXD_DESC_SIZE, 2919 .desc_size = RXD_DESC_SIZE,
2863 .priv_size = sizeof(struct queue_entry_priv_pci), 2920 .priv_size = sizeof(struct queue_entry_priv_pci),
2864}; 2921};
2865 2922
2866static const struct data_queue_desc rt61pci_queue_tx = { 2923static const struct data_queue_desc rt61pci_queue_tx = {
2867 .entry_num = TX_ENTRIES, 2924 .entry_num = 32,
2868 .data_size = DATA_FRAME_SIZE, 2925 .data_size = DATA_FRAME_SIZE,
2869 .desc_size = TXD_DESC_SIZE, 2926 .desc_size = TXD_DESC_SIZE,
2870 .priv_size = sizeof(struct queue_entry_priv_pci), 2927 .priv_size = sizeof(struct queue_entry_priv_pci),
2871}; 2928};
2872 2929
2873static const struct data_queue_desc rt61pci_queue_bcn = { 2930static const struct data_queue_desc rt61pci_queue_bcn = {
2874 .entry_num = 4 * BEACON_ENTRIES, 2931 .entry_num = 4,
2875 .data_size = 0, /* No DMA required for beacons */ 2932 .data_size = 0, /* No DMA required for beacons */
2876 .desc_size = TXINFO_SIZE, 2933 .desc_size = TXINFO_SIZE,
2877 .priv_size = sizeof(struct queue_entry_priv_pci), 2934 .priv_size = sizeof(struct queue_entry_priv_pci),
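The new rt61pci start/kick/stop queue handlers above all follow the same read-modify-write pattern: read a CSR, update a single FIELD32() bitfield, write the register back. Below is a standalone sketch of that field manipulation; the helper functions and the 0x1-0x8 kick masks mirror TX_CNTL_CSR_KICK_TX_AC0..AC3 but are illustrative assumptions.

#include <stdint.h>
#include <stdio.h>

/* Lowest set bit position of a non-zero mask (the FIELD32 shift). */
static unsigned int field_shift(uint32_t mask)
{
        unsigned int shift = 0;

        while (!(mask & 1)) {
                mask >>= 1;
                shift++;
        }
        return shift;
}

/* Read-modify-write of one bitfield, like rt2x00_set_field32(). */
static void set_field32(uint32_t *reg, uint32_t mask, uint32_t value)
{
        *reg = (*reg & ~mask) | ((value << field_shift(mask)) & mask);
}

int main(void)
{
        /* Illustrative kick masks for AC0..AC3 (qid 0..3). */
        static const uint32_t kick_mask[] = { 0x1, 0x2, 0x4, 0x8 };
        uint32_t reg = 0;
        unsigned int qid = 2;   /* kick only the third AC queue */

        set_field32(&reg, kick_mask[qid], 1);
        printf("TX_CNTL_CSR = 0x%08x\n", (unsigned int)reg);    /* 0x00000004 */
        return 0;
}
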
diff --git a/drivers/net/wireless/rt2x00/rt61pci.h b/drivers/net/wireless/rt2x00/rt61pci.h
index e2e728ab0b2e..e3cd6db76b0e 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.h
+++ b/drivers/net/wireless/rt2x00/rt61pci.h
@@ -412,7 +412,7 @@ struct hw_pairwise_ta_entry {
412 * DROP_VERSION_ERROR: Drop version error frame. 412 * DROP_VERSION_ERROR: Drop version error frame.
413 * DROP_MULTICAST: Drop multicast frames. 413 * DROP_MULTICAST: Drop multicast frames.
414	 * DROP_BROADCAST: Drop broadcast frames.	 414	 * DROP_BROADCAST: Drop broadcast frames.
415 * ROP_ACK_CTS: Drop received ACK and CTS. 415 * DROP_ACK_CTS: Drop received ACK and CTS.
416 */ 416 */
417#define TXRX_CSR0 0x3040 417#define TXRX_CSR0 0x3040
418#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) 418#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff)
@@ -784,25 +784,25 @@ struct hw_pairwise_ta_entry {
784 */ 784 */
785 785
786/* 786/*
787 * AC0_BASE_CSR: AC_BK base address. 787 * AC0_BASE_CSR: AC_VO base address.
788 */ 788 */
789#define AC0_BASE_CSR 0x3400 789#define AC0_BASE_CSR 0x3400
790#define AC0_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) 790#define AC0_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
791 791
792/* 792/*
793 * AC1_BASE_CSR: AC_BE base address. 793 * AC1_BASE_CSR: AC_VI base address.
794 */ 794 */
795#define AC1_BASE_CSR 0x3404 795#define AC1_BASE_CSR 0x3404
796#define AC1_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) 796#define AC1_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
797 797
798/* 798/*
799 * AC2_BASE_CSR: AC_VI base address. 799 * AC2_BASE_CSR: AC_BE base address.
800 */ 800 */
801#define AC2_BASE_CSR 0x3408 801#define AC2_BASE_CSR 0x3408
802#define AC2_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) 802#define AC2_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
803 803
804/* 804/*
805 * AC3_BASE_CSR: AC_VO base address. 805 * AC3_BASE_CSR: AC_BK base address.
806 */ 806 */
807#define AC3_BASE_CSR 0x340c 807#define AC3_BASE_CSR 0x340c
808#define AC3_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) 808#define AC3_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
@@ -814,7 +814,7 @@ struct hw_pairwise_ta_entry {
814#define MGMT_BASE_CSR_RING_REGISTER FIELD32(0xffffffff) 814#define MGMT_BASE_CSR_RING_REGISTER FIELD32(0xffffffff)
815 815
816/* 816/*
817 * TX_RING_CSR0: TX Ring size for AC_BK, AC_BE, AC_VI, AC_VO. 817 * TX_RING_CSR0: TX Ring size for AC_VO, AC_VI, AC_BE, AC_BK.
818 */ 818 */
819#define TX_RING_CSR0 0x3418 819#define TX_RING_CSR0 0x3418
820#define TX_RING_CSR0_AC0_RING_SIZE FIELD32(0x000000ff) 820#define TX_RING_CSR0_AC0_RING_SIZE FIELD32(0x000000ff)
@@ -833,10 +833,10 @@ struct hw_pairwise_ta_entry {
833 833
834/* 834/*
835 * AIFSN_CSR: AIFSN for each EDCA AC. 835 * AIFSN_CSR: AIFSN for each EDCA AC.
836 * AIFSN0: For AC_BK. 836 * AIFSN0: For AC_VO.
837 * AIFSN1: For AC_BE. 837 * AIFSN1: For AC_VI.
838 * AIFSN2: For AC_VI. 838 * AIFSN2: For AC_BE.
839 * AIFSN3: For AC_VO. 839 * AIFSN3: For AC_BK.
840 */ 840 */
841#define AIFSN_CSR 0x3420 841#define AIFSN_CSR 0x3420
842#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f) 842#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f)
@@ -846,10 +846,10 @@ struct hw_pairwise_ta_entry {
846 846
847/* 847/*
848 * CWMIN_CSR: CWmin for each EDCA AC. 848 * CWMIN_CSR: CWmin for each EDCA AC.
849 * CWMIN0: For AC_BK. 849 * CWMIN0: For AC_VO.
850 * CWMIN1: For AC_BE. 850 * CWMIN1: For AC_VI.
851 * CWMIN2: For AC_VI. 851 * CWMIN2: For AC_BE.
852 * CWMIN3: For AC_VO. 852 * CWMIN3: For AC_BK.
853 */ 853 */
854#define CWMIN_CSR 0x3424 854#define CWMIN_CSR 0x3424
855#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f) 855#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f)
@@ -859,10 +859,10 @@ struct hw_pairwise_ta_entry {
859 859
860/* 860/*
861 * CWMAX_CSR: CWmax for each EDCA AC. 861 * CWMAX_CSR: CWmax for each EDCA AC.
862 * CWMAX0: For AC_BK. 862 * CWMAX0: For AC_VO.
863 * CWMAX1: For AC_BE. 863 * CWMAX1: For AC_VI.
864 * CWMAX2: For AC_VI. 864 * CWMAX2: For AC_BE.
865 * CWMAX3: For AC_VO. 865 * CWMAX3: For AC_BK.
866 */ 866 */
867#define CWMAX_CSR 0x3428 867#define CWMAX_CSR 0x3428
868#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f) 868#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f)
@@ -883,14 +883,14 @@ struct hw_pairwise_ta_entry {
883 883
884/* 884/*
885 * TX_CNTL_CSR: KICK/Abort TX. 885 * TX_CNTL_CSR: KICK/Abort TX.
886 * KICK_TX_AC0: For AC_BK. 886 * KICK_TX_AC0: For AC_VO.
887 * KICK_TX_AC1: For AC_BE. 887 * KICK_TX_AC1: For AC_VI.
888 * KICK_TX_AC2: For AC_VI. 888 * KICK_TX_AC2: For AC_BE.
889 * KICK_TX_AC3: For AC_VO. 889 * KICK_TX_AC3: For AC_BK.
890 * ABORT_TX_AC0: For AC_BK. 890 * ABORT_TX_AC0: For AC_VO.
891 * ABORT_TX_AC1: For AC_BE. 891 * ABORT_TX_AC1: For AC_VI.
892 * ABORT_TX_AC2: For AC_VI. 892 * ABORT_TX_AC2: For AC_BE.
893 * ABORT_TX_AC3: For AC_VO. 893 * ABORT_TX_AC3: For AC_BK.
894 */ 894 */
895#define TX_CNTL_CSR 0x3430 895#define TX_CNTL_CSR 0x3430
896#define TX_CNTL_CSR_KICK_TX_AC0 FIELD32(0x00000001) 896#define TX_CNTL_CSR_KICK_TX_AC0 FIELD32(0x00000001)
@@ -1010,18 +1010,18 @@ struct hw_pairwise_ta_entry {
1010#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040) 1010#define E2PROM_CSR_LOAD_STATUS FIELD32(0x00000040)
1011 1011
1012/* 1012/*
1013 * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register. 1013 * AC_TXOP_CSR0: AC_VO/AC_VI TXOP register.
1014 * AC0_TX_OP: For AC_BK, in unit of 32us. 1014 * AC0_TX_OP: For AC_VO, in unit of 32us.
1015 * AC1_TX_OP: For AC_BE, in unit of 32us. 1015 * AC1_TX_OP: For AC_VI, in unit of 32us.
1016 */ 1016 */
1017#define AC_TXOP_CSR0 0x3474 1017#define AC_TXOP_CSR0 0x3474
1018#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff) 1018#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff)
1019#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000) 1019#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000)
1020 1020
1021/* 1021/*
1022 * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register. 1022 * AC_TXOP_CSR1: AC_BE/AC_BK TXOP register.
1023 * AC2_TX_OP: For AC_VI, in unit of 32us. 1023 * AC2_TX_OP: For AC_BE, in unit of 32us.
1024 * AC3_TX_OP: For AC_VO, in unit of 32us. 1024 * AC3_TX_OP: For AC_BK, in unit of 32us.
1025 */ 1025 */
1026#define AC_TXOP_CSR1 0x3478 1026#define AC_TXOP_CSR1 0x3478
1027#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff) 1027#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff)
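With the reordered enum (QID_AC_VO = 0 through QID_AC_BK = 3), the per-AC registers AC0..AC3 documented above line up directly with the qid value, so per-AC resources can simply be indexed by qid. A trivial standalone check of that mapping follows; names only, no real register access, and the register name strings are illustrative.

#include <stdio.h>

enum data_queue_qid { QID_AC_VO = 0, QID_AC_VI = 1, QID_AC_BE = 2, QID_AC_BK = 3 };

int main(void)
{
        static const char *const ac_name[] = { "AC_VO", "AC_VI", "AC_BE", "AC_BK" };
        unsigned int qid;

        for (qid = QID_AC_VO; qid <= QID_AC_BK; qid++)
                printf("AC%u_BASE_CSR / AIFSN%u / CWMIN%u -> %s\n",
                       qid, qid, qid, ac_name[qid]);
        return 0;
}
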
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 9be8089317e4..0b4e8590cbb7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -40,7 +40,7 @@
40/* 40/*
41 * Allow hardware encryption to be disabled. 41 * Allow hardware encryption to be disabled.
42 */ 42 */
43static int modparam_nohwcrypt = 0; 43static int modparam_nohwcrypt;
44module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO); 44module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
45MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 45MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
46 46
@@ -1031,6 +1031,55 @@ dynamic_cca_tune:
1031} 1031}
1032 1032
1033/* 1033/*
1034 * Queue handlers.
1035 */
1036static void rt73usb_start_queue(struct data_queue *queue)
1037{
1038 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1039 u32 reg;
1040
1041 switch (queue->qid) {
1042 case QID_RX:
1043 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
1044 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0);
1045 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
1046 break;
1047 case QID_BEACON:
1048 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1049 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1);
1050 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1);
1051 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1);
1052 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1053 break;
1054 default:
1055 break;
1056 }
1057}
1058
1059static void rt73usb_stop_queue(struct data_queue *queue)
1060{
1061 struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;
1062 u32 reg;
1063
1064 switch (queue->qid) {
1065 case QID_RX:
1066 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
1067 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1);
1068 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
1069 break;
1070 case QID_BEACON:
1071 rt2x00usb_register_read(rt2x00dev, TXRX_CSR9, &reg);
1072 rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0);
1073 rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0);
1074 rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0);
1075 rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg);
1076 break;
1077 default:
1078 break;
1079 }
1080}
1081
1082/*
1034 * Firmware functions 1083 * Firmware functions
1035 */ 1084 */
1036static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) 1085static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -1324,18 +1373,6 @@ static int rt73usb_init_bbp(struct rt2x00_dev *rt2x00dev)
1324/* 1373/*
1325 * Device state switch handlers. 1374 * Device state switch handlers.
1326 */ 1375 */
1327static void rt73usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
1328 enum dev_state state)
1329{
1330 u32 reg;
1331
1332 rt2x00usb_register_read(rt2x00dev, TXRX_CSR0, &reg);
1333 rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX,
1334 (state == STATE_RADIO_RX_OFF) ||
1335 (state == STATE_RADIO_RX_OFF_LINK));
1336 rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg);
1337}
1338
1339static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev) 1376static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev)
1340{ 1377{
1341 /* 1378 /*
@@ -1402,12 +1439,6 @@ static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev,
1402 case STATE_RADIO_OFF: 1439 case STATE_RADIO_OFF:
1403 rt73usb_disable_radio(rt2x00dev); 1440 rt73usb_disable_radio(rt2x00dev);
1404 break; 1441 break;
1405 case STATE_RADIO_RX_ON:
1406 case STATE_RADIO_RX_ON_LINK:
1407 case STATE_RADIO_RX_OFF:
1408 case STATE_RADIO_RX_OFF_LINK:
1409 rt73usb_toggle_rx(rt2x00dev, state);
1410 break;
1411 case STATE_RADIO_IRQ_ON: 1442 case STATE_RADIO_IRQ_ON:
1412 case STATE_RADIO_IRQ_ON_ISR: 1443 case STATE_RADIO_IRQ_ON_ISR:
1413 case STATE_RADIO_IRQ_OFF: 1444 case STATE_RADIO_IRQ_OFF:
@@ -1472,10 +1503,10 @@ static void rt73usb_write_tx_desc(struct queue_entry *entry,
1472 rt2x00_desc_write(txd, 0, word); 1503 rt2x00_desc_write(txd, 0, word);
1473 1504
1474 rt2x00_desc_read(txd, 1, &word); 1505 rt2x00_desc_read(txd, 1, &word);
1475 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, txdesc->qid); 1506 rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid);
1476 rt2x00_set_field32(&word, TXD_W1_AIFSN, txdesc->aifs); 1507 rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs);
1477 rt2x00_set_field32(&word, TXD_W1_CWMIN, txdesc->cw_min); 1508 rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min);
1478 rt2x00_set_field32(&word, TXD_W1_CWMAX, txdesc->cw_max); 1509 rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max);
1479 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); 1510 rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset);
1480 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, 1511 rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE,
1481 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); 1512 test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
@@ -1515,6 +1546,7 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1515{ 1546{
1516 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; 1547 struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
1517 unsigned int beacon_base; 1548 unsigned int beacon_base;
1549 unsigned int padding_len;
1518 u32 reg; 1550 u32 reg;
1519 1551
1520 /* 1552 /*
@@ -1542,11 +1574,13 @@ static void rt73usb_write_beacon(struct queue_entry *entry,
1542 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb); 1574 rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry->skb);
1543 1575
1544 /* 1576 /*
1545 * Write entire beacon with descriptor to register. 1577 * Write entire beacon with descriptor and padding to register.
1546 */ 1578 */
1579 padding_len = roundup(entry->skb->len, 4) - entry->skb->len;
1580 skb_pad(entry->skb, padding_len);
1547 beacon_base = HW_BEACON_OFFSET(entry->entry_idx); 1581 beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
1548 rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, 1582 rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data,
1549 entry->skb->data, entry->skb->len); 1583 entry->skb->len + padding_len);
1550 1584
1551 /* 1585 /*
1552 * Enable beaconing again. 1586 * Enable beaconing again.
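
The padding added above keeps the beacon length a multiple of four bytes, presumably because the register multiwrite to the beacon area operates on whole 32-bit words. A minimal sketch of the arithmetic (not driver code; roundup() is the helper from <linux/kernel.h>):

#include <linux/kernel.h>

/* Length actually written to the beacon registers for a given frame length. */
static unsigned int beacon_write_len(unsigned int frame_len)
{
	unsigned int padding_len = roundup(frame_len, 4) - frame_len;

	return frame_len + padding_len;	/* e.g. 110 -> 112, 84 stays 84 */
}
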
@@ -1582,14 +1616,6 @@ static int rt73usb_get_tx_data_len(struct queue_entry *entry)
1582 return length; 1616 return length;
1583} 1617}
1584 1618
1585static void rt73usb_kill_tx_queue(struct data_queue *queue)
1586{
1587 if (queue->qid == QID_BEACON)
1588 rt2x00usb_register_write(queue->rt2x00dev, TXRX_CSR9, 0);
1589
1590 rt2x00usb_kill_tx_queue(queue);
1591}
1592
1593/* 1619/*
1594 * RX control handlers 1620 * RX control handlers
1595 */ 1621 */
@@ -2264,6 +2290,7 @@ static const struct ieee80211_ops rt73usb_mac80211_ops = {
2264 .conf_tx = rt73usb_conf_tx, 2290 .conf_tx = rt73usb_conf_tx,
2265 .get_tsf = rt73usb_get_tsf, 2291 .get_tsf = rt73usb_get_tsf,
2266 .rfkill_poll = rt2x00mac_rfkill_poll, 2292 .rfkill_poll = rt2x00mac_rfkill_poll,
2293 .flush = rt2x00mac_flush,
2267}; 2294};
2268 2295
2269static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { 2296static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
@@ -2280,11 +2307,13 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2280 .reset_tuner = rt73usb_reset_tuner, 2307 .reset_tuner = rt73usb_reset_tuner,
2281 .link_tuner = rt73usb_link_tuner, 2308 .link_tuner = rt73usb_link_tuner,
2282 .watchdog = rt2x00usb_watchdog, 2309 .watchdog = rt2x00usb_watchdog,
2310 .start_queue = rt73usb_start_queue,
2311 .kick_queue = rt2x00usb_kick_queue,
2312 .stop_queue = rt73usb_stop_queue,
2313 .flush_queue = rt2x00usb_flush_queue,
2283 .write_tx_desc = rt73usb_write_tx_desc, 2314 .write_tx_desc = rt73usb_write_tx_desc,
2284 .write_beacon = rt73usb_write_beacon, 2315 .write_beacon = rt73usb_write_beacon,
2285 .get_tx_data_len = rt73usb_get_tx_data_len, 2316 .get_tx_data_len = rt73usb_get_tx_data_len,
2286 .kick_tx_queue = rt2x00usb_kick_tx_queue,
2287 .kill_tx_queue = rt73usb_kill_tx_queue,
2288 .fill_rxdone = rt73usb_fill_rxdone, 2317 .fill_rxdone = rt73usb_fill_rxdone,
2289 .config_shared_key = rt73usb_config_shared_key, 2318 .config_shared_key = rt73usb_config_shared_key,
2290 .config_pairwise_key = rt73usb_config_pairwise_key, 2319 .config_pairwise_key = rt73usb_config_pairwise_key,
@@ -2296,21 +2325,21 @@ static const struct rt2x00lib_ops rt73usb_rt2x00_ops = {
2296}; 2325};
2297 2326
2298static const struct data_queue_desc rt73usb_queue_rx = { 2327static const struct data_queue_desc rt73usb_queue_rx = {
2299 .entry_num = RX_ENTRIES, 2328 .entry_num = 32,
2300 .data_size = DATA_FRAME_SIZE, 2329 .data_size = DATA_FRAME_SIZE,
2301 .desc_size = RXD_DESC_SIZE, 2330 .desc_size = RXD_DESC_SIZE,
2302 .priv_size = sizeof(struct queue_entry_priv_usb), 2331 .priv_size = sizeof(struct queue_entry_priv_usb),
2303}; 2332};
2304 2333
2305static const struct data_queue_desc rt73usb_queue_tx = { 2334static const struct data_queue_desc rt73usb_queue_tx = {
2306 .entry_num = TX_ENTRIES, 2335 .entry_num = 32,
2307 .data_size = DATA_FRAME_SIZE, 2336 .data_size = DATA_FRAME_SIZE,
2308 .desc_size = TXD_DESC_SIZE, 2337 .desc_size = TXD_DESC_SIZE,
2309 .priv_size = sizeof(struct queue_entry_priv_usb), 2338 .priv_size = sizeof(struct queue_entry_priv_usb),
2310}; 2339};
2311 2340
2312static const struct data_queue_desc rt73usb_queue_bcn = { 2341static const struct data_queue_desc rt73usb_queue_bcn = {
2313 .entry_num = 4 * BEACON_ENTRIES, 2342 .entry_num = 4,
2314 .data_size = MGMT_FRAME_SIZE, 2343 .data_size = MGMT_FRAME_SIZE,
2315 .desc_size = TXINFO_SIZE, 2344 .desc_size = TXINFO_SIZE,
2316 .priv_size = sizeof(struct queue_entry_priv_usb), 2345 .priv_size = sizeof(struct queue_entry_priv_usb),
diff --git a/drivers/net/wireless/rt2x00/rt73usb.h b/drivers/net/wireless/rt2x00/rt73usb.h
index 44d5b2bebd39..9f6b470414d3 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.h
+++ b/drivers/net/wireless/rt2x00/rt73usb.h
@@ -322,7 +322,7 @@ struct hw_pairwise_ta_entry {
322 * DROP_VERSION_ERROR: Drop version error frame. 322 * DROP_VERSION_ERROR: Drop version error frame.
323 * DROP_MULTICAST: Drop multicast frames. 323 * DROP_MULTICAST: Drop multicast frames.
324 * DROP_BORADCAST: Drop broadcast frames. 324 * DROP_BORADCAST: Drop broadcast frames.
325 * ROP_ACK_CTS: Drop received ACK and CTS. 325 * DROP_ACK_CTS: Drop received ACK and CTS.
326 */ 326 */
327#define TXRX_CSR0 0x3040 327#define TXRX_CSR0 0x3040
328#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff) 328#define TXRX_CSR0_RX_ACK_TIMEOUT FIELD32(0x000001ff)
@@ -689,10 +689,10 @@ struct hw_pairwise_ta_entry {
689 689
690/* 690/*
691 * AIFSN_CSR: AIFSN for each EDCA AC. 691 * AIFSN_CSR: AIFSN for each EDCA AC.
692 * AIFSN0: For AC_BK. 692 * AIFSN0: For AC_VO.
693 * AIFSN1: For AC_BE. 693 * AIFSN1: For AC_VI.
694 * AIFSN2: For AC_VI. 694 * AIFSN2: For AC_BE.
695 * AIFSN3: For AC_VO. 695 * AIFSN3: For AC_BK.
696 */ 696 */
697#define AIFSN_CSR 0x0400 697#define AIFSN_CSR 0x0400
698#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f) 698#define AIFSN_CSR_AIFSN0 FIELD32(0x0000000f)
@@ -702,10 +702,10 @@ struct hw_pairwise_ta_entry {
702 702
703/* 703/*
704 * CWMIN_CSR: CWmin for each EDCA AC. 704 * CWMIN_CSR: CWmin for each EDCA AC.
705 * CWMIN0: For AC_BK. 705 * CWMIN0: For AC_VO.
706 * CWMIN1: For AC_BE. 706 * CWMIN1: For AC_VI.
707 * CWMIN2: For AC_VI. 707 * CWMIN2: For AC_BE.
708 * CWMIN3: For AC_VO. 708 * CWMIN3: For AC_BK.
709 */ 709 */
710#define CWMIN_CSR 0x0404 710#define CWMIN_CSR 0x0404
711#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f) 711#define CWMIN_CSR_CWMIN0 FIELD32(0x0000000f)
@@ -715,10 +715,10 @@ struct hw_pairwise_ta_entry {
715 715
716/* 716/*
717 * CWMAX_CSR: CWmax for each EDCA AC. 717 * CWMAX_CSR: CWmax for each EDCA AC.
718 * CWMAX0: For AC_BK. 718 * CWMAX0: For AC_VO.
719 * CWMAX1: For AC_BE. 719 * CWMAX1: For AC_VI.
720 * CWMAX2: For AC_VI. 720 * CWMAX2: For AC_BE.
721 * CWMAX3: For AC_VO. 721 * CWMAX3: For AC_BK.
722 */ 722 */
723#define CWMAX_CSR 0x0408 723#define CWMAX_CSR 0x0408
724#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f) 724#define CWMAX_CSR_CWMAX0 FIELD32(0x0000000f)
@@ -727,18 +727,18 @@ struct hw_pairwise_ta_entry {
727#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000) 727#define CWMAX_CSR_CWMAX3 FIELD32(0x0000f000)
728 728
729/* 729/*
730 * AC_TXOP_CSR0: AC_BK/AC_BE TXOP register. 730 * AC_TXOP_CSR0: AC_VO/AC_VI TXOP register.
731 * AC0_TX_OP: For AC_BK, in unit of 32us. 731 * AC0_TX_OP: For AC_VO, in unit of 32us.
732 * AC1_TX_OP: For AC_BE, in unit of 32us. 732 * AC1_TX_OP: For AC_VI, in unit of 32us.
733 */ 733 */
734#define AC_TXOP_CSR0 0x040c 734#define AC_TXOP_CSR0 0x040c
735#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff) 735#define AC_TXOP_CSR0_AC0_TX_OP FIELD32(0x0000ffff)
736#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000) 736#define AC_TXOP_CSR0_AC1_TX_OP FIELD32(0xffff0000)
737 737
738/* 738/*
739 * AC_TXOP_CSR1: AC_VO/AC_VI TXOP register. 739 * AC_TXOP_CSR1: AC_BE/AC_BK TXOP register.
740 * AC2_TX_OP: For AC_VI, in unit of 32us. 740 * AC2_TX_OP: For AC_BE, in unit of 32us.
741 * AC3_TX_OP: For AC_VO, in unit of 32us. 741 * AC3_TX_OP: For AC_BK, in unit of 32us.
742 */ 742 */
743#define AC_TXOP_CSR1 0x0410 743#define AC_TXOP_CSR1 0x0410
744#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff) 744#define AC_TXOP_CSR1_AC2_TX_OP FIELD32(0x0000ffff)
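
The relabelled comments line the register fields up with mac80211's queue numbering, where queue 0 is AC_VO and queue 3 is AC_BK, so field N always belongs to queue N. A rough illustration of packing per-AC values positionally under that assumption follows; it is not the driver's code, and the nibble-per-field layout is inferred from the FIELD32 masks shown above.

#include <linux/types.h>

/*
 * Pack one 4-bit AIFSN value per access category into a single register
 * image, index 0 = AC_VO ... index 3 = AC_BK, matching the field order
 * documented above. Illustrative only.
 */
static u32 pack_aifsn(const u8 aifsn[4])
{
	u32 reg = 0;
	int queue;

	for (queue = 0; queue < 4; queue++)
		reg |= (u32)(aifsn[queue] & 0xf) << (4 * queue);

	return reg;	/* candidate value for AIFSN_CSR */
}
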
diff --git a/drivers/net/wireless/rtl818x/Makefile b/drivers/net/wireless/rtl818x/Makefile
index 93cbfbedb46d..997569076923 100644
--- a/drivers/net/wireless/rtl818x/Makefile
+++ b/drivers/net/wireless/rtl818x/Makefile
@@ -1,7 +1,2 @@
1rtl8180-objs := rtl8180_dev.o rtl8180_rtl8225.o rtl8180_sa2400.o rtl8180_max2820.o rtl8180_grf5101.o 1obj-$(CONFIG_RTL8180) += rtl8180/
2rtl8187-objs := rtl8187_dev.o rtl8187_rtl8225.o rtl8187_leds.o rtl8187_rfkill.o 2obj-$(CONFIG_RTL8187) += rtl8187/
3
4obj-$(CONFIG_RTL8180) += rtl8180.o
5obj-$(CONFIG_RTL8187) += rtl8187.o
6
7
diff --git a/drivers/net/wireless/rtl818x/rtl8180/Makefile b/drivers/net/wireless/rtl818x/rtl8180/Makefile
new file mode 100644
index 000000000000..cb4fb8596f0b
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8180/Makefile
@@ -0,0 +1,5 @@
1rtl8180-objs := dev.o rtl8225.o sa2400.o max2820.o grf5101.o
2
3obj-$(CONFIG_RTL8180) += rtl8180.o
4
5ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8180_dev.c b/drivers/net/wireless/rtl818x/rtl8180/dev.c
index 707c688da618..5851cbc1e957 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/dev.c
@@ -24,10 +24,10 @@
24#include <net/mac80211.h> 24#include <net/mac80211.h>
25 25
26#include "rtl8180.h" 26#include "rtl8180.h"
27#include "rtl8180_rtl8225.h" 27#include "rtl8225.h"
28#include "rtl8180_sa2400.h" 28#include "sa2400.h"
29#include "rtl8180_max2820.h" 29#include "max2820.h"
30#include "rtl8180_grf5101.h" 30#include "grf5101.h"
31 31
32MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); 32MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
33MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>"); 33MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
diff --git a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
index 5cab9dfa8c07..5ee7589dd546 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_grf5101.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.c
@@ -25,7 +25,7 @@
25#include <net/mac80211.h> 25#include <net/mac80211.h>
26 26
27#include "rtl8180.h" 27#include "rtl8180.h"
28#include "rtl8180_grf5101.h" 28#include "grf5101.h"
29 29
30static const int grf5101_encode[] = { 30static const int grf5101_encode[] = {
31 0x0, 0x8, 0x4, 0xC, 31 0x0, 0x8, 0x4, 0xC,
diff --git a/drivers/net/wireless/rtl818x/rtl8180_grf5101.h b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
index 76647111bcff..76647111bcff 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_grf5101.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/grf5101.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180_max2820.c b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
index 16c4655181c0..667b3363d437 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_max2820.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.c
@@ -24,7 +24,7 @@
24#include <net/mac80211.h> 24#include <net/mac80211.h>
25 25
26#include "rtl8180.h" 26#include "rtl8180.h"
27#include "rtl8180_max2820.h" 27#include "max2820.h"
28 28
29static const u32 max2820_chan[] = { 29static const u32 max2820_chan[] = {
30 12, /* CH 1 */ 30 12, /* CH 1 */
diff --git a/drivers/net/wireless/rtl818x/rtl8180_max2820.h b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
index 61cf6d1e7d57..61cf6d1e7d57 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_max2820.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/max2820.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
index 30523314da43..30523314da43 100644
--- a/drivers/net/wireless/rtl818x/rtl8180.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8180.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
index 69e4d4745dae..7c4574ba9d75 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.c
@@ -21,7 +21,7 @@
21#include <net/mac80211.h> 21#include <net/mac80211.h>
22 22
23#include "rtl8180.h" 23#include "rtl8180.h"
24#include "rtl8180_rtl8225.h" 24#include "rtl8225.h"
25 25
26static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data) 26static void rtl8225_write(struct ieee80211_hw *dev, u8 addr, u16 data)
27{ 27{
diff --git a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h
index 310013a2d726..310013a2d726 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/rtl8225.h
diff --git a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
index d064fcc5ec08..44771a6286af 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_sa2400.c
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.c
@@ -25,7 +25,7 @@
25#include <net/mac80211.h> 25#include <net/mac80211.h>
26 26
27#include "rtl8180.h" 27#include "rtl8180.h"
28#include "rtl8180_sa2400.h" 28#include "sa2400.h"
29 29
30static const u32 sa2400_chan[] = { 30static const u32 sa2400_chan[] = {
31 0x00096c, /* ch1 */ 31 0x00096c, /* ch1 */
diff --git a/drivers/net/wireless/rtl818x/rtl8180_sa2400.h b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
index a4aaa0d413f1..a4aaa0d413f1 100644
--- a/drivers/net/wireless/rtl818x/rtl8180_sa2400.h
+++ b/drivers/net/wireless/rtl818x/rtl8180/sa2400.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187/Makefile b/drivers/net/wireless/rtl818x/rtl8187/Makefile
new file mode 100644
index 000000000000..7b6299268ecf
--- /dev/null
+++ b/drivers/net/wireless/rtl818x/rtl8187/Makefile
@@ -0,0 +1,5 @@
1rtl8187-objs := dev.o rtl8225.o leds.o rfkill.o
2
3obj-$(CONFIG_RTL8187) += rtl8187.o
4
5ccflags-y += -Idrivers/net/wireless/rtl818x
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187/dev.c
index 38fa8244cc96..6b82cac37ee3 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/dev.c
@@ -29,11 +29,11 @@
29#include <net/mac80211.h> 29#include <net/mac80211.h>
30 30
31#include "rtl8187.h" 31#include "rtl8187.h"
32#include "rtl8187_rtl8225.h" 32#include "rtl8225.h"
33#ifdef CONFIG_RTL8187_LEDS 33#ifdef CONFIG_RTL8187_LEDS
34#include "rtl8187_leds.h" 34#include "leds.h"
35#endif 35#endif
36#include "rtl8187_rfkill.h" 36#include "rfkill.h"
37 37
38MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>"); 38MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
39MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>"); 39MODULE_AUTHOR("Andrea Merello <andreamrl@tiscali.it>");
@@ -553,6 +553,46 @@ static int rtl8187b_init_status_urb(struct ieee80211_hw *dev)
553 return ret; 553 return ret;
554} 554}
555 555
556static void rtl8187_set_anaparam(struct rtl8187_priv *priv, bool rfon)
557{
558 u32 anaparam, anaparam2;
559 u8 anaparam3, reg;
560
561 if (!priv->is_rtl8187b) {
562 if (rfon) {
563 anaparam = RTL8187_RTL8225_ANAPARAM_ON;
564 anaparam2 = RTL8187_RTL8225_ANAPARAM2_ON;
565 } else {
566 anaparam = RTL8187_RTL8225_ANAPARAM_OFF;
567 anaparam2 = RTL8187_RTL8225_ANAPARAM2_OFF;
568 }
569 } else {
570 if (rfon) {
571 anaparam = RTL8187B_RTL8225_ANAPARAM_ON;
572 anaparam2 = RTL8187B_RTL8225_ANAPARAM2_ON;
573 anaparam3 = RTL8187B_RTL8225_ANAPARAM3_ON;
574 } else {
575 anaparam = RTL8187B_RTL8225_ANAPARAM_OFF;
576 anaparam2 = RTL8187B_RTL8225_ANAPARAM2_OFF;
577 anaparam3 = RTL8187B_RTL8225_ANAPARAM3_OFF;
578 }
579 }
580
581 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
582 RTL818X_EEPROM_CMD_CONFIG);
583 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
584 reg |= RTL818X_CONFIG3_ANAPARAM_WRITE;
585 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
586 rtl818x_iowrite32(priv, &priv->map->ANAPARAM, anaparam);
587 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2, anaparam2);
588 if (priv->is_rtl8187b)
589 rtl818x_iowrite8(priv, &priv->map->ANAPARAM3, anaparam3);
590 reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
591 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
592 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
593 RTL818X_EEPROM_CMD_NORMAL);
594}
595
556static int rtl8187_cmd_reset(struct ieee80211_hw *dev) 596static int rtl8187_cmd_reset(struct ieee80211_hw *dev)
557{ 597{
558 struct rtl8187_priv *priv = dev->priv; 598 struct rtl8187_priv *priv = dev->priv;
@@ -603,19 +643,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
603 int res; 643 int res;
604 644
605 /* reset */ 645 /* reset */
606 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, 646 rtl8187_set_anaparam(priv, true);
607 RTL818X_EEPROM_CMD_CONFIG);
608 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
609 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg |
610 RTL818X_CONFIG3_ANAPARAM_WRITE);
611 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
612 RTL8187_RTL8225_ANAPARAM_ON);
613 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
614 RTL8187_RTL8225_ANAPARAM2_ON);
615 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg &
616 ~RTL818X_CONFIG3_ANAPARAM_WRITE);
617 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
618 RTL818X_EEPROM_CMD_NORMAL);
619 647
620 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0); 648 rtl818x_iowrite16(priv, &priv->map->INT_MASK, 0);
621 649
@@ -629,17 +657,7 @@ static int rtl8187_init_hw(struct ieee80211_hw *dev)
629 if (res) 657 if (res)
630 return res; 658 return res;
631 659
632 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 660 rtl8187_set_anaparam(priv, true);
633 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
634 rtl818x_iowrite8(priv, &priv->map->CONFIG3,
635 reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
636 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
637 RTL8187_RTL8225_ANAPARAM_ON);
638 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
639 RTL8187_RTL8225_ANAPARAM2_ON);
640 rtl818x_iowrite8(priv, &priv->map->CONFIG3,
641 reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
642 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
643 661
644 /* setup card */ 662 /* setup card */
645 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0); 663 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0);
@@ -712,10 +730,9 @@ static const u8 rtl8187b_reg_table[][3] = {
712 730
713 {0x58, 0x4B, 1}, {0x59, 0x00, 1}, {0x5A, 0x4B, 1}, {0x5B, 0x00, 1}, 731 {0x58, 0x4B, 1}, {0x59, 0x00, 1}, {0x5A, 0x4B, 1}, {0x5B, 0x00, 1},
714 {0x60, 0x4B, 1}, {0x61, 0x09, 1}, {0x62, 0x4B, 1}, {0x63, 0x09, 1}, 732 {0x60, 0x4B, 1}, {0x61, 0x09, 1}, {0x62, 0x4B, 1}, {0x63, 0x09, 1},
715 {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xE0, 0xFF, 1}, {0xE1, 0x0F, 1}, 733 {0xCE, 0x0F, 1}, {0xCF, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1},
716 {0xE2, 0x00, 1}, {0xF0, 0x4E, 1}, {0xF1, 0x01, 1}, {0xF2, 0x02, 1}, 734 {0xF2, 0x02, 1}, {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1},
717 {0xF3, 0x03, 1}, {0xF4, 0x04, 1}, {0xF5, 0x05, 1}, {0xF6, 0x06, 1}, 735 {0xF6, 0x06, 1}, {0xF7, 0x07, 1}, {0xF8, 0x08, 1},
718 {0xF7, 0x07, 1}, {0xF8, 0x08, 1},
719 736
720 {0x4E, 0x00, 2}, {0x0C, 0x04, 2}, {0x21, 0x61, 2}, {0x22, 0x68, 2}, 737 {0x4E, 0x00, 2}, {0x0C, 0x04, 2}, {0x21, 0x61, 2}, {0x22, 0x68, 2},
721 {0x23, 0x6F, 2}, {0x24, 0x76, 2}, {0x25, 0x7D, 2}, {0x26, 0x84, 2}, 738 {0x23, 0x6F, 2}, {0x24, 0x76, 2}, {0x25, 0x7D, 2}, {0x26, 0x84, 2},
@@ -723,14 +740,13 @@ static const u8 rtl8187b_reg_table[][3] = {
723 {0x52, 0x04, 2}, {0x53, 0xA0, 2}, {0x54, 0x1F, 2}, {0x55, 0x23, 2}, 740 {0x52, 0x04, 2}, {0x53, 0xA0, 2}, {0x54, 0x1F, 2}, {0x55, 0x23, 2},
724 {0x56, 0x45, 2}, {0x57, 0x67, 2}, {0x58, 0x08, 2}, {0x59, 0x08, 2}, 741 {0x56, 0x45, 2}, {0x57, 0x67, 2}, {0x58, 0x08, 2}, {0x59, 0x08, 2},
725 {0x5A, 0x08, 2}, {0x5B, 0x08, 2}, {0x60, 0x08, 2}, {0x61, 0x08, 2}, 742 {0x5A, 0x08, 2}, {0x5B, 0x08, 2}, {0x60, 0x08, 2}, {0x61, 0x08, 2},
726 {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2}, {0x72, 0x56, 2}, 743 {0x62, 0x08, 2}, {0x63, 0x08, 2}, {0x64, 0xCF, 2},
727 {0x73, 0x9A, 2},
728 744
729 {0x34, 0xF0, 0}, {0x35, 0x0F, 0}, {0x5B, 0x40, 0}, {0x84, 0x88, 0}, 745 {0x5B, 0x40, 0}, {0x84, 0x88, 0}, {0x85, 0x24, 0}, {0x88, 0x54, 0},
730 {0x85, 0x24, 0}, {0x88, 0x54, 0}, {0x8B, 0xB8, 0}, {0x8C, 0x07, 0}, 746 {0x8B, 0xB8, 0}, {0x8C, 0x07, 0}, {0x8D, 0x00, 0}, {0x94, 0x1B, 0},
731 {0x8D, 0x00, 0}, {0x94, 0x1B, 0}, {0x95, 0x12, 0}, {0x96, 0x00, 0}, 747 {0x95, 0x12, 0}, {0x96, 0x00, 0}, {0x97, 0x06, 0}, {0x9D, 0x1A, 0},
732 {0x97, 0x06, 0}, {0x9D, 0x1A, 0}, {0x9F, 0x10, 0}, {0xB4, 0x22, 0}, 748 {0x9F, 0x10, 0}, {0xB4, 0x22, 0}, {0xBE, 0x80, 0}, {0xDB, 0x00, 0},
733 {0xBE, 0x80, 0}, {0xDB, 0x00, 0}, {0xEE, 0x00, 0}, {0x4C, 0x00, 2}, 749 {0xEE, 0x00, 0}, {0x4C, 0x00, 2},
734 750
735 {0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0}, {0x8E, 0x08, 0}, 751 {0x9F, 0x00, 3}, {0x8C, 0x01, 0}, {0x8D, 0x10, 0}, {0x8E, 0x08, 0},
736 {0x8F, 0x00, 0} 752 {0x8F, 0x00, 0}
@@ -742,48 +758,34 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
742 int res, i; 758 int res, i;
743 u8 reg; 759 u8 reg;
744 760
745 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, 761 rtl8187_set_anaparam(priv, true);
746 RTL818X_EEPROM_CMD_CONFIG);
747
748 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
749 reg |= RTL818X_CONFIG3_ANAPARAM_WRITE | RTL818X_CONFIG3_GNT_SELECT;
750 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
751 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
752 RTL8187B_RTL8225_ANAPARAM2_ON);
753 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
754 RTL8187B_RTL8225_ANAPARAM_ON);
755 rtl818x_iowrite8(priv, &priv->map->ANAPARAM3,
756 RTL8187B_RTL8225_ANAPARAM3_ON);
757 762
763 /* Reset PLL sequence on 8187B. Realtek note: reduces power
 764 * consumption by about 30 mA */
758 rtl818x_iowrite8(priv, (u8 *)0xFF61, 0x10); 765 rtl818x_iowrite8(priv, (u8 *)0xFF61, 0x10);
759 reg = rtl818x_ioread8(priv, (u8 *)0xFF62); 766 reg = rtl818x_ioread8(priv, (u8 *)0xFF62);
760 rtl818x_iowrite8(priv, (u8 *)0xFF62, reg & ~(1 << 5)); 767 rtl818x_iowrite8(priv, (u8 *)0xFF62, reg & ~(1 << 5));
761 rtl818x_iowrite8(priv, (u8 *)0xFF62, reg | (1 << 5)); 768 rtl818x_iowrite8(priv, (u8 *)0xFF62, reg | (1 << 5));
762 769
763 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
764 reg &= ~RTL818X_CONFIG3_ANAPARAM_WRITE;
765 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
766
767 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
768 RTL818X_EEPROM_CMD_NORMAL);
769
770 res = rtl8187_cmd_reset(dev); 770 res = rtl8187_cmd_reset(dev);
771 if (res) 771 if (res)
772 return res; 772 return res;
773 773
774 rtl818x_iowrite16(priv, (__le16 *)0xFF2D, 0x0FFF); 774 rtl8187_set_anaparam(priv, true);
775
776 /* BRSR (Basic Rate Set Register) on 8187B looks to be the same as
 777 * RESP_RATE on 8187L in Realtek sources: each bit corresponds to
 778 * one of the 12 rates; all are enabled */
779 rtl818x_iowrite16(priv, (__le16 *)0xFF34, 0x0FFF);
780
775 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF); 781 reg = rtl818x_ioread8(priv, &priv->map->CW_CONF);
776 reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT; 782 reg |= RTL818X_CW_CONF_PERPACKET_RETRY_SHIFT;
777 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg); 783 rtl818x_iowrite8(priv, &priv->map->CW_CONF, reg);
778 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
779 reg |= RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT |
780 RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
781 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
782 784
785 /* Auto Rate Fallback Register (ARFR): 1M-54M setting */
783 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1); 786 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFE0, 0x0FFF, 1);
787 rtl818x_iowrite8_idx(priv, (u8 *)0xFFE2, 0x00, 1);
784 788
785 rtl818x_iowrite16(priv, &priv->map->BEACON_INTERVAL, 100);
786 rtl818x_iowrite16(priv, &priv->map->ATIM_WND, 2);
787 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFD4, 0xFFFF, 1); 789 rtl818x_iowrite16_idx(priv, (__le16 *)0xFFD4, 0xFFFF, 1);
788 790
789 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, 791 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
@@ -811,16 +813,9 @@ static int rtl8187b_init_hw(struct ieee80211_hw *dev)
811 813
812 rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001); 814 rtl818x_iowrite32(priv, &priv->map->RF_TIMING, 0x00004001);
813 815
816 /* RFSW_CTRL register */
814 rtl818x_iowrite16_idx(priv, (__le16 *)0xFF72, 0x569A, 2); 817 rtl818x_iowrite16_idx(priv, (__le16 *)0xFF72, 0x569A, 2);
815 818
816 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
817 RTL818X_EEPROM_CMD_CONFIG);
818 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
819 reg |= RTL818X_CONFIG3_ANAPARAM_WRITE;
820 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg);
821 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD,
822 RTL818X_EEPROM_CMD_NORMAL);
823
824 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480); 819 rtl818x_iowrite16(priv, &priv->map->RFPinsOutput, 0x0480);
825 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488); 820 rtl818x_iowrite16(priv, &priv->map->RFPinsSelect, 0x2488);
826 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF); 821 rtl818x_iowrite16(priv, &priv->map->RFPinsEnable, 0x1FFF);
@@ -929,6 +924,12 @@ static int rtl8187_start(struct ieee80211_hw *dev)
929 priv->rx_conf = reg; 924 priv->rx_conf = reg;
930 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg); 925 rtl818x_iowrite32(priv, &priv->map->RX_CONF, reg);
931 926
927 reg = rtl818x_ioread8(priv, &priv->map->TX_AGC_CTL);
928 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_GAIN_SHIFT;
929 reg &= ~RTL818X_TX_AGC_CTL_PERPACKET_ANTSEL_SHIFT;
930 reg &= ~RTL818X_TX_AGC_CTL_FEEDBACK_ANT;
931 rtl818x_iowrite8(priv, &priv->map->TX_AGC_CTL, reg);
932
932 rtl818x_iowrite32(priv, &priv->map->TX_CONF, 933 rtl818x_iowrite32(priv, &priv->map->TX_CONF,
933 RTL818X_TX_CONF_HW_SEQNUM | 934 RTL818X_TX_CONF_HW_SEQNUM |
934 RTL818X_TX_CONF_DISREQQSIZE | 935 RTL818X_TX_CONF_DISREQQSIZE |
@@ -1002,6 +1003,7 @@ static void rtl8187_stop(struct ieee80211_hw *dev)
1002 rtl818x_iowrite8(priv, &priv->map->CMD, reg); 1003 rtl818x_iowrite8(priv, &priv->map->CMD, reg);
1003 1004
1004 priv->rf->stop(dev); 1005 priv->rf->stop(dev);
1006 rtl8187_set_anaparam(priv, false);
1005 1007
1006 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG); 1008 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
1007 reg = rtl818x_ioread8(priv, &priv->map->CONFIG4); 1009 reg = rtl818x_ioread8(priv, &priv->map->CONFIG4);
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 4637337d5ce6..2e0de2f5f0f9 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -20,7 +20,7 @@
20#include <linux/eeprom_93cx6.h> 20#include <linux/eeprom_93cx6.h>
21 21
22#include "rtl8187.h" 22#include "rtl8187.h"
23#include "rtl8187_leds.h" 23#include "leds.h"
24 24
25static void led_turn_on(struct work_struct *work) 25static void led_turn_on(struct work_struct *work)
26{ 26{
diff --git a/drivers/net/wireless/rtl818x/rtl8187_leds.h b/drivers/net/wireless/rtl818x/rtl8187/leds.h
index d743c96d4a20..d743c96d4a20 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_leds.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c b/drivers/net/wireless/rtl818x/rtl8187/rfkill.c
index 03555e1e0cab..34116719974a 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rfkill.c
@@ -18,7 +18,7 @@
18#include <net/mac80211.h> 18#include <net/mac80211.h>
19 19
20#include "rtl8187.h" 20#include "rtl8187.h"
21#include "rtl8187_rfkill.h" 21#include "rfkill.h"
22 22
23static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv) 23static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv)
24{ 24{
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rfkill.h b/drivers/net/wireless/rtl818x/rtl8187/rfkill.h
index e12575e96d11..e12575e96d11 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rfkill.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rfkill.h
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
index 98878160a65a..0d7b1423f77b 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8187.h
@@ -16,7 +16,7 @@
16#define RTL8187_H 16#define RTL8187_H
17 17
18#include "rtl818x.h" 18#include "rtl818x.h"
19#include "rtl8187_leds.h" 19#include "leds.h"
20 20
21#define RTL8187_EEPROM_TXPWR_BASE 0x05 21#define RTL8187_EEPROM_TXPWR_BASE 0x05
22#define RTL8187_EEPROM_MAC_ADDR 0x07 22#define RTL8187_EEPROM_MAC_ADDR 0x07
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
index 97eebdcf7eb9..908903f721f5 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.c
@@ -21,7 +21,7 @@
21#include <net/mac80211.h> 21#include <net/mac80211.h>
22 22
23#include "rtl8187.h" 23#include "rtl8187.h"
24#include "rtl8187_rtl8225.h" 24#include "rtl8225.h"
25 25
26static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data) 26static void rtl8225_write_bitbang(struct ieee80211_hw *dev, u8 addr, u16 data)
27{ 27{
@@ -898,29 +898,7 @@ static void rtl8225z2_b_rf_init(struct ieee80211_hw *dev)
898 898
899static void rtl8225_rf_stop(struct ieee80211_hw *dev) 899static void rtl8225_rf_stop(struct ieee80211_hw *dev)
900{ 900{
901 u8 reg;
902 struct rtl8187_priv *priv = dev->priv;
903
904 rtl8225_write(dev, 0x4, 0x1f); 901 rtl8225_write(dev, 0x4, 0x1f);
905
906 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_CONFIG);
907 reg = rtl818x_ioread8(priv, &priv->map->CONFIG3);
908 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg | RTL818X_CONFIG3_ANAPARAM_WRITE);
909 if (!priv->is_rtl8187b) {
910 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
911 RTL8187_RTL8225_ANAPARAM2_OFF);
912 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
913 RTL8187_RTL8225_ANAPARAM_OFF);
914 } else {
915 rtl818x_iowrite32(priv, &priv->map->ANAPARAM2,
916 RTL8187B_RTL8225_ANAPARAM2_OFF);
917 rtl818x_iowrite32(priv, &priv->map->ANAPARAM,
918 RTL8187B_RTL8225_ANAPARAM_OFF);
919 rtl818x_iowrite8(priv, &priv->map->ANAPARAM3,
920 RTL8187B_RTL8225_ANAPARAM3_OFF);
921 }
922 rtl818x_iowrite8(priv, &priv->map->CONFIG3, reg & ~RTL818X_CONFIG3_ANAPARAM_WRITE);
923 rtl818x_iowrite8(priv, &priv->map->EEPROM_CMD, RTL818X_EEPROM_CMD_NORMAL);
924} 902}
925 903
926static void rtl8225_rf_set_channel(struct ieee80211_hw *dev, 904static void rtl8225_rf_set_channel(struct ieee80211_hw *dev,
diff --git a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
index 20c5b6ead0f6..20c5b6ead0f6 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_rtl8225.h
+++ b/drivers/net/wireless/rtl818x/rtl8187/rtl8225.h
diff --git a/drivers/net/wireless/rtlwifi/Kconfig b/drivers/net/wireless/rtlwifi/Kconfig
new file mode 100644
index 000000000000..7f6573f7f470
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/Kconfig
@@ -0,0 +1,15 @@
1config RTL8192CE
2 tristate "Realtek RTL8192CE/RTL8188SE Wireless Network Adapter"
3 depends on MAC80211 && EXPERIMENTAL
4 select FW_LOADER
5 select RTLWIFI
6 ---help---
7 This is the driver for Realtek RTL8192CE/RTL8188CE 802.11n PCIe
8 wireless network adapters.
9
10 If you choose to build it as a module, it will be called rtl8192ce
11
12config RTLWIFI
13 tristate
14 depends on RTL8192CE
15 default m
diff --git a/drivers/net/wireless/rtlwifi/Makefile b/drivers/net/wireless/rtlwifi/Makefile
new file mode 100644
index 000000000000..2a7a4384f8ee
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/Makefile
@@ -0,0 +1,13 @@
1obj-$(CONFIG_RTLWIFI) += rtlwifi.o
2rtlwifi-objs := \
3 base.o \
4 cam.o \
5 core.o \
6 debug.o \
7 efuse.o \
8 pci.o \
9 ps.o \
10 rc.o \
11 regd.o
12
13obj-$(CONFIG_RTL8192CE) += rtl8192ce/
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
new file mode 100644
index 000000000000..cf0b73e51fc2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -0,0 +1,956 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include <linux/ip.h>
31#include "wifi.h"
32#include "rc.h"
33#include "base.h"
34#include "efuse.h"
35#include "cam.h"
36#include "ps.h"
37#include "regd.h"
38
39/*
 40 *NOTICE!!!: This file will be very big, we should
 41 *keep it clear under the following rules:
 42 *
 43 *This file includes the following parts, so if you add new
 44 *functions into this file, please check which part they
 45 *should go in, or check if you should add a new part
46 *for this file:
47 *
48 *1) mac80211 init functions
49 *2) tx information functions
50 *3) functions called by core.c
51 *4) wq & timer callback functions
52 *5) frame process functions
53 *6) sysfs functions
54 *7) ...
55 */
56
57/*********************************************************
58 *
59 * mac80211 init functions
60 *
61 *********************************************************/
62static struct ieee80211_channel rtl_channeltable[] = {
63 {.center_freq = 2412, .hw_value = 1,},
64 {.center_freq = 2417, .hw_value = 2,},
65 {.center_freq = 2422, .hw_value = 3,},
66 {.center_freq = 2427, .hw_value = 4,},
67 {.center_freq = 2432, .hw_value = 5,},
68 {.center_freq = 2437, .hw_value = 6,},
69 {.center_freq = 2442, .hw_value = 7,},
70 {.center_freq = 2447, .hw_value = 8,},
71 {.center_freq = 2452, .hw_value = 9,},
72 {.center_freq = 2457, .hw_value = 10,},
73 {.center_freq = 2462, .hw_value = 11,},
74 {.center_freq = 2467, .hw_value = 12,},
75 {.center_freq = 2472, .hw_value = 13,},
76 {.center_freq = 2484, .hw_value = 14,},
77};
78
79static struct ieee80211_rate rtl_ratetable[] = {
80 {.bitrate = 10, .hw_value = 0x00,},
81 {.bitrate = 20, .hw_value = 0x01,},
82 {.bitrate = 55, .hw_value = 0x02,},
83 {.bitrate = 110, .hw_value = 0x03,},
84 {.bitrate = 60, .hw_value = 0x04,},
85 {.bitrate = 90, .hw_value = 0x05,},
86 {.bitrate = 120, .hw_value = 0x06,},
87 {.bitrate = 180, .hw_value = 0x07,},
88 {.bitrate = 240, .hw_value = 0x08,},
89 {.bitrate = 360, .hw_value = 0x09,},
90 {.bitrate = 480, .hw_value = 0x0a,},
91 {.bitrate = 540, .hw_value = 0x0b,},
92};
93
94static const struct ieee80211_supported_band rtl_band_2ghz = {
95 .band = IEEE80211_BAND_2GHZ,
96
97 .channels = rtl_channeltable,
98 .n_channels = ARRAY_SIZE(rtl_channeltable),
99
100 .bitrates = rtl_ratetable,
101 .n_bitrates = ARRAY_SIZE(rtl_ratetable),
102
103 .ht_cap = {0},
104};
105
106static void _rtl_init_hw_ht_capab(struct ieee80211_hw *hw,
107 struct ieee80211_sta_ht_cap *ht_cap)
108{
109 struct rtl_priv *rtlpriv = rtl_priv(hw);
110 struct rtl_phy *rtlphy = &(rtlpriv->phy);
111
112 ht_cap->ht_supported = true;
113 ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
114 IEEE80211_HT_CAP_SGI_40 |
115 IEEE80211_HT_CAP_SGI_20 |
116 IEEE80211_HT_CAP_DSSSCCK40 | IEEE80211_HT_CAP_MAX_AMSDU;
117
118 /*
119 *Maximum length of AMPDU that the STA can receive.
120 *Length = 2 ^ (13 + max_ampdu_length_exp) - 1 (octets)
121 */
122 ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
123
124 /*Minimum MPDU start spacing , */
125 ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_16;
126
127 ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED;
128
129 /*
130 *hw->wiphy->bands[IEEE80211_BAND_2GHZ]
 131 *based on ant_num
132 *rx_mask: RX mask
133 *if rx_ant =1 rx_mask[0]=0xff;==>MCS0-MCS7
134 *if rx_ant =2 rx_mask[1]=0xff;==>MCS8-MCS15
135 *if rx_ant >=3 rx_mask[2]=0xff;
136 *if BW_40 rx_mask[4]=0x01;
137 *highest supported RX rate
138 */
139 if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_2T2R) {
140
141 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T2R or 2T2R\n"));
142
143 ht_cap->mcs.rx_mask[0] = 0xFF;
144 ht_cap->mcs.rx_mask[1] = 0xFF;
145 ht_cap->mcs.rx_mask[4] = 0x01;
146
147 ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS15;
148 } else if (get_rf_type(rtlphy) == RF_1T1R) {
149
150 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("1T1R\n"));
151
152 ht_cap->mcs.rx_mask[0] = 0xFF;
153 ht_cap->mcs.rx_mask[1] = 0x00;
154 ht_cap->mcs.rx_mask[4] = 0x01;
155
156 ht_cap->mcs.rx_highest = MAX_BIT_RATE_40MHZ_MCS7;
157 }
158}
159
160static void _rtl_init_mac80211(struct ieee80211_hw *hw)
161{
162 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
163 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
164 struct ieee80211_supported_band *sband;
165
166 /* <1> use mac->bands as mem for hw->wiphy->bands */
167 sband = &(rtlmac->bands[IEEE80211_BAND_2GHZ]);
168
169 /*
170 * <2> set hw->wiphy->bands[IEEE80211_BAND_2GHZ]
171 * to default value(1T1R)
172 */
173 memcpy(&(rtlmac->bands[IEEE80211_BAND_2GHZ]), &rtl_band_2ghz,
174 sizeof(struct ieee80211_supported_band));
175
 176 /* <3> init ht cap based on ant_num */
177 _rtl_init_hw_ht_capab(hw, &sband->ht_cap);
178
179 /* <4> set mac->sband to wiphy->sband */
180 hw->wiphy->bands[IEEE80211_BAND_2GHZ] = sband;
181
182 /* <5> set hw caps */
183 hw->flags = IEEE80211_HW_SIGNAL_DBM |
184 IEEE80211_HW_RX_INCLUDES_FCS |
185 IEEE80211_HW_BEACON_FILTER | IEEE80211_HW_AMPDU_AGGREGATION | /*PS*/
186 /*IEEE80211_HW_SUPPORTS_PS | */
187 /*IEEE80211_HW_PS_NULLFUNC_STACK | */
188 /*IEEE80211_HW_SUPPORTS_DYNAMIC_PS | */
189 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 0;
190
191 hw->wiphy->interface_modes =
192 BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC);
193
194 hw->wiphy->rts_threshold = 2347;
195
196 hw->queues = AC_MAX;
197 hw->extra_tx_headroom = RTL_TX_HEADER_SIZE;
198
199 /* TODO: Correct this value for our hw */
200 /* TODO: define these hard code value */
201 hw->channel_change_time = 100;
202 hw->max_listen_interval = 5;
203 hw->max_rate_tries = 4;
204 /* hw->max_rates = 1; */
205
206 /* <6> mac address */
207 if (is_valid_ether_addr(rtlefuse->dev_addr)) {
208 SET_IEEE80211_PERM_ADDR(hw, rtlefuse->dev_addr);
209 } else {
210 u8 rtlmac[] = { 0x00, 0xe0, 0x4c, 0x81, 0x92, 0x00 };
211 get_random_bytes((rtlmac + (ETH_ALEN - 1)), 1);
212 SET_IEEE80211_PERM_ADDR(hw, rtlmac);
213 }
214
215}
216
217static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
218{
219 struct rtl_priv *rtlpriv = rtl_priv(hw);
220
221 /* <1> timer */
222 init_timer(&rtlpriv->works.watchdog_timer);
223 setup_timer(&rtlpriv->works.watchdog_timer,
224 rtl_watch_dog_timer_callback, (unsigned long)hw);
225
226 /* <2> work queue */
227 rtlpriv->works.hw = hw;
228 rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0);
229 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
230 (void *)rtl_watchdog_wq_callback);
231 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
232 (void *)rtl_ips_nic_off_wq_callback);
233
234}
235
236void rtl_deinit_deferred_work(struct ieee80211_hw *hw)
237{
238 struct rtl_priv *rtlpriv = rtl_priv(hw);
239
240 del_timer_sync(&rtlpriv->works.watchdog_timer);
241
242 cancel_delayed_work(&rtlpriv->works.watchdog_wq);
243 cancel_delayed_work(&rtlpriv->works.ips_nic_off_wq);
244}
245
246void rtl_init_rfkill(struct ieee80211_hw *hw)
247{
248 struct rtl_priv *rtlpriv = rtl_priv(hw);
249
250 bool radio_state;
251 bool blocked;
252 u8 valid = 0;
253
254 radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
255
256 /*set init state to that of switch */
257 rtlpriv->rfkill.rfkill_state = radio_state;
258 printk(KERN_INFO "rtlwifi: wireless switch is %s\n",
259 rtlpriv->rfkill.rfkill_state ? "on" : "off");
260
261 if (valid) {
262 rtlpriv->rfkill.rfkill_state = radio_state;
263
264 blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
265 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
266 }
267
268 wiphy_rfkill_start_polling(hw->wiphy);
269}
270
271void rtl_deinit_rfkill(struct ieee80211_hw *hw)
272{
273 wiphy_rfkill_stop_polling(hw->wiphy);
274}
275
276int rtl_init_core(struct ieee80211_hw *hw)
277{
278 struct rtl_priv *rtlpriv = rtl_priv(hw);
279 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
280
281 /* <1> init mac80211 */
282 _rtl_init_mac80211(hw);
283 rtlmac->hw = hw;
284
285 /* <2> rate control register */
286 if (rtl_rate_control_register()) {
287 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
288 ("rtl: Unable to register rtl_rc,"
289 "use default RC !!\n"));
290 } else {
291 hw->rate_control_algorithm = "rtl_rc";
292 }
293
294 /*
295 * <3> init CRDA must come after init
296 * mac80211 hw in _rtl_init_mac80211.
297 */
298 if (rtl_regd_init(hw, rtl_reg_notifier)) {
299 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("REGD init failed\n"));
300 return 1;
301 } else {
302 /* CRDA regd hint must after init CRDA */
303 if (regulatory_hint(hw->wiphy, rtlpriv->regd.alpha2)) {
304 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
305 ("regulatory_hint fail\n"));
306 }
307 }
308
309 /* <4> locks */
310 mutex_init(&rtlpriv->locks.conf_mutex);
311 spin_lock_init(&rtlpriv->locks.ips_lock);
312 spin_lock_init(&rtlpriv->locks.irq_th_lock);
313 spin_lock_init(&rtlpriv->locks.h2c_lock);
314 spin_lock_init(&rtlpriv->locks.rf_ps_lock);
315 spin_lock_init(&rtlpriv->locks.rf_lock);
316 spin_lock_init(&rtlpriv->locks.lps_lock);
317
318 rtlmac->link_state = MAC80211_NOLINK;
319
320 /* <5> init deferred work */
321 _rtl_init_deferred_work(hw);
322
323 return 0;
324}
325
326void rtl_deinit_core(struct ieee80211_hw *hw)
327{
328 /*RC*/
329 rtl_rate_control_unregister();
330}
331
332void rtl_init_rx_config(struct ieee80211_hw *hw)
333{
334 struct rtl_priv *rtlpriv = rtl_priv(hw);
335 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
336
337 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
338 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_MGT_FILTER,
339 (u8 *) (&mac->rx_mgt_filter));
340 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CTRL_FILTER,
341 (u8 *) (&mac->rx_ctrl_filter));
342 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_DATA_FILTER,
343 (u8 *) (&mac->rx_data_filter));
344}
345
346/*********************************************************
347 *
348 * tx information functions
349 *
350 *********************************************************/
351static void _rtl_qurey_shortpreamble_mode(struct ieee80211_hw *hw,
352 struct rtl_tcb_desc *tcb_desc,
353 struct ieee80211_tx_info *info)
354{
355 struct rtl_priv *rtlpriv = rtl_priv(hw);
356 u8 rate_flag = info->control.rates[0].flags;
357
358 tcb_desc->use_shortpreamble = false;
359
360 /* 1M can only use Long Preamble. 11B spec */
361 if (tcb_desc->hw_rate == rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M])
362 return;
363 else if (rate_flag & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
364 tcb_desc->use_shortpreamble = true;
365
366 return;
367}
368
369static void _rtl_query_shortgi(struct ieee80211_hw *hw,
370 struct rtl_tcb_desc *tcb_desc,
371 struct ieee80211_tx_info *info)
372{
373 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
374 u8 rate_flag = info->control.rates[0].flags;
375
376 tcb_desc->use_shortgi = false;
377
378 if (!mac->ht_enable)
379 return;
380
381 if (!mac->sgi_40 && !mac->sgi_20)
382 return;
383
384 if ((mac->bw_40 == true) && mac->sgi_40)
385 tcb_desc->use_shortgi = true;
386 else if ((mac->bw_40 == false) && mac->sgi_20)
387 tcb_desc->use_shortgi = true;
388
389 if (!(rate_flag & IEEE80211_TX_RC_SHORT_GI))
390 tcb_desc->use_shortgi = false;
391
392}
393
394static void _rtl_query_protection_mode(struct ieee80211_hw *hw,
395 struct rtl_tcb_desc *tcb_desc,
396 struct ieee80211_tx_info *info)
397{
398 struct rtl_priv *rtlpriv = rtl_priv(hw);
399 u8 rate_flag = info->control.rates[0].flags;
400
401 /* Common Settings */
402 tcb_desc->b_rts_stbc = false;
403 tcb_desc->b_cts_enable = false;
404 tcb_desc->rts_sc = 0;
405 tcb_desc->b_rts_bw = false;
406 tcb_desc->b_rts_use_shortpreamble = false;
407 tcb_desc->b_rts_use_shortgi = false;
408
409 if (rate_flag & IEEE80211_TX_RC_USE_CTS_PROTECT) {
410 /* Use CTS-to-SELF in protection mode. */
411 tcb_desc->b_rts_enable = true;
412 tcb_desc->b_cts_enable = true;
413 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
414 } else if (rate_flag & IEEE80211_TX_RC_USE_RTS_CTS) {
415 /* Use RTS-CTS in protection mode. */
416 tcb_desc->b_rts_enable = true;
417 tcb_desc->rts_rate = rtlpriv->cfg->maps[RTL_RC_OFDM_RATE24M];
418 }
419
420}
421
422static void _rtl_txrate_selectmode(struct ieee80211_hw *hw,
423 struct rtl_tcb_desc *tcb_desc)
424{
425 struct rtl_priv *rtlpriv = rtl_priv(hw);
426 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
427
428 if (!tcb_desc->disable_ratefallback || !tcb_desc->use_driver_rate) {
429 if (mac->opmode == NL80211_IFTYPE_STATION)
430 tcb_desc->ratr_index = 0;
431 else if (mac->opmode == NL80211_IFTYPE_ADHOC) {
432 if (tcb_desc->b_multicast || tcb_desc->b_broadcast) {
433 tcb_desc->hw_rate =
434 rtlpriv->cfg->maps[RTL_RC_CCK_RATE2M];
435 tcb_desc->use_driver_rate = 1;
436 } else {
437 /* TODO */
438 }
439 }
440 }
441
442 if (rtlpriv->dm.b_useramask) {
 443 /* TODO: we will differentiate adhoc and station in the future */
444 tcb_desc->mac_id = 0;
445
446 if ((mac->mode == WIRELESS_MODE_N_24G) ||
447 (mac->mode == WIRELESS_MODE_N_5G)) {
448 tcb_desc->ratr_index = RATR_INX_WIRELESS_NGB;
449 } else if (mac->mode & WIRELESS_MODE_G) {
450 tcb_desc->ratr_index = RATR_INX_WIRELESS_GB;
451 } else if (mac->mode & WIRELESS_MODE_B) {
452 tcb_desc->ratr_index = RATR_INX_WIRELESS_B;
453 }
454 }
455
456}
457
458static void _rtl_query_bandwidth_mode(struct ieee80211_hw *hw,
459 struct rtl_tcb_desc *tcb_desc)
460{
461 struct rtl_priv *rtlpriv = rtl_priv(hw);
462 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
463
464 tcb_desc->b_packet_bw = false;
465
466 if (!mac->bw_40 || !mac->ht_enable)
467 return;
468
469 if (tcb_desc->b_multicast || tcb_desc->b_broadcast)
470 return;
471
 472 /* legacy rate in use, shall use 20MHz */
473 if (tcb_desc->hw_rate <= rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M])
474 return;
475
476 tcb_desc->b_packet_bw = true;
477}
478
479static u8 _rtl_get_highest_n_rate(struct ieee80211_hw *hw)
480{
481 struct rtl_priv *rtlpriv = rtl_priv(hw);
482 struct rtl_phy *rtlphy = &(rtlpriv->phy);
483 u8 hw_rate;
484
485 if (get_rf_type(rtlphy) == RF_2T2R)
486 hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS15];
487 else
488 hw_rate = rtlpriv->cfg->maps[RTL_RC_HT_RATEMCS7];
489
490 return hw_rate;
491}
492
493void rtl_get_tcb_desc(struct ieee80211_hw *hw,
494 struct ieee80211_tx_info *info,
495 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc)
496{
497 struct rtl_priv *rtlpriv = rtl_priv(hw);
498 struct rtl_mac *rtlmac = rtl_mac(rtl_priv(hw));
499 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
500 struct ieee80211_rate *txrate;
501 u16 fc = le16_to_cpu(hdr->frame_control);
502
503 memset(tcb_desc, 0, sizeof(struct rtl_tcb_desc));
504
505 if (ieee80211_is_data(fc)) {
506 txrate = ieee80211_get_tx_rate(hw, info);
507 tcb_desc->hw_rate = txrate->hw_value;
508
509 /*
 510 *we set the data rate to RTL_RC_CCK_RATE1M
 511 *in rtl_rc.c if the skb is special data or a
 512 *mgmt frame which needs a low data rate.
513 */
514
515 /*
516 *So tcb_desc->hw_rate is just used for
517 *special data and mgt frames
518 */
519 if (tcb_desc->hw_rate < rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M]) {
520 tcb_desc->use_driver_rate = true;
521 tcb_desc->ratr_index = 7;
522
523 tcb_desc->hw_rate =
524 rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
525 tcb_desc->disable_ratefallback = 1;
526 } else {
527 /*
 528 *because hw will never use hw_rate
 529 *when tcb_desc->use_driver_rate = false,
 530 *so we never set the highest N rate here;
 531 *N rates will all be controlled by FW
532 *when tcb_desc->use_driver_rate = false
533 */
534 if (rtlmac->ht_enable) {
535 tcb_desc->hw_rate = _rtl_get_highest_n_rate(hw);
536 } else {
537 if (rtlmac->mode == WIRELESS_MODE_B) {
538 tcb_desc->hw_rate =
539 rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
540 } else {
541 tcb_desc->hw_rate =
542 rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
543 }
544 }
545 }
546
547 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
548 tcb_desc->b_multicast = 1;
549 else if (is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
550 tcb_desc->b_broadcast = 1;
551
552 _rtl_txrate_selectmode(hw, tcb_desc);
553 _rtl_query_bandwidth_mode(hw, tcb_desc);
554 _rtl_qurey_shortpreamble_mode(hw, tcb_desc, info);
555 _rtl_query_shortgi(hw, tcb_desc, info);
556 _rtl_query_protection_mode(hw, tcb_desc, info);
557 } else {
558 tcb_desc->use_driver_rate = true;
559 tcb_desc->ratr_index = 7;
560 tcb_desc->disable_ratefallback = 1;
561 tcb_desc->mac_id = 0;
562
563 tcb_desc->hw_rate = rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
564 }
565}
566EXPORT_SYMBOL(rtl_get_tcb_desc);
567
568bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb)
569{
570 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
571 struct rtl_priv *rtlpriv = rtl_priv(hw);
572 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
573 u16 fc = le16_to_cpu(hdr->frame_control);
574
575 if (ieee80211_is_auth(fc)) {
576 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG, ("MAC80211_LINKING\n"));
577 rtl_ips_nic_on(hw);
578
579 mac->link_state = MAC80211_LINKING;
580 }
581
582 return true;
583}
584
585bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
586{
587 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
588 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
589 struct rtl_priv *rtlpriv = rtl_priv(hw);
590 u16 fc = le16_to_cpu(hdr->frame_control);
591 u8 *act = (u8 *) (((u8 *) skb->data + MAC80211_3ADDR_LEN));
592 u8 category;
593
594 if (!ieee80211_is_action(fc))
595 return true;
596
597 category = *act;
598 act++;
599 switch (category) {
600 case ACT_CAT_BA:
601 switch (*act) {
602 case ACT_ADDBAREQ:
603 if (mac->act_scanning)
604 return false;
605
606 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
607 ("%s ACT_ADDBAREQ From :" MAC_FMT "\n",
608 is_tx ? "Tx" : "Rx", MAC_ARG(hdr->addr2)));
609 break;
610 case ACT_ADDBARSP:
611 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
612 ("%s ACT_ADDBARSP From :" MAC_FMT "\n",
613 is_tx ? "Tx" : "Rx", MAC_ARG(hdr->addr2)));
614 break;
615 case ACT_DELBA:
616 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
617 ("ACT_ADDBADEL From :" MAC_FMT "\n",
618 MAC_ARG(hdr->addr2)));
619 break;
620 }
621 break;
622 default:
623 break;
624 }
625
626 return true;
627}
628
629/*should call before software enc*/
630u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx)
631{
632 struct rtl_priv *rtlpriv = rtl_priv(hw);
633 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
634 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
635 u16 fc = le16_to_cpu(hdr->frame_control);
636 u16 ether_type;
637 u8 mac_hdr_len = ieee80211_get_hdrlen_from_skb(skb);
638 const struct iphdr *ip;
639
640 if (!ieee80211_is_data(fc))
641 goto end;
642
643 if (ieee80211_is_nullfunc(fc))
644 return true;
645
646 ip = (struct iphdr *)((u8 *) skb->data + mac_hdr_len +
647 SNAP_SIZE + PROTOC_TYPE_SIZE);
648 ether_type = *(u16 *) ((u8 *) skb->data + mac_hdr_len + SNAP_SIZE);
649 ether_type = ntohs(ether_type);
650
651 if (ETH_P_IP == ether_type) {
652 if (IPPROTO_UDP == ip->protocol) {
653 struct udphdr *udp = (struct udphdr *)((u8 *) ip +
654 (ip->ihl << 2));
655 if (((((u8 *) udp)[1] == 68) &&
656 (((u8 *) udp)[3] == 67)) ||
657 ((((u8 *) udp)[1] == 67) &&
658 (((u8 *) udp)[3] == 68))) {
659 /*
660 * 68 : UDP BOOTP client
661 * 67 : UDP BOOTP server
662 */
663 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV),
664 DBG_DMESG, ("dhcp %s !!\n",
665 (is_tx) ? "Tx" : "Rx"));
666
667 if (is_tx) {
668 rtl_lps_leave(hw);
669 ppsc->last_delaylps_stamp_jiffies =
670 jiffies;
671 }
672
673 return true;
674 }
675 }
676 } else if (ETH_P_ARP == ether_type) {
677 if (is_tx) {
678 rtl_lps_leave(hw);
679 ppsc->last_delaylps_stamp_jiffies = jiffies;
680 }
681
682 return true;
683 } else if (ETH_P_PAE == ether_type) {
684 RT_TRACE(rtlpriv, (COMP_SEND | COMP_RECV), DBG_DMESG,
685 ("802.1X %s EAPOL pkt!!\n", (is_tx) ? "Tx" : "Rx"));
686
687 if (is_tx) {
688 rtl_lps_leave(hw);
689 ppsc->last_delaylps_stamp_jiffies = jiffies;
690 }
691
692 return true;
693 } else if (0x86DD == ether_type) {
694 return true;
695 }
696
697end:
698 return false;
699}
700
701/*********************************************************
702 *
703 * functions called by core.c
704 *
705 *********************************************************/
706int rtl_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra, u16 tid, u16 *ssn)
707{
708 struct rtl_priv *rtlpriv = rtl_priv(hw);
709 struct rtl_tid_data *tid_data;
710 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
711
712 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
713 ("on ra = %pM tid = %d\n", ra, tid));
714
715 if (unlikely(tid >= MAX_TID_COUNT))
716 return -EINVAL;
717
718 if (mac->tids[tid].agg.agg_state != RTL_AGG_OFF) {
719 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
720 ("Start AGG when state is not RTL_AGG_OFF !\n"));
721 return -ENXIO;
722 }
723
724 tid_data = &mac->tids[tid];
725 *ssn = SEQ_TO_SN(tid_data->seq_number);
726
727 RT_TRACE(rtlpriv, COMP_SEND, DBG_DMESG,
728 ("HW queue is empty tid:%d\n", tid));
729 tid_data->agg.agg_state = RTL_AGG_ON;
730
731 ieee80211_start_tx_ba_cb_irqsafe(mac->vif, ra, tid);
732
733 return 0;
734}
735
736int rtl_tx_agg_stop(struct ieee80211_hw *hw, const u8 * ra, u16 tid)
737{
738 int ssn = -1;
739 struct rtl_priv *rtlpriv = rtl_priv(hw);
740 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
741 struct rtl_tid_data *tid_data;
742
743 if (!ra) {
744 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("ra = NULL\n"));
745 return -EINVAL;
746 }
747
748 if (unlikely(tid >= MAX_TID_COUNT))
749 return -EINVAL;
750
751 if (mac->tids[tid].agg.agg_state != RTL_AGG_ON)
752 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
753 ("Stopping AGG while state not ON or starting\n"));
754
755 tid_data = &mac->tids[tid];
756 ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
757
758 mac->tids[tid].agg.agg_state = RTL_AGG_OFF;
759
760 ieee80211_stop_tx_ba_cb_irqsafe(mac->vif, ra, tid);
761
762 return 0;
763}
764
765/*********************************************************
766 *
767 * wq & timer callback functions
768 *
769 *********************************************************/
770void rtl_watchdog_wq_callback(void *data)
771{
772 struct rtl_works *rtlworks = container_of_dwork_rtl(data,
773 struct rtl_works,
774 watchdog_wq);
775 struct ieee80211_hw *hw = rtlworks->hw;
776 struct rtl_priv *rtlpriv = rtl_priv(hw);
777 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
778 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
779
780 bool b_busytraffic = false;
781 bool b_higher_busytraffic = false;
782 bool b_higher_busyrxtraffic = false;
783 bool b_higher_busytxtraffic = false;
784
785 u8 idx = 0;
 786	u32 rx_cnt_in4period = 0;
 787	u32 tx_cnt_in4period = 0;
788 u32 aver_rx_cnt_inperiod = 0;
789 u32 aver_tx_cnt_inperiod = 0;
790
791 bool benter_ps = false;
792
793 if (is_hal_stop(rtlhal))
794 return;
795
796 /* <1> Determine if action frame is allowed */
797 if (mac->link_state > MAC80211_NOLINK) {
798 if (mac->cnt_after_linked < 20)
799 mac->cnt_after_linked++;
800 } else {
801 mac->cnt_after_linked = 0;
802 }
803
804 /* <2> DM */
805 rtlpriv->cfg->ops->dm_watchdog(hw);
806
 807	/*
 808	 * <3> Check whether traffic is busy; if it is busy
 809	 * we do not change channel.
 810	 */
811 if (mac->link_state >= MAC80211_LINKED) {
812
813 /* (1) get aver_rx_cnt_inperiod & aver_tx_cnt_inperiod */
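		/*
		 * num_rx/tx_in4period[] hold the counts of the last four
		 * watchdog periods: shift the window, append the counts of
		 * the period that just ended, then average over the four.
		 */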
814 for (idx = 0; idx <= 2; idx++) {
815 rtlpriv->link_info.num_rx_in4period[idx] =
816 rtlpriv->link_info.num_rx_in4period[idx + 1];
817 rtlpriv->link_info.num_tx_in4period[idx] =
818 rtlpriv->link_info.num_tx_in4period[idx + 1];
819 }
820 rtlpriv->link_info.num_rx_in4period[3] =
821 rtlpriv->link_info.num_rx_inperiod;
822 rtlpriv->link_info.num_tx_in4period[3] =
823 rtlpriv->link_info.num_tx_inperiod;
824 for (idx = 0; idx <= 3; idx++) {
 825			rx_cnt_in4period +=
 826			    rtlpriv->link_info.num_rx_in4period[idx];
 827			tx_cnt_in4period +=
 828			    rtlpriv->link_info.num_tx_in4period[idx];
829 }
 830		aver_rx_cnt_inperiod = rx_cnt_in4period / 4;
 831		aver_tx_cnt_inperiod = tx_cnt_in4period / 4;
832
833 /* (2) check traffic busy */
834 if (aver_rx_cnt_inperiod > 100 || aver_tx_cnt_inperiod > 100)
835 b_busytraffic = true;
836
837 /* Higher Tx/Rx data. */
838 if (aver_rx_cnt_inperiod > 4000 ||
839 aver_tx_cnt_inperiod > 4000) {
840 b_higher_busytraffic = true;
841
842 /* Extremely high Rx data. */
843 if (aver_rx_cnt_inperiod > 5000)
844 b_higher_busyrxtraffic = true;
845 else
846 b_higher_busytxtraffic = false;
847 }
848
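		/*
		 * Enter LPS only when the link was essentially idle in the
		 * last period (at most 8 frames in total and no more than
		 * 2 received frames); otherwise leave LPS.
		 */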
849 if (((rtlpriv->link_info.num_rx_inperiod +
850 rtlpriv->link_info.num_tx_inperiod) > 8) ||
851 (rtlpriv->link_info.num_rx_inperiod > 2))
852 benter_ps = false;
853 else
854 benter_ps = true;
855
 856		/* LeisurePS only works in infrastructure mode. */
857 if (benter_ps)
858 rtl_lps_enter(hw);
859 else
860 rtl_lps_leave(hw);
861 }
862
863 rtlpriv->link_info.num_rx_inperiod = 0;
864 rtlpriv->link_info.num_tx_inperiod = 0;
865
866 rtlpriv->link_info.b_busytraffic = b_busytraffic;
867 rtlpriv->link_info.b_higher_busytraffic = b_higher_busytraffic;
868 rtlpriv->link_info.b_higher_busyrxtraffic = b_higher_busyrxtraffic;
869
870}
871
872void rtl_watch_dog_timer_callback(unsigned long data)
873{
874 struct ieee80211_hw *hw = (struct ieee80211_hw *)data;
875 struct rtl_priv *rtlpriv = rtl_priv(hw);
876
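	/* Hand the real work off to the workqueue, then re-arm the timer. */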
877 queue_delayed_work(rtlpriv->works.rtl_wq,
878 &rtlpriv->works.watchdog_wq, 0);
879
880 mod_timer(&rtlpriv->works.watchdog_timer,
881 jiffies + MSECS(RTL_WATCH_DOG_TIME));
882}
883
884/*********************************************************
885 *
886 * sysfs functions
887 *
888 *********************************************************/
889static ssize_t rtl_show_debug_level(struct device *d,
890 struct device_attribute *attr, char *buf)
891{
892 struct ieee80211_hw *hw = dev_get_drvdata(d);
893 struct rtl_priv *rtlpriv = rtl_priv(hw);
894
895 return sprintf(buf, "0x%08X\n", rtlpriv->dbg.global_debuglevel);
896}
897
898static ssize_t rtl_store_debug_level(struct device *d,
899 struct device_attribute *attr,
900 const char *buf, size_t count)
901{
902 struct ieee80211_hw *hw = dev_get_drvdata(d);
903 struct rtl_priv *rtlpriv = rtl_priv(hw);
904 unsigned long val;
905 int ret;
906
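	/* Base 0: accept "0x"-prefixed hex, leading-zero octal or decimal. */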
907 ret = strict_strtoul(buf, 0, &val);
908 if (ret) {
909 printk(KERN_DEBUG "%s is not in hex or decimal form.\n", buf);
910 } else {
911 rtlpriv->dbg.global_debuglevel = val;
912 printk(KERN_DEBUG "debuglevel:%x\n",
913 rtlpriv->dbg.global_debuglevel);
914 }
915
916 return strnlen(buf, count);
917}
918
919static DEVICE_ATTR(debug_level, S_IWUSR | S_IRUGO,
920 rtl_show_debug_level, rtl_store_debug_level);
921
922static struct attribute *rtl_sysfs_entries[] = {
923
924 &dev_attr_debug_level.attr,
925
926 NULL
927};
928
929/*
 930 * "name" is the folder name which will be
 931 * put in the device directory like:
 932 * sys/devices/pci0000:00/0000:00:1c.4/
 933 * 0000:06:00.0/rtlsysfs
934 */
935struct attribute_group rtl_attribute_group = {
936 .name = "rtlsysfs",
937 .attrs = rtl_sysfs_entries,
938};
939
940MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
941MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
942MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
943MODULE_LICENSE("GPL");
944MODULE_DESCRIPTION("Realtek 802.11n PCI wireless core");
945
946static int __init rtl_core_module_init(void)
947{
948 return 0;
949}
950
951static void __exit rtl_core_module_exit(void)
952{
953}
954
955module_init(rtl_core_module_init);
956module_exit(rtl_core_module_exit);
diff --git a/drivers/net/wireless/rtlwifi/base.h b/drivers/net/wireless/rtlwifi/base.h
new file mode 100644
index 000000000000..3de5a14745f1
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/base.h
@@ -0,0 +1,120 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *****************************************************************************/
28
29#ifndef __RTL_BASE_H__
30#define __RTL_BASE_H__
31
32#define RTL_DUMMY_OFFSET 0
33#define RTL_DUMMY_UNIT 8
34#define RTL_TX_DUMMY_SIZE (RTL_DUMMY_OFFSET * RTL_DUMMY_UNIT)
35#define RTL_TX_DESC_SIZE 32
36#define RTL_TX_HEADER_SIZE (RTL_TX_DESC_SIZE + RTL_TX_DUMMY_SIZE)
37
38#define HT_AMSDU_SIZE_4K 3839
39#define HT_AMSDU_SIZE_8K 7935
40
41#define MAX_BIT_RATE_40MHZ_MCS15 300 /* Mbps */
42#define MAX_BIT_RATE_40MHZ_MCS7 150 /* Mbps */
43
44#define RTL_RATE_COUNT_LEGACY 12
45#define RTL_CHANNEL_COUNT 14
46
47#define FRAME_OFFSET_FRAME_CONTROL 0
48#define FRAME_OFFSET_DURATION 2
49#define FRAME_OFFSET_ADDRESS1 4
50#define FRAME_OFFSET_ADDRESS2 10
51#define FRAME_OFFSET_ADDRESS3 16
52#define FRAME_OFFSET_SEQUENCE 22
53#define FRAME_OFFSET_ADDRESS4 24
54
55#define SET_80211_HDR_FRAME_CONTROL(_hdr, _val) \
56 WRITEEF2BYTE(_hdr, _val)
57#define SET_80211_HDR_TYPE_AND_SUBTYPE(_hdr, _val) \
58 WRITEEF1BYTE(_hdr, _val)
59#define SET_80211_HDR_PWR_MGNT(_hdr, _val) \
60 SET_BITS_TO_LE_2BYTE(_hdr, 12, 1, _val)
61#define SET_80211_HDR_TO_DS(_hdr, _val) \
62 SET_BITS_TO_LE_2BYTE(_hdr, 8, 1, _val)
63
64#define SET_80211_PS_POLL_AID(_hdr, _val) \
65 WRITEEF2BYTE(((u8 *)(_hdr)) + 2, _val)
66#define SET_80211_PS_POLL_BSSID(_hdr, _val) \
67 CP_MACADDR(((u8 *)(_hdr)) + 4, (u8 *)(_val))
68#define SET_80211_PS_POLL_TA(_hdr, _val) \
69 CP_MACADDR(((u8 *)(_hdr)) + 10, (u8 *)(_val))
70
71#define SET_80211_HDR_DURATION(_hdr, _val) \
72 WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_DURATION, _val)
73#define SET_80211_HDR_ADDRESS1(_hdr, _val) \
74 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS1, (u8*)(_val))
75#define SET_80211_HDR_ADDRESS2(_hdr, _val) \
76 CP_MACADDR((u8 *)(_hdr) + FRAME_OFFSET_ADDRESS2, (u8 *)(_val))
77#define SET_80211_HDR_ADDRESS3(_hdr, _val) \
78 CP_MACADDR((u8 *)(_hdr)+FRAME_OFFSET_ADDRESS3, (u8 *)(_val))
79#define SET_80211_HDR_FRAGMENT_SEQUENCE(_hdr, _val) \
80 WRITEEF2BYTE((u8 *)(_hdr)+FRAME_OFFSET_SEQUENCE, _val)
81
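/*
 * Beacon/probe-response fields follow the 24-byte 802.11 header:
 * timestamp at byte offsets 24-31, beacon interval at 32-33 and
 * capability info at 34-35, matching the fixed offsets used below.
 */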
82#define SET_BEACON_PROBE_RSP_TIME_STAMP_LOW(__phdr, __val) \
83 WRITEEF4BYTE(((u8 *)(__phdr)) + 24, __val)
84#define SET_BEACON_PROBE_RSP_TIME_STAMP_HIGH(__phdr, __val) \
85 WRITEEF4BYTE(((u8 *)(__phdr)) + 28, __val)
86#define SET_BEACON_PROBE_RSP_BEACON_INTERVAL(__phdr, __val) \
87 WRITEEF2BYTE(((u8 *)(__phdr)) + 32, __val)
88#define GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) \
89 READEF2BYTE(((u8 *)(__phdr)) + 34)
90#define SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
91 WRITEEF2BYTE(((u8 *)(__phdr)) + 34, __val)
92#define MASK_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, __val) \
93 SET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr, \
94 (GET_BEACON_PROBE_RSP_CAPABILITY_INFO(__phdr) & (~(__val))))
95
96int rtl_init_core(struct ieee80211_hw *hw);
97void rtl_deinit_core(struct ieee80211_hw *hw);
98void rtl_init_rx_config(struct ieee80211_hw *hw);
99void rtl_init_rfkill(struct ieee80211_hw *hw);
100void rtl_deinit_rfkill(struct ieee80211_hw *hw);
101
102void rtl_watch_dog_timer_callback(unsigned long data);
103void rtl_deinit_deferred_work(struct ieee80211_hw *hw);
104
105bool rtl_action_proc(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
106bool rtl_tx_mgmt_proc(struct ieee80211_hw *hw, struct sk_buff *skb);
107u8 rtl_is_special_data(struct ieee80211_hw *hw, struct sk_buff *skb, u8 is_tx);
108
110int rtl_tx_agg_start(struct ieee80211_hw *hw, const u8 *ra,
111 u16 tid, u16 *ssn);
112int rtl_tx_agg_stop(struct ieee80211_hw *hw, const u8 *ra, u16 tid);
113void rtl_watchdog_wq_callback(void *data);
114
115void rtl_get_tcb_desc(struct ieee80211_hw *hw,
116 struct ieee80211_tx_info *info,
117 struct sk_buff *skb, struct rtl_tcb_desc *tcb_desc);
118
119extern struct attribute_group rtl_attribute_group;
120#endif
diff --git a/drivers/net/wireless/rtlwifi/cam.c b/drivers/net/wireless/rtlwifi/cam.c
new file mode 100644
index 000000000000..52c9c1367cac
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/cam.c
@@ -0,0 +1,291 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27
28#include "wifi.h"
29#include "cam.h"
30
31void rtl_cam_reset_sec_info(struct ieee80211_hw *hw)
32{
33 struct rtl_priv *rtlpriv = rtl_priv(hw);
34
35 rtlpriv->sec.use_defaultkey = false;
36 rtlpriv->sec.pairwise_enc_algorithm = NO_ENCRYPTION;
37 rtlpriv->sec.group_enc_algorithm = NO_ENCRYPTION;
38 memset(rtlpriv->sec.key_buf, 0, KEY_BUF_SIZE * MAX_KEY_LEN);
39 memset(rtlpriv->sec.key_len, 0, KEY_BUF_SIZE);
40 rtlpriv->sec.pairwise_key = NULL;
41}
42
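/*
 * A CAM entry occupies CAM_CONTENT_COUNT (8) 32-bit words: word 0 holds
 * the config bits plus the first two MAC-address octets, word 1 the
 * remaining four octets, and words 2-7 the 128-bit key.  Each word is
 * written through WCAMI, with the RWCAM command selecting the target
 * word (BIT(31) and BIT(16) appear to be the polling and write flags).
 */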
43static void rtl_cam_program_entry(struct ieee80211_hw *hw, u32 entry_no,
44 u8 *mac_addr, u8 *key_cont_128, u16 us_config)
45{
46 struct rtl_priv *rtlpriv = rtl_priv(hw);
47
48 u32 target_command;
49 u32 target_content = 0;
50 u8 entry_i;
51
52 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
53 ("key_cont_128:\n %x:%x:%x:%x:%x:%x\n",
54 key_cont_128[0], key_cont_128[1],
55 key_cont_128[2], key_cont_128[3],
56 key_cont_128[4], key_cont_128[5]));
57
58 for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
59 target_command = entry_i + CAM_CONTENT_COUNT * entry_no;
60 target_command = target_command | BIT(31) | BIT(16);
61
62 if (entry_i == 0) {
63 target_content = (u32) (*(mac_addr + 0)) << 16 |
64 (u32) (*(mac_addr + 1)) << 24 | (u32) us_config;
65
66 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
67 target_content);
68 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
69 target_command);
70
71 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
72 ("rtl_cam_program_entry(): "
73 "WRITE %x: %x\n",
74 rtlpriv->cfg->maps[WCAMI], target_content));
75 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
76 ("The Key ID is %d\n", entry_no));
77 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
78 ("rtl_cam_program_entry(): "
79 "WRITE %x: %x\n",
80 rtlpriv->cfg->maps[RWCAM], target_command));
81
82 } else if (entry_i == 1) {
83
84 target_content = (u32) (*(mac_addr + 5)) << 24 |
85 (u32) (*(mac_addr + 4)) << 16 |
86 (u32) (*(mac_addr + 3)) << 8 |
87 (u32) (*(mac_addr + 2));
88
89 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
90 target_content);
91 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
92 target_command);
93
94 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
95 ("rtl_cam_program_entry(): WRITE A4: %x\n",
96 target_content));
97 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
98 ("rtl_cam_program_entry(): WRITE A0: %x\n",
99 target_command));
100
101 } else {
102
103 target_content =
104 (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 3)) <<
105 24 | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 2))
106 << 16 |
107 (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 1)) << 8
108 | (u32) (*(key_cont_128 + (entry_i * 4 - 8) + 0));
109
110 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI],
111 target_content);
112 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM],
113 target_command);
114 udelay(100);
115
116 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
117 ("rtl_cam_program_entry(): WRITE A4: %x\n",
118 target_content));
119 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
120 ("rtl_cam_program_entry(): WRITE A0: %x\n",
121 target_command));
122 }
123 }
124
125 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
126 ("after set key, usconfig:%x\n", us_config));
127}
128
129u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
130 u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
131 u32 ul_default_key, u8 *key_content)
132{
133 u32 us_config;
134 struct rtl_priv *rtlpriv = rtl_priv(hw);
135
136 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
137 ("EntryNo:%x, ulKeyId=%x, ulEncAlg=%x, "
138 "ulUseDK=%x MacAddr" MAC_FMT "\n",
139 ul_entry_idx, ul_key_id, ul_enc_alg,
140 ul_default_key, MAC_ARG(mac_addr)));
141
142 if (ul_key_id == TOTAL_CAM_ENTRY) {
143 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
144 ("<=== ulKeyId exceed!\n"));
145 return 0;
146 }
147
148 if (ul_default_key == 1) {
149 us_config = CFG_VALID | ((u16) (ul_enc_alg) << 2);
150 } else {
151 us_config = CFG_VALID | ((ul_enc_alg) << 2) | ul_key_id;
152 }
153
154 rtl_cam_program_entry(hw, ul_entry_idx, mac_addr,
155 (u8 *) key_content, us_config);
156
157 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("<===\n"));
158
159 return 1;
160
161}
162EXPORT_SYMBOL(rtl_cam_add_one_entry);
163
164int rtl_cam_delete_one_entry(struct ieee80211_hw *hw,
165 u8 *mac_addr, u32 ul_key_id)
166{
167 u32 ul_command;
168 struct rtl_priv *rtlpriv = rtl_priv(hw);
169
170 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("key_idx:%d\n", ul_key_id));
171
172 ul_command = ul_key_id * CAM_CONTENT_COUNT;
173 ul_command = ul_command | BIT(31) | BIT(16);
174
175 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], 0);
176 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
177
178 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
179 ("rtl_cam_delete_one_entry(): WRITE A4: %x\n", 0));
180 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
181 ("rtl_cam_delete_one_entry(): WRITE A0: %x\n", ul_command));
182
183 return 0;
184
185}
186EXPORT_SYMBOL(rtl_cam_delete_one_entry);
187
188void rtl_cam_reset_all_entry(struct ieee80211_hw *hw)
189{
190 u32 ul_command;
191 struct rtl_priv *rtlpriv = rtl_priv(hw);
192
193 ul_command = BIT(31) | BIT(30);
194 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
195}
196EXPORT_SYMBOL(rtl_cam_reset_all_entry);
197
198void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index)
199{
200 struct rtl_priv *rtlpriv = rtl_priv(hw);
201
202 u32 ul_command;
203 u32 ul_content;
204 u32 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
205
206 switch (rtlpriv->sec.pairwise_enc_algorithm) {
207 case WEP40_ENCRYPTION:
208 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
209 break;
210 case WEP104_ENCRYPTION:
211 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
212 break;
213 case TKIP_ENCRYPTION:
214 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
215 break;
216 case AESCCMP_ENCRYPTION:
217 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
218 break;
219 default:
220 ul_enc_algo = rtlpriv->cfg->maps[SEC_CAM_AES];
221 }
222
223 ul_content = (uc_index & 3) | ((u16) (ul_enc_algo) << 2);
224
225 ul_content |= BIT(15);
226 ul_command = CAM_CONTENT_COUNT * uc_index;
227 ul_command = ul_command | BIT(31) | BIT(16);
228
229 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
230 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
231
232 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
233 ("rtl_cam_mark_invalid(): WRITE A4: %x\n", ul_content));
234 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
235 ("rtl_cam_mark_invalid(): WRITE A0: %x\n", ul_command));
236}
237EXPORT_SYMBOL(rtl_cam_mark_invalid);
238
239void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index)
240{
241 struct rtl_priv *rtlpriv = rtl_priv(hw);
242
243 u32 ul_command;
244 u32 ul_content;
245 u32 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
246 u8 entry_i;
247
248 switch (rtlpriv->sec.pairwise_enc_algorithm) {
249 case WEP40_ENCRYPTION:
250 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP40];
251 break;
252 case WEP104_ENCRYPTION:
253 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_WEP104];
254 break;
255 case TKIP_ENCRYPTION:
256 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_TKIP];
257 break;
258 case AESCCMP_ENCRYPTION:
259 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
260 break;
261 default:
262 ul_encalgo = rtlpriv->cfg->maps[SEC_CAM_AES];
263 }
264
265 for (entry_i = 0; entry_i < CAM_CONTENT_COUNT; entry_i++) {
266
267 if (entry_i == 0) {
268 ul_content =
269 (uc_index & 0x03) | ((u16) (ul_encalgo) << 2);
270 ul_content |= BIT(15);
271
272 } else {
273 ul_content = 0;
274 }
275
276 ul_command = CAM_CONTENT_COUNT * uc_index + entry_i;
277 ul_command = ul_command | BIT(31) | BIT(16);
278
279 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[WCAMI], ul_content);
280 rtl_write_dword(rtlpriv, rtlpriv->cfg->maps[RWCAM], ul_command);
281
282 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
283 ("rtl_cam_empty_entry(): WRITE A4: %x\n",
284 ul_content));
285 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
286 ("rtl_cam_empty_entry(): WRITE A0: %x\n",
287 ul_command));
288 }
289
290}
291EXPORT_SYMBOL(rtl_cam_empty_entry);
diff --git a/drivers/net/wireless/rtlwifi/cam.h b/drivers/net/wireless/rtlwifi/cam.h
new file mode 100644
index 000000000000..dd82f057d53d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/cam.h
@@ -0,0 +1,53 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 *****************************************************************************/
27
28#ifndef __RTL_CAM_H_
29#define __RTL_CAM_H_
30
31#define TOTAL_CAM_ENTRY 32
32#define CAM_CONTENT_COUNT 8
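/*
 * The hardware key CAM holds TOTAL_CAM_ENTRY entries of CAM_CONTENT_COUNT
 * 32-bit words each (config + MAC address + 128-bit key), programmed by
 * rtl_cam_program_entry() in cam.c.
 */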
33
34#define CFG_DEFAULT_KEY BIT(5)
35#define CFG_VALID BIT(15)
36
37#define PAIRWISE_KEYIDX 0
38#define CAM_PAIRWISE_KEY_POSITION 4
39
40#define CAM_CONFIG_USEDK 1
41#define CAM_CONFIG_NO_USEDK 0
42
43extern void rtl_cam_reset_all_entry(struct ieee80211_hw *hw);
44extern u8 rtl_cam_add_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
45 u32 ul_key_id, u32 ul_entry_idx, u32 ul_enc_alg,
46 u32 ul_default_key, u8 *key_content);
47int rtl_cam_delete_one_entry(struct ieee80211_hw *hw, u8 *mac_addr,
48 u32 ul_key_id);
49void rtl_cam_mark_invalid(struct ieee80211_hw *hw, u8 uc_index);
50void rtl_cam_empty_entry(struct ieee80211_hw *hw, u8 uc_index);
51void rtl_cam_reset_sec_info(struct ieee80211_hw *hw);
52
53#endif
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
new file mode 100644
index 000000000000..d6a924a05654
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -0,0 +1,1029 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *****************************************************************************/
28
29#include "wifi.h"
30#include "core.h"
31#include "cam.h"
32#include "base.h"
33#include "ps.h"
34
35/* A mutex is required here to serialize start & stop. */
36static int rtl_op_start(struct ieee80211_hw *hw)
37{
38 int err = 0;
39 struct rtl_priv *rtlpriv = rtl_priv(hw);
40 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
41
42 if (!is_hal_stop(rtlhal))
43 return 0;
44 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
45 return 0;
46 mutex_lock(&rtlpriv->locks.conf_mutex);
47 err = rtlpriv->intf_ops->adapter_start(hw);
48 if (err)
49 goto out;
50 rtl_watch_dog_timer_callback((unsigned long)hw);
51out:
52 mutex_unlock(&rtlpriv->locks.conf_mutex);
53 return err;
54}
55
56static void rtl_op_stop(struct ieee80211_hw *hw)
57{
58 struct rtl_priv *rtlpriv = rtl_priv(hw);
59 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
60 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
61 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
62
63 if (is_hal_stop(rtlhal))
64 return;
65
66 if (unlikely(ppsc->rfpwr_state == ERFOFF)) {
67 rtl_ips_nic_on(hw);
68 mdelay(1);
69 }
70
71 mutex_lock(&rtlpriv->locks.conf_mutex);
72
73 mac->link_state = MAC80211_NOLINK;
74 memset(mac->bssid, 0, 6);
75
76 /*reset sec info */
77 rtl_cam_reset_sec_info(hw);
78
79 rtl_deinit_deferred_work(hw);
80 rtlpriv->intf_ops->adapter_stop(hw);
81
82 mutex_unlock(&rtlpriv->locks.conf_mutex);
83}
84
85static int rtl_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
86{
87 struct rtl_priv *rtlpriv = rtl_priv(hw);
88 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
89 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
90
91 if (unlikely(is_hal_stop(rtlhal) || ppsc->rfpwr_state != ERFON))
92 goto err_free;
93
94 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
95 goto err_free;
96
97
98 rtlpriv->intf_ops->adapter_tx(hw, skb);
99
100 return NETDEV_TX_OK;
101
102err_free:
103 dev_kfree_skb_any(skb);
104 return NETDEV_TX_OK;
105}
106
107static int rtl_op_add_interface(struct ieee80211_hw *hw,
108 struct ieee80211_vif *vif)
109{
110 struct rtl_priv *rtlpriv = rtl_priv(hw);
111 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
112 int err = 0;
113
114 if (mac->vif) {
115 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
116 ("vif has been set!! mac->vif = 0x%p\n", mac->vif));
117 return -EOPNOTSUPP;
118 }
119
120 rtl_ips_nic_on(hw);
121
122 mutex_lock(&rtlpriv->locks.conf_mutex);
123 switch (vif->type) {
124 case NL80211_IFTYPE_STATION:
125 if (mac->beacon_enabled == 1) {
126 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
127 ("NL80211_IFTYPE_STATION\n"));
128 mac->beacon_enabled = 0;
129 rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
130 rtlpriv->cfg->maps
131 [RTL_IBSS_INT_MASKS]);
132 }
133 break;
134 case NL80211_IFTYPE_ADHOC:
135 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
136 ("NL80211_IFTYPE_ADHOC\n"));
137
138 mac->link_state = MAC80211_LINKED;
139 rtlpriv->cfg->ops->set_bcn_reg(hw);
140 break;
141 case NL80211_IFTYPE_AP:
142 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
143 ("NL80211_IFTYPE_AP\n"));
144 break;
145 default:
146 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 147			 ("operation mode %d is not supported!\n", vif->type));
148 err = -EOPNOTSUPP;
149 goto out;
150 }
151
152 mac->vif = vif;
153 mac->opmode = vif->type;
154 rtlpriv->cfg->ops->set_network_type(hw, vif->type);
155 memcpy(mac->mac_addr, vif->addr, ETH_ALEN);
156 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
157
158out:
159 mutex_unlock(&rtlpriv->locks.conf_mutex);
160 return err;
161}
162
163static void rtl_op_remove_interface(struct ieee80211_hw *hw,
164 struct ieee80211_vif *vif)
165{
166 struct rtl_priv *rtlpriv = rtl_priv(hw);
167 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
168
169 mutex_lock(&rtlpriv->locks.conf_mutex);
170
171 /* Free beacon resources */
172 if ((mac->opmode == NL80211_IFTYPE_AP) ||
173 (mac->opmode == NL80211_IFTYPE_ADHOC) ||
174 (mac->opmode == NL80211_IFTYPE_MESH_POINT)) {
175 if (mac->beacon_enabled == 1) {
176 mac->beacon_enabled = 0;
177 rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
178 rtlpriv->cfg->maps
179 [RTL_IBSS_INT_MASKS]);
180 }
181 }
182
183 /*
184 *Note: We assume NL80211_IFTYPE_UNSPECIFIED as
185 *NO LINK for our hardware.
186 */
187 mac->vif = NULL;
188 mac->link_state = MAC80211_NOLINK;
189 memset(mac->bssid, 0, 6);
190 mac->opmode = NL80211_IFTYPE_UNSPECIFIED;
191 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
192
193 mutex_unlock(&rtlpriv->locks.conf_mutex);
194}
195
196
197static int rtl_op_config(struct ieee80211_hw *hw, u32 changed)
198{
199 struct rtl_priv *rtlpriv = rtl_priv(hw);
200 struct rtl_phy *rtlphy = &(rtlpriv->phy);
201 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
202 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
203 struct ieee80211_conf *conf = &hw->conf;
204
205 mutex_lock(&rtlpriv->locks.conf_mutex);
206 if (changed & IEEE80211_CONF_CHANGE_LISTEN_INTERVAL) { /*BIT(2)*/
207 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
208 ("IEEE80211_CONF_CHANGE_LISTEN_INTERVAL\n"));
209 }
210
211 /*For IPS */
212 if (changed & IEEE80211_CONF_CHANGE_IDLE) {
213 if (hw->conf.flags & IEEE80211_CONF_IDLE)
214 rtl_ips_nic_off(hw);
215 else
216 rtl_ips_nic_on(hw);
217 } else {
 218		/*
 219		 * RF-off may not have been caused by IPS, but the reason
 220		 * is checked in the set_rf_power_state function anyway.
 221		 */
222 if (unlikely(ppsc->rfpwr_state == ERFOFF))
223 rtl_ips_nic_on(hw);
224 }
225
226 /*For LPS */
227 if (changed & IEEE80211_CONF_CHANGE_PS) {
228 if (conf->flags & IEEE80211_CONF_PS)
229 rtl_lps_enter(hw);
230 else
231 rtl_lps_leave(hw);
232 }
233
234 if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
235 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
236 ("IEEE80211_CONF_CHANGE_RETRY_LIMITS %x\n",
237 hw->conf.long_frame_max_tx_count));
238 mac->retry_long = hw->conf.long_frame_max_tx_count;
239 mac->retry_short = hw->conf.long_frame_max_tx_count;
240 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RETRY_LIMIT,
241 (u8 *) (&hw->conf.
242 long_frame_max_tx_count));
243 }
244
245 if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
246 struct ieee80211_channel *channel = hw->conf.channel;
247 u8 wide_chan = (u8) channel->hw_value;
248
 249		/*
 250		 * We must switch back to current_network.chan when
 251		 * scanning, so even if set_chan == current_network.chan
 252		 * we still set it here.
 253		 * mac80211 reports the wrong bw40 information for the
 254		 * Cisco 1253 in bw20 mode, so we correct it here based
 255		 * on the UPPER & LOWER secondary-channel offset.
 256		 */
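		/*
		 * Note: for HT40 the radio is tuned to the centre of the
		 * 40 MHz channel, two channel numbers (10 MHz) away from
		 * the primary channel, hence the wide_chan +/- 2 below.
		 */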
258 switch (hw->conf.channel_type) {
259 case NL80211_CHAN_HT20:
260 case NL80211_CHAN_NO_HT:
261 /* SC */
262 mac->cur_40_prime_sc =
263 PRIME_CHNL_OFFSET_DONT_CARE;
264 rtlphy->current_chan_bw = HT_CHANNEL_WIDTH_20;
265 mac->bw_40 = false;
266 break;
267 case NL80211_CHAN_HT40MINUS:
268 /* SC */
269 mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_UPPER;
270 rtlphy->current_chan_bw =
271 HT_CHANNEL_WIDTH_20_40;
272 mac->bw_40 = true;
273
274 /*wide channel */
275 wide_chan -= 2;
276
277 break;
278 case NL80211_CHAN_HT40PLUS:
279 /* SC */
280 mac->cur_40_prime_sc = PRIME_CHNL_OFFSET_LOWER;
281 rtlphy->current_chan_bw =
282 HT_CHANNEL_WIDTH_20_40;
283 mac->bw_40 = true;
284
285 /*wide channel */
286 wide_chan += 2;
287
288 break;
289 default:
290 mac->bw_40 = false;
291 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
292 ("switch case not processed\n"));
293 break;
294 }
295
296 if (wide_chan <= 0)
297 wide_chan = 1;
298 rtlphy->current_channel = wide_chan;
299
300 rtlpriv->cfg->ops->set_channel_access(hw);
301 rtlpriv->cfg->ops->switch_channel(hw);
302 rtlpriv->cfg->ops->set_bw_mode(hw,
303 hw->conf.channel_type);
304 }
305
306 mutex_unlock(&rtlpriv->locks.conf_mutex);
307
308 return 0;
309}
310
311static void rtl_op_configure_filter(struct ieee80211_hw *hw,
312 unsigned int changed_flags,
313 unsigned int *new_flags, u64 multicast)
314{
315 struct rtl_priv *rtlpriv = rtl_priv(hw);
316 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
317
318 *new_flags &= RTL_SUPPORTED_FILTERS;
319 if (!changed_flags)
320 return;
321
 322	/*TODO: broadcast is disabled for now, so enable it here */
323 if (changed_flags & FIF_ALLMULTI) {
324 if (*new_flags & FIF_ALLMULTI) {
325 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AM] |
326 rtlpriv->cfg->maps[MAC_RCR_AB];
327 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
328 ("Enable receive multicast frame.\n"));
329 } else {
330 mac->rx_conf &= ~(rtlpriv->cfg->maps[MAC_RCR_AM] |
331 rtlpriv->cfg->maps[MAC_RCR_AB]);
332 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
333 ("Disable receive multicast frame.\n"));
334 }
335 }
336
337 if (changed_flags & FIF_FCSFAIL) {
338 if (*new_flags & FIF_FCSFAIL) {
339 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACRC32];
340 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
341 ("Enable receive FCS error frame.\n"));
342 } else {
343 mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACRC32];
344 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
345 ("Disable receive FCS error frame.\n"));
346 }
347 }
348
349 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
350 /*
351 *TODO: BIT(5) is probe response BIT(8) is beacon
352 *TODO: Use define for BIT(5) and BIT(8)
353 */
354 if (*new_flags & FIF_BCN_PRBRESP_PROMISC)
355 mac->rx_mgt_filter |= (BIT(5) | BIT(8));
356 else
357 mac->rx_mgt_filter &= ~(BIT(5) | BIT(8));
358 }
359
360 if (changed_flags & FIF_CONTROL) {
361 if (*new_flags & FIF_CONTROL) {
362 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_ACF];
363 mac->rx_ctrl_filter |= RTL_SUPPORTED_CTRL_FILTER;
364
365 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
366 ("Enable receive control frame.\n"));
367 } else {
368 mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_ACF];
369 mac->rx_ctrl_filter &= ~RTL_SUPPORTED_CTRL_FILTER;
370 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
371 ("Disable receive control frame.\n"));
372 }
373 }
374
375 if (changed_flags & FIF_OTHER_BSS) {
376 if (*new_flags & FIF_OTHER_BSS) {
377 mac->rx_conf |= rtlpriv->cfg->maps[MAC_RCR_AAP];
378 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
379 ("Enable receive other BSS's frame.\n"));
380 } else {
381 mac->rx_conf &= ~rtlpriv->cfg->maps[MAC_RCR_AAP];
382 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
383 ("Disable receive other BSS's frame.\n"));
384 }
385 }
386
387 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR, (u8 *) (&mac->rx_conf));
388 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_MGT_FILTER,
389 (u8 *) (&mac->rx_mgt_filter));
390 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CTRL_FILTER,
391 (u8 *) (&mac->rx_ctrl_filter));
392}
393
394static int _rtl_get_hal_qnum(u16 queue)
395{
396 int qnum;
397
398 switch (queue) {
399 case 0:
400 qnum = AC3_VO;
401 break;
402 case 1:
403 qnum = AC2_VI;
404 break;
405 case 2:
406 qnum = AC0_BE;
407 break;
408 case 3:
409 qnum = AC1_BK;
410 break;
411 default:
412 qnum = AC0_BE;
413 break;
414 }
415 return qnum;
416}
417
418/*
419 *for mac80211 VO=0, VI=1, BE=2, BK=3
420 *for rtl819x BE=0, BK=1, VI=2, VO=3
421 */
422static int rtl_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
423 const struct ieee80211_tx_queue_params *param)
424{
425 struct rtl_priv *rtlpriv = rtl_priv(hw);
426 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
427 int aci;
428
429 if (queue >= AC_MAX) {
430 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
431 ("queue number %d is incorrect!\n", queue));
432 return -EINVAL;
433 }
434
435 aci = _rtl_get_hal_qnum(queue);
436 mac->ac[aci].aifs = param->aifs;
437 mac->ac[aci].cw_min = param->cw_min;
438 mac->ac[aci].cw_max = param->cw_max;
439 mac->ac[aci].tx_op = param->txop;
440 memcpy(&mac->edca_param[aci], param, sizeof(*param));
441 rtlpriv->cfg->ops->set_qos(hw, aci);
442 return 0;
443}
444
445static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
446 struct ieee80211_vif *vif,
447 struct ieee80211_bss_conf *bss_conf, u32 changed)
448{
449 struct rtl_priv *rtlpriv = rtl_priv(hw);
450 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
451 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
452
453 mutex_lock(&rtlpriv->locks.conf_mutex);
454
455 if ((vif->type == NL80211_IFTYPE_ADHOC) ||
456 (vif->type == NL80211_IFTYPE_AP) ||
457 (vif->type == NL80211_IFTYPE_MESH_POINT)) {
458
459 if ((changed & BSS_CHANGED_BEACON) ||
460 (changed & BSS_CHANGED_BEACON_ENABLED &&
461 bss_conf->enable_beacon)) {
462
463 if (mac->beacon_enabled == 0) {
464 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
465 ("BSS_CHANGED_BEACON_ENABLED\n"));
466
467 /*start hw beacon interrupt. */
468 /*rtlpriv->cfg->ops->set_bcn_reg(hw); */
469 mac->beacon_enabled = 1;
470 rtlpriv->cfg->ops->update_interrupt_mask(hw,
471 rtlpriv->cfg->maps
472 [RTL_IBSS_INT_MASKS],
473 0);
474 }
475 } else {
476 if (mac->beacon_enabled == 1) {
477 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
478 ("ADHOC DISABLE BEACON\n"));
479
480 mac->beacon_enabled = 0;
481 rtlpriv->cfg->ops->update_interrupt_mask(hw, 0,
482 rtlpriv->cfg->maps
483 [RTL_IBSS_INT_MASKS]);
484 }
485 }
486
487 if (changed & BSS_CHANGED_BEACON_INT) {
488 RT_TRACE(rtlpriv, COMP_BEACON, DBG_TRACE,
489 ("BSS_CHANGED_BEACON_INT\n"));
490 mac->beacon_interval = bss_conf->beacon_int;
491 rtlpriv->cfg->ops->set_bcn_intv(hw);
492 }
493 }
494
495 /*TODO: reference to enum ieee80211_bss_change */
496 if (changed & BSS_CHANGED_ASSOC) {
497 if (bss_conf->assoc) {
498 mac->link_state = MAC80211_LINKED;
499 mac->cnt_after_linked = 0;
500 mac->assoc_id = bss_conf->aid;
501 memcpy(mac->bssid, bss_conf->bssid, 6);
502
503 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
504 ("BSS_CHANGED_ASSOC\n"));
505 } else {
506 if (mac->link_state == MAC80211_LINKED)
507 rtl_lps_leave(hw);
508
509 mac->link_state = MAC80211_NOLINK;
510 memset(mac->bssid, 0, 6);
511
512 /* reset sec info */
513 rtl_cam_reset_sec_info(hw);
514
515 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
516 ("BSS_CHANGED_UN_ASSOC\n"));
517 }
518 }
519
520 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
521 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
522 ("BSS_CHANGED_ERP_CTS_PROT\n"));
523 mac->use_cts_protect = bss_conf->use_cts_prot;
524 }
525
526 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
527 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
528 ("BSS_CHANGED_ERP_PREAMBLE use short preamble:%x\n",
529 bss_conf->use_short_preamble));
530
531 mac->short_preamble = bss_conf->use_short_preamble;
532 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ACK_PREAMBLE,
533 (u8 *) (&mac->short_preamble));
534 }
535
536 if (changed & BSS_CHANGED_ERP_SLOT) {
537 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
538 ("BSS_CHANGED_ERP_SLOT\n"));
539
540 if (bss_conf->use_short_slot)
541 mac->slot_time = RTL_SLOT_TIME_9;
542 else
543 mac->slot_time = RTL_SLOT_TIME_20;
544
545 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
546 (u8 *) (&mac->slot_time));
547 }
548
549 if (changed & BSS_CHANGED_HT) {
550 struct ieee80211_sta *sta = NULL;
551
552 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
553 ("BSS_CHANGED_HT\n"));
554
555 sta = ieee80211_find_sta(mac->vif, mac->bssid);
556
557 if (sta) {
558 if (sta->ht_cap.ampdu_density >
559 mac->current_ampdu_density)
560 mac->current_ampdu_density =
561 sta->ht_cap.ampdu_density;
562 if (sta->ht_cap.ampdu_factor <
563 mac->current_ampdu_factor)
564 mac->current_ampdu_factor =
565 sta->ht_cap.ampdu_factor;
566 }
567
568 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SHORTGI_DENSITY,
569 (u8 *) (&mac->max_mss_density));
570 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_FACTOR,
571 &mac->current_ampdu_factor);
572 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AMPDU_MIN_SPACE,
573 &mac->current_ampdu_density);
574 }
575
576 if (changed & BSS_CHANGED_BSSID) {
577 struct ieee80211_sta *sta = NULL;
578 u32 basic_rates;
579 u8 i;
580
581 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BSSID,
582 (u8 *) bss_conf->bssid);
583
584 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_DMESG,
585 (MAC_FMT "\n", MAC_ARG(bss_conf->bssid)));
586
587 memcpy(mac->bssid, bss_conf->bssid, 6);
588 if (is_valid_ether_addr(bss_conf->bssid)) {
589 switch (vif->type) {
590 case NL80211_IFTYPE_UNSPECIFIED:
591 break;
592 case NL80211_IFTYPE_ADHOC:
593 break;
594 case NL80211_IFTYPE_STATION:
595 break;
596 case NL80211_IFTYPE_AP:
597 break;
598 default:
599 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 600					 ("switch case not processed\n"));
601 break;
602 }
603 rtlpriv->cfg->ops->set_network_type(hw, vif->type);
604 } else
605 rtlpriv->cfg->ops->set_network_type(hw,
606 NL80211_IFTYPE_UNSPECIFIED);
607
608 memset(mac->mcs, 0, 16);
609 mac->ht_enable = false;
610 mac->sgi_40 = false;
611 mac->sgi_20 = false;
612
613 if (!bss_conf->use_short_slot)
614 mac->mode = WIRELESS_MODE_B;
615 else
616 mac->mode = WIRELESS_MODE_G;
617
618 sta = ieee80211_find_sta(mac->vif, mac->bssid);
619
620 if (sta) {
621 if (sta->ht_cap.ht_supported) {
622 mac->mode = WIRELESS_MODE_N_24G;
623 mac->ht_enable = true;
624 }
625
626 if (mac->ht_enable) {
627 u16 ht_cap = sta->ht_cap.cap;
628 memcpy(mac->mcs, (u8 *) (&sta->ht_cap.mcs), 16);
629
630 for (i = 0; i < 16; i++)
631 RT_TRACE(rtlpriv, COMP_MAC80211,
632 DBG_LOUD, ("%x ",
633 mac->mcs[i]));
634 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD,
635 ("\n"));
636
637 if (ht_cap & IEEE80211_HT_CAP_SGI_40)
638 mac->sgi_40 = true;
639
640 if (ht_cap & IEEE80211_HT_CAP_SGI_20)
641 mac->sgi_20 = true;
642
643 /*
644 * for cisco 1252 bw20 it's wrong
645 * if (ht_cap &
646 * IEEE80211_HT_CAP_SUP_WIDTH_20_40) {
647 * mac->bw_40 = true;
648 * }
649 */
650 }
651 }
652
 653		/* mac80211 only gives us the CCK rates at any time,
 654		 * so we add the G rates to the basic rates when we are
 655		 * not in B mode. */
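		/*
		 * In this rate bitmap, bits 0-3 are presumably the CCK
		 * 1/2/5.5/11M rates (0x00f) and bits 4-11 the OFDM
		 * 6-54M rates (0xff0).
		 */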
656 if (changed & BSS_CHANGED_BASIC_RATES) {
657 if (mac->mode == WIRELESS_MODE_B)
658 basic_rates = bss_conf->basic_rates | 0x00f;
659 else
660 basic_rates = bss_conf->basic_rates | 0xff0;
661
662 if (!vif)
663 goto out;
664
665 mac->basic_rates = basic_rates;
666 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_BASIC_RATE,
667 (u8 *) (&basic_rates));
668
669 if (rtlpriv->dm.b_useramask)
670 rtlpriv->cfg->ops->update_rate_mask(hw, 0);
671 else
672 rtlpriv->cfg->ops->update_rate_table(hw);
673
674 }
675 }
676
677 /*
678 * For FW LPS:
679 * To tell firmware we have connected
680 * to an AP. For 92SE/CE power save v2.
681 */
682 if (changed & BSS_CHANGED_ASSOC) {
683 if (bss_conf->assoc) {
684 if (ppsc->b_fwctrl_lps) {
685 u8 mstatus = RT_MEDIA_CONNECT;
686 rtlpriv->cfg->ops->set_hw_reg(hw,
687 HW_VAR_H2C_FW_JOINBSSRPT,
688 (u8 *) (&mstatus));
689 ppsc->report_linked = true;
690 }
691 } else {
692 if (ppsc->b_fwctrl_lps) {
693 u8 mstatus = RT_MEDIA_DISCONNECT;
694 rtlpriv->cfg->ops->set_hw_reg(hw,
695 HW_VAR_H2C_FW_JOINBSSRPT,
696 (u8 *)(&mstatus));
697 ppsc->report_linked = false;
698 }
699 }
700 }
701
702out:
703 mutex_unlock(&rtlpriv->locks.conf_mutex);
704}
705
706static u64 rtl_op_get_tsf(struct ieee80211_hw *hw)
707{
708 struct rtl_priv *rtlpriv = rtl_priv(hw);
709 u64 tsf;
710
711 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&tsf));
712 return tsf;
713}
714
715static void rtl_op_set_tsf(struct ieee80211_hw *hw, u64 tsf)
716{
717 struct rtl_priv *rtlpriv = rtl_priv(hw);
718 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
 719	u8 bibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ? 1 : 0;
720
721 mac->tsf = tsf;
722 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_CORRECT_TSF, (u8 *) (&bibss));
723}
724
725static void rtl_op_reset_tsf(struct ieee80211_hw *hw)
726{
727 struct rtl_priv *rtlpriv = rtl_priv(hw);
728 u8 tmp = 0;
729
730 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_DUAL_TSF_RST, (u8 *) (&tmp));
731}
732
733static void rtl_op_sta_notify(struct ieee80211_hw *hw,
734 struct ieee80211_vif *vif,
735 enum sta_notify_cmd cmd,
736 struct ieee80211_sta *sta)
737{
738 switch (cmd) {
739 case STA_NOTIFY_SLEEP:
740 break;
741 case STA_NOTIFY_AWAKE:
742 break;
743 default:
744 break;
745 }
746}
747
748static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
749 struct ieee80211_vif *vif,
750 enum ieee80211_ampdu_mlme_action action,
751 struct ieee80211_sta *sta, u16 tid, u16 * ssn)
752{
753 struct rtl_priv *rtlpriv = rtl_priv(hw);
754
755 switch (action) {
756 case IEEE80211_AMPDU_TX_START:
757 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
758 ("IEEE80211_AMPDU_TX_START: TID:%d\n", tid));
759 return rtl_tx_agg_start(hw, sta->addr, tid, ssn);
760 break;
761 case IEEE80211_AMPDU_TX_STOP:
762 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
763 ("IEEE80211_AMPDU_TX_STOP: TID:%d\n", tid));
764 return rtl_tx_agg_stop(hw, sta->addr, tid);
765 break;
766 case IEEE80211_AMPDU_TX_OPERATIONAL:
767 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
768 ("IEEE80211_AMPDU_TX_OPERATIONAL:TID:%d\n", tid));
769 break;
770 case IEEE80211_AMPDU_RX_START:
771 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
772 ("IEEE80211_AMPDU_RX_START:TID:%d\n", tid));
773 break;
774 case IEEE80211_AMPDU_RX_STOP:
775 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
776 ("IEEE80211_AMPDU_RX_STOP:TID:%d\n", tid));
777 break;
778 default:
779 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
780 ("IEEE80211_AMPDU_ERR!!!!:\n"));
781 return -EOPNOTSUPP;
782 }
783 return 0;
784}
785
786static void rtl_op_sw_scan_start(struct ieee80211_hw *hw)
787{
788 struct rtl_priv *rtlpriv = rtl_priv(hw);
789 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
790
791 mac->act_scanning = true;
792
793 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n"));
794
795 if (mac->link_state == MAC80211_LINKED) {
796 rtl_lps_leave(hw);
797 mac->link_state = MAC80211_LINKED_SCANNING;
798 } else
799 rtl_ips_nic_on(hw);
800
801 rtlpriv->cfg->ops->led_control(hw, LED_CTL_SITE_SURVEY);
802 rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_BACKUP);
803}
804
805static void rtl_op_sw_scan_complete(struct ieee80211_hw *hw)
806{
807 struct rtl_priv *rtlpriv = rtl_priv(hw);
808 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
809
810 RT_TRACE(rtlpriv, COMP_MAC80211, DBG_LOUD, ("\n"));
811
812 rtlpriv->cfg->ops->scan_operation_backup(hw, SCAN_OPT_RESTORE);
813 mac->act_scanning = false;
814 if (mac->link_state == MAC80211_LINKED_SCANNING) {
815 mac->link_state = MAC80211_LINKED;
816
817 /* fix fwlps issue */
818 rtlpriv->cfg->ops->set_network_type(hw, mac->opmode);
819
820 if (rtlpriv->dm.b_useramask)
821 rtlpriv->cfg->ops->update_rate_mask(hw, 0);
822 else
823 rtlpriv->cfg->ops->update_rate_table(hw);
824
825 }
826
827}
828
829static int rtl_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
830 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
831 struct ieee80211_key_conf *key)
832{
833 struct rtl_priv *rtlpriv = rtl_priv(hw);
834 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
835 u8 key_type = NO_ENCRYPTION;
836 u8 key_idx;
837 bool group_key = false;
838 bool wep_only = false;
839 int err = 0;
840 u8 mac_addr[ETH_ALEN];
841 u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
842 u8 zero_addr[ETH_ALEN] = { 0 };
843
844 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
845 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
846 ("not open hw encryption\n"));
847 return -ENOSPC; /*User disabled HW-crypto */
848 }
849 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
850 ("%s hardware based encryption for keyidx: %d, mac: %pM\n",
851 cmd == SET_KEY ? "Using" : "Disabling", key->keyidx,
852 sta ? sta->addr : bcast_addr));
853 rtlpriv->sec.being_setkey = true;
854 rtl_ips_nic_on(hw);
855 mutex_lock(&rtlpriv->locks.conf_mutex);
856 /* <1> get encryption alg */
857 switch (key->cipher) {
858 case WLAN_CIPHER_SUITE_WEP40:
859 key_type = WEP40_ENCRYPTION;
860 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:WEP40\n"));
861 rtlpriv->sec.use_defaultkey = true;
862 break;
863 case WLAN_CIPHER_SUITE_WEP104:
864 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
865 ("alg:WEP104\n"));
866 key_type = WEP104_ENCRYPTION;
867 rtlpriv->sec.use_defaultkey = true;
868 break;
869 case WLAN_CIPHER_SUITE_TKIP:
870 key_type = TKIP_ENCRYPTION;
871 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:TKIP\n"));
872 if (mac->opmode == NL80211_IFTYPE_ADHOC)
873 rtlpriv->sec.use_defaultkey = true;
874 break;
875 case WLAN_CIPHER_SUITE_CCMP:
876 key_type = AESCCMP_ENCRYPTION;
877 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("alg:CCMP\n"));
878 if (mac->opmode == NL80211_IFTYPE_ADHOC)
879 rtlpriv->sec.use_defaultkey = true;
880 break;
881 default:
882 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
883 ("alg_err:%x!!!!:\n", key->cipher));
884 goto out_unlock;
885 }
886 /* <2> get key_idx */
887 key_idx = (u8) (key->keyidx);
888 if (key_idx > 3)
889 goto out_unlock;
890 /* <3> if pairwise key enable_hw_sec */
891 group_key = !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE);
892 if ((!group_key) || (mac->opmode == NL80211_IFTYPE_ADHOC) ||
893 rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION) {
894 if (rtlpriv->sec.pairwise_enc_algorithm == NO_ENCRYPTION &&
895 (key_type == WEP40_ENCRYPTION ||
896 key_type == WEP104_ENCRYPTION))
897 wep_only = true;
898 rtlpriv->sec.pairwise_enc_algorithm = key_type;
899 rtlpriv->cfg->ops->enable_hw_sec(hw);
900 }
901 /* <4> set key based on cmd */
902 switch (cmd) {
903 case SET_KEY:
904 if (wep_only) {
905 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
906 ("set WEP(group/pairwise) key\n"));
907 /* Pairwise key with an assigned MAC address. */
908 rtlpriv->sec.pairwise_enc_algorithm = key_type;
909 rtlpriv->sec.group_enc_algorithm = key_type;
910 /*set local buf about wep key. */
911 memcpy(rtlpriv->sec.key_buf[key_idx],
912 key->key, key->keylen);
913 rtlpriv->sec.key_len[key_idx] = key->keylen;
914 memcpy(mac_addr, zero_addr, ETH_ALEN);
915 } else if (group_key) { /* group key */
916 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
917 ("set group key\n"));
918 /* group key */
919 rtlpriv->sec.group_enc_algorithm = key_type;
920 /*set local buf about group key. */
921 memcpy(rtlpriv->sec.key_buf[key_idx],
922 key->key, key->keylen);
923 rtlpriv->sec.key_len[key_idx] = key->keylen;
924 memcpy(mac_addr, bcast_addr, ETH_ALEN);
925 } else { /* pairwise key */
926 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
927 ("set pairwise key\n"));
928 if (!sta) {
 929				RT_ASSERT(false, ("pairwise key without "
 930						"mac_addr\n"));
931 err = -EOPNOTSUPP;
932 goto out_unlock;
933 }
934 /* Pairwise key with an assigned MAC address. */
935 rtlpriv->sec.pairwise_enc_algorithm = key_type;
936 /*set local buf about pairwise key. */
937 memcpy(rtlpriv->sec.key_buf[PAIRWISE_KEYIDX],
938 key->key, key->keylen);
939 rtlpriv->sec.key_len[PAIRWISE_KEYIDX] = key->keylen;
940 rtlpriv->sec.pairwise_key =
941 rtlpriv->sec.key_buf[PAIRWISE_KEYIDX];
942 memcpy(mac_addr, sta->addr, ETH_ALEN);
943 }
944 rtlpriv->cfg->ops->set_key(hw, key_idx, mac_addr,
945 group_key, key_type, wep_only,
946 false);
947 /* <5> tell mac80211 do something: */
948 /*must use sw generate IV, or can not work !!!!. */
949 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
950 key->hw_key_idx = key_idx;
951 if (key_type == TKIP_ENCRYPTION)
952 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
953 break;
954 case DISABLE_KEY:
955 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
956 ("disable key delete one entry\n"));
957 /*set local buf about wep key. */
958 memset(rtlpriv->sec.key_buf[key_idx], 0, key->keylen);
959 rtlpriv->sec.key_len[key_idx] = 0;
960 memcpy(mac_addr, zero_addr, ETH_ALEN);
961 /*
 962		 *mac80211 will delete entries one by one,
963 *so don't use rtl_cam_reset_all_entry
964 *or clear all entry here.
965 */
966 rtl_cam_delete_one_entry(hw, mac_addr, key_idx);
967 break;
968 default:
969 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
970 ("cmd_err:%x!!!!:\n", cmd));
971 }
972out_unlock:
973 mutex_unlock(&rtlpriv->locks.conf_mutex);
974 rtlpriv->sec.being_setkey = false;
975 return err;
976}
977
978static void rtl_op_rfkill_poll(struct ieee80211_hw *hw)
979{
980 struct rtl_priv *rtlpriv = rtl_priv(hw);
981
982 bool radio_state;
983 bool blocked;
984 u8 valid = 0;
985
986 if (!test_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status))
987 return;
988
989 mutex_lock(&rtlpriv->locks.conf_mutex);
990
991 /*if Radio On return true here */
992 radio_state = rtlpriv->cfg->ops->radio_onoff_checking(hw, &valid);
993
994 if (valid) {
995 if (unlikely(radio_state != rtlpriv->rfkill.rfkill_state)) {
996 rtlpriv->rfkill.rfkill_state = radio_state;
997
998 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
999 (KERN_INFO "wireless radio switch turned %s\n",
1000 radio_state ? "on" : "off"));
1001
1002 blocked = (rtlpriv->rfkill.rfkill_state == 1) ? 0 : 1;
1003 wiphy_rfkill_set_hw_state(hw->wiphy, blocked);
1004 }
1005 }
1006
1007 mutex_unlock(&rtlpriv->locks.conf_mutex);
1008}
1009
1010const struct ieee80211_ops rtl_ops = {
1011 .start = rtl_op_start,
1012 .stop = rtl_op_stop,
1013 .tx = rtl_op_tx,
1014 .add_interface = rtl_op_add_interface,
1015 .remove_interface = rtl_op_remove_interface,
1016 .config = rtl_op_config,
1017 .configure_filter = rtl_op_configure_filter,
1018 .set_key = rtl_op_set_key,
1019 .conf_tx = rtl_op_conf_tx,
1020 .bss_info_changed = rtl_op_bss_info_changed,
1021 .get_tsf = rtl_op_get_tsf,
1022 .set_tsf = rtl_op_set_tsf,
1023 .reset_tsf = rtl_op_reset_tsf,
1024 .sta_notify = rtl_op_sta_notify,
1025 .ampdu_action = rtl_op_ampdu_action,
1026 .sw_scan_start = rtl_op_sw_scan_start,
1027 .sw_scan_complete = rtl_op_sw_scan_complete,
1028 .rfkill_poll = rtl_op_rfkill_poll,
1029};
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
new file mode 100644
index 000000000000..0ef31c3c6196
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -0,0 +1,42 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
 9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
 15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
 18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *****************************************************************************/
28
29#ifndef __RTL_CORE_H__
30#define __RTL_CORE_H__
31
32#define RTL_SUPPORTED_FILTERS \
33 (FIF_PROMISC_IN_BSS | \
34 FIF_ALLMULTI | FIF_CONTROL | \
35 FIF_OTHER_BSS | \
36 FIF_FCSFAIL | \
37 FIF_BCN_PRBRESP_PROMISC)
38
39#define RTL_SUPPORTED_CTRL_FILTER 0xFF
40
41extern const struct ieee80211_ops rtl_ops;
42#endif
diff --git a/drivers/net/wireless/rtlwifi/debug.c b/drivers/net/wireless/rtlwifi/debug.c
new file mode 100644
index 000000000000..5fa73852cb66
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/debug.c
@@ -0,0 +1,50 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
 9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
 15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
 18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *****************************************************************************/
28
29#include "wifi.h"
30
31void rtl_dbgp_flag_init(struct ieee80211_hw *hw)
32{
33 struct rtl_priv *rtlpriv = rtl_priv(hw);
34 u8 i;
35
36 rtlpriv->dbg.global_debuglevel = DBG_EMERG;
37
38 rtlpriv->dbg.global_debugcomponents =
39 COMP_ERR | COMP_FW | COMP_INIT | COMP_RECV | COMP_SEND |
40 COMP_MLME | COMP_SCAN | COMP_INTR | COMP_LED | COMP_SEC |
41 COMP_BEACON | COMP_RATE | COMP_RXDESC | COMP_DIG | COMP_TXAGC |
42 COMP_POWER | COMP_POWER_TRACKING | COMP_BB_POWERSAVING | COMP_SWAS |
43 COMP_RF | COMP_TURBO | COMP_RATR | COMP_CMD |
44 COMP_EFUSE | COMP_QOS | COMP_MAC80211 | COMP_REGD | COMP_CHAN;
45
46 for (i = 0; i < DBGP_TYPE_MAX; i++)
47 rtlpriv->dbg.dbgp_type[i] = 0;
48
49 /*Init Debug flag enable condition */
50}
diff --git a/drivers/net/wireless/rtlwifi/debug.h b/drivers/net/wireless/rtlwifi/debug.h
new file mode 100644
index 000000000000..08bdec2ceda4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/debug.h
@@ -0,0 +1,212 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
 5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
 9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
 15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
 18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *****************************************************************************/
28
29#ifndef __RTL_DEBUG_H__
30#define __RTL_DEBUG_H__
31
32/*--------------------------------------------------------------
33 Debug level
34--------------------------------------------------------------*/
35/*
36 *Fatal bug.
37 *For example, Tx/Rx/IO locked up,
38 *memory access violation,
39 *resource allocation failed,
40 *unexpected HW behavior, HW BUG
41 *and so on.
42 */
43#define DBG_EMERG 0
44
45/*
46 *Abnormal, rare, or unexpected cases.
47 *For example, Packet/IO Ctl canceled,
48 *device surprise removed, and so on.
49 */
50#define DBG_WARNING 2
51
52/*
53 *Normal cases that a driver developer
54 *should enable; shows link status such as
55 *assoc/AddBA/DHCP/adapter start and
56 *other basic and useful information.
57 */
58#define DBG_DMESG 3
59
60/*
61 *Normal case with useful information
62 *about current SW or HW state.
63 *For example, Tx/Rx descriptor to fill,
64 *Tx/Rx descriptor completed status,
65 *SW protocol state change, dynamic
66 *mechanism state change and so on.
67 */
68#define DBG_LOUD 4
69
70/*
71 *Normal case with detail execution
72 *flow or information.
73 */
74#define DBG_TRACE 5
75
76/*--------------------------------------------------------------
77 Define the rt_trace components
78--------------------------------------------------------------*/
79#define COMP_ERR BIT(0)
80#define COMP_FW BIT(1)
81#define COMP_INIT BIT(2) /*For init/deinit */
82#define COMP_RECV BIT(3) /*For Rx. */
83#define COMP_SEND BIT(4) /*For Tx. */
84#define COMP_MLME BIT(5) /*For MLME. */
85#define COMP_SCAN BIT(6) /*For Scan. */
86#define COMP_INTR BIT(7) /*For interrupt Related. */
87#define COMP_LED BIT(8) /*For LED. */
88#define COMP_SEC BIT(9) /*For sec. */
89#define COMP_BEACON BIT(10) /*For beacon. */
90#define COMP_RATE BIT(11) /*For rate. */
91#define COMP_RXDESC BIT(12) /*For rx desc. */
92#define COMP_DIG BIT(13) /*For DIG */
93#define COMP_TXAGC BIT(14) /*For Tx power */
94#define COMP_HIPWR BIT(15) /*For High Power Mechanism */
95#define COMP_POWER BIT(16) /*For lps/ips/aspm. */
96#define COMP_POWER_TRACKING BIT(17) /*For TX POWER TRACKING */
97#define COMP_BB_POWERSAVING BIT(18)
98#define COMP_SWAS BIT(19) /*For SW Antenna Switch */
99#define COMP_RF BIT(20) /*For RF. */
100#define COMP_TURBO BIT(21) /*For EDCA TURBO. */
101#define COMP_RATR BIT(22)
102#define COMP_CMD BIT(23)
103#define COMP_EFUSE BIT(24)
104#define COMP_QOS BIT(25)
105#define COMP_MAC80211 BIT(26)
106#define COMP_REGD BIT(27)
107#define COMP_CHAN BIT(28)
108
109/*--------------------------------------------------------------
110 Define the rt_print components
111--------------------------------------------------------------*/
112/* Define EEPROM and EFUSE check module bit*/
113#define EEPROM_W BIT(0)
114#define EFUSE_PG BIT(1)
115#define EFUSE_READ_ALL BIT(2)
116
117/* Define init check for module bit*/
118#define INIT_EEPROM BIT(0)
119#define INIT_TxPower BIT(1)
120#define INIT_IQK BIT(2)
121#define INIT_RF BIT(3)
122
123/* Define PHY-BB/RF/MAC check module bit */
124#define PHY_BBR BIT(0)
125#define PHY_BBW BIT(1)
126#define PHY_RFR BIT(2)
127#define PHY_RFW BIT(3)
128#define PHY_MACR BIT(4)
129#define PHY_MACW BIT(5)
130#define PHY_ALLR BIT(6)
131#define PHY_ALLW BIT(7)
132#define PHY_TXPWR BIT(8)
133#define PHY_PWRDIFF BIT(9)
134
135enum dbgp_flag_e {
136 FQOS = 0,
137 FTX = 1,
138 FRX = 2,
139 FSEC = 3,
140 FMGNT = 4,
141 FMLME = 5,
142 FRESOURCE = 6,
143 FBEACON = 7,
144 FISR = 8,
145 FPHY = 9,
146 FMP = 10,
147 FEEPROM = 11,
148 FPWR = 12,
149 FDM = 13,
150 FDBGCtrl = 14,
151 FC2H = 15,
152 FBT = 16,
153 FINIT = 17,
154 FIOCTL = 18,
155 DBGP_TYPE_MAX
156};
157
158#define RT_ASSERT(_exp, fmt) \
159 do { \
160 if (!(_exp)) { \
161 printk(KERN_DEBUG "%s:%s(): ", KBUILD_MODNAME, \
162 __func__); \
163 printk fmt; \
164 } \
165 } while (0)
166
167#define RT_TRACE(rtlpriv, comp, level, fmt)\
168 do { \
169 if (unlikely(((comp) & rtlpriv->dbg.global_debugcomponents) && \
170 ((level) <= rtlpriv->dbg.global_debuglevel))) {\
171 printk(KERN_DEBUG "%s:%s():<%lx-%x> ", KBUILD_MODNAME, \
172 __func__, in_interrupt(), in_atomic()); \
173 printk fmt; \
174 } \
175 } while (0)
176
177#define RTPRINT(rtlpriv, dbgtype, dbgflag, printstr) \
178 do { \
179 if (unlikely(rtlpriv->dbg.dbgp_type[dbgtype] & dbgflag)) { \
180 printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
181 printk printstr; \
182 } \
183 } while (0)
184
185#define RT_PRINT_DATA(rtlpriv, _comp, _level, _titlestring, _hexdata, \
186 _hexdatalen) \
187 do {\
188 if (unlikely(((_comp) & rtlpriv->dbg.global_debugcomponents) &&\
189 (_level <= rtlpriv->dbg.global_debuglevel))) { \
190 int __i; \
191 u8* ptr = (u8 *)_hexdata; \
192 printk(KERN_DEBUG "%s: ", KBUILD_MODNAME); \
193 printk("In process \"%s\" (pid %i):", current->comm,\
194 current->pid); \
195 printk(_titlestring); \
196 for (__i = 0; __i < (int)_hexdatalen; __i++) { \
197 printk("%02X%s", ptr[__i], (((__i + 1) % 4)\
198 == 0) ? " " : " ");\
199 if (((__i + 1) % 16) == 0) \
200 printk("\n"); \
201 } \
202 printk(KERN_DEBUG "\n"); \
203 } \
204 } while (0)
205
206#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
207#define MAC_ARG(x) \
208 ((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2],\
209 ((u8 *)(x))[3], ((u8 *)(x))[4], ((u8 *)(x))[5]
210
211void rtl_dbgp_flag_init(struct ieee80211_hw *hw);
212#endif
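For reference, the printk-style arguments to RT_TRACE and RTPRINT must be wrapped in an extra set of parentheses, since the macro bodies expand them via "printk fmt". A minimal sketch of a call site, mirroring the usage that appears in efuse.c later in this patch (address, value and efuse_addr stand in for whatever locals the caller has):

	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Printed only if COMP_EFUSE is set in global_debugcomponents and
	 * global_debuglevel is at least DBG_LOUD. Note the inner parentheses. */
	RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
		 ("Addr=%x Data=%x\n", address, value));

	/* RTPRINT is gated by the per-type dbgp_type[] flags instead. */
	RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL, ("Addr=%d\n", efuse_addr));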
diff --git a/drivers/net/wireless/rtlwifi/efuse.c b/drivers/net/wireless/rtlwifi/efuse.c
new file mode 100644
index 000000000000..b8433f3a9bc2
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/efuse.c
@@ -0,0 +1,1189 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "wifi.h"
31#include "efuse.h"
32
33static const u8 MAX_PGPKT_SIZE = 9;
34static const u8 PGPKT_DATA_SIZE = 8;
35static const int EFUSE_MAX_SIZE = 512;
36
37static const u8 EFUSE_OOB_PROTECT_BYTES = 15;
38
39static const struct efuse_map RTL8712_SDIO_EFUSE_TABLE[] = {
40 {0, 0, 0, 2},
41 {0, 1, 0, 2},
42 {0, 2, 0, 2},
43 {1, 0, 0, 1},
44 {1, 0, 1, 1},
45 {1, 1, 0, 1},
46 {1, 1, 1, 3},
47 {1, 3, 0, 17},
48 {3, 3, 1, 48},
49 {10, 0, 0, 6},
50 {10, 3, 0, 1},
51 {10, 3, 1, 1},
52 {11, 0, 0, 28}
53};
54
55static void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset,
56 u8 *pbuf);
57static void efuse_shadow_read_1byte(struct ieee80211_hw *hw, u16 offset,
58 u8 *value);
59static void efuse_shadow_read_2byte(struct ieee80211_hw *hw, u16 offset,
60 u16 *value);
61static void efuse_shadow_read_4byte(struct ieee80211_hw *hw, u16 offset,
62 u32 *value);
63static void efuse_shadow_write_1byte(struct ieee80211_hw *hw, u16 offset,
64 u8 value);
65static void efuse_shadow_write_2byte(struct ieee80211_hw *hw, u16 offset,
66 u16 value);
67static void efuse_shadow_write_4byte(struct ieee80211_hw *hw, u16 offset,
68 u32 value);
69static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr,
70 u8 *data);
71static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr,
72 u8 data);
73static void efuse_read_all_map(struct ieee80211_hw *hw, u8 *efuse);
74static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset,
75 u8 *data);
76static int efuse_pg_packet_write(struct ieee80211_hw *hw, u8 offset,
77 u8 word_en, u8 *data);
78static void efuse_word_enable_data_read(u8 word_en, u8 *sourdata,
79 u8 *targetdata);
80static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
81 u16 efuse_addr, u8 word_en, u8 *data);
82static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite,
83 u8 pwrstate);
84static u16 efuse_get_current_size(struct ieee80211_hw *hw);
85static u8 efuse_calculate_word_cnts(u8 word_en);
86
87void efuse_initialize(struct ieee80211_hw *hw)
88{
89 struct rtl_priv *rtlpriv = rtl_priv(hw);
90 u8 bytetemp;
91 u8 temp;
92
93 bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1);
94 temp = bytetemp | 0x20;
95 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN] + 1, temp);
96
97 bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1);
98 temp = bytetemp & 0xFE;
99 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[SYS_ISO_CTRL] + 1, temp);
100
101 bytetemp = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3);
102 temp = bytetemp | 0x80;
103 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_TEST] + 3, temp);
104
105 rtl_write_byte(rtlpriv, 0x2F8, 0x3);
106
107 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
108
109}
110
111u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address)
112{
113 struct rtl_priv *rtlpriv = rtl_priv(hw);
114 u8 data;
115 u8 bytetemp;
116 u8 temp;
117 u32 k = 0;
118
119 if (address < EFUSE_REAL_CONTENT_LEN) {
120 temp = address & 0xFF;
121 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
122 temp);
123 bytetemp = rtl_read_byte(rtlpriv,
124 rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
125 temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
126 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
127 temp);
128
129 bytetemp = rtl_read_byte(rtlpriv,
130 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
131 temp = bytetemp & 0x7F;
132 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
133 temp);
134
135 bytetemp = rtl_read_byte(rtlpriv,
136 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
137 while (!(bytetemp & 0x80)) {
138 bytetemp = rtl_read_byte(rtlpriv,
139 rtlpriv->cfg->
140 maps[EFUSE_CTRL] + 3);
141 k++;
142 if (k == 1000) {
143 k = 0;
144 break;
145 }
146 }
147 data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
148 return data;
149 } else
150 return 0xFF;
151
152}
153EXPORT_SYMBOL(efuse_read_1byte);
154
155void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value)
156{
157 struct rtl_priv *rtlpriv = rtl_priv(hw);
158 u8 bytetemp;
159 u8 temp;
160 u32 k = 0;
161
162 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
163 ("Addr=%x Data =%x\n", address, value));
164
165 if (address < EFUSE_REAL_CONTENT_LEN) {
166 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], value);
167
168 temp = address & 0xFF;
169 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
170 temp);
171 bytetemp = rtl_read_byte(rtlpriv,
172 rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
173
174 temp = ((address >> 8) & 0x03) | (bytetemp & 0xFC);
175 rtl_write_byte(rtlpriv,
176 rtlpriv->cfg->maps[EFUSE_CTRL] + 2, temp);
177
178 bytetemp = rtl_read_byte(rtlpriv,
179 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
180 temp = bytetemp | 0x80;
181 rtl_write_byte(rtlpriv,
182 rtlpriv->cfg->maps[EFUSE_CTRL] + 3, temp);
183
184 bytetemp = rtl_read_byte(rtlpriv,
185 rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
186
187 while (bytetemp & 0x80) {
188 bytetemp = rtl_read_byte(rtlpriv,
189 rtlpriv->cfg->
190 maps[EFUSE_CTRL] + 3);
191 k++;
192 if (k == 100) {
193 k = 0;
194 break;
195 }
196 }
197 }
198
199}
200
201static void read_efuse_byte(struct ieee80211_hw *hw, u16 _offset, u8 *pbuf)
202{
203 struct rtl_priv *rtlpriv = rtl_priv(hw);
204 u32 value32;
205 u8 readbyte;
206 u16 retry;
207
208 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
209 (_offset & 0xff));
210 readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2);
211 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
212 ((_offset >> 8) & 0x03) | (readbyte & 0xfc));
213
214 readbyte = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3);
215 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3,
216 (readbyte & 0x7f));
217
218 retry = 0;
219 value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
220 while (!(((value32 >> 24) & 0xff) & 0x80) && (retry < 10000)) {
221 value32 = rtl_read_dword(rtlpriv,
222 rtlpriv->cfg->maps[EFUSE_CTRL]);
223 retry++;
224 }
225
226 udelay(50);
227 value32 = rtl_read_dword(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
228
229 *pbuf = (u8) (value32 & 0xff);
230}
231
232void read_efuse(struct ieee80211_hw *hw, u16 _offset, u16 _size_byte, u8 *pbuf)
233{
234 struct rtl_priv *rtlpriv = rtl_priv(hw);
235 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
236 u8 efuse_tbl[EFUSE_MAP_LEN];
237 u8 rtemp8[1];
238 u16 efuse_addr = 0;
239 u8 offset, wren;
240 u16 i;
241 u16 j;
242 u16 efuse_word[EFUSE_MAX_SECTION][EFUSE_MAX_WORD_UNIT];
243 u16 efuse_utilized = 0;
244 u8 efuse_usage;
245
246 if ((_offset + _size_byte) > EFUSE_MAP_LEN) {
247 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
248 ("read_efuse(): Invalid offset(%#x) with read "
249 "bytes(%#x)!!\n", _offset, _size_byte));
250 return;
251 }
252
253 for (i = 0; i < EFUSE_MAX_SECTION; i++)
254 for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++)
255 efuse_word[i][j] = 0xFFFF;
256
257 read_efuse_byte(hw, efuse_addr, rtemp8);
258 if (*rtemp8 != 0xFF) {
259 efuse_utilized++;
260 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
261 ("Addr=%d\n", efuse_addr));
262 efuse_addr++;
263 }
264
265 while ((*rtemp8 != 0xFF) && (efuse_addr < EFUSE_REAL_CONTENT_LEN)) {
266 offset = ((*rtemp8 >> 4) & 0x0f);
267
268 if (offset < EFUSE_MAX_SECTION) {
269 wren = (*rtemp8 & 0x0f);
270 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
271 ("offset-%d Worden=%x\n", offset, wren));
272
273 for (i = 0; i < EFUSE_MAX_WORD_UNIT; i++) {
274 if (!(wren & 0x01)) {
275 RTPRINT(rtlpriv, FEEPROM,
276 EFUSE_READ_ALL, ("Addr=%d\n",
277 efuse_addr));
278
279 read_efuse_byte(hw, efuse_addr, rtemp8);
280 efuse_addr++;
281 efuse_utilized++;
282 efuse_word[offset][i] = (*rtemp8 & 0xff);
283
284 if (efuse_addr >= EFUSE_REAL_CONTENT_LEN)
285 break;
286
287 RTPRINT(rtlpriv, FEEPROM,
288 EFUSE_READ_ALL, ("Addr=%d\n",
289 efuse_addr));
290
291 read_efuse_byte(hw, efuse_addr, rtemp8);
292 efuse_addr++;
293 efuse_utilized++;
294 efuse_word[offset][i] |=
295 (((u16)*rtemp8 << 8) & 0xff00);
296
297 if (efuse_addr >= EFUSE_REAL_CONTENT_LEN)
298 break;
299 }
300
301 wren >>= 1;
302 }
303 }
304
305 RTPRINT(rtlpriv, FEEPROM, EFUSE_READ_ALL,
306 ("Addr=%d\n", efuse_addr));
307 read_efuse_byte(hw, efuse_addr, rtemp8);
308 if (*rtemp8 != 0xFF && (efuse_addr < 512)) {
309 efuse_utilized++;
310 efuse_addr++;
311 }
312 }
313
314 for (i = 0; i < EFUSE_MAX_SECTION; i++) {
315 for (j = 0; j < EFUSE_MAX_WORD_UNIT; j++) {
316 efuse_tbl[(i * 8) + (j * 2)] =
317 (efuse_word[i][j] & 0xff);
318 efuse_tbl[(i * 8) + ((j * 2) + 1)] =
319 ((efuse_word[i][j] >> 8) & 0xff);
320 }
321 }
322
323 for (i = 0; i < _size_byte; i++)
324 pbuf[i] = efuse_tbl[_offset + i];
325
326 rtlefuse->efuse_usedbytes = efuse_utilized;
327 efuse_usage = (u8)((efuse_utilized * 100) / EFUSE_REAL_CONTENT_LEN);
328 rtlefuse->efuse_usedpercentage = efuse_usage;
329 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_BYTES,
330 (u8 *)&efuse_utilized);
331 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_EFUSE_USAGE,
332 (u8 *)&efuse_usage);
333}
334
335bool efuse_shadow_update_chk(struct ieee80211_hw *hw)
336{
337 struct rtl_priv *rtlpriv = rtl_priv(hw);
338 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
339 u8 section_idx, i, Base;
340 u16 words_need = 0, hdr_num = 0, totalbytes, efuse_used;
341 bool bwordchanged, bresult = true;
342
343 for (section_idx = 0; section_idx < 16; section_idx++) {
344 Base = section_idx * 8;
345 bwordchanged = false;
346
347 for (i = 0; i < 8; i = i + 2) {
348 if ((rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i] !=
349 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i]) ||
350 (rtlefuse->efuse_map[EFUSE_INIT_MAP][Base + i + 1] !=
351 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][Base + i +
352 1])) {
353 words_need++;
354 bwordchanged = true;
355 }
356 }
357
358 if (bwordchanged == true)
359 hdr_num++;
360 }
361
362 totalbytes = hdr_num + words_need * 2;
363 efuse_used = rtlefuse->efuse_usedbytes;
364
365 if ((totalbytes + efuse_used) >=
366 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))
367 bresult = false;
368
369 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
370 ("efuse_shadow_update_chk(): totalbytes(%#x), "
371 "hdr_num(%#x), words_need(%#x), efuse_used(%d)\n",
372 totalbytes, hdr_num, words_need, efuse_used));
373
374 return bresult;
375}
376
377void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
378 u16 offset, u32 *value)
379{
380 if (type == 1)
381 efuse_shadow_read_1byte(hw, offset, (u8 *) value);
382 else if (type == 2)
383 efuse_shadow_read_2byte(hw, offset, (u16 *) value);
384 else if (type == 4)
385 efuse_shadow_read_4byte(hw, offset, (u32 *) value);
386
387}
388
389void efuse_shadow_write(struct ieee80211_hw *hw, u8 type, u16 offset,
390 u32 value)
391{
392 if (type == 1)
393 efuse_shadow_write_1byte(hw, offset, (u8) value);
394 else if (type == 2)
395 efuse_shadow_write_2byte(hw, offset, (u16) value);
396 else if (type == 4)
397 efuse_shadow_write_4byte(hw, offset, (u32) value);
398
399}
400
401bool efuse_shadow_update(struct ieee80211_hw *hw)
402{
403 struct rtl_priv *rtlpriv = rtl_priv(hw);
404 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
405 u16 i, offset, base;
406 u8 word_en = 0x0F;
407 u8 first_pg = false;
408
409 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("--->\n"));
410
411 if (!efuse_shadow_update_chk(hw)) {
412 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
413 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
414 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
415 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
416
417 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
418 ("<---efuse out of capacity!!\n"));
419 return false;
420 }
421 efuse_power_switch(hw, true, true);
422
423 for (offset = 0; offset < 16; offset++) {
424
425 word_en = 0x0F;
426 base = offset * 8;
427
428 for (i = 0; i < 8; i++) {
429 if (first_pg == true) {
430
431 word_en &= ~(BIT(i / 2));
432
433 rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
434 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
435 } else {
436
437 if (rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] !=
438 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i]) {
439 word_en &= ~(BIT(i / 2));
440
441 rtlefuse->efuse_map[EFUSE_INIT_MAP][base + i] =
442 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][base + i];
443 }
444 }
445 }
446
447 if (word_en != 0x0F) {
448 u8 tmpdata[8];
449 memcpy((void *)tmpdata,
450 (void *)(&rtlefuse->
451 efuse_map[EFUSE_MODIFY_MAP][base]), 8);
452 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD,
453 ("U-efuse\n"), tmpdata, 8);
454
455 if (!efuse_pg_packet_write(hw, (u8) offset, word_en,
456 tmpdata)) {
457 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
458 ("PG section(%#x) fail!!\n", offset));
459 break;
460 }
461 }
462
463 }
464
465 efuse_power_switch(hw, true, false);
466 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
467
468 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
469 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
470 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
471
472 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD, ("<---\n"));
473 return true;
474}
475
476void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw)
477{
478 struct rtl_priv *rtlpriv = rtl_priv(hw);
479 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
480
481 if (rtlefuse->autoload_failflag == true) {
482 memset((void *)(&rtlefuse->efuse_map[EFUSE_INIT_MAP][0]), 0xFF,
483 128);
484 } else
485 efuse_read_all_map(hw, &rtlefuse->efuse_map[EFUSE_INIT_MAP][0]);
486
487 memcpy((void *)&rtlefuse->efuse_map[EFUSE_MODIFY_MAP][0],
488 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
489 rtlpriv->cfg->maps[EFUSE_HWSET_MAX_SIZE]);
490
491}
492EXPORT_SYMBOL(rtl_efuse_shadow_map_update);
493
494void efuse_force_write_vendor_Id(struct ieee80211_hw *hw)
495{
496 u8 tmpdata[8] = { 0xFF, 0xFF, 0xEC, 0x10, 0xFF, 0xFF, 0xFF, 0xFF };
497
498 efuse_power_switch(hw, true, true);
499
500 efuse_pg_packet_write(hw, 1, 0xD, tmpdata);
501
502 efuse_power_switch(hw, true, false);
503
504}
505
506void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx)
507{
508}
509
510static void efuse_shadow_read_1byte(struct ieee80211_hw *hw,
511 u16 offset, u8 *value)
512{
513 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
514 *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
515}
516
517static void efuse_shadow_read_2byte(struct ieee80211_hw *hw,
518 u16 offset, u16 *value)
519{
520 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
521
522 *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
523 *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
524
525}
526
527static void efuse_shadow_read_4byte(struct ieee80211_hw *hw,
528 u16 offset, u32 *value)
529{
530 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
531
532 *value = rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset];
533 *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] << 8;
534 *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] << 16;
535 *value |= rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] << 24;
536}
537
538static void efuse_shadow_write_1byte(struct ieee80211_hw *hw,
539 u16 offset, u8 value)
540{
541 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
542
543 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value;
544}
545
546static void efuse_shadow_write_2byte(struct ieee80211_hw *hw,
547 u16 offset, u16 value)
548{
549 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
550
551 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] = value & 0x00FF;
552 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] = value >> 8;
553
554}
555
556static void efuse_shadow_write_4byte(struct ieee80211_hw *hw,
557 u16 offset, u32 value)
558{
559 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
560
561 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset] =
562 (u8) (value & 0x000000FF);
563 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 1] =
564 (u8) ((value >> 8) & 0x0000FF);
565 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 2] =
566 (u8) ((value >> 16) & 0x00FF);
567 rtlefuse->efuse_map[EFUSE_MODIFY_MAP][offset + 3] =
568 (u8) ((value >> 24) & 0xFF);
569
570}
571
572static int efuse_one_byte_read(struct ieee80211_hw *hw, u16 addr, u8 *data)
573{
574 struct rtl_priv *rtlpriv = rtl_priv(hw);
575 u8 tmpidx = 0;
576 int bresult;
577
578 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 1,
579 (u8) (addr & 0xff));
580 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
581 ((u8) ((addr >> 8) & 0x03)) |
582 (rtl_read_byte(rtlpriv,
583 rtlpriv->cfg->maps[EFUSE_CTRL] + 2) &
584 0xFC));
585
586 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0x72);
587
588 while (!(0x80 & rtl_read_byte(rtlpriv,
589 rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
590 && (tmpidx < 100)) {
591 tmpidx++;
592 }
593
594 if (tmpidx < 100) {
595 *data = rtl_read_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL]);
596 bresult = true;
597 } else {
598 *data = 0xff;
599 bresult = false;
600 }
601 return bresult;
602}
603
604static int efuse_one_byte_write(struct ieee80211_hw *hw, u16 addr, u8 data)
605{
606 struct rtl_priv *rtlpriv = rtl_priv(hw);
607 u8 tmpidx = 0;
608 bool bresult;
609
610 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
611 ("Addr = %x Data=%x\n", addr, data));
612
613 rtl_write_byte(rtlpriv,
614 rtlpriv->cfg->maps[EFUSE_CTRL] + 1, (u8) (addr & 0xff));
615 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 2,
616 (rtl_read_byte(rtlpriv,
617 rtlpriv->cfg->maps[EFUSE_CTRL] +
618 2) & 0xFC) | (u8) ((addr >> 8) & 0x03));
619
620 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL], data);
621 rtl_write_byte(rtlpriv, rtlpriv->cfg->maps[EFUSE_CTRL] + 3, 0xF2);
622
623 while ((0x80 & rtl_read_byte(rtlpriv,
624 rtlpriv->cfg->maps[EFUSE_CTRL] + 3))
625 && (tmpidx < 100)) {
626 tmpidx++;
627 }
628
629 if (tmpidx < 100)
630 bresult = true;
631 else
632 bresult = false;
633
634 return bresult;
635}
636
637static void efuse_read_all_map(struct ieee80211_hw *hw, u8 * efuse)
638{
639 efuse_power_switch(hw, false, true);
640 read_efuse(hw, 0, 128, efuse);
641 efuse_power_switch(hw, false, false);
642}
643
644static void efuse_read_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
645 u8 efuse_data, u8 offset, u8 *tmpdata,
646 u8 *readstate)
647{
648 bool bdataempty = true;
649 u8 hoffset;
650 u8 tmpidx;
651 u8 hworden;
652 u8 word_cnts;
653
654 hoffset = (efuse_data >> 4) & 0x0F;
655 hworden = efuse_data & 0x0F;
656 word_cnts = efuse_calculate_word_cnts(hworden);
657
658 if (hoffset == offset) {
659 for (tmpidx = 0; tmpidx < word_cnts * 2; tmpidx++) {
660 if (efuse_one_byte_read(hw, *efuse_addr + 1 + tmpidx,
661 &efuse_data)) {
662 tmpdata[tmpidx] = efuse_data;
663 if (efuse_data != 0xff)
664 bdataempty = false;
665 }
666 }
667
668 if (bdataempty == false)
669 *readstate = PG_STATE_DATA;
670 else {
671 *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
672 *readstate = PG_STATE_HEADER;
673 }
674
675 } else {
676 *efuse_addr = *efuse_addr + (word_cnts * 2) + 1;
677 *readstate = PG_STATE_HEADER;
678 }
679}
680
681static int efuse_pg_packet_read(struct ieee80211_hw *hw, u8 offset, u8 *data)
682{
683 u8 readstate = PG_STATE_HEADER;
684
685 bool bcontinual = true;
686
687 u8 efuse_data, word_cnts = 0;
688 u16 efuse_addr = 0;
689 u8 hworden;
690 u8 tmpdata[8];
691
692 if (data == NULL)
693 return false;
694 if (offset > 15)
695 return false;
696
697 memset((void *)data, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
698 memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE * sizeof(u8));
699
700 while (bcontinual && (efuse_addr < EFUSE_MAX_SIZE)) {
701 if (readstate & PG_STATE_HEADER) {
702 if (efuse_one_byte_read(hw, efuse_addr, &efuse_data)
703 && (efuse_data != 0xFF))
704 efuse_read_data_case1(hw, &efuse_addr,
705 efuse_data,
706 offset, tmpdata,
707 &readstate);
708 else
709 bcontinual = false;
710 } else if (readstate & PG_STATE_DATA) {
711 efuse_word_enable_data_read(hworden, tmpdata, data);
712 efuse_addr = efuse_addr + (word_cnts * 2) + 1;
713 readstate = PG_STATE_HEADER;
714 }
715
716 }
717
718 if ((data[0] == 0xff) && (data[1] == 0xff) &&
719 (data[2] == 0xff) && (data[3] == 0xff) &&
720 (data[4] == 0xff) && (data[5] == 0xff) &&
721 (data[6] == 0xff) && (data[7] == 0xff))
722 return false;
723 else
724 return true;
725
726}
727
728static void efuse_write_data_case1(struct ieee80211_hw *hw, u16 *efuse_addr,
729 u8 efuse_data, u8 offset, int *bcontinual,
730 u8 *write_state, struct pgpkt_struct target_pkt,
731 int *repeat_times, int *bresult, u8 word_en)
732{
733 struct rtl_priv *rtlpriv = rtl_priv(hw);
734 struct pgpkt_struct tmp_pkt;
735 int bdataempty = true;
736 u8 originaldata[8 * sizeof(u8)];
737 u8 badworden = 0x0F;
738 u8 match_word_en, tmp_word_en;
739 u8 tmpindex;
740 u8 tmp_header = efuse_data;
741 u8 tmp_word_cnts;
742
743 tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
744 tmp_pkt.word_en = tmp_header & 0x0F;
745 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
746
747 if (tmp_pkt.offset != target_pkt.offset) {
748 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
749 *write_state = PG_STATE_HEADER;
750 } else {
751 for (tmpindex = 0; tmpindex < (tmp_word_cnts * 2); tmpindex++) {
752 u16 address = *efuse_addr + 1 + tmpindex;
753 if (efuse_one_byte_read(hw, address,
754 &efuse_data) && (efuse_data != 0xFF))
755 bdataempty = false;
756 }
757
758 if (bdataempty == false) {
759 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
760 *write_state = PG_STATE_HEADER;
761 } else {
762 match_word_en = 0x0F;
763 if (!((target_pkt.word_en & BIT(0)) |
764 (tmp_pkt.word_en & BIT(0))))
765 match_word_en &= (~BIT(0));
766
767 if (!((target_pkt.word_en & BIT(1)) |
768 (tmp_pkt.word_en & BIT(1))))
769 match_word_en &= (~BIT(1));
770
771 if (!((target_pkt.word_en & BIT(2)) |
772 (tmp_pkt.word_en & BIT(2))))
773 match_word_en &= (~BIT(2));
774
775 if (!((target_pkt.word_en & BIT(3)) |
776 (tmp_pkt.word_en & BIT(3))))
777 match_word_en &= (~BIT(3));
778
779 if ((match_word_en & 0x0F) != 0x0F) {
780 badworden = efuse_word_enable_data_write(
781 hw, *efuse_addr + 1,
782 tmp_pkt.word_en,
783 target_pkt.data);
784
785 if (0x0F != (badworden & 0x0F)) {
786 u8 reorg_offset = offset;
787 u8 reorg_worden = badworden;
788 efuse_pg_packet_write(hw, reorg_offset,
789 reorg_worden,
790 originaldata);
791 }
792
793 tmp_word_en = 0x0F;
794 if ((target_pkt.word_en & BIT(0)) ^
795 (match_word_en & BIT(0)))
796 tmp_word_en &= (~BIT(0));
797
798 if ((target_pkt.word_en & BIT(1)) ^
799 (match_word_en & BIT(1)))
800 tmp_word_en &= (~BIT(1));
801
802 if ((target_pkt.word_en & BIT(2)) ^
803 (match_word_en & BIT(2)))
804 tmp_word_en &= (~BIT(2));
805
806 if ((target_pkt.word_en & BIT(3)) ^
807 (match_word_en & BIT(3)))
808 tmp_word_en &= (~BIT(3));
809
810 if ((tmp_word_en & 0x0F) != 0x0F) {
811 *efuse_addr = efuse_get_current_size(hw);
812 target_pkt.offset = offset;
813 target_pkt.word_en = tmp_word_en;
814 } else
815 *bcontinual = false;
816 *write_state = PG_STATE_HEADER;
817 *repeat_times += 1;
818 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
819 *bcontinual = false;
820 *bresult = false;
821 }
822 } else {
823 *efuse_addr += (2 * tmp_word_cnts) + 1;
824 target_pkt.offset = offset;
825 target_pkt.word_en = word_en;
826 *write_state = PG_STATE_HEADER;
827 }
828 }
829 }
830 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse PG_STATE_HEADER-1\n"));
831}
832
833static void efuse_write_data_case2(struct ieee80211_hw *hw, u16 *efuse_addr,
834 int *bcontinual, u8 *write_state,
835 struct pgpkt_struct target_pkt,
836 int *repeat_times, int *bresult)
837{
838 struct rtl_priv *rtlpriv = rtl_priv(hw);
839 struct pgpkt_struct tmp_pkt;
840 u8 pg_header;
841 u8 tmp_header;
842 u8 originaldata[8 * sizeof(u8)];
843 u8 tmp_word_cnts;
844 u8 badworden = 0x0F;
845
846 pg_header = ((target_pkt.offset << 4) & 0xf0) | target_pkt.word_en;
847 efuse_one_byte_write(hw, *efuse_addr, pg_header);
848 efuse_one_byte_read(hw, *efuse_addr, &tmp_header);
849
850 if (tmp_header == pg_header)
851 *write_state = PG_STATE_DATA;
852 else if (tmp_header == 0xFF) {
853 *write_state = PG_STATE_HEADER;
854 *repeat_times += 1;
855 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
856 *bcontinual = false;
857 *bresult = false;
858 }
859 } else {
860 tmp_pkt.offset = (tmp_header >> 4) & 0x0F;
861 tmp_pkt.word_en = tmp_header & 0x0F;
862
863 tmp_word_cnts = efuse_calculate_word_cnts(tmp_pkt.word_en);
864
865 memset((void *)originaldata, 0xff, 8 * sizeof(u8));
866
867 if (efuse_pg_packet_read(hw, tmp_pkt.offset, originaldata)) {
868 badworden = efuse_word_enable_data_write(hw,
869 *efuse_addr + 1, tmp_pkt.word_en,
870 originaldata);
871
872 if (0x0F != (badworden & 0x0F)) {
873 u8 reorg_offset = tmp_pkt.offset;
874 u8 reorg_worden = badworden;
875 efuse_pg_packet_write(hw, reorg_offset,
876 reorg_worden,
877 originaldata);
878 *efuse_addr = efuse_get_current_size(hw);
879 } else
880 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2)
881 + 1;
882 } else
883 *efuse_addr = *efuse_addr + (tmp_word_cnts * 2) + 1;
884
885 *write_state = PG_STATE_HEADER;
886 *repeat_times += 1;
887 if (*repeat_times > EFUSE_REPEAT_THRESHOLD_) {
888 *bcontinual = false;
889 *bresult = false;
890 }
891
892 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
893 ("efuse PG_STATE_HEADER-2\n"));
894 }
895}
896
897static int efuse_pg_packet_write(struct ieee80211_hw *hw,
898 u8 offset, u8 word_en, u8 *data)
899{
900 struct rtl_priv *rtlpriv = rtl_priv(hw);
901 struct pgpkt_struct target_pkt;
902 u8 write_state = PG_STATE_HEADER;
903 int bcontinual = true, bdataempty = true, bresult = true;
904 u16 efuse_addr = 0;
905 u8 efuse_data;
906 u8 target_word_cnts = 0;
907 u8 badworden = 0x0F;
908 static int repeat_times;
909
910 if (efuse_get_current_size(hw) >=
911 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) {
912 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
913 ("efuse_pg_packet_write error\n"));
914 return false;
915 }
916
917 target_pkt.offset = offset;
918 target_pkt.word_en = word_en;
919
920 memset((void *)target_pkt.data, 0xFF, 8 * sizeof(u8));
921
922 efuse_word_enable_data_read(word_en, data, target_pkt.data);
923 target_word_cnts = efuse_calculate_word_cnts(target_pkt.word_en);
924
925 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG, ("efuse Power ON\n"));
926
927 while (bcontinual && (efuse_addr <
928 (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES))) {
929
930 if (write_state == PG_STATE_HEADER) {
931 bdataempty = true;
932 badworden = 0x0F;
933 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
934 ("efuse PG_STATE_HEADER\n"));
935
936 if (efuse_one_byte_read(hw, efuse_addr, &efuse_data) &&
937 (efuse_data != 0xFF))
938 efuse_write_data_case1(hw, &efuse_addr,
939 efuse_data, offset,
940 &bcontinual,
941 &write_state, target_pkt,
942 &repeat_times, &bresult,
943 word_en);
944 else
945 efuse_write_data_case2(hw, &efuse_addr,
946 &bcontinual,
947 &write_state,
948 target_pkt,
949 &repeat_times,
950 &bresult);
951
952 } else if (write_state == PG_STATE_DATA) {
953 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
954 ("efuse PG_STATE_DATA\n"));
955 badworden = 0x0f;
956 badworden =
957 efuse_word_enable_data_write(hw, efuse_addr + 1,
958 target_pkt.word_en,
959 target_pkt.data);
960
961 if ((badworden & 0x0F) == 0x0F) {
962 bcontinual = false;
963 } else {
964 efuse_addr =
965 efuse_addr + (2 * target_word_cnts) + 1;
966
967 target_pkt.offset = offset;
968 target_pkt.word_en = badworden;
969 target_word_cnts =
970 efuse_calculate_word_cnts(target_pkt.
971 word_en);
972 write_state = PG_STATE_HEADER;
973 repeat_times++;
974 if (repeat_times > EFUSE_REPEAT_THRESHOLD_) {
975 bcontinual = false;
976 bresult = false;
977 }
978 RTPRINT(rtlpriv, FEEPROM, EFUSE_PG,
979 ("efuse PG_STATE_HEADER-3\n"));
980 }
981 }
982 }
983
984 if (efuse_addr >= (EFUSE_MAX_SIZE - EFUSE_OOB_PROTECT_BYTES)) {
985 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
986 ("efuse_addr(%#x) Out of size!!\n", efuse_addr));
987 }
988
989 return true;
990}
991
992static void efuse_word_enable_data_read(u8 word_en,
993 u8 *sourdata, u8 *targetdata)
994{
995 if (!(word_en & BIT(0))) {
996 targetdata[0] = sourdata[0];
997 targetdata[1] = sourdata[1];
998 }
999
1000 if (!(word_en & BIT(1))) {
1001 targetdata[2] = sourdata[2];
1002 targetdata[3] = sourdata[3];
1003 }
1004
1005 if (!(word_en & BIT(2))) {
1006 targetdata[4] = sourdata[4];
1007 targetdata[5] = sourdata[5];
1008 }
1009
1010 if (!(word_en & BIT(3))) {
1011 targetdata[6] = sourdata[6];
1012 targetdata[7] = sourdata[7];
1013 }
1014}
1015
1016static u8 efuse_word_enable_data_write(struct ieee80211_hw *hw,
1017 u16 efuse_addr, u8 word_en, u8 *data)
1018{
1019 struct rtl_priv *rtlpriv = rtl_priv(hw);
1020 u16 tmpaddr;
1021 u16 start_addr = efuse_addr;
1022 u8 badworden = 0x0F;
1023 u8 tmpdata[8];
1024
1025 memset((void *)tmpdata, 0xff, PGPKT_DATA_SIZE);
1026 RT_TRACE(rtlpriv, COMP_EFUSE, DBG_LOUD,
1027 ("word_en = %x efuse_addr=%x\n", word_en, efuse_addr));
1028
1029 if (!(word_en & BIT(0))) {
1030 tmpaddr = start_addr;
1031 efuse_one_byte_write(hw, start_addr++, data[0]);
1032 efuse_one_byte_write(hw, start_addr++, data[1]);
1033
1034 efuse_one_byte_read(hw, tmpaddr, &tmpdata[0]);
1035 efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[1]);
1036 if ((data[0] != tmpdata[0]) || (data[1] != tmpdata[1]))
1037 badworden &= (~BIT(0));
1038 }
1039
1040 if (!(word_en & BIT(1))) {
1041 tmpaddr = start_addr;
1042 efuse_one_byte_write(hw, start_addr++, data[2]);
1043 efuse_one_byte_write(hw, start_addr++, data[3]);
1044
1045 efuse_one_byte_read(hw, tmpaddr, &tmpdata[2]);
1046 efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[3]);
1047 if ((data[2] != tmpdata[2]) || (data[3] != tmpdata[3]))
1048 badworden &= (~BIT(1));
1049 }
1050
1051 if (!(word_en & BIT(2))) {
1052 tmpaddr = start_addr;
1053 efuse_one_byte_write(hw, start_addr++, data[4]);
1054 efuse_one_byte_write(hw, start_addr++, data[5]);
1055
1056 efuse_one_byte_read(hw, tmpaddr, &tmpdata[4]);
1057 efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[5]);
1058 if ((data[4] != tmpdata[4]) || (data[5] != tmpdata[5]))
1059 badworden &= (~BIT(2));
1060 }
1061
1062 if (!(word_en & BIT(3))) {
1063 tmpaddr = start_addr;
1064 efuse_one_byte_write(hw, start_addr++, data[6]);
1065 efuse_one_byte_write(hw, start_addr++, data[7]);
1066
1067 efuse_one_byte_read(hw, tmpaddr, &tmpdata[6]);
1068 efuse_one_byte_read(hw, tmpaddr + 1, &tmpdata[7]);
1069 if ((data[6] != tmpdata[6]) || (data[7] != tmpdata[7]))
1070 badworden &= (~BIT(3));
1071 }
1072
1073 return badworden;
1074}
1075
1076static void efuse_power_switch(struct ieee80211_hw *hw, u8 bwrite, u8 pwrstate)
1077{
1078 struct rtl_priv *rtlpriv = rtl_priv(hw);
1079 u8 tempval;
1080 u16 tmpV16;
1081
1082 if (pwrstate == true) {
1083 tmpV16 = rtl_read_word(rtlpriv,
1084 rtlpriv->cfg->maps[SYS_ISO_CTRL]);
1085 if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_PWC_EV12V])) {
1086 tmpV16 |= rtlpriv->cfg->maps[EFUSE_PWC_EV12V];
1087 rtl_write_word(rtlpriv,
1088 rtlpriv->cfg->maps[SYS_ISO_CTRL],
1089 tmpV16);
1090 }
1091
1092 tmpV16 = rtl_read_word(rtlpriv,
1093 rtlpriv->cfg->maps[SYS_FUNC_EN]);
1094 if (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_FEN_ELDR])) {
1095 tmpV16 |= rtlpriv->cfg->maps[EFUSE_FEN_ELDR];
1096 rtl_write_word(rtlpriv,
1097 rtlpriv->cfg->maps[SYS_FUNC_EN], tmpV16);
1098 }
1099
1100 tmpV16 = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_CLK]);
1101 if ((!(tmpV16 & rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN])) ||
1102 (!(tmpV16 & rtlpriv->cfg->maps[EFUSE_ANA8M]))) {
1103 tmpV16 |= (rtlpriv->cfg->maps[EFUSE_LOADER_CLK_EN] |
1104 rtlpriv->cfg->maps[EFUSE_ANA8M]);
1105 rtl_write_word(rtlpriv,
1106 rtlpriv->cfg->maps[SYS_CLK], tmpV16);
1107 }
1108 }
1109
1110 if (pwrstate == true) {
1111 if (bwrite == true) {
1112 tempval = rtl_read_byte(rtlpriv,
1113 rtlpriv->cfg->maps[EFUSE_TEST] +
1114 3);
1115 tempval &= 0x0F;
1116 tempval |= (VOLTAGE_V25 << 4);
1117 rtl_write_byte(rtlpriv,
1118 rtlpriv->cfg->maps[EFUSE_TEST] + 3,
1119 (tempval | 0x80));
1120 }
1121
1122 } else {
1123 if (bwrite == true) {
1124 tempval = rtl_read_byte(rtlpriv,
1125 rtlpriv->cfg->maps[EFUSE_TEST] +
1126 3);
1127 rtl_write_byte(rtlpriv,
1128 rtlpriv->cfg->maps[EFUSE_TEST] + 3,
1129 (tempval & 0x7F));
1130 }
1131
1132 }
1133
1134}
1135
1136static u16 efuse_get_current_size(struct ieee80211_hw *hw)
1137{
1138 int bcontinual = true;
1139 u16 efuse_addr = 0;
1140 u8 hoffset, hworden;
1141 u8 efuse_data, word_cnts;
1142
1143 while (bcontinual && efuse_one_byte_read(hw, efuse_addr, &efuse_data)
1144 && (efuse_addr < EFUSE_MAX_SIZE)) {
1145 if (efuse_data != 0xFF) {
1146 hoffset = (efuse_data >> 4) & 0x0F;
1147 hworden = efuse_data & 0x0F;
1148 word_cnts = efuse_calculate_word_cnts(hworden);
1149 efuse_addr = efuse_addr + (word_cnts * 2) + 1;
1150 } else {
1151 bcontinual = false;
1152 }
1153 }
1154
1155 return efuse_addr;
1156}
1157
1158static u8 efuse_calculate_word_cnts(u8 word_en)
1159{
1160 u8 word_cnts = 0;
1161 if (!(word_en & BIT(0)))
1162 word_cnts++;
1163 if (!(word_en & BIT(1)))
1164 word_cnts++;
1165 if (!(word_en & BIT(2)))
1166 word_cnts++;
1167 if (!(word_en & BIT(3)))
1168 word_cnts++;
1169 return word_cnts;
1170}
1171
1172void efuse_reset_loader(struct ieee80211_hw *hw)
1173{
1174 struct rtl_priv *rtlpriv = rtl_priv(hw);
1175 u16 tmp_u2b;
1176
1177 tmp_u2b = rtl_read_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN]);
1178 rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
1179 (tmp_u2b & ~(BIT(12))));
1180 udelay(10000);
1181 rtl_write_word(rtlpriv, rtlpriv->cfg->maps[SYS_FUNC_EN],
1182 (tmp_u2b | BIT(12)));
1183 udelay(10000);
1184}
1185
1186bool efuse_program_map(struct ieee80211_hw *hw, char *p_filename, u8 tabletype)
1187{
1188 return true;
1189}
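The efuse content walked by the routines above is a sequence of PG packets: a one-byte header whose high nibble selects the map section (offset) and whose low nibble is the word-enable mask, followed by two data bytes for every bit that is cleared in the mask. A minimal sketch of the header decoding, restating what efuse_pg_packet_read() and efuse_calculate_word_cnts() do (header is an assumed local holding the byte just read):

	u8 offset = (header >> 4) & 0x0F;	/* map section, 0..15 */
	u8 word_en = header & 0x0F;		/* cleared bit => word present */
	u8 word_cnts = 0;
	int i;

	for (i = 0; i < 4; i++)
		if (!(word_en & BIT(i)))
			word_cnts++;		/* as efuse_calculate_word_cnts() */

	/* The packet occupies 1 + word_cnts * 2 bytes, which is exactly how
	 * read_efuse() and efuse_get_current_size() advance efuse_addr. */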
diff --git a/drivers/net/wireless/rtlwifi/efuse.h b/drivers/net/wireless/rtlwifi/efuse.h
new file mode 100644
index 000000000000..2d39a4df181b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/efuse.h
@@ -0,0 +1,124 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL_EFUSE_H_
31#define __RTL_EFUSE_H_
32
33#define EFUSE_REAL_CONTENT_LEN 512
34#define EFUSE_MAP_LEN 128
35#define EFUSE_MAX_SECTION 16
36#define EFUSE_MAX_WORD_UNIT 4
37
38#define EFUSE_INIT_MAP 0
39#define EFUSE_MODIFY_MAP 1
40
41#define PG_STATE_HEADER 0x01
42#define PG_STATE_WORD_0 0x02
43#define PG_STATE_WORD_1 0x04
44#define PG_STATE_WORD_2 0x08
45#define PG_STATE_WORD_3 0x10
46#define PG_STATE_DATA 0x20
47
48#define PG_SWBYTE_H 0x01
49#define PG_SWBYTE_L 0x02
50
51#define _POWERON_DELAY_
52#define _PRE_EXECUTE_READ_CMD_
53
54#define EFUSE_REPEAT_THRESHOLD_ 3
55
56struct efuse_map {
57 u8 offset;
58 u8 word_start;
59 u8 byte_start;
60 u8 byte_cnts;
61};
62
63struct pgpkt_struct {
64 u8 offset;
65 u8 word_en;
66 u8 data[8];
67};
68
69enum efuse_data_item {
70 EFUSE_CHIP_ID = 0,
71 EFUSE_LDO_SETTING,
72 EFUSE_CLK_SETTING,
73 EFUSE_SDIO_SETTING,
74 EFUSE_CCCR,
75 EFUSE_SDIO_MODE,
76 EFUSE_OCR,
77 EFUSE_F0CIS,
78 EFUSE_F1CIS,
79 EFUSE_MAC_ADDR,
80 EFUSE_EEPROM_VER,
81 EFUSE_CHAN_PLAN,
82 EFUSE_TXPW_TAB
83};
84
85enum {
86 VOLTAGE_V25 = 0x03,
87 LDOE25_SHIFT = 28,
88};
89
90struct efuse_priv {
91 u8 id[2];
92 u8 ldo_setting[2];
93 u8 clk_setting[2];
94 u8 cccr;
95 u8 sdio_mode;
96 u8 ocr[3];
97 u8 cis0[17];
98 u8 cis1[48];
99 u8 mac_addr[6];
100 u8 eeprom_verno;
101 u8 channel_plan;
102 u8 tx_power_b[14];
103 u8 tx_power_g[14];
104};
105
106extern void efuse_initialize(struct ieee80211_hw *hw);
107extern u8 efuse_read_1byte(struct ieee80211_hw *hw, u16 address);
108extern void efuse_write_1byte(struct ieee80211_hw *hw, u16 address, u8 value);
109extern void read_efuse(struct ieee80211_hw *hw, u16 _offset,
110 u16 _size_byte, u8 *pbuf);
111extern void efuse_shadow_read(struct ieee80211_hw *hw, u8 type,
112 u16 offset, u32 *value);
113extern void efuse_shadow_write(struct ieee80211_hw *hw, u8 type,
114 u16 offset, u32 value);
115extern bool efuse_shadow_update(struct ieee80211_hw *hw);
116extern bool efuse_shadow_update_chk(struct ieee80211_hw *hw);
117extern void rtl_efuse_shadow_map_update(struct ieee80211_hw *hw);
118extern void efuse_force_write_vendor_Id(struct ieee80211_hw *hw);
119extern void efuse_re_pg_section(struct ieee80211_hw *hw, u8 section_idx);
120extern bool efuse_program_map(struct ieee80211_hw *hw,
121 char *p_filename, u8 tabletype);
122extern void efuse_reset_loader(struct ieee80211_hw *hw);
123
124#endif
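The shadow helpers declared above operate only on the RAM copy in efuse_map[EFUSE_MODIFY_MAP]; the hardware is untouched until efuse_shadow_update() programs the words that differ from efuse_map[EFUSE_INIT_MAP]. A minimal sketch of the intended calling sequence (the offset and value below are placeholders, not real map fields):

	u32 value;

	/* Fill both shadow maps from the chip (or 0xFF on autoload failure). */
	rtl_efuse_shadow_map_update(hw);

	/* The type argument selects a 1-, 2- or 4-byte access into the map. */
	efuse_shadow_read(hw, 2, 0x00, &value);
	efuse_shadow_write(hw, 1, 0x16, 0xEC);

	/* Returns false (and re-syncs the shadow map) if the efuse lacks room. */
	efuse_shadow_update(hw);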
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
new file mode 100644
index 000000000000..0fa36aa6701a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -0,0 +1,1945 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "core.h"
31#include "wifi.h"
32#include "pci.h"
33#include "base.h"
34#include "ps.h"
35
36static const u16 pcibridge_vendors[PCI_BRIDGE_VENDOR_MAX] = {
37 INTEL_VENDOR_ID,
38 ATI_VENDOR_ID,
39 AMD_VENDOR_ID,
40 SIS_VENDOR_ID
41};
42
43/* Update PCI dependent default settings*/
44static void _rtl_pci_update_default_setting(struct ieee80211_hw *hw)
45{
46 struct rtl_priv *rtlpriv = rtl_priv(hw);
47 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
48 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
49 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
50 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
51
52 ppsc->reg_rfps_level = 0;
53 ppsc->b_support_aspm = 0;
54
55 /*Update PCI ASPM setting */
56 ppsc->const_amdpci_aspm = rtlpci->const_amdpci_aspm;
57 switch (rtlpci->const_pci_aspm) {
58 case 0:
59 /*No ASPM */
60 break;
61
62 case 1:
63 /*ASPM dynamically enabled/disabled. */
64 ppsc->reg_rfps_level |= RT_RF_LPS_LEVEL_ASPM;
65 break;
66
67 case 2:
68 /*ASPM with Clock Req dynamically enabled/disabled. */
69 ppsc->reg_rfps_level |= (RT_RF_LPS_LEVEL_ASPM |
70 RT_RF_OFF_LEVL_CLK_REQ);
71 break;
72
73 case 3:
74 /*
75 * Always enable ASPM and Clock Req
76 * from initialization to halt.
77 * */
78 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM);
79 ppsc->reg_rfps_level |= (RT_RF_PS_LEVEL_ALWAYS_ASPM |
80 RT_RF_OFF_LEVL_CLK_REQ);
81 break;
82
83 case 4:
84 /*
85 * Always enable ASPM without Clock Req
86 * from initialization to halt.
87 * */
88 ppsc->reg_rfps_level &= ~(RT_RF_LPS_LEVEL_ASPM |
89 RT_RF_OFF_LEVL_CLK_REQ);
90 ppsc->reg_rfps_level |= RT_RF_PS_LEVEL_ALWAYS_ASPM;
91 break;
92 }
93
94 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
95
96 /*Update Radio OFF setting */
97 switch (rtlpci->const_hwsw_rfoff_d3) {
98 case 1:
99 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
100 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
101 break;
102
103 case 2:
104 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM)
105 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_ASPM;
106 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_HALT_NIC;
107 break;
108
109 case 3:
110 ppsc->reg_rfps_level |= RT_RF_OFF_LEVL_PCI_D3;
111 break;
112 }
113
114 /*Set HW definition to determine if it supports ASPM. */
115 switch (rtlpci->const_support_pciaspm) {
116 case 0:{
117 /*Not support ASPM. */
118 bool b_support_aspm = false;
119 ppsc->b_support_aspm = b_support_aspm;
120 break;
121 }
122 case 1:{
123 /*Support ASPM. */
124 bool b_support_aspm = true;
125 bool b_support_backdoor = true;
126 ppsc->b_support_aspm = b_support_aspm;
127
128 /*if(priv->oem_id == RT_CID_TOSHIBA &&
129 !priv->ndis_adapter.amd_l1_patch)
130 b_support_backdoor = false; */
131
132 ppsc->b_support_backdoor = b_support_backdoor;
133
134 break;
135 }
136 case 2:
137 /*ASPM value set by chipset. */
138 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL) {
139 bool b_support_aspm = true;
140 ppsc->b_support_aspm = b_support_aspm;
141 }
142 break;
143 default:
144 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
145 ("switch case not process\n"));
146 break;
147 }
148}
149
150static bool _rtl_pci_platform_switch_device_pci_aspm(
151 struct ieee80211_hw *hw,
152 u8 value)
153{
154 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
155 bool bresult = false;
156
157 value |= 0x40;
158
159 pci_write_config_byte(rtlpci->pdev, 0x80, value);
160
161 return bresult;
162}
163
164/* Write 0x01 to enable the clk request; write 0x0 to disable it. */
165static bool _rtl_pci_switch_clk_req(struct ieee80211_hw *hw, u8 value)
166{
167 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
168 u8 buffer;
169 bool bresult = false;
170
171 buffer = value;
172
173 pci_write_config_byte(rtlpci->pdev, 0x81, value);
174 bresult = true;
175
176 return bresult;
177}
178
179/*Disable RTL8192SE ASPM & Disable Pci Bridge ASPM*/
180static void rtl_pci_disable_aspm(struct ieee80211_hw *hw)
181{
182 struct rtl_priv *rtlpriv = rtl_priv(hw);
183 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
184 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
185 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
186 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
187 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
188 u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
189 /*Retrieve original configuration settings. */
190 u8 linkctrl_reg = pcipriv->ndis_adapter.linkctrl_reg;
191 u16 pcibridge_linkctrlreg = pcipriv->ndis_adapter.
192 pcibridge_linkctrlreg;
193 u16 aspmlevel = 0;
194
195 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
196 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
197 ("PCI(Bridge) UNKNOWN.\n"));
198
199 return;
200 }
201
202 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
203 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
204 _rtl_pci_switch_clk_req(hw, 0x0);
205 }
206
207 if (1) {
208 /* To ensure the device will be in the L0 state after an I/O. */
209 u8 tmp_u1b;
210 pci_read_config_byte(rtlpci->pdev, 0x80, &tmp_u1b);
211 }
212
213 /*Set corresponding value. */
214 aspmlevel |= BIT(0) | BIT(1);
215 linkctrl_reg &= ~aspmlevel;
216 pcibridge_linkctrlreg &= ~(BIT(0) | BIT(1));
217
218 _rtl_pci_platform_switch_device_pci_aspm(hw, linkctrl_reg);
219 udelay(50);
220
221 /*4 Disable Pci Bridge ASPM */
222 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
223 pcicfg_addrport + (num4bytes << 2));
224 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, pcibridge_linkctrlreg);
225
226 udelay(50);
227
228}
229
230/*
231 *Enable RTL8192SE ASPM & Enable Pci Bridge ASPM for
232 *power saving. We should follow the sequence to enable
233 *RTL8192SE first then enable Pci Bridge ASPM
234 *or the system will show bluescreen.
235 */
236static void rtl_pci_enable_aspm(struct ieee80211_hw *hw)
237{
238 struct rtl_priv *rtlpriv = rtl_priv(hw);
239 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
240 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
241 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
242 u8 pcibridge_busnum = pcipriv->ndis_adapter.pcibridge_busnum;
243 u8 pcibridge_devnum = pcipriv->ndis_adapter.pcibridge_devnum;
244 u8 pcibridge_funcnum = pcipriv->ndis_adapter.pcibridge_funcnum;
245 u8 pcibridge_vendor = pcipriv->ndis_adapter.pcibridge_vendor;
246 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
247 u8 num4bytes = pcipriv->ndis_adapter.num4bytes;
248 u16 aspmlevel;
249 u8 u_pcibridge_aspmsetting;
250 u8 u_device_aspmsetting;
251
252 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_UNKNOWN) {
253 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
254 ("PCI(Bridge) UNKNOWN.\n"));
255 return;
256 }
257
258 /*4 Enable Pci Bridge ASPM */
259 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
260 pcicfg_addrport + (num4bytes << 2));
261
262 u_pcibridge_aspmsetting =
263 pcipriv->ndis_adapter.pcibridge_linkctrlreg |
264 rtlpci->const_hostpci_aspm_setting;
265
266 if (pcibridge_vendor == PCI_BRIDGE_VENDOR_INTEL)
267 u_pcibridge_aspmsetting &= ~BIT(0);
268
269 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, u_pcibridge_aspmsetting);
270
271 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
272 ("PlatformEnableASPM():PciBridge busnumber[%x], "
273 "DevNumbe[%x], funcnumber[%x], Write reg[%x] = %x\n",
274 pcibridge_busnum, pcibridge_devnum, pcibridge_funcnum,
275 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10),
276 u_pcibridge_aspmsetting));
277
278 udelay(50);
279
280 /*Get ASPM level (with/without Clock Req) */
281 aspmlevel = rtlpci->const_devicepci_aspm_setting;
282 u_device_aspmsetting = pcipriv->ndis_adapter.linkctrl_reg;
283
284 /*_rtl_pci_platform_switch_device_pci_aspm(dev,*/
285 /*(priv->ndis_adapter.linkctrl_reg | ASPMLevel)); */
286
287 u_device_aspmsetting |= aspmlevel;
288
289 _rtl_pci_platform_switch_device_pci_aspm(hw, u_device_aspmsetting);
290
291 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_CLK_REQ) {
292 _rtl_pci_switch_clk_req(hw, (ppsc->reg_rfps_level &
293 RT_RF_OFF_LEVL_CLK_REQ) ? 1 : 0);
294 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_CLK_REQ);
295 }
296 udelay(200);
297}
298
299static bool rtl_pci_get_amd_l1_patch(struct ieee80211_hw *hw)
300{
301 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
302 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
303
304 bool status = false;
305 u8 offset_e0;
306 unsigned offset_e4;
307
308 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
309 pcicfg_addrport + 0xE0);
310 rtl_pci_raw_write_port_uchar(PCI_CONF_DATA, 0xA0);
311
312 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
313 pcicfg_addrport + 0xE0);
314 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &offset_e0);
315
316 if (offset_e0 == 0xA0) {
317 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
318 pcicfg_addrport + 0xE4);
319 rtl_pci_raw_read_port_ulong(PCI_CONF_DATA, &offset_e4);
320 if (offset_e4 & BIT(23))
321 status = true;
322 }
323
324 return status;
325}
326
327static void rtl_pci_get_linkcontrol_field(struct ieee80211_hw *hw)
328{
329 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
330 u8 capabilityoffset = pcipriv->ndis_adapter.pcibridge_pciehdr_offset;
331 u32 pcicfg_addrport = pcipriv->ndis_adapter.pcicfg_addrport;
332 u8 linkctrl_reg;
333 u8 num4bBytes;
334
335 num4bBytes = (capabilityoffset + 0x10) / 4;
336
337 /*Read Link Control Register */
338 rtl_pci_raw_write_port_ulong(PCI_CONF_ADDRESS,
339 pcicfg_addrport + (num4bBytes << 2));
340 rtl_pci_raw_read_port_uchar(PCI_CONF_DATA, &linkctrl_reg);
341
342 pcipriv->ndis_adapter.pcibridge_linkctrlreg = linkctrl_reg;
343}
344
345static void rtl_pci_parse_configuration(struct pci_dev *pdev,
346 struct ieee80211_hw *hw)
347{
348 struct rtl_priv *rtlpriv = rtl_priv(hw);
349 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
350
351 u8 tmp;
352 int pos;
353 u8 linkctrl_reg;
354
355 /*Link Control Register */
356 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
357 pci_read_config_byte(pdev, pos + PCI_EXP_LNKCTL, &linkctrl_reg);
358 pcipriv->ndis_adapter.linkctrl_reg = linkctrl_reg;
359
360 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
361 ("Link Control Register =%x\n",
362 pcipriv->ndis_adapter.linkctrl_reg));
363
364 pci_read_config_byte(pdev, 0x98, &tmp);
365 tmp |= BIT(4);
366 pci_write_config_byte(pdev, 0x98, tmp);
367
368 tmp = 0x17;
369 pci_write_config_byte(pdev, 0x70f, tmp);
370}
371
372static void _rtl_pci_initialize_adapter_common(struct ieee80211_hw *hw)
373{
374 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
375
376 _rtl_pci_update_default_setting(hw);
377
378 if (ppsc->reg_rfps_level & RT_RF_PS_LEVEL_ALWAYS_ASPM) {
379 /*Always enable ASPM & Clock Req. */
380 rtl_pci_enable_aspm(hw);
381 RT_SET_PS_LEVEL(ppsc, RT_RF_PS_LEVEL_ALWAYS_ASPM);
382 }
383
384}
385
386static void rtl_pci_init_aspm(struct ieee80211_hw *hw)
387{
388 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
389
390 /* Disable ASPM for AMD by default */
391 rtlpci->const_amdpci_aspm = 0;
392
393 /*
394 * ASPM PS mode.
395 * 0 - Disable ASPM,
396 * 1 - Enable ASPM without Clock Req,
397 * 2 - Enable ASPM with Clock Req,
398 * 3 - Always Enable ASPM with Clock Req,
399 * 4 - Always Enable ASPM without Clock Req.
400 * set default to RTL8192CE:3 RTL8192E:2
401 * */
402 rtlpci->const_pci_aspm = 3;
403
404 /*Setting for PCI-E device */
405 rtlpci->const_devicepci_aspm_setting = 0x03;
406
407 /*Setting for PCI-E bridge */
408 rtlpci->const_hostpci_aspm_setting = 0x02;
409
410 /*
411 * In Hw/Sw Radio Off situation.
412 * 0 - Default,
413 * 1 - From ASPM setting without low Mac Pwr,
414 * 2 - From ASPM setting with low Mac Pwr,
415 * 3 - Bus D3
416 * set default to RTL8192CE:0 RTL8192SE:2
417 */
418 rtlpci->const_hwsw_rfoff_d3 = 0;
419
420 /*
421 * This setting works for those devices with
422 * backdoor ASPM setting such as EPHY setting.
423 * 0 - Not support ASPM,
424 * 1 - Support ASPM,
425 * 2 - According to chipset.
426 */
427 rtlpci->const_support_pciaspm = 1;
428
429 _rtl_pci_initialize_adapter_common(hw);
430}
431
432static void _rtl_pci_io_handler_init(struct device *dev,
433 struct ieee80211_hw *hw)
434{
435 struct rtl_priv *rtlpriv = rtl_priv(hw);
436
437 rtlpriv->io.dev = dev;
438
439 rtlpriv->io.write8_async = pci_write8_async;
440 rtlpriv->io.write16_async = pci_write16_async;
441 rtlpriv->io.write32_async = pci_write32_async;
442
443 rtlpriv->io.read8_sync = pci_read8_sync;
444 rtlpriv->io.read16_sync = pci_read16_sync;
445 rtlpriv->io.read32_sync = pci_read32_sync;
446
447}
448
449static void _rtl_pci_io_handler_release(struct ieee80211_hw *hw)
450{
451}
452
453static void _rtl_pci_tx_isr(struct ieee80211_hw *hw, int prio)
454{
455 struct rtl_priv *rtlpriv = rtl_priv(hw);
456 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
457
458 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
459
460 while (skb_queue_len(&ring->queue)) {
461 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
462 struct sk_buff *skb;
463 struct ieee80211_tx_info *info;
464
465 u8 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) entry, true,
466 HW_DESC_OWN);
467
468 /*
469 *beacon packet will only use the first
470 *descriptor by default, and the own bit may not
471 *be cleared by the hardware
472 */
473 if (own)
474 return;
475 ring->idx = (ring->idx + 1) % ring->entries;
476
477 skb = __skb_dequeue(&ring->queue);
478 pci_unmap_single(rtlpci->pdev,
479 le32_to_cpu(rtlpriv->cfg->ops->
480 get_desc((u8 *) entry, true,
481 HW_DESC_TXBUFF_ADDR)),
482 skb->len, PCI_DMA_TODEVICE);
483
484 RT_TRACE(rtlpriv, (COMP_INTR | COMP_SEND), DBG_TRACE,
485 ("new ring->idx:%d, "
486 "free: skb_queue_len:%d, free: seq:%x\n",
487 ring->idx,
488 skb_queue_len(&ring->queue),
489 *(u16 *) (skb->data + 22)));
490
491 info = IEEE80211_SKB_CB(skb);
492 ieee80211_tx_info_clear_status(info);
493
494 info->flags |= IEEE80211_TX_STAT_ACK;
495 /*info->status.rates[0].count = 1; */
496
497 ieee80211_tx_status_irqsafe(hw, skb);
498
499 if ((ring->entries - skb_queue_len(&ring->queue))
500 == 2) {
501
502 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
503 ("more desc left, wake"
504 "skb_queue@%d,ring->idx = %d,"
505 "skb_queue_len = 0x%d\n",
506 prio, ring->idx,
507 skb_queue_len(&ring->queue)));
508
509 ieee80211_wake_queue(hw,
510 skb_get_queue_mapping
511 (skb));
512 }
513
514 skb = NULL;
515 }
516
517 if (((rtlpriv->link_info.num_rx_inperiod +
518 rtlpriv->link_info.num_tx_inperiod) > 8) ||
519 (rtlpriv->link_info.num_rx_inperiod > 2)) {
520 rtl_lps_leave(hw);
521 }
522}
523
524static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
525{
526 struct rtl_priv *rtlpriv = rtl_priv(hw);
527 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
528 int rx_queue_idx = RTL_PCI_RX_MPDU_QUEUE;
529
530 struct ieee80211_rx_status rx_status = { 0 };
531 unsigned int count = rtlpci->rxringcount;
532 u8 own;
533 u8 tmp_one;
534 u32 bufferaddress;
535 bool unicast = false;
536
537 struct rtl_stats stats = {
538 .signal = 0,
539 .noise = -98,
540 .rate = 0,
541 };
542
543 /*RX NORMAL PKT */
544 while (count--) {
545 /*rx descriptor */
546 struct rtl_rx_desc *pdesc = &rtlpci->rx_ring[rx_queue_idx].desc[
547 rtlpci->rx_ring[rx_queue_idx].idx];
548 /*rx pkt */
549 struct sk_buff *skb = rtlpci->rx_ring[rx_queue_idx].rx_buf[
550 rtlpci->rx_ring[rx_queue_idx].idx];
551
552 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
553 false, HW_DESC_OWN);
554
555 if (own) {
556 /*wait for data to be filled by hardware */
557 return;
558 } else {
559 struct ieee80211_hdr *hdr;
560 u16 fc;
561 struct sk_buff *new_skb = NULL;
562
563 rtlpriv->cfg->ops->query_rx_desc(hw, &stats,
564 &rx_status,
565 (u8 *) pdesc, skb);
566
567 pci_unmap_single(rtlpci->pdev,
568 *((dma_addr_t *) skb->cb),
569 rtlpci->rxbuffersize,
570 PCI_DMA_FROMDEVICE);
571
572 skb_put(skb, rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
573 false,
574 HW_DESC_RXPKT_LEN));
575 skb_reserve(skb,
576 stats.rx_drvinfo_size + stats.rx_bufshift);
577
578 /*
579 *NOTICE: this must not be done here for mac80211,
580 *it is done in the mac80211 code; if it were done
581 *here, secured DHCP would fail:
582 *skb_trim(skb, skb->len - 4);
583 */
584
585 hdr = (struct ieee80211_hdr *)(skb->data);
586 fc = le16_to_cpu(hdr->frame_control);
587
588 if (!stats.b_crc) {
589 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status,
590 sizeof(rx_status));
591
592 if (is_broadcast_ether_addr(hdr->addr1))
593 ;/*TODO*/
594 else {
595 if (is_multicast_ether_addr(hdr->addr1))
596 ;/*TODO*/
597 else {
598 unicast = true;
599 rtlpriv->stats.rxbytesunicast +=
600 skb->len;
601 }
602 }
603
604 rtl_is_special_data(hw, skb, false);
605
606 if (ieee80211_is_data(fc)) {
607 rtlpriv->cfg->ops->led_control(hw,
608 LED_CTL_RX);
609
610 if (unicast)
611 rtlpriv->link_info.
612 num_rx_inperiod++;
613 }
614
615 if (unlikely(!rtl_action_proc(hw, skb,
616 false))) {
617 dev_kfree_skb_any(skb);
618 } else {
619 struct sk_buff *uskb = NULL;
620 u8 *pdata;
621 uskb = dev_alloc_skb(skb->len + 128);
622 memcpy(IEEE80211_SKB_RXCB(uskb),
623 &rx_status,
624 sizeof(rx_status));
625 pdata = (u8 *)skb_put(uskb, skb->len);
626 memcpy(pdata, skb->data, skb->len);
627 dev_kfree_skb_any(skb);
628
629 ieee80211_rx_irqsafe(hw, uskb);
630 }
631 } else {
632 dev_kfree_skb_any(skb);
633 }
634
635 if (((rtlpriv->link_info.num_rx_inperiod +
636 rtlpriv->link_info.num_tx_inperiod) > 8) ||
637 (rtlpriv->link_info.num_rx_inperiod > 2)) {
638 rtl_lps_leave(hw);
639 }
640
641 new_skb = dev_alloc_skb(rtlpci->rxbuffersize);
642 if (unlikely(!new_skb)) {
643 RT_TRACE(rtlpriv, (COMP_INTR | COMP_RECV),
644 DBG_DMESG,
645 ("can't alloc skb for rx\n"));
646 goto done;
647 }
648 skb = new_skb;
649 /*skb->dev = dev; */
650
651 rtlpci->rx_ring[rx_queue_idx].rx_buf[rtlpci->
652 rx_ring
653 [rx_queue_idx].
654 idx] = skb;
655 *((dma_addr_t *) skb->cb) =
656 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
657 rtlpci->rxbuffersize,
658 PCI_DMA_FROMDEVICE);
659
660 }
661done:
662 bufferaddress = cpu_to_le32(*((dma_addr_t *) skb->cb));
663 tmp_one = 1;
664 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, false,
665 HW_DESC_RXBUFF_ADDR,
666 (u8 *)&bufferaddress);
667 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false, HW_DESC_RXOWN,
668 (u8 *)&tmp_one);
669 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
670 HW_DESC_RXPKT_LEN,
671 (u8 *)&rtlpci->rxbuffersize);
672
673 if (rtlpci->rx_ring[rx_queue_idx].idx ==
674 rtlpci->rxringcount - 1)
675 rtlpriv->cfg->ops->set_desc((u8 *)pdesc, false,
676 HW_DESC_RXERO,
677 (u8 *)&tmp_one);
678
679 rtlpci->rx_ring[rx_queue_idx].idx =
680 (rtlpci->rx_ring[rx_queue_idx].idx + 1) %
681 rtlpci->rxringcount;
682 }
683
684}
685
686void _rtl_pci_tx_interrupt(struct ieee80211_hw *hw)
687{
688 struct rtl_priv *rtlpriv = rtl_priv(hw);
689 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
690 int prio;
691
692 for (prio = 0; prio < RTL_PCI_MAX_TX_QUEUE_COUNT; prio++) {
693 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
694
695 while (skb_queue_len(&ring->queue)) {
696 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
697 struct sk_buff *skb;
698 struct ieee80211_tx_info *info;
699 u8 own;
700
701 /*
702 *beacon packet will only use the first
703 *descriptor by default, and the own bit may not
704 *be cleared by the hardware; the
705 *beacon is freed in prepare beacon
706 */
707 if (prio == BEACON_QUEUE || prio == TXCMD_QUEUE ||
708 prio == HCCA_QUEUE)
709 break;
710
711 own = (u8)rtlpriv->cfg->ops->get_desc((u8 *)entry,
712 true,
713 HW_DESC_OWN);
714
715 if (own)
716 break;
717
718 skb = __skb_dequeue(&ring->queue);
719 pci_unmap_single(rtlpci->pdev,
720 le32_to_cpu(rtlpriv->cfg->ops->
721 get_desc((u8 *) entry,
722 true,
723 HW_DESC_TXBUFF_ADDR)),
724 skb->len, PCI_DMA_TODEVICE);
725
726 ring->idx = (ring->idx + 1) % ring->entries;
727
728 info = IEEE80211_SKB_CB(skb);
729 ieee80211_tx_info_clear_status(info);
730
731 info->flags |= IEEE80211_TX_STAT_ACK;
732 /*info->status.rates[0].count = 1; */
733
734 ieee80211_tx_status_irqsafe(hw, skb);
735
736 if ((ring->entries - skb_queue_len(&ring->queue))
737 == 2 && prio != BEACON_QUEUE) {
738 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
739 ("more desc left, wake "
740 "skb_queue@%d,ring->idx = %d,"
741 "skb_queue_len = 0x%d\n",
742 prio, ring->idx,
743 skb_queue_len(&ring->queue)));
744
745 ieee80211_wake_queue(hw,
746 skb_get_queue_mapping
747 (skb));
748 }
749
750 skb = NULL;
751 }
752 }
753}
754
755static irqreturn_t _rtl_pci_interrupt(int irq, void *dev_id)
756{
757 struct ieee80211_hw *hw = dev_id;
758 struct rtl_priv *rtlpriv = rtl_priv(hw);
759 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
760 unsigned long flags;
761 u32 inta = 0;
762 u32 intb = 0;
763
764 if (rtlpci->irq_enabled == 0)
765 return IRQ_HANDLED;
766
767 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
768
769 /*read ISR: 4/8bytes */
770 rtlpriv->cfg->ops->interrupt_recognized(hw, &inta, &intb);
771
772 /*Shared IRQ or HW disappeared */
773 if (!inta || inta == 0xffff)
774 goto done;
775
776 /*<1> beacon related */
777 if (inta & rtlpriv->cfg->maps[RTL_IMR_TBDOK]) {
778 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
779 ("beacon ok interrupt!\n"));
780 }
781
782 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TBDER])) {
783 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
784 ("beacon err interrupt!\n"));
785 }
786
787 if (inta & rtlpriv->cfg->maps[RTL_IMR_BDOK]) {
788 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
789 ("beacon interrupt!\n"));
790 }
791
792 if (inta & rtlpriv->cfg->maps[RTL_IMR_BcnInt]) {
793 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
794 ("prepare beacon for interrupt!\n"));
795 tasklet_schedule(&rtlpriv->works.irq_prepare_bcn_tasklet);
796 }
797
798 /*<3> Tx related */
799 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_TXFOVW]))
800 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("IMR_TXFOVW!\n"));
801
802 if (inta & rtlpriv->cfg->maps[RTL_IMR_MGNTDOK]) {
803 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
804 ("Manage ok interrupt!\n"));
805 _rtl_pci_tx_isr(hw, MGNT_QUEUE);
806 }
807
808 if (inta & rtlpriv->cfg->maps[RTL_IMR_HIGHDOK]) {
809 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
810 ("HIGH_QUEUE ok interrupt!\n"));
811 _rtl_pci_tx_isr(hw, HIGH_QUEUE);
812 }
813
814 if (inta & rtlpriv->cfg->maps[RTL_IMR_BKDOK]) {
815 rtlpriv->link_info.num_tx_inperiod++;
816
817 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
818 ("BK Tx OK interrupt!\n"));
819 _rtl_pci_tx_isr(hw, BK_QUEUE);
820 }
821
822 if (inta & rtlpriv->cfg->maps[RTL_IMR_BEDOK]) {
823 rtlpriv->link_info.num_tx_inperiod++;
824
825 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
826 ("BE TX OK interrupt!\n"));
827 _rtl_pci_tx_isr(hw, BE_QUEUE);
828 }
829
830 if (inta & rtlpriv->cfg->maps[RTL_IMR_VIDOK]) {
831 rtlpriv->link_info.num_tx_inperiod++;
832
833 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
834 ("VI TX OK interrupt!\n"));
835 _rtl_pci_tx_isr(hw, VI_QUEUE);
836 }
837
838 if (inta & rtlpriv->cfg->maps[RTL_IMR_VODOK]) {
839 rtlpriv->link_info.num_tx_inperiod++;
840
841 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE,
842 ("Vo TX OK interrupt!\n"));
843 _rtl_pci_tx_isr(hw, VO_QUEUE);
844 }
845
846 /*<2> Rx related */
847 if (inta & rtlpriv->cfg->maps[RTL_IMR_ROK]) {
848 RT_TRACE(rtlpriv, COMP_INTR, DBG_TRACE, ("Rx ok interrupt!\n"));
849 tasklet_schedule(&rtlpriv->works.irq_tasklet);
850 }
851
852 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RDU])) {
853 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
854 ("rx descriptor unavailable!\n"));
855 tasklet_schedule(&rtlpriv->works.irq_tasklet);
856 }
857
858 if (unlikely(inta & rtlpriv->cfg->maps[RTL_IMR_RXFOVW])) {
859 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING, ("rx overflow !\n"));
860 tasklet_schedule(&rtlpriv->works.irq_tasklet);
861 }
862
863 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
864 return IRQ_HANDLED;
865
866done:
867 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
868 return IRQ_HANDLED;
869}
870
871static void _rtl_pci_irq_tasklet(struct ieee80211_hw *hw)
872{
873 _rtl_pci_rx_interrupt(hw);
874}
875
876static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
877{
878 struct rtl_priv *rtlpriv = rtl_priv(hw);
879 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
880 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
881 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[BEACON_QUEUE];
882 struct ieee80211_hdr *hdr = NULL;
883 struct ieee80211_tx_info *info = NULL;
884 struct sk_buff *pskb = NULL;
885 struct rtl_tx_desc *pdesc = NULL;
886 unsigned int queue_index;
887 u8 temp_one = 1;
888
889 ring = &rtlpci->tx_ring[BEACON_QUEUE];
890 pskb = __skb_dequeue(&ring->queue);
891 if (pskb)
892 kfree_skb(pskb);
893
894 /*NB: the beacon data buffer must be 32-bit aligned. */
895 pskb = ieee80211_beacon_get(hw, mac->vif);
896 if (pskb == NULL)
897 return;
898 hdr = (struct ieee80211_hdr *)(pskb->data);
899 info = IEEE80211_SKB_CB(pskb);
900
901 queue_index = BEACON_QUEUE;
902
903 pdesc = &ring->desc[0];
904 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
905 info, pskb, queue_index);
906
907 __skb_queue_tail(&ring->queue, pskb);
908
909 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true, HW_DESC_OWN,
910 (u8 *)&temp_one);
911
912 return;
913}
914
915static void _rtl_pci_init_trx_var(struct ieee80211_hw *hw)
916{
917 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
918 u8 i;
919
920 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
921 rtlpci->txringcount[i] = RT_TXDESC_NUM;
922
923 /*
924 *we only alloc 2 desc for the beacon queue,
925 *because only the first desc is used for the hw beacon.
926 */
927 rtlpci->txringcount[BEACON_QUEUE] = 2;
928
929 /*
930 *the BE queue needs more descriptors for performance;
931 *otherwise "no more tx desc" conditions will happen
932 *and may cause mac80211 mem leakage.
933 */
934 rtlpci->txringcount[BE_QUEUE] = RT_TXDESC_NUM_BE_QUEUE;
935
936 rtlpci->rxbuffersize = 9100; /*2048/1024; */
937 rtlpci->rxringcount = RTL_PCI_MAX_RX_COUNT; /*64; */
938}
939
940static void _rtl_pci_init_struct(struct ieee80211_hw *hw,
941 struct pci_dev *pdev)
942{
943 struct rtl_priv *rtlpriv = rtl_priv(hw);
944 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
945 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
946 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
947 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
948
949 rtlpci->up_first_time = true;
950 rtlpci->being_init_adapter = false;
951
952 rtlhal->hw = hw;
953 rtlpci->pdev = pdev;
954
955 ppsc->b_inactiveps = false;
956 ppsc->b_leisure_ps = true;
957 ppsc->b_fwctrl_lps = true;
958 ppsc->b_reg_fwctrl_lps = 3;
959 ppsc->reg_max_lps_awakeintvl = 5;
960
961 if (ppsc->b_reg_fwctrl_lps == 1)
962 ppsc->fwctrl_psmode = FW_PS_MIN_MODE;
963 else if (ppsc->b_reg_fwctrl_lps == 2)
964 ppsc->fwctrl_psmode = FW_PS_MAX_MODE;
965 else if (ppsc->b_reg_fwctrl_lps == 3)
966 ppsc->fwctrl_psmode = FW_PS_DTIM_MODE;
967
968 /*Tx/Rx related var */
969 _rtl_pci_init_trx_var(hw);
970
971 /*IBSS*/ mac->beacon_interval = 100;
972
973 /*AMPDU*/ mac->min_space_cfg = 0;
974 mac->max_mss_density = 0;
975 /*set sane AMPDU defaults */
976 mac->current_ampdu_density = 7;
977 mac->current_ampdu_factor = 3;
978
979 /*QOS*/ rtlpci->acm_method = eAcmWay2_SW;
980
981 /*task */
982 tasklet_init(&rtlpriv->works.irq_tasklet,
983 (void (*)(unsigned long))_rtl_pci_irq_tasklet,
984 (unsigned long)hw);
985 tasklet_init(&rtlpriv->works.irq_prepare_bcn_tasklet,
986 (void (*)(unsigned long))_rtl_pci_prepare_bcn_tasklet,
987 (unsigned long)hw);
988}
989
990static int _rtl_pci_init_tx_ring(struct ieee80211_hw *hw,
991 unsigned int prio, unsigned int entries)
992{
993 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
994 struct rtl_priv *rtlpriv = rtl_priv(hw);
995 struct rtl_tx_desc *ring;
996 dma_addr_t dma;
997 u32 nextdescaddress;
998 int i;
999
1000 ring = pci_alloc_consistent(rtlpci->pdev,
1001 sizeof(*ring) * entries, &dma);
1002
1003 if (!ring || (unsigned long)ring & 0xFF) {
1004 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1005 ("Cannot allocate TX ring (prio = %d)\n", prio));
1006 return -ENOMEM;
1007 }
1008
1009 memset(ring, 0, sizeof(*ring) * entries);
1010 rtlpci->tx_ring[prio].desc = ring;
1011 rtlpci->tx_ring[prio].dma = dma;
1012 rtlpci->tx_ring[prio].idx = 0;
1013 rtlpci->tx_ring[prio].entries = entries;
1014 skb_queue_head_init(&rtlpci->tx_ring[prio].queue);
1015
1016 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1017 ("queue:%d, ring_addr:%p\n", prio, ring));
1018
1019 for (i = 0; i < entries; i++) {
1020 nextdescaddress = cpu_to_le32((u32) dma +
1021 ((i + 1) % entries) *
1022 sizeof(*ring));
1023
1024 rtlpriv->cfg->ops->set_desc((u8 *)&(ring[i]),
1025 true, HW_DESC_TX_NEXTDESC_ADDR,
1026 (u8 *)&nextdescaddress);
1027 }
1028
1029 return 0;
1030}
1031
1032static int _rtl_pci_init_rx_ring(struct ieee80211_hw *hw)
1033{
1034 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1035 struct rtl_priv *rtlpriv = rtl_priv(hw);
1036 struct rtl_rx_desc *entry = NULL;
1037 int i, rx_queue_idx;
1038 u8 tmp_one = 1;
1039
1040 /*
1041 *rx_queue_idx 0:RX_MPDU_QUEUE
1042 *rx_queue_idx 1:RX_CMD_QUEUE
1043 */
1044 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1045 rx_queue_idx++) {
1046 rtlpci->rx_ring[rx_queue_idx].desc =
1047 pci_alloc_consistent(rtlpci->pdev,
1048 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1049 desc) * rtlpci->rxringcount,
1050 &rtlpci->rx_ring[rx_queue_idx].dma);
1051
1052 if (!rtlpci->rx_ring[rx_queue_idx].desc ||
1053 (unsigned long)rtlpci->rx_ring[rx_queue_idx].desc & 0xFF) {
1054 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1055 ("Cannot allocate RX ring\n"));
1056 return -ENOMEM;
1057 }
1058
1059 memset(rtlpci->rx_ring[rx_queue_idx].desc, 0,
1060 sizeof(*rtlpci->rx_ring[rx_queue_idx].desc) *
1061 rtlpci->rxringcount);
1062
1063 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1064
1065 for (i = 0; i < rtlpci->rxringcount; i++) {
1066 struct sk_buff *skb =
1067 dev_alloc_skb(rtlpci->rxbuffersize);
1068 u32 bufferaddress;
1069 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1070 if (!skb)
1071 return 0;
1072
1073 /*skb->dev = dev; */
1074
1075 rtlpci->rx_ring[rx_queue_idx].rx_buf[i] = skb;
1076
1077 /*
1078 *just set skb->cb to mapping addr
1079 *for pci_unmap_single use
1080 */
1081 *((dma_addr_t *) skb->cb) =
1082 pci_map_single(rtlpci->pdev, skb_tail_pointer(skb),
1083 rtlpci->rxbuffersize,
1084 PCI_DMA_FROMDEVICE);
1085
1086 bufferaddress = cpu_to_le32(*((dma_addr_t *)skb->cb));
1087 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1088 HW_DESC_RXBUFF_ADDR,
1089 (u8 *)&bufferaddress);
1090 rtlpriv->cfg->ops->set_desc((u8 *)entry, false,
1091 HW_DESC_RXPKT_LEN,
1092 (u8 *)&rtlpci->
1093 rxbuffersize);
1094 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1095 HW_DESC_RXOWN,
1096 (u8 *)&tmp_one);
1097 }
1098
1099 rtlpriv->cfg->ops->set_desc((u8 *) entry, false,
1100 HW_DESC_RXERO, (u8 *)&tmp_one);
1101 }
1102 return 0;
1103}
1104
1105static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1106 unsigned int prio)
1107{
1108 struct rtl_priv *rtlpriv = rtl_priv(hw);
1109 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1110 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[prio];
1111
1112 while (skb_queue_len(&ring->queue)) {
1113 struct rtl_tx_desc *entry = &ring->desc[ring->idx];
1114 struct sk_buff *skb = __skb_dequeue(&ring->queue);
1115
1116 pci_unmap_single(rtlpci->pdev,
1117 le32_to_cpu(rtlpriv->cfg->
1118 ops->get_desc((u8 *) entry, true,
1119 HW_DESC_TXBUFF_ADDR)),
1120 skb->len, PCI_DMA_TODEVICE);
1121 kfree_skb(skb);
1122 ring->idx = (ring->idx + 1) % ring->entries;
1123 }
1124
1125 pci_free_consistent(rtlpci->pdev,
1126 sizeof(*ring->desc) * ring->entries,
1127 ring->desc, ring->dma);
1128 ring->desc = NULL;
1129}
1130
1131static void _rtl_pci_free_rx_ring(struct rtl_pci *rtlpci)
1132{
1133 int i, rx_queue_idx;
1134
1135 /*rx_queue_idx 0:RX_MPDU_QUEUE */
1136 /*rx_queue_idx 1:RX_CMD_QUEUE */
1137 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1138 rx_queue_idx++) {
1139 for (i = 0; i < rtlpci->rxringcount; i++) {
1140 struct sk_buff *skb =
1141 rtlpci->rx_ring[rx_queue_idx].rx_buf[i];
1142 if (!skb)
1143 continue;
1144
1145 pci_unmap_single(rtlpci->pdev,
1146 *((dma_addr_t *) skb->cb),
1147 rtlpci->rxbuffersize,
1148 PCI_DMA_FROMDEVICE);
1149 kfree_skb(skb);
1150 }
1151
1152 pci_free_consistent(rtlpci->pdev,
1153 sizeof(*rtlpci->rx_ring[rx_queue_idx].
1154 desc) * rtlpci->rxringcount,
1155 rtlpci->rx_ring[rx_queue_idx].desc,
1156 rtlpci->rx_ring[rx_queue_idx].dma);
1157 rtlpci->rx_ring[rx_queue_idx].desc = NULL;
1158 }
1159}
1160
1161static int _rtl_pci_init_trx_ring(struct ieee80211_hw *hw)
1162{
1163 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1164 int ret;
1165 int i;
1166
1167 ret = _rtl_pci_init_rx_ring(hw);
1168 if (ret)
1169 return ret;
1170
1171 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1172 ret = _rtl_pci_init_tx_ring(hw, i,
1173 rtlpci->txringcount[i]);
1174 if (ret)
1175 goto err_free_rings;
1176 }
1177
1178 return 0;
1179
1180err_free_rings:
1181 _rtl_pci_free_rx_ring(rtlpci);
1182
1183 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1184 if (rtlpci->tx_ring[i].desc)
1185 _rtl_pci_free_tx_ring(hw, i);
1186
1187 return 1;
1188}
1189
1190static int _rtl_pci_deinit_trx_ring(struct ieee80211_hw *hw)
1191{
1192 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1193 u32 i;
1194
1195 /*free rx rings */
1196 _rtl_pci_free_rx_ring(rtlpci);
1197
1198 /*free tx rings */
1199 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++)
1200 _rtl_pci_free_tx_ring(hw, i);
1201
1202 return 0;
1203}
1204
1205int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1206{
1207 struct rtl_priv *rtlpriv = rtl_priv(hw);
1208 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1209 int i, rx_queue_idx;
1210 unsigned long flags;
1211 u8 tmp_one = 1;
1212
1213 /*rx_queue_idx 0:RX_MPDU_QUEUE */
1214 /*rx_queue_idx 1:RX_CMD_QUEUE */
1215 for (rx_queue_idx = 0; rx_queue_idx < RTL_PCI_MAX_RX_QUEUE;
1216 rx_queue_idx++) {
1217 /*
1218 *force the rx_ring[RX_MPDU_QUEUE/
1219 *RX_CMD_QUEUE].idx to the first one
1220 */
1221 if (rtlpci->rx_ring[rx_queue_idx].desc) {
1222 struct rtl_rx_desc *entry = NULL;
1223
1224 for (i = 0; i < rtlpci->rxringcount; i++) {
1225 entry = &rtlpci->rx_ring[rx_queue_idx].desc[i];
1226 rtlpriv->cfg->ops->set_desc((u8 *) entry,
1227 false,
1228 HW_DESC_RXOWN,
1229 (u8 *)&tmp_one);
1230 }
1231 rtlpci->rx_ring[rx_queue_idx].idx = 0;
1232 }
1233 }
1234
1235 /*
1236 *after reset, release previous pending packet,
1237 *and force the tx idx to the first one
1238 */
1239 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1240 for (i = 0; i < RTL_PCI_MAX_TX_QUEUE_COUNT; i++) {
1241 if (rtlpci->tx_ring[i].desc) {
1242 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[i];
1243
1244 while (skb_queue_len(&ring->queue)) {
1245 struct rtl_tx_desc *entry =
1246 &ring->desc[ring->idx];
1247 struct sk_buff *skb =
1248 __skb_dequeue(&ring->queue);
1249
1250 pci_unmap_single(rtlpci->pdev,
1251 le32_to_cpu(rtlpriv->cfg->ops->
1252 get_desc((u8 *)
1253 entry,
1254 true,
1255 HW_DESC_TXBUFF_ADDR)),
1256 skb->len, PCI_DMA_TODEVICE);
1257 kfree_skb(skb);
1258 ring->idx = (ring->idx + 1) % ring->entries;
1259 }
1260 ring->idx = 0;
1261 }
1262 }
1263
1264 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1265
1266 return 0;
1267}
1268
1269unsigned int _rtl_mac_to_hwqueue(u16 fc,
1270 unsigned int mac80211_queue_index)
1271{
1272 unsigned int hw_queue_index;
1273
1274 if (unlikely(ieee80211_is_beacon(fc))) {
1275 hw_queue_index = BEACON_QUEUE;
1276 goto out;
1277 }
1278
1279 if (ieee80211_is_mgmt(fc)) {
1280 hw_queue_index = MGNT_QUEUE;
1281 goto out;
1282 }
1283
1284 switch (mac80211_queue_index) {
1285 case 0:
1286 hw_queue_index = VO_QUEUE;
1287 break;
1288 case 1:
1289 hw_queue_index = VI_QUEUE;
1290 break;
1291 case 2:
1292 hw_queue_index = BE_QUEUE;
1293 break;
1294 case 3:
1295 hw_queue_index = BK_QUEUE;
1296 break;
1297 default:
1298 hw_queue_index = BE_QUEUE;
1299 RT_ASSERT(false, ("QSLT_BE queue, skb_queue:%d\n",
1300 mac80211_queue_index));
1301 break;
1302 }
1303
1304out:
1305 return hw_queue_index;
1306}
1307
1308int rtl_pci_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1309{
1310 struct rtl_priv *rtlpriv = rtl_priv(hw);
1311 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1312 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1313 struct rtl8192_tx_ring *ring;
1314 struct rtl_tx_desc *pdesc;
1315 u8 idx;
1316 unsigned int queue_index, hw_queue;
1317 unsigned long flags;
1318 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
1319 u16 fc = le16_to_cpu(hdr->frame_control);
1320 u8 *pda_addr = hdr->addr1;
1321 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1322 /*ssn */
1323 u8 *qc = NULL;
1324 u8 tid = 0;
1325 u16 seq_number = 0;
1326 u8 own;
1327 u8 temp_one = 1;
1328
1329 if (ieee80211_is_mgmt(fc))
1330 rtl_tx_mgmt_proc(hw, skb);
1331 rtl_action_proc(hw, skb, true);
1332
1333 queue_index = skb_get_queue_mapping(skb);
1334 hw_queue = _rtl_mac_to_hwqueue(fc, queue_index);
1335
1336 if (is_multicast_ether_addr(pda_addr))
1337 rtlpriv->stats.txbytesmulticast += skb->len;
1338 else if (is_broadcast_ether_addr(pda_addr))
1339 rtlpriv->stats.txbytesbroadcast += skb->len;
1340 else
1341 rtlpriv->stats.txbytesunicast += skb->len;
1342
1343 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
1344
1345 ring = &rtlpci->tx_ring[hw_queue];
1346 if (hw_queue != BEACON_QUEUE)
1347 idx = (ring->idx + skb_queue_len(&ring->queue)) %
1348 ring->entries;
1349 else
1350 idx = 0;
1351
1352 pdesc = &ring->desc[idx];
1353 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc,
1354 true, HW_DESC_OWN);
1355
1356 if ((own == 1) && (hw_queue != BEACON_QUEUE)) {
1357 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1358 ("No more TX desc@%d, ring->idx = %d,"
1359 "idx = %d, skb_queue_len = 0x%d\n",
1360 hw_queue, ring->idx, idx,
1361 skb_queue_len(&ring->queue)));
1362
1363 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1364 return skb->len;
1365 }
1366
1367 /*
1368 *if(ieee80211_is_nullfunc(fc)) {
1369 * spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1370 * return 1;
1371 *}
1372 */
1373
1374 if (ieee80211_is_data_qos(fc)) {
1375 qc = ieee80211_get_qos_ctl(hdr);
1376 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
1377
1378 seq_number = mac->tids[tid].seq_number;
1379 seq_number &= IEEE80211_SCTL_SEQ;
1380 /*
1381 *hdr->seq_ctrl = hdr->seq_ctrl &
1382 *cpu_to_le16(IEEE80211_SCTL_FRAG);
1383 *hdr->seq_ctrl |= cpu_to_le16(seq_number);
1384 */
1385
1386 seq_number += 1;
1387 }
1388
1389 if (ieee80211_is_data(fc))
1390 rtlpriv->cfg->ops->led_control(hw, LED_CTL_TX);
1391
1392 rtlpriv->cfg->ops->fill_tx_desc(hw, hdr, (u8 *) pdesc,
1393 info, skb, hw_queue);
1394
1395 __skb_queue_tail(&ring->queue, skb);
1396
1397 rtlpriv->cfg->ops->set_desc((u8 *) pdesc, true,
1398 HW_DESC_OWN, (u8 *)&temp_one);
1399
1400 if (!ieee80211_has_morefrags(hdr->frame_control)) {
1401 if (qc)
1402 mac->tids[tid].seq_number = seq_number;
1403 }
1404
1405 if ((ring->entries - skb_queue_len(&ring->queue)) < 2 &&
1406 hw_queue != BEACON_QUEUE) {
1407
1408 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
1409 ("less desc left, stop skb_queue@%d, "
1410 "ring->idx = %d,"
1411 "idx = %d, skb_queue_len = 0x%d\n",
1412 hw_queue, ring->idx, idx,
1413 skb_queue_len(&ring->queue)));
1414
1415 ieee80211_stop_queue(hw, skb_get_queue_mapping(skb));
1416 }
1417
1418 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
1419
1420 rtlpriv->cfg->ops->tx_polling(hw, hw_queue);
1421
1422 return 0;
1423}
1424
1425void rtl_pci_deinit(struct ieee80211_hw *hw)
1426{
1427 struct rtl_priv *rtlpriv = rtl_priv(hw);
1428 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1429
1430 _rtl_pci_deinit_trx_ring(hw);
1431
1432 synchronize_irq(rtlpci->pdev->irq);
1433 tasklet_kill(&rtlpriv->works.irq_tasklet);
1434
1435 flush_workqueue(rtlpriv->works.rtl_wq);
1436 destroy_workqueue(rtlpriv->works.rtl_wq);
1437
1438}
1439
1440int rtl_pci_init(struct ieee80211_hw *hw, struct pci_dev *pdev)
1441{
1442 struct rtl_priv *rtlpriv = rtl_priv(hw);
1443 int err;
1444
1445 _rtl_pci_init_struct(hw, pdev);
1446
1447 err = _rtl_pci_init_trx_ring(hw);
1448 if (err) {
1449 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1450 ("tx ring initialization failed"));
1451 return err;
1452 }
1453
1454 return 1;
1455}
1456
1457int rtl_pci_start(struct ieee80211_hw *hw)
1458{
1459 struct rtl_priv *rtlpriv = rtl_priv(hw);
1460 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1461 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1462 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1463
1464 int err;
1465
1466 rtl_pci_reset_trx_ring(hw);
1467
1468 rtlpci->driver_is_goingto_unload = false;
1469 err = rtlpriv->cfg->ops->hw_init(hw);
1470 if (err) {
1471 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1472 ("Failed to config hardware!\n"));
1473 return err;
1474 }
1475
1476 rtlpriv->cfg->ops->enable_interrupt(hw);
1477 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("enable_interrupt OK\n"));
1478
1479 rtl_init_rx_config(hw);
1480
1481 /*should be done after adapter start and interrupt enable. */
1482 set_hal_start(rtlhal);
1483
1484 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1485
1486 rtlpci->up_first_time = false;
1487
1488 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("OK\n"));
1489 return 0;
1490}
1491
1492void rtl_pci_stop(struct ieee80211_hw *hw)
1493{
1494 struct rtl_priv *rtlpriv = rtl_priv(hw);
1495 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1496 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1497 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1498 unsigned long flags;
1499 u8 RFInProgressTimeOut = 0;
1500
1501 /*
1502 *should be done before disabling interrupt & adapter,
1503 *and takes effect immediately.
1504 */
1505 set_hal_stop(rtlhal);
1506
1507 rtlpriv->cfg->ops->disable_interrupt(hw);
1508
1509 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1510 while (ppsc->rfchange_inprogress) {
1511 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1512 if (RFInProgressTimeOut > 100) {
1513 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1514 break;
1515 }
1516 mdelay(1);
1517 RFInProgressTimeOut++;
1518 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1519 }
1520 ppsc->rfchange_inprogress = true;
1521 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1522
1523 rtlpci->driver_is_goingto_unload = true;
1524 rtlpriv->cfg->ops->hw_disable(hw);
1525 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1526
1527 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flags);
1528 ppsc->rfchange_inprogress = false;
1529 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags);
1530
1531 rtl_pci_enable_aspm(hw);
1532}
1533
1534static bool _rtl_pci_find_adapter(struct pci_dev *pdev,
1535 struct ieee80211_hw *hw)
1536{
1537 struct rtl_priv *rtlpriv = rtl_priv(hw);
1538 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1539 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1540 struct pci_dev *bridge_pdev = pdev->bus->self;
1541 u16 venderid;
1542 u16 deviceid;
1543 u8 revisionid;
1544 u16 irqline;
1545 u8 tmp;
1546
1547 venderid = pdev->vendor;
1548 deviceid = pdev->device;
1549 pci_read_config_byte(pdev, 0x8, &revisionid);
1550 pci_read_config_word(pdev, 0x3C, &irqline);
1551
1552 if (deviceid == RTL_PCI_8192_DID ||
1553 deviceid == RTL_PCI_0044_DID ||
1554 deviceid == RTL_PCI_0047_DID ||
1555 deviceid == RTL_PCI_8192SE_DID ||
1556 deviceid == RTL_PCI_8174_DID ||
1557 deviceid == RTL_PCI_8173_DID ||
1558 deviceid == RTL_PCI_8172_DID ||
1559 deviceid == RTL_PCI_8171_DID) {
1560 switch (revisionid) {
1561 case RTL_PCI_REVISION_ID_8192PCIE:
1562 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1563 ("8192 PCI-E is found - "
1564 "vid/did=%x/%x\n", venderid, deviceid));
1565 rtlhal->hw_type = HARDWARE_TYPE_RTL8192E;
1566 break;
1567 case RTL_PCI_REVISION_ID_8192SE:
1568 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1569 ("8192SE is found - "
1570 "vid/did=%x/%x\n", venderid, deviceid));
1571 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1572 break;
1573 default:
1574 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1575 ("Err: Unknown device - "
1576 "vid/did=%x/%x\n", venderid, deviceid));
1577 rtlhal->hw_type = HARDWARE_TYPE_RTL8192SE;
1578 break;
1579
1580 }
1581 } else if (deviceid == RTL_PCI_8192CET_DID ||
1582 deviceid == RTL_PCI_8192CE_DID ||
1583 deviceid == RTL_PCI_8191CE_DID ||
1584 deviceid == RTL_PCI_8188CE_DID) {
1585 rtlhal->hw_type = HARDWARE_TYPE_RTL8192CE;
1586 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1587 ("8192C PCI-E is found - "
1588 "vid/did=%x/%x\n", venderid, deviceid));
1589 } else {
1590 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1591 ("Err: Unknown device -"
1592 " vid/did=%x/%x\n", venderid, deviceid));
1593
1594 rtlhal->hw_type = RTL_DEFAULT_HARDWARE_TYPE;
1595 }
1596
1597 /*find bus info */
1598 pcipriv->ndis_adapter.busnumber = pdev->bus->number;
1599 pcipriv->ndis_adapter.devnumber = PCI_SLOT(pdev->devfn);
1600 pcipriv->ndis_adapter.funcnumber = PCI_FUNC(pdev->devfn);
1601
1602 /*find bridge info */
1603 pcipriv->ndis_adapter.pcibridge_vendorid = bridge_pdev->vendor;
1604 for (tmp = 0; tmp < PCI_BRIDGE_VENDOR_MAX; tmp++) {
1605 if (bridge_pdev->vendor == pcibridge_vendors[tmp]) {
1606 pcipriv->ndis_adapter.pcibridge_vendor = tmp;
1607 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1608 ("Pci Bridge Vendor is found index: %d\n",
1609 tmp));
1610 break;
1611 }
1612 }
1613
1614 if (pcipriv->ndis_adapter.pcibridge_vendor !=
1615 PCI_BRIDGE_VENDOR_UNKNOWN) {
1616 pcipriv->ndis_adapter.pcibridge_busnum =
1617 bridge_pdev->bus->number;
1618 pcipriv->ndis_adapter.pcibridge_devnum =
1619 PCI_SLOT(bridge_pdev->devfn);
1620 pcipriv->ndis_adapter.pcibridge_funcnum =
1621 PCI_FUNC(bridge_pdev->devfn);
1622 pcipriv->ndis_adapter.pcibridge_pciehdr_offset =
1623 pci_pcie_cap(bridge_pdev);
1624 pcipriv->ndis_adapter.pcicfg_addrport =
1625 (pcipriv->ndis_adapter.pcibridge_busnum << 16) |
1626 (pcipriv->ndis_adapter.pcibridge_devnum << 11) |
1627 (pcipriv->ndis_adapter.pcibridge_funcnum << 8) | (1 << 31);
1628 pcipriv->ndis_adapter.num4bytes =
1629 (pcipriv->ndis_adapter.pcibridge_pciehdr_offset + 0x10) / 4;
1630
1631 rtl_pci_get_linkcontrol_field(hw);
1632
1633 if (pcipriv->ndis_adapter.pcibridge_vendor ==
1634 PCI_BRIDGE_VENDOR_AMD) {
1635 pcipriv->ndis_adapter.amd_l1_patch =
1636 rtl_pci_get_amd_l1_patch(hw);
1637 }
1638 }
1639
1640 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1641 ("pcidev busnumber:devnumber:funcnumber:"
1642 "vendor:link_ctl %d:%d:%d:%x:%x\n",
1643 pcipriv->ndis_adapter.busnumber,
1644 pcipriv->ndis_adapter.devnumber,
1645 pcipriv->ndis_adapter.funcnumber,
1646 pdev->vendor, pcipriv->ndis_adapter.linkctrl_reg));
1647
1648 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1649 ("pci_bridge busnumber:devnumber:funcnumber:vendor:"
1650 "pcie_cap:link_ctl_reg:amd %d:%d:%d:%x:%x:%x:%x\n",
1651 pcipriv->ndis_adapter.pcibridge_busnum,
1652 pcipriv->ndis_adapter.pcibridge_devnum,
1653 pcipriv->ndis_adapter.pcibridge_funcnum,
1654 pcibridge_vendors[pcipriv->ndis_adapter.pcibridge_vendor],
1655 pcipriv->ndis_adapter.pcibridge_pciehdr_offset,
1656 pcipriv->ndis_adapter.pcibridge_linkctrlreg,
1657 pcipriv->ndis_adapter.amd_l1_patch));
1658
1659 rtl_pci_parse_configuration(pdev, hw);
1660
1661 return true;
1662}
1663
1664int __devinit rtl_pci_probe(struct pci_dev *pdev,
1665 const struct pci_device_id *id)
1666{
1667 struct ieee80211_hw *hw = NULL;
1668
1669 struct rtl_priv *rtlpriv = NULL;
1670 struct rtl_pci_priv *pcipriv = NULL;
1671 struct rtl_pci *rtlpci;
1672 unsigned long pmem_start, pmem_len, pmem_flags;
1673 int err;
1674
1675 err = pci_enable_device(pdev);
1676 if (err) {
1677 RT_ASSERT(false,
1678 ("%s : Cannot enable new PCI device\n",
1679 pci_name(pdev)));
1680 return err;
1681 }
1682
1683 if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
1684 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
1685 RT_ASSERT(false, ("Unable to obtain 32bit DMA "
1686 "for consistent allocations\n"));
1687 pci_disable_device(pdev);
1688 return -ENOMEM;
1689 }
1690 }
1691
1692 pci_set_master(pdev);
1693
1694 hw = ieee80211_alloc_hw(sizeof(struct rtl_pci_priv) +
1695 sizeof(struct rtl_priv), &rtl_ops);
1696 if (!hw) {
1697 RT_ASSERT(false,
1698 ("%s : ieee80211 alloc failed\n", pci_name(pdev)));
1699 err = -ENOMEM;
1700 goto fail1;
1701 }
1702
1703 SET_IEEE80211_DEV(hw, &pdev->dev);
1704 pci_set_drvdata(pdev, hw);
1705
1706 rtlpriv = hw->priv;
1707 pcipriv = (void *)rtlpriv->priv;
1708 pcipriv->dev.pdev = pdev;
1709
1710 /*
1711 *init dbgp flags before all
1712 *other functions, because we will
1713 *use them in other functions like
1714 *RT_TRACE/RT_PRINT/RTL_PRINT_DATA;
1715 *you cannot use these macros
1716 *before this
1717 */
1718 rtl_dbgp_flag_init(hw);
1719
1720 /* MEM map */
1721 err = pci_request_regions(pdev, KBUILD_MODNAME);
1722 if (err) {
1723 RT_ASSERT(false, ("Can't obtain PCI resources\n"));
1724 return err;
1725 }
1726
1727 pmem_start = pci_resource_start(pdev, 2);
1728 pmem_len = pci_resource_len(pdev, 2);
1729 pmem_flags = pci_resource_flags(pdev, 2);
1730
1731 /*shared mem start */
1732 rtlpriv->io.pci_mem_start =
1733 (unsigned long)pci_iomap(pdev, 2, pmem_len);
1734 if (rtlpriv->io.pci_mem_start == 0) {
1735 RT_ASSERT(false, ("Can't map PCI mem\n"));
1736 goto fail2;
1737 }
1738
1739 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1740 ("mem mapped space: start: 0x%08lx len:%08lx "
1741 "flags:%08lx, after map:0x%08lx\n",
1742 pmem_start, pmem_len, pmem_flags,
1743 rtlpriv->io.pci_mem_start));
1744
1745 /* Disable Clk Request */
1746 pci_write_config_byte(pdev, 0x81, 0);
1747 /* leave D3 mode */
1748 pci_write_config_byte(pdev, 0x44, 0);
1749 pci_write_config_byte(pdev, 0x04, 0x06);
1750 pci_write_config_byte(pdev, 0x04, 0x07);
1751
1752 /* init cfg & intf_ops */
1753 rtlpriv->rtlhal.interface = INTF_PCI;
1754 rtlpriv->cfg = (struct rtl_hal_cfg *)(id->driver_data);
1755 rtlpriv->intf_ops = &rtl_pci_ops;
1756
1757 /* find adapter */
1758 _rtl_pci_find_adapter(pdev, hw);
1759
1760 /* Init IO handler */
1761 _rtl_pci_io_handler_init(&pdev->dev, hw);
1762
1763 /*like read eeprom and so on */
1764 rtlpriv->cfg->ops->read_eeprom_info(hw);
1765
1766 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
1767 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1768 ("Can't init_sw_vars.\n"));
1769 goto fail3;
1770 }
1771
1772 rtlpriv->cfg->ops->init_sw_leds(hw);
1773
1774 /*aspm */
1775 rtl_pci_init_aspm(hw);
1776
1777 /* Init mac80211 sw */
1778 err = rtl_init_core(hw);
1779 if (err) {
1780 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1781 ("Can't allocate sw for mac80211.\n"));
1782 goto fail3;
1783 }
1784
1785 /* Init PCI sw */
1786 err = !rtl_pci_init(hw, pdev);
1787 if (err) {
1788 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1789 ("Failed to init PCI.\n"));
1790 goto fail3;
1791 }
1792
1793 err = ieee80211_register_hw(hw);
1794 if (err) {
1795 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1796 ("Can't register mac80211 hw.\n"));
1797 goto fail3;
1798 } else {
1799 rtlpriv->mac80211.mac80211_registered = 1;
1800 }
1801
1802 err = sysfs_create_group(&pdev->dev.kobj, &rtl_attribute_group);
1803 if (err) {
1804 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1805 ("failed to create sysfs device attributes\n"));
1806 goto fail3;
1807 }
1808
1809 /*init rfkill */
1810 rtl_init_rfkill(hw);
1811
1812 rtlpci = rtl_pcidev(pcipriv);
1813 err = request_irq(rtlpci->pdev->irq, &_rtl_pci_interrupt,
1814 IRQF_SHARED, KBUILD_MODNAME, hw);
1815 if (err) {
1816 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1817 ("%s: failed to register IRQ handler\n",
1818 wiphy_name(hw->wiphy)));
1819 goto fail3;
1820 } else {
1821 rtlpci->irq_alloc = 1;
1822 }
1823
1824 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
1825 return 0;
1826
1827fail3:
1828 pci_set_drvdata(pdev, NULL);
1829 rtl_deinit_core(hw);
1830 _rtl_pci_io_handler_release(hw);
1831 ieee80211_free_hw(hw);
1832
1833 if (rtlpriv->io.pci_mem_start != 0)
1834 pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
1835
1836fail2:
1837 pci_release_regions(pdev);
1838
1839fail1:
1840
1841 pci_disable_device(pdev);
1842
1843 return -ENODEV;
1844
1845}
1846EXPORT_SYMBOL(rtl_pci_probe);
1847
1848void rtl_pci_disconnect(struct pci_dev *pdev)
1849{
1850 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
1851 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1852 struct rtl_priv *rtlpriv = rtl_priv(hw);
1853 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
1854 struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
1855
1856 clear_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
1857
1858 sysfs_remove_group(&pdev->dev.kobj, &rtl_attribute_group);
1859
1860 /*ieee80211_unregister_hw will call ops_stop */
1861 if (rtlmac->mac80211_registered == 1) {
1862 ieee80211_unregister_hw(hw);
1863 rtlmac->mac80211_registered = 0;
1864 } else {
1865 rtl_deinit_deferred_work(hw);
1866 rtlpriv->intf_ops->adapter_stop(hw);
1867 }
1868
1869 /*deinit rfkill */
1870 rtl_deinit_rfkill(hw);
1871
1872 rtl_pci_deinit(hw);
1873 rtl_deinit_core(hw);
1874 rtlpriv->cfg->ops->deinit_sw_leds(hw);
1875 _rtl_pci_io_handler_release(hw);
1876 rtlpriv->cfg->ops->deinit_sw_vars(hw);
1877
1878 if (rtlpci->irq_alloc) {
1879 free_irq(rtlpci->pdev->irq, hw);
1880 rtlpci->irq_alloc = 0;
1881 }
1882
1883 if (rtlpriv->io.pci_mem_start != 0) {
1884 pci_iounmap(pdev, (void *)rtlpriv->io.pci_mem_start);
1885 pci_release_regions(pdev);
1886 }
1887
1888 pci_disable_device(pdev);
1889 pci_set_drvdata(pdev, NULL);
1890
1891 ieee80211_free_hw(hw);
1892}
1893EXPORT_SYMBOL(rtl_pci_disconnect);
1894
1895/***************************************
1896kernel pci power state defines:
1897PCI_D0 ((pci_power_t __force) 0)
1898PCI_D1 ((pci_power_t __force) 1)
1899PCI_D2 ((pci_power_t __force) 2)
1900PCI_D3hot ((pci_power_t __force) 3)
1901PCI_D3cold ((pci_power_t __force) 4)
1902PCI_UNKNOWN ((pci_power_t __force) 5)
1903
1904This function is called when the system
1905goes into suspend state. mac80211 will
1906call rtl_mac_stop() from the mac80211
1907suspend function first, so there is
1908no need to call hw_disable here.
1909****************************************/
1910int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state)
1911{
1912 pci_save_state(pdev);
1913 pci_disable_device(pdev);
1914 pci_set_power_state(pdev, PCI_D3hot);
1915
1916 return 0;
1917}
1918EXPORT_SYMBOL(rtl_pci_suspend);
1919
1920int rtl_pci_resume(struct pci_dev *pdev)
1921{
1922 int ret;
1923
1924 pci_set_power_state(pdev, PCI_D0);
1925 ret = pci_enable_device(pdev);
1926 if (ret) {
1927 RT_ASSERT(false, ("ERR: <======\n"));
1928 return ret;
1929 }
1930
1931 pci_restore_state(pdev);
1932
1933 return 0;
1934}
1935EXPORT_SYMBOL(rtl_pci_resume);
1936
1937struct rtl_intf_ops rtl_pci_ops = {
1938 .adapter_start = rtl_pci_start,
1939 .adapter_stop = rtl_pci_stop,
1940 .adapter_tx = rtl_pci_tx,
1941 .reset_trx_ring = rtl_pci_reset_trx_ring,
1942
1943 .disable_aspm = rtl_pci_disable_aspm,
1944 .enable_aspm = rtl_pci_enable_aspm,
1945};
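
For reference, the exported entry points above (rtl_pci_probe, rtl_pci_disconnect, rtl_pci_suspend and rtl_pci_resume) are intended to be wired into a chip-specific struct pci_driver by each rtlwifi hardware module. A minimal sketch of such glue is given below, assuming a hypothetical rtl92ce module: the rtl92ce_* names and rtl92ce_hal_cfg are illustrative only; just the rtl_pci_* symbols and the RTL_PCI_DEVICE() macro come from this patch.

static struct pci_device_id rtl92ce_pci_ids[] __devinitdata = {
	/* 0x8176 is RTL_PCI_8188CE_DID; the hal_cfg name is hypothetical */
	{RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8176, rtl92ce_hal_cfg)},
	{},
};
MODULE_DEVICE_TABLE(pci, rtl92ce_pci_ids);

static struct pci_driver rtl92ce_driver = {
	.name = KBUILD_MODNAME,
	.id_table = rtl92ce_pci_ids,
	.probe = rtl_pci_probe,		/* maps BAR 2, inits trx rings, requests shared IRQ */
	.remove = rtl_pci_disconnect,	/* unregisters mac80211 hw, frees rings and IRQ */
#ifdef CONFIG_PM
	.suspend = rtl_pci_suspend,	/* save config space, enter PCI_D3hot */
	.resume = rtl_pci_resume,	/* back to PCI_D0, restore config space */
#endif
};

The module's init/exit paths would then call pci_register_driver()/pci_unregister_driver() on this structure.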
diff --git a/drivers/net/wireless/rtlwifi/pci.h b/drivers/net/wireless/rtlwifi/pci.h
new file mode 100644
index 000000000000..d36a66939958
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/pci.h
@@ -0,0 +1,302 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL_PCI_H__
31#define __RTL_PCI_H__
32
33#include <linux/pci.h>
34/*
351: MSDU packet queue,
362: Rx Command Queue
37*/
38#define RTL_PCI_RX_MPDU_QUEUE 0
39#define RTL_PCI_RX_CMD_QUEUE 1
40#define RTL_PCI_MAX_RX_QUEUE 2
41
42#define RTL_PCI_MAX_RX_COUNT 64
43#define RTL_PCI_MAX_TX_QUEUE_COUNT 9
44
45#define RT_TXDESC_NUM 128
46#define RT_TXDESC_NUM_BE_QUEUE 256
47
48#define BK_QUEUE 0
49#define BE_QUEUE 1
50#define VI_QUEUE 2
51#define VO_QUEUE 3
52#define BEACON_QUEUE 4
53#define TXCMD_QUEUE 5
54#define MGNT_QUEUE 6
55#define HIGH_QUEUE 7
56#define HCCA_QUEUE 8
57
58#define RTL_PCI_DEVICE(vend, dev, cfg) \
59 .vendor = (vend), \
60 .device = (dev), \
61 .subvendor = PCI_ANY_ID, \
62 .subdevice = PCI_ANY_ID,\
63 .driver_data = (kernel_ulong_t)&(cfg)
64
65#define INTEL_VENDOR_ID 0x8086
66#define SIS_VENDOR_ID 0x1039
67#define ATI_VENDOR_ID 0x1002
68#define ATI_DEVICE_ID 0x7914
69#define AMD_VENDOR_ID 0x1022
70
71#define PCI_MAX_BRIDGE_NUMBER 255
72#define PCI_MAX_DEVICES 32
73#define PCI_MAX_FUNCTION 8
74
75#define PCI_CONF_ADDRESS 0x0CF8 /*PCI Configuration Space Address */
76#define PCI_CONF_DATA 0x0CFC /*PCI Configuration Space Data */
77
78#define PCI_CLASS_BRIDGE_DEV 0x06
79#define PCI_SUBCLASS_BR_PCI_TO_PCI 0x04
80#define PCI_CAPABILITY_ID_PCI_EXPRESS 0x10
81#define PCI_CAP_ID_EXP 0x10
82
83#define U1DONTCARE 0xFF
84#define U2DONTCARE 0xFFFF
85#define U4DONTCARE 0xFFFFFFFF
86
87#define RTL_PCI_8192_DID 0x8192 /*8192 PCI-E */
88#define RTL_PCI_8192SE_DID 0x8192 /*8192 SE */
89#define RTL_PCI_8174_DID 0x8174 /*8192 SE */
90#define RTL_PCI_8173_DID 0x8173 /*8191 SE Crab */
91#define RTL_PCI_8172_DID 0x8172 /*8191 SE RE */
92#define RTL_PCI_8171_DID 0x8171 /*8191 SE Unicron */
93#define RTL_PCI_0045_DID 0x0045 /*8190 PCI for Ceraga */
94#define RTL_PCI_0046_DID 0x0046 /*8190 Cardbus for Ceraga */
95#define RTL_PCI_0044_DID 0x0044 /*8192e PCIE for Ceraga */
96#define RTL_PCI_0047_DID 0x0047 /*8192e Express Card for Ceraga */
97#define RTL_PCI_700F_DID 0x700F
98#define RTL_PCI_701F_DID 0x701F
99#define RTL_PCI_DLINK_DID 0x3304
100#define RTL_PCI_8192CET_DID 0x8191 /*8192ce */
101#define RTL_PCI_8192CE_DID 0x8178 /*8192ce */
102#define RTL_PCI_8191CE_DID 0x8177 /*8192ce */
103#define RTL_PCI_8188CE_DID 0x8176 /*8192ce */
104#define RTL_PCI_8192CU_DID 0x8191 /*8192ce */
105#define RTL_PCI_8192DE_DID 0x092D /*8192ce */
106#define RTL_PCI_8192DU_DID 0x092D /*8192ce */
107
108/*8192 supports 16 pages of IO registers*/
109#define RTL_MEM_MAPPED_IO_RANGE_8190PCI 0x1000
110#define RTL_MEM_MAPPED_IO_RANGE_8192PCIE 0x4000
111#define RTL_MEM_MAPPED_IO_RANGE_8192SE 0x4000
112#define RTL_MEM_MAPPED_IO_RANGE_8192CE 0x4000
113#define RTL_MEM_MAPPED_IO_RANGE_8192DE 0x4000
114
115#define RTL_PCI_REVISION_ID_8190PCI 0x00
116#define RTL_PCI_REVISION_ID_8192PCIE 0x01
117#define RTL_PCI_REVISION_ID_8192SE 0x10
118#define RTL_PCI_REVISION_ID_8192CE 0x1
119#define RTL_PCI_REVISION_ID_8192DE 0x0
120
121#define RTL_DEFAULT_HARDWARE_TYPE HARDWARE_TYPE_RTL8192CE
122
123enum pci_bridge_vendor {
124 PCI_BRIDGE_VENDOR_INTEL = 0x0, /*0b'0000,0001 */
125 PCI_BRIDGE_VENDOR_ATI, /*0b'0000,0010*/
126 PCI_BRIDGE_VENDOR_AMD, /*0b'0000,0100*/
127 PCI_BRIDGE_VENDOR_SIS, /*0b'0000,1000*/
128 PCI_BRIDGE_VENDOR_UNKNOWN, /*0b'0100,0000*/
129 PCI_BRIDGE_VENDOR_MAX,
130};
131
132struct rtl_rx_desc {
133 u32 dword[8];
134} __packed;
135
136struct rtl_tx_desc {
137 u32 dword[16];
138} __packed;
139
140struct rtl_tx_cmd_desc {
141 u32 dword[16];
142} __packed;
143
144struct rtl8192_tx_ring {
145 struct rtl_tx_desc *desc;
146 dma_addr_t dma;
147 unsigned int idx;
148 unsigned int entries;
149 struct sk_buff_head queue;
150};
151
152struct rtl8192_rx_ring {
153 struct rtl_rx_desc *desc;
154 dma_addr_t dma;
155 unsigned int idx;
156 struct sk_buff *rx_buf[RTL_PCI_MAX_RX_COUNT];
157};
158
159struct rtl_pci {
160 struct pci_dev *pdev;
161
162 bool driver_is_goingto_unload;
163 bool up_first_time;
164 bool being_init_adapter;
165 bool irq_enabled;
166
167 /*Tx */
168 struct rtl8192_tx_ring tx_ring[RTL_PCI_MAX_TX_QUEUE_COUNT];
169 int txringcount[RTL_PCI_MAX_TX_QUEUE_COUNT];
170 u32 transmit_config;
171
172 /*Rx */
173 struct rtl8192_rx_ring rx_ring[RTL_PCI_MAX_RX_QUEUE];
174 int rxringcount;
175 u16 rxbuffersize;
176 u32 receive_config;
177
178 /*irq */
179 u8 irq_alloc;
180 u32 irq_mask[2];
181
182 /*Bcn control register setting */
183 u32 reg_bcn_ctrl_val;
184
185 /*ASPM*/ u8 const_pci_aspm;
186 u8 const_amdpci_aspm;
187 u8 const_hwsw_rfoff_d3;
188 u8 const_support_pciaspm;
189 /*pci-e bridge */
190 u8 const_hostpci_aspm_setting;
191 /*pci-e device */
192 u8 const_devicepci_aspm_setting;
193 /*If it supports ASPM, Offset[560h] = 0x40,
194 otherwise Offset[560h] = 0x00. */
195 bool b_support_aspm;
196 bool b_support_backdoor;
197
198 /*QOS & EDCA */
199 enum acm_method acm_method;
200};
201
202struct mp_adapter {
203 u8 linkctrl_reg;
204
205 u8 busnumber;
206 u8 devnumber;
207 u8 funcnumber;
208
209 u8 pcibridge_busnum;
210 u8 pcibridge_devnum;
211 u8 pcibridge_funcnum;
212
213 u8 pcibridge_vendor;
214 u16 pcibridge_vendorid;
215 u16 pcibridge_deviceid;
216
217 u32 pcicfg_addrport;
218 u8 num4bytes;
219
220 u8 pcibridge_pciehdr_offset;
221 u8 pcibridge_linkctrlreg;
222
223 bool amd_l1_patch;
224};
225
226struct rtl_pci_priv {
227 struct rtl_pci dev;
228 struct mp_adapter ndis_adapter;
229 struct rtl_led_ctl ledctl;
230};
231
232#define rtl_pcipriv(hw) (((struct rtl_pci_priv *)(rtl_priv(hw))->priv))
233#define rtl_pcidev(pcipriv) (&((pcipriv)->dev))
234
235int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw);
236
237extern struct rtl_intf_ops rtl_pci_ops;
238
239int __devinit rtl_pci_probe(struct pci_dev *pdev,
240 const struct pci_device_id *id);
241void rtl_pci_disconnect(struct pci_dev *pdev);
242int rtl_pci_suspend(struct pci_dev *pdev, pm_message_t state);
243int rtl_pci_resume(struct pci_dev *pdev);
244
245static inline u8 pci_read8_sync(struct rtl_priv *rtlpriv, u32 addr)
246{
247 return 0xff & readb((u8 *) rtlpriv->io.pci_mem_start + addr);
248}
249
250static inline u16 pci_read16_sync(struct rtl_priv *rtlpriv, u32 addr)
251{
252 return readw((u8 *) rtlpriv->io.pci_mem_start + addr);
253}
254
255static inline u32 pci_read32_sync(struct rtl_priv *rtlpriv, u32 addr)
256{
257 return readl((u8 *) rtlpriv->io.pci_mem_start + addr);
258}
259
260static inline void pci_write8_async(struct rtl_priv *rtlpriv, u32 addr, u8 val)
261{
262 writeb(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
263}
264
265static inline void pci_write16_async(struct rtl_priv *rtlpriv,
266 u32 addr, u16 val)
267{
268 writew(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
269}
270
271static inline void pci_write32_async(struct rtl_priv *rtlpriv,
272 u32 addr, u32 val)
273{
274 writel(val, (u8 *) rtlpriv->io.pci_mem_start + addr);
275}
276
277static inline void rtl_pci_raw_write_port_ulong(u32 port, u32 val)
278{
279 outl(val, port);
280}
281
282static inline void rtl_pci_raw_write_port_uchar(u32 port, u8 val)
283{
284 outb(val, port);
285}
286
287static inline void rtl_pci_raw_read_port_uchar(u32 port, u8 *pval)
288{
289 *pval = inb(port);
290}
291
292static inline void rtl_pci_raw_read_port_ushort(u32 port, u16 *pval)
293{
294 *pval = inw(port);
295}
296
297static inline void rtl_pci_raw_read_port_ulong(u32 port, u32 *pval)
298{
299 *pval = inl(port);
300}
301
302#endif
diff --git a/drivers/net/wireless/rtlwifi/ps.c b/drivers/net/wireless/rtlwifi/ps.c
new file mode 100644
index 000000000000..d2326c13449e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/ps.c
@@ -0,0 +1,493 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "wifi.h"
31#include "base.h"
32#include "ps.h"
33
34bool rtl_ps_enable_nic(struct ieee80211_hw *hw)
35{
36 struct rtl_priv *rtlpriv = rtl_priv(hw);
37 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
38 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
39 bool init_status = true;
40
41 /*<1> reset trx ring */
42 if (rtlhal->interface == INTF_PCI)
43 rtlpriv->intf_ops->reset_trx_ring(hw);
44
45 if (is_hal_stop(rtlhal))
46 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
47 ("Driver is already down!\n"));
48
49 /*<2> Enable Adapter */
50 rtlpriv->cfg->ops->hw_init(hw);
51 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
52 /*init_status = false; */
53
54 /*<3> Enable Interrupt */
55 rtlpriv->cfg->ops->enable_interrupt(hw);
56
57 /*<enable timer> */
58 rtl_watch_dog_timer_callback((unsigned long)hw);
59
60 return init_status;
61}
62EXPORT_SYMBOL(rtl_ps_enable_nic);
63
64bool rtl_ps_disable_nic(struct ieee80211_hw *hw)
65{
66 bool status = true;
67 struct rtl_priv *rtlpriv = rtl_priv(hw);
68
69 /*<1> Stop all timer */
70 rtl_deinit_deferred_work(hw);
71
72 /*<2> Disable Interrupt */
73 rtlpriv->cfg->ops->disable_interrupt(hw);
74
75 /*<3> Disable Adapter */
76 rtlpriv->cfg->ops->hw_disable(hw);
77
78 return status;
79}
80EXPORT_SYMBOL(rtl_ps_disable_nic);
81
82bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
83 enum rf_pwrstate state_toset,
84 u32 changesource, bool protect_or_not)
85{
86 struct rtl_priv *rtlpriv = rtl_priv(hw);
87 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
88 enum rf_pwrstate rtstate;
89 bool b_actionallowed = false;
90 u16 rfwait_cnt = 0;
91 unsigned long flag;
92
93 /*protect_or_not = true; */
94
95 if (protect_or_not)
96 goto no_protect;
97
98 /*
99 *Only one thread can change
100 *the RF state at one time, and others
101 *should wait to be executed.
102 */
103 while (true) {
104 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
105 if (ppsc->rfchange_inprogress) {
106 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
107 flag);
108
109 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
110 ("RF Change in progress!"
111 "Wait to set..state_toset(%d).\n",
112 state_toset));
113
114 /* Set RF after the previous action is done. */
115 while (ppsc->rfchange_inprogress) {
116 rfwait_cnt++;
117 mdelay(1);
118
119 /*
120 *Wait too long, return false to avoid
121 *to be stuck here.
122 */
123 if (rfwait_cnt > 100)
124 return false;
125 }
126 } else {
127 ppsc->rfchange_inprogress = true;
128 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock,
129 flag);
130 break;
131 }
132 }
133
134no_protect:
135 rtstate = ppsc->rfpwr_state;
136
137 switch (state_toset) {
138 case ERFON:
139 ppsc->rfoff_reason &= (~changesource);
140
141 if ((changesource == RF_CHANGE_BY_HW) &&
142 (ppsc->b_hwradiooff == true)) {
143 ppsc->b_hwradiooff = false;
144 }
145
146 if (!ppsc->rfoff_reason) {
147 ppsc->rfoff_reason = 0;
148 b_actionallowed = true;
149 }
150
151 break;
152
153 case ERFOFF:
154
155 if ((changesource == RF_CHANGE_BY_HW)
156 && (ppsc->b_hwradiooff == false)) {
157 ppsc->b_hwradiooff = true;
158 }
159
160 ppsc->rfoff_reason |= changesource;
161 b_actionallowed = true;
162 break;
163
164 case ERFSLEEP:
165 ppsc->rfoff_reason |= changesource;
166 b_actionallowed = true;
167 break;
168
169 default:
170 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
171 ("switch case not process\n"));
172 break;
173 }
174
175 if (b_actionallowed)
176 rtlpriv->cfg->ops->set_rf_power_state(hw, state_toset);
177
178 if (!protect_or_not) {
179 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
180 ppsc->rfchange_inprogress = false;
181 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
182 }
183
184 return b_actionallowed;
185}
186EXPORT_SYMBOL(rtl_ps_set_rf_state);
187
188static void _rtl_ps_inactive_ps(struct ieee80211_hw *hw)
189{
190 struct rtl_priv *rtlpriv = rtl_priv(hw);
191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
192 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
193
194 ppsc->b_swrf_processing = true;
195
196 if (ppsc->inactive_pwrstate == ERFON && rtlhal->interface == INTF_PCI) {
197 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
198 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM) &&
199 rtlhal->interface == INTF_PCI) {
200 rtlpriv->intf_ops->disable_aspm(hw);
201 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
202 }
203 }
204
205 rtl_ps_set_rf_state(hw, ppsc->inactive_pwrstate,
206 RF_CHANGE_BY_IPS, false);
207
208 if (ppsc->inactive_pwrstate == ERFOFF &&
209 rtlhal->interface == INTF_PCI) {
210 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) {
211 rtlpriv->intf_ops->enable_aspm(hw);
212 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
213 }
214 }
215
216 ppsc->b_swrf_processing = false;
217}
218
219void rtl_ips_nic_off_wq_callback(void *data)
220{
221 struct rtl_works *rtlworks =
222 container_of_dwork_rtl(data, struct rtl_works, ips_nic_off_wq);
223 struct ieee80211_hw *hw = rtlworks->hw;
224 struct rtl_priv *rtlpriv = rtl_priv(hw);
225 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
226 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
227 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
228 enum rf_pwrstate rtstate;
229
230 if (mac->opmode != NL80211_IFTYPE_STATION) {
231 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
232			 ("not in station mode, return\n"));
233 return;
234 }
235
236 if (is_hal_stop(rtlhal))
237 return;
238
239 if (rtlpriv->sec.being_setkey)
240 return;
241
242 if (ppsc->b_inactiveps) {
243 rtstate = ppsc->rfpwr_state;
244
245 /*
246 *Do not enter IPS in the following conditions:
247 *(1) RF is already OFF or Sleep
248		 *(2) b_swrf_processing (indicates that IPS is still in progress)
249		 *(3) Connected (only a disconnected state can trigger IPS)
250 *(4) IBSS (send Beacon)
251 *(5) AP mode (send Beacon)
252 *(6) monitor mode (rcv packet)
253 */
254
255 if (rtstate == ERFON &&
256 !ppsc->b_swrf_processing &&
257 (mac->link_state == MAC80211_NOLINK) &&
258 !mac->act_scanning) {
259 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
260 ("IPSEnter(): Turn off RF.\n"));
261
262 ppsc->inactive_pwrstate = ERFOFF;
263 ppsc->b_in_powersavemode = true;
264
265 /*rtl_pci_reset_trx_ring(hw); */
266 _rtl_ps_inactive_ps(hw);
267 }
268 }
269}
270
271void rtl_ips_nic_off(struct ieee80211_hw *hw)
272{
273 struct rtl_priv *rtlpriv = rtl_priv(hw);
274
275 /*
276	 *When associating with an AP, mac80211 may ask us to
277	 *disable the NIC right after scanning, before the link is up;
278	 *doing so immediately can make the link fail, so delay 100 ms here.
279 */
280 queue_delayed_work(rtlpriv->works.rtl_wq,
281 &rtlpriv->works.ips_nic_off_wq, MSECS(100));
282}
283
284void rtl_ips_nic_on(struct ieee80211_hw *hw)
285{
286 struct rtl_priv *rtlpriv = rtl_priv(hw);
287 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
288 enum rf_pwrstate rtstate;
289 unsigned long flags;
290
291 spin_lock_irqsave(&rtlpriv->locks.ips_lock, flags);
292
293 if (ppsc->b_inactiveps) {
294 rtstate = ppsc->rfpwr_state;
295
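		/*
		 *Turn the RF back on only if it is currently off and no
		 *off-reason with higher priority than IPS is still set
		 *(ppsc->rfoff_reason <= RF_CHANGE_BY_IPS).
		 */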
296 if (rtstate != ERFON &&
297 !ppsc->b_swrf_processing &&
298 ppsc->rfoff_reason <= RF_CHANGE_BY_IPS) {
299
300 ppsc->inactive_pwrstate = ERFON;
301 ppsc->b_in_powersavemode = false;
302
303 _rtl_ps_inactive_ps(hw);
304 }
305 }
306
307 spin_unlock_irqrestore(&rtlpriv->locks.ips_lock, flags);
308}
309
310/*for FW LPS*/
311
312/*
313 *Determine whether the firmware can be put into PS mode
314 *under the current conditions. Return true if it
315 *can enter PS mode.
316 */
317static bool rtl_get_fwlps_doze(struct ieee80211_hw *hw)
318{
319 struct rtl_priv *rtlpriv = rtl_priv(hw);
320 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
321 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
322 u32 ps_timediff;
323
324 ps_timediff = jiffies_to_msecs(jiffies -
325 ppsc->last_delaylps_stamp_jiffies);
326
327 if (ps_timediff < 2000) {
328 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
329			 ("Delay entering Fw LPS while a DHCP, ARP,"
330			  " or EAPOL exchange is in progress.\n"));
331 return false;
332 }
333
334 if (mac->link_state != MAC80211_LINKED)
335 return false;
336
337 if (mac->opmode == NL80211_IFTYPE_ADHOC)
338 return false;
339
340 return true;
341}
342
343/* Change the current power save mode. */
344static void rtl_lps_set_psmode(struct ieee80211_hw *hw, u8 rt_psmode)
345{
346 struct rtl_priv *rtlpriv = rtl_priv(hw);
347 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
348 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
349 u8 rpwm_val, fw_pwrmode;
350
351 if (mac->opmode == NL80211_IFTYPE_ADHOC)
352 return;
353
354 if (mac->link_state != MAC80211_LINKED)
355 return;
356
357 if (ppsc->dot11_psmode == rt_psmode)
358 return;
359
360	/* Update the configured power save mode. */
361 ppsc->dot11_psmode = rt_psmode;
362
363 /*
364 *<FW control LPS>
365 *1. Enter PS mode
366 * Set RPWM to Fw to turn RF off and send H2C fw_pwrmode
367 * cmd to set Fw into PS mode.
368 *2. Leave PS mode
369 * Send H2C fw_pwrmode cmd to Fw to set Fw into Active
370 * mode and set RPWM to turn RF on.
371 */
372
373 if ((ppsc->b_fwctrl_lps) && (ppsc->b_leisure_ps) &&
374 ppsc->report_linked) {
375 bool b_fw_current_inps;
376 if (ppsc->dot11_psmode == EACTIVE) {
377 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
378 ("FW LPS leave ps_mode:%x\n",
379 FW_PS_ACTIVE_MODE));
380
381 rpwm_val = 0x0C; /* RF on */
382 fw_pwrmode = FW_PS_ACTIVE_MODE;
383 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SET_RPWM,
384 (u8 *) (&rpwm_val));
385 rtlpriv->cfg->ops->set_hw_reg(hw,
386 HW_VAR_H2C_FW_PWRMODE,
387 (u8 *) (&fw_pwrmode));
388 b_fw_current_inps = false;
389
390 rtlpriv->cfg->ops->set_hw_reg(hw,
391 HW_VAR_FW_PSMODE_STATUS,
392 (u8 *) (&b_fw_current_inps));
393
394 } else {
395 if (rtl_get_fwlps_doze(hw)) {
396 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
397 ("FW LPS enter ps_mode:%x\n",
398 ppsc->fwctrl_psmode));
399
400 rpwm_val = 0x02; /* RF off */
401 b_fw_current_inps = true;
402 rtlpriv->cfg->ops->set_hw_reg(hw,
403 HW_VAR_FW_PSMODE_STATUS,
404 (u8 *) (&b_fw_current_inps));
405 rtlpriv->cfg->ops->set_hw_reg(hw,
406 HW_VAR_H2C_FW_PWRMODE,
407 (u8 *) (&ppsc->fwctrl_psmode));
408
409 rtlpriv->cfg->ops->set_hw_reg(hw,
410 HW_VAR_SET_RPWM,
411 (u8 *) (&rpwm_val));
412 } else {
413 /* Reset the power save related parameters. */
414 ppsc->dot11_psmode = EACTIVE;
415 }
416 }
417 }
418}
419
420/*Enter the leisure power save mode.*/
421void rtl_lps_enter(struct ieee80211_hw *hw)
422{
423 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
424 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
425 struct rtl_priv *rtlpriv = rtl_priv(hw);
426 unsigned long flag;
427
428 if (!(ppsc->b_fwctrl_lps && ppsc->b_leisure_ps))
429 return;
430
431 if (rtlpriv->sec.being_setkey)
432 return;
433
434 if (rtlpriv->link_info.b_busytraffic)
435 return;
436
437	/* Sleep only after being linked for a while (about 10 s), so DHCP and the 4-way handshake have time to complete. */
438 if (mac->cnt_after_linked < 5)
439 return;
440
441 if (mac->opmode == NL80211_IFTYPE_ADHOC)
442 return;
443
444 if (mac->link_state != MAC80211_LINKED)
445 return;
446
447 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
448
449 if (ppsc->b_leisure_ps) {
450		/* Enter power save only if we connected to the AP a while ago. */
451 if (mac->cnt_after_linked >= 2) {
452 if (ppsc->dot11_psmode == EACTIVE) {
453 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
454 ("Enter 802.11 power save mode...\n"));
455
456 rtl_lps_set_psmode(hw, EAUTOPS);
457 }
458 }
459 }
460 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
461}
462
463/*Leave the leisure power save mode.*/
464void rtl_lps_leave(struct ieee80211_hw *hw)
465{
466 struct rtl_priv *rtlpriv = rtl_priv(hw);
467 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
468 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
469 unsigned long flag;
470
471 spin_lock_irqsave(&rtlpriv->locks.lps_lock, flag);
472
473 if (ppsc->b_fwctrl_lps && ppsc->b_leisure_ps) {
474 if (ppsc->dot11_psmode != EACTIVE) {
475
476 /*FIX ME */
477 rtlpriv->cfg->ops->enable_interrupt(hw);
478
479 if (ppsc->reg_rfps_level & RT_RF_LPS_LEVEL_ASPM &&
480 RT_IN_PS_LEVEL(ppsc, RT_RF_LPS_LEVEL_ASPM) &&
481 rtlhal->interface == INTF_PCI) {
482 rtlpriv->intf_ops->disable_aspm(hw);
483 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_LPS_LEVEL_ASPM);
484 }
485
486 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
487				 ("Busy traffic, leave 802.11 power save.\n"));
488
489 rtl_lps_set_psmode(hw, EACTIVE);
490 }
491 }
492 spin_unlock_irqrestore(&rtlpriv->locks.lps_lock, flag);
493}
diff --git a/drivers/net/wireless/rtlwifi/ps.h b/drivers/net/wireless/rtlwifi/ps.h
new file mode 100644
index 000000000000..ae56da801a23
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/ps.h
@@ -0,0 +1,43 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __REALTEK_RTL_PCI_PS_H__
31#define __REALTEK_RTL_PCI_PS_H__
32
33bool rtl_ps_set_rf_state(struct ieee80211_hw *hw,
34 enum rf_pwrstate state_toset, u32 changesource,
35 bool protect_or_not);
36bool rtl_ps_enable_nic(struct ieee80211_hw *hw);
37bool rtl_ps_disable_nic(struct ieee80211_hw *hw);
38void rtl_ips_nic_off(struct ieee80211_hw *hw);
39void rtl_ips_nic_on(struct ieee80211_hw *hw);
40void rtl_ips_nic_off_wq_callback(void *data);
41void rtl_lps_enter(struct ieee80211_hw *hw);
42void rtl_lps_leave(struct ieee80211_hw *hw);
43#endif
diff --git a/drivers/net/wireless/rtlwifi/rc.c b/drivers/net/wireless/rtlwifi/rc.c
new file mode 100644
index 000000000000..91634107434a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rc.c
@@ -0,0 +1,329 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "wifi.h"
31#include "base.h"
32#include "rc.h"
33
34/*
35 *Find the highest rate index we can use.
36 *If the skb carries special data such as DHCP/EAPOL, we should
37 *set it to the lowest rate, CCK_1M; otherwise we set the rate to
38 *CCK_11M or OFDM_54M based on the wireless mode.
39 */
40static u8 _rtl_rc_get_highest_rix(struct rtl_priv *rtlpriv,
41 struct sk_buff *skb, bool not_data)
42{
43 struct rtl_mac *rtlmac = rtl_mac(rtlpriv);
44
45 /*
46	 *Management frames use 1M. Although this was already
47	 *checked via rate_control_send_low() before this function,
48	 *we check it again here.
49 */
50 if (not_data)
51 return rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
52
53 /*
54	 *This is not the rate actually used on the air; the firmware
55	 *controls the real rate. It is only used for:
56	 *1. the rate shown by iwconfig in B/G mode;
57	 *2. rtl_get_tcb_desc(): when the rate here is 1M we use
58	 * the user rate instead of the FW rate.
59 */
60 if (rtl_is_special_data(rtlpriv->mac80211.hw, skb, true)) {
61 return rtlpriv->cfg->maps[RTL_RC_CCK_RATE1M];
62 } else {
63 if (rtlmac->mode == WIRELESS_MODE_B)
64 return rtlpriv->cfg->maps[RTL_RC_CCK_RATE11M];
65 else
66 return rtlpriv->cfg->maps[RTL_RC_OFDM_RATE54M];
67 }
68}
69
70static void _rtl_rc_rate_set_series(struct rtl_priv *rtlpriv,
71 struct ieee80211_tx_rate *rate,
72 struct ieee80211_tx_rate_control *txrc,
73 u8 tries, u8 rix, int rtsctsenable,
74 bool not_data)
75{
76 struct rtl_mac *mac = rtl_mac(rtlpriv);
77
78 rate->count = tries;
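	/*
	 *Clamp the reported rate index to a minimum of 2 so the
	 *fallback indices computed in rtl_get_rate() never go negative.
	 */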
79 rate->idx = (rix > 0x2) ? rix : 0x2;
80
81 if (!not_data) {
82 if (txrc->short_preamble)
83 rate->flags |= IEEE80211_TX_RC_USE_SHORT_PREAMBLE;
84 if (mac->bw_40)
85 rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
86 if (mac->sgi_20 || mac->sgi_40)
87 rate->flags |= IEEE80211_TX_RC_SHORT_GI;
88 if (mac->ht_enable)
89 rate->flags |= IEEE80211_TX_RC_MCS;
90 }
91}
92
93static void rtl_get_rate(void *ppriv, struct ieee80211_sta *sta,
94 void *priv_sta, struct ieee80211_tx_rate_control *txrc)
95{
96 struct rtl_priv *rtlpriv = ppriv;
97 struct sk_buff *skb = txrc->skb;
98 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
99 struct ieee80211_tx_rate *rates = tx_info->control.rates;
100 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
101 __le16 fc = hdr->frame_control;
102 u8 try_per_rate, i, rix;
103 bool not_data = !ieee80211_is_data(fc);
104
105 if (rate_control_send_low(sta, priv_sta, txrc))
106 return;
107
108 rix = _rtl_rc_get_highest_rix(rtlpriv, skb, not_data);
109
110 try_per_rate = 1;
111 _rtl_rc_rate_set_series(rtlpriv, &rates[0], txrc,
112 try_per_rate, rix, 1, not_data);
113
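	/*
	 *For data frames, fill the remaining retry series with
	 *progressively lower rate indices as fallbacks.
	 */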
114 if (!not_data) {
115 for (i = 1; i < 4; i++)
116 _rtl_rc_rate_set_series(rtlpriv, &rates[i],
117 txrc, i, (rix - i), 1,
118 not_data);
119 }
120}
121
122static bool _rtl_tx_aggr_check(struct rtl_priv *rtlpriv, u16 tid)
123{
124 struct rtl_mac *mac = rtl_mac(rtlpriv);
125
126 if (mac->act_scanning)
127 return false;
128
129 if (mac->cnt_after_linked < 3)
130 return false;
131
132 if (mac->tids[tid].agg.agg_state == RTL_AGG_OFF)
133 return true;
134
135 return false;
136}
137
138/*mac80211 Rate Control callbacks*/
139static void rtl_tx_status(void *ppriv,
140 struct ieee80211_supported_band *sband,
141 struct ieee80211_sta *sta, void *priv_sta,
142 struct sk_buff *skb)
143{
144 struct rtl_priv *rtlpriv = ppriv;
145 struct rtl_mac *mac = rtl_mac(rtlpriv);
146 struct ieee80211_hdr *hdr;
147 __le16 fc;
148
149 hdr = (struct ieee80211_hdr *)skb->data;
150 fc = hdr->frame_control;
151
152 if (!priv_sta || !ieee80211_is_data(fc))
153 return;
154
155 if (rtl_is_special_data(mac->hw, skb, true))
156 return;
157
158 if (is_multicast_ether_addr(ieee80211_get_DA(hdr))
159 || is_broadcast_ether_addr(ieee80211_get_DA(hdr)))
160 return;
161
162 /* Check if aggregation has to be enabled for this tid */
163 if (conf_is_ht(&mac->hw->conf) &&
164 !(skb->protocol == cpu_to_be16(ETH_P_PAE))) {
165 if (ieee80211_is_data_qos(fc)) {
166 u8 *qc, tid;
167
168 qc = ieee80211_get_qos_ctl(hdr);
169 tid = qc[0] & 0xf;
170
171 if (_rtl_tx_aggr_check(rtlpriv, tid))
172 ieee80211_start_tx_ba_session(sta, tid, 5000);
173 }
174 }
175}
176
177static void rtl_rate_init(void *ppriv,
178 struct ieee80211_supported_band *sband,
179 struct ieee80211_sta *sta, void *priv_sta)
180{
181 struct rtl_priv *rtlpriv = ppriv;
182 struct rtl_mac *mac = rtl_mac(rtlpriv);
183 u8 is_ht = conf_is_ht(&mac->hw->conf);
184
185 if ((mac->opmode == NL80211_IFTYPE_STATION) ||
186 (mac->opmode == NL80211_IFTYPE_MESH_POINT) ||
187 (mac->opmode == NL80211_IFTYPE_ADHOC)) {
188
189 switch (sband->band) {
190 case IEEE80211_BAND_2GHZ:
191 rtlpriv->rate_priv->cur_ratetab_idx =
192 RATR_INX_WIRELESS_G;
193 if (is_ht)
194 rtlpriv->rate_priv->cur_ratetab_idx =
195 RATR_INX_WIRELESS_NGB;
196 break;
197 case IEEE80211_BAND_5GHZ:
198 rtlpriv->rate_priv->cur_ratetab_idx =
199 RATR_INX_WIRELESS_A;
200 if (is_ht)
201 rtlpriv->rate_priv->cur_ratetab_idx =
202 RATR_INX_WIRELESS_NGB;
203 break;
204 default:
205 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
206 ("Invalid band\n"));
207 rtlpriv->rate_priv->cur_ratetab_idx =
208 RATR_INX_WIRELESS_NGB;
209 break;
210 }
211
212 RT_TRACE(rtlpriv, COMP_RATE, DBG_DMESG,
213 ("Choosing rate table index: %d\n",
214 rtlpriv->rate_priv->cur_ratetab_idx));
215
216 }
217
218}
219
220static void rtl_rate_update(void *ppriv,
221 struct ieee80211_supported_band *sband,
222 struct ieee80211_sta *sta, void *priv_sta,
223 u32 changed,
224 enum nl80211_channel_type oper_chan_type)
225{
226 struct rtl_priv *rtlpriv = ppriv;
227 struct rtl_mac *mac = rtl_mac(rtlpriv);
228 struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
229 bool oper_cw40 = false, oper_sgi40;
230 bool local_cw40 = mac->bw_40;
231 bool local_sgi40 = mac->sgi_40;
232 u8 is_ht = conf_is_ht(&mac->hw->conf);
233
234 if (changed & IEEE80211_RC_HT_CHANGED) {
235 if (mac->opmode != NL80211_IFTYPE_STATION)
236 return;
237
238 if (rtlhal->hw->conf.channel_type == NL80211_CHAN_HT40MINUS ||
239 rtlhal->hw->conf.channel_type == NL80211_CHAN_HT40PLUS)
240 oper_cw40 = true;
241
242 oper_sgi40 = mac->sgi_40;
243
244 if ((local_cw40 != oper_cw40) || (local_sgi40 != oper_sgi40)) {
245 switch (sband->band) {
246 case IEEE80211_BAND_2GHZ:
247 rtlpriv->rate_priv->cur_ratetab_idx =
248 RATR_INX_WIRELESS_G;
249 if (is_ht)
250 rtlpriv->rate_priv->cur_ratetab_idx =
251 RATR_INX_WIRELESS_NGB;
252 break;
253 case IEEE80211_BAND_5GHZ:
254 rtlpriv->rate_priv->cur_ratetab_idx =
255 RATR_INX_WIRELESS_A;
256 if (is_ht)
257 rtlpriv->rate_priv->cur_ratetab_idx =
258 RATR_INX_WIRELESS_NGB;
259 break;
260 default:
261 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
262 ("Invalid band\n"));
263 rtlpriv->rate_priv->cur_ratetab_idx =
264 RATR_INX_WIRELESS_NGB;
265 break;
266 }
267 }
268 }
269}
270
271static void *rtl_rate_alloc(struct ieee80211_hw *hw,
272 struct dentry *debugfsdir)
273{
274 struct rtl_priv *rtlpriv = rtl_priv(hw);
275 return rtlpriv;
276}
277
278static void rtl_rate_free(void *rtlpriv)
279{
280 return;
281}
282
283static void *rtl_rate_alloc_sta(void *ppriv,
284 struct ieee80211_sta *sta, gfp_t gfp)
285{
286 struct rtl_priv *rtlpriv = ppriv;
287 struct rtl_rate_priv *rate_priv;
288
289 rate_priv = kzalloc(sizeof(struct rtl_rate_priv), gfp);
290 if (!rate_priv) {
291 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
292 ("Unable to allocate private rc structure\n"));
293 return NULL;
294 }
295
296 rtlpriv->rate_priv = rate_priv;
297
298 return rate_priv;
299}
300
301static void rtl_rate_free_sta(void *rtlpriv,
302 struct ieee80211_sta *sta, void *priv_sta)
303{
304 struct rtl_rate_priv *rate_priv = priv_sta;
305 kfree(rate_priv);
306}
307
308static struct rate_control_ops rtl_rate_ops = {
309 .module = NULL,
310 .name = "rtl_rc",
311 .alloc = rtl_rate_alloc,
312 .free = rtl_rate_free,
313 .alloc_sta = rtl_rate_alloc_sta,
314 .free_sta = rtl_rate_free_sta,
315 .rate_init = rtl_rate_init,
316 .rate_update = rtl_rate_update,
317 .tx_status = rtl_tx_status,
318 .get_rate = rtl_get_rate,
319};
320
321int rtl_rate_control_register(void)
322{
323 return ieee80211_rate_control_register(&rtl_rate_ops);
324}
325
326void rtl_rate_control_unregister(void)
327{
328 ieee80211_rate_control_unregister(&rtl_rate_ops);
329}
diff --git a/drivers/net/wireless/rtlwifi/rc.h b/drivers/net/wireless/rtlwifi/rc.h
new file mode 100644
index 000000000000..b4667c035f0b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rc.h
@@ -0,0 +1,40 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL_RC_H__
31#define __RTL_RC_H__
32
33struct rtl_rate_priv {
34 u8 cur_ratetab_idx;
35 u8 ht_cap;
36};
37
38int rtl_rate_control_register(void);
39void rtl_rate_control_unregister(void);
40#endif
diff --git a/drivers/net/wireless/rtlwifi/regd.c b/drivers/net/wireless/rtlwifi/regd.c
new file mode 100644
index 000000000000..3336ca999dfd
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/regd.c
@@ -0,0 +1,400 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "wifi.h"
31#include "regd.h"
32
33static struct country_code_to_enum_rd allCountries[] = {
34 {COUNTRY_CODE_FCC, "US"},
35 {COUNTRY_CODE_IC, "US"},
36 {COUNTRY_CODE_ETSI, "EC"},
37 {COUNTRY_CODE_SPAIN, "EC"},
38 {COUNTRY_CODE_FRANCE, "EC"},
39 {COUNTRY_CODE_MKK, "JP"},
40 {COUNTRY_CODE_MKK1, "JP"},
41 {COUNTRY_CODE_ISRAEL, "EC"},
42 {COUNTRY_CODE_TELEC, "JP"},
43 {COUNTRY_CODE_MIC, "JP"},
44 {COUNTRY_CODE_GLOBAL_DOMAIN, "JP"},
45 {COUNTRY_CODE_WORLD_WIDE_13, "EC"},
46 {COUNTRY_CODE_TELEC_NETGEAR, "EC"},
47};
48
49/*
50 *Only these channels allow active
51 *scan on all world regulatory domains
52 */
53#define RTL819x_2GHZ_CH01_11 \
54 REG_RULE(2412-10, 2462+10, 40, 0, 20, 0)
55
56/*
57 *We enable active scan on these channels on a
58 *case-by-case basis, depending on the regulatory domain
59 */
60#define RTL819x_2GHZ_CH12_13 \
61 REG_RULE(2467-10, 2472+10, 40, 0, 20,\
62 NL80211_RRF_PASSIVE_SCAN)
63
64#define RTL819x_2GHZ_CH14 \
65 REG_RULE(2484-10, 2484+10, 40, 0, 20, \
66 NL80211_RRF_PASSIVE_SCAN | \
67 NL80211_RRF_NO_OFDM)
68
69static const struct ieee80211_regdomain rtl_regdom_11 = {
70 .n_reg_rules = 1,
71 .alpha2 = "99",
72 .reg_rules = {
73 RTL819x_2GHZ_CH01_11,
74 }
75};
76
77static const struct ieee80211_regdomain rtl_regdom_global = {
78 .n_reg_rules = 3,
79 .alpha2 = "99",
80 .reg_rules = {
81 RTL819x_2GHZ_CH01_11,
82 RTL819x_2GHZ_CH12_13,
83 RTL819x_2GHZ_CH14,
84 }
85};
86
87static const struct ieee80211_regdomain rtl_regdom_world = {
88 .n_reg_rules = 2,
89 .alpha2 = "99",
90 .reg_rules = {
91 RTL819x_2GHZ_CH01_11,
92 RTL819x_2GHZ_CH12_13,
93 }
94};
95
96static bool _rtl_is_radar_freq(u16 center_freq)
97{
98 return (center_freq >= 5260 && center_freq <= 5700);
99}
100
101static void _rtl_reg_apply_beaconing_flags(struct wiphy *wiphy,
102 enum nl80211_reg_initiator initiator)
103{
104 enum ieee80211_band band;
105 struct ieee80211_supported_band *sband;
106 const struct ieee80211_reg_rule *reg_rule;
107 struct ieee80211_channel *ch;
108 unsigned int i;
109 u32 bandwidth = 0;
110 int r;
111
112 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
113
114 if (!wiphy->bands[band])
115 continue;
116
117 sband = wiphy->bands[band];
118
119 for (i = 0; i < sband->n_channels; i++) {
120 ch = &sband->channels[i];
121 if (_rtl_is_radar_freq(ch->center_freq) ||
122 (ch->flags & IEEE80211_CHAN_RADAR))
123 continue;
124 if (initiator == NL80211_REGDOM_SET_BY_COUNTRY_IE) {
125 r = freq_reg_info(wiphy, ch->center_freq,
126 bandwidth, &reg_rule);
127 if (r)
128 continue;
129
130 /*
131 *If 11d had a rule for this channel ensure
132 *we enable adhoc/beaconing if it allows us to
133 *use it. Note that we would have disabled it
134 *by applying our static world regdomain by
135 *default during init, prior to calling our
136 *regulatory_hint().
137 */
138
139 if (!(reg_rule->flags & NL80211_RRF_NO_IBSS))
140 ch->flags &= ~IEEE80211_CHAN_NO_IBSS;
141 if (!(reg_rule->
142 flags & NL80211_RRF_PASSIVE_SCAN))
143 ch->flags &=
144 ~IEEE80211_CHAN_PASSIVE_SCAN;
145 } else {
146 if (ch->beacon_found)
147 ch->flags &= ~(IEEE80211_CHAN_NO_IBSS |
148 IEEE80211_CHAN_PASSIVE_SCAN);
149 }
150 }
151 }
152}
153
154/* Allow active scan on channels 12 and 13 */
155static void _rtl_reg_apply_active_scan_flags(struct wiphy *wiphy,
156 enum nl80211_reg_initiator
157 initiator)
158{
159 struct ieee80211_supported_band *sband;
160 struct ieee80211_channel *ch;
161 const struct ieee80211_reg_rule *reg_rule;
162 u32 bandwidth = 0;
163 int r;
164
165 sband = wiphy->bands[IEEE80211_BAND_2GHZ];
166
167 /*
168	 *If no country IE has been received, always enable active scan
169	 *on these channels. This is only done for specific regulatory SKUs.
170 */
171 if (initiator != NL80211_REGDOM_SET_BY_COUNTRY_IE) {
172 ch = &sband->channels[11]; /* CH 12 */
173 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
174 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
175 ch = &sband->channels[12]; /* CH 13 */
176 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
177 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
178 return;
179 }
180
181 /*
182	 *If a country IE has been received, check its rule for this
183 *channel first before enabling active scan. The passive scan
184 *would have been enforced by the initial processing of our
185 *custom regulatory domain.
186 */
187
188 ch = &sband->channels[11]; /* CH 12 */
189 r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
190 if (!r) {
191 if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
192 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
193 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
194 }
195
196 ch = &sband->channels[12]; /* CH 13 */
197 r = freq_reg_info(wiphy, ch->center_freq, bandwidth, &reg_rule);
198 if (!r) {
199 if (!(reg_rule->flags & NL80211_RRF_PASSIVE_SCAN))
200 if (ch->flags & IEEE80211_CHAN_PASSIVE_SCAN)
201 ch->flags &= ~IEEE80211_CHAN_PASSIVE_SCAN;
202 }
203}
204
205/*
206 *Always apply Radar/DFS rules on
207 *freq range 5260 MHz - 5700 MHz
208 */
209static void _rtl_reg_apply_radar_flags(struct wiphy *wiphy)
210{
211 struct ieee80211_supported_band *sband;
212 struct ieee80211_channel *ch;
213 unsigned int i;
214
215 if (!wiphy->bands[IEEE80211_BAND_5GHZ])
216 return;
217
218 sband = wiphy->bands[IEEE80211_BAND_5GHZ];
219
220 for (i = 0; i < sband->n_channels; i++) {
221 ch = &sband->channels[i];
222 if (!_rtl_is_radar_freq(ch->center_freq))
223 continue;
224
225 /*
226		 *We always enable radar detection/DFS on this
227		 *frequency range. In addition, on this
228		 *frequency range:
229		 *- if STA mode does not yet support DFS, disable
230		 * active scanning;
231		 *- if adhoc mode does not yet support DFS, disable
232		 * adhoc on these frequencies;
233		 *- if AP mode does not yet support radar detection/DFS,
234		 *do not allow AP mode.
235 */
236 if (!(ch->flags & IEEE80211_CHAN_DISABLED))
237 ch->flags |= IEEE80211_CHAN_RADAR |
238 IEEE80211_CHAN_NO_IBSS |
239 IEEE80211_CHAN_PASSIVE_SCAN;
240 }
241}
242
243static void _rtl_reg_apply_world_flags(struct wiphy *wiphy,
244 enum nl80211_reg_initiator initiator,
245 struct rtl_regulatory *reg)
246{
247 _rtl_reg_apply_beaconing_flags(wiphy, initiator);
248 _rtl_reg_apply_active_scan_flags(wiphy, initiator);
249 return;
250}
251
252static void _rtl_dump_channel_map(struct wiphy *wiphy)
253{
254 enum ieee80211_band band;
255 struct ieee80211_supported_band *sband;
256 struct ieee80211_channel *ch;
257 unsigned int i;
258
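	/*
	 *Walk every channel on every band; this is currently only a
	 *placeholder hook and produces no output.
	 */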
259 for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
260 if (!wiphy->bands[band])
261 continue;
262 sband = wiphy->bands[band];
263 for (i = 0; i < sband->n_channels; i++)
264 ch = &sband->channels[i];
265 }
266}
267
268static int _rtl_reg_notifier_apply(struct wiphy *wiphy,
269 struct regulatory_request *request,
270 struct rtl_regulatory *reg)
271{
272 /* We always apply this */
273 _rtl_reg_apply_radar_flags(wiphy);
274
275 switch (request->initiator) {
276 case NL80211_REGDOM_SET_BY_DRIVER:
277 case NL80211_REGDOM_SET_BY_CORE:
278 case NL80211_REGDOM_SET_BY_USER:
279 break;
280 case NL80211_REGDOM_SET_BY_COUNTRY_IE:
281 _rtl_reg_apply_world_flags(wiphy, request->initiator, reg);
282 break;
283 }
284
285 _rtl_dump_channel_map(wiphy);
286
287 return 0;
288}
289
290static const struct ieee80211_regdomain *_rtl_regdomain_select(
291 struct rtl_regulatory *reg)
292{
293 switch (reg->country_code) {
294 case COUNTRY_CODE_FCC:
295 case COUNTRY_CODE_IC:
296 return &rtl_regdom_11;
297 case COUNTRY_CODE_ETSI:
298 case COUNTRY_CODE_SPAIN:
299 case COUNTRY_CODE_FRANCE:
300 case COUNTRY_CODE_ISRAEL:
301 case COUNTRY_CODE_TELEC_NETGEAR:
302 return &rtl_regdom_world;
303 case COUNTRY_CODE_MKK:
304 case COUNTRY_CODE_MKK1:
305 case COUNTRY_CODE_TELEC:
306 case COUNTRY_CODE_MIC:
307 return &rtl_regdom_global;
308 case COUNTRY_CODE_GLOBAL_DOMAIN:
309 return &rtl_regdom_global;
310 case COUNTRY_CODE_WORLD_WIDE_13:
311 return &rtl_regdom_world;
312 default:
313 return &rtl_regdom_world;
314 }
315}
316
317static int _rtl_regd_init_wiphy(struct rtl_regulatory *reg,
318 struct wiphy *wiphy,
319 int (*reg_notifier) (struct wiphy *wiphy,
320 struct regulatory_request *
321 request))
322{
323 const struct ieee80211_regdomain *regd;
324
325 wiphy->reg_notifier = reg_notifier;
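	/*
	 *Apply our own custom regulatory domain, do not enforce strict
	 *regulatory, and keep beacon hints enabled.
	 */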
326 wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY;
327 wiphy->flags &= ~WIPHY_FLAG_STRICT_REGULATORY;
328 wiphy->flags &= ~WIPHY_FLAG_DISABLE_BEACON_HINTS;
329 regd = _rtl_regdomain_select(reg);
330 wiphy_apply_custom_regulatory(wiphy, regd);
331 _rtl_reg_apply_radar_flags(wiphy);
332 _rtl_reg_apply_world_flags(wiphy, NL80211_REGDOM_SET_BY_DRIVER, reg);
333 return 0;
334}
335
336static struct country_code_to_enum_rd *_rtl_regd_find_country(u16 countrycode)
337{
338 int i;
339
340 for (i = 0; i < ARRAY_SIZE(allCountries); i++) {
341 if (allCountries[i].countrycode == countrycode)
342 return &allCountries[i];
343 }
344 return NULL;
345}
346
347int rtl_regd_init(struct ieee80211_hw *hw,
348 int (*reg_notifier) (struct wiphy *wiphy,
349 struct regulatory_request *request))
350{
351 struct rtl_priv *rtlpriv = rtl_priv(hw);
352 struct wiphy *wiphy = hw->wiphy;
353 struct country_code_to_enum_rd *country = NULL;
354
355	if (wiphy == NULL)
356 return -EINVAL;
357
358 /* force the channel plan to world wide 13 */
359 rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
360
361 RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
362 (KERN_DEBUG "rtl: EEPROM regdomain: 0x%0x\n",
363 rtlpriv->regd.country_code));
364
365 if (rtlpriv->regd.country_code >= COUNTRY_CODE_MAX) {
366 RT_TRACE(rtlpriv, COMP_REGD, DBG_DMESG,
367			 (KERN_DEBUG "rtl: EEPROM indicates invalid country code; "
368			  "world wide 13 should be used\n"));
369
370 rtlpriv->regd.country_code = COUNTRY_CODE_WORLD_WIDE_13;
371 }
372
373 country = _rtl_regd_find_country(rtlpriv->regd.country_code);
374
375 if (country) {
376 rtlpriv->regd.alpha2[0] = country->isoName[0];
377 rtlpriv->regd.alpha2[1] = country->isoName[1];
378 } else {
379 rtlpriv->regd.alpha2[0] = '0';
380 rtlpriv->regd.alpha2[1] = '0';
381 }
382
383 RT_TRACE(rtlpriv, COMP_REGD, DBG_TRACE,
384 (KERN_DEBUG "rtl: Country alpha2 being used: %c%c\n",
385 rtlpriv->regd.alpha2[0], rtlpriv->regd.alpha2[1]));
386
387 _rtl_regd_init_wiphy(&rtlpriv->regd, wiphy, reg_notifier);
388
389 return 0;
390}
391
392int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
393{
394 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
395 struct rtl_priv *rtlpriv = rtl_priv(hw);
396
397 RT_TRACE(rtlpriv, COMP_REGD, DBG_LOUD, ("\n"));
398
399 return _rtl_reg_notifier_apply(wiphy, request, &rtlpriv->regd);
400}
diff --git a/drivers/net/wireless/rtlwifi/regd.h b/drivers/net/wireless/rtlwifi/regd.h
new file mode 100644
index 000000000000..4cdbc4ae76d4
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/regd.h
@@ -0,0 +1,61 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL_REGD_H__
31#define __RTL_REGD_H__
32
33struct country_code_to_enum_rd {
34 u16 countrycode;
35 const char *isoName;
36};
37
38enum country_code_type_t {
39 COUNTRY_CODE_FCC = 0,
40 COUNTRY_CODE_IC = 1,
41 COUNTRY_CODE_ETSI = 2,
42 COUNTRY_CODE_SPAIN = 3,
43 COUNTRY_CODE_FRANCE = 4,
44 COUNTRY_CODE_MKK = 5,
45 COUNTRY_CODE_MKK1 = 6,
46 COUNTRY_CODE_ISRAEL = 7,
47 COUNTRY_CODE_TELEC = 8,
48 COUNTRY_CODE_MIC = 9,
49 COUNTRY_CODE_GLOBAL_DOMAIN = 10,
50 COUNTRY_CODE_WORLD_WIDE_13 = 11,
51 COUNTRY_CODE_TELEC_NETGEAR = 12,
52
53 /*add new channel plan above this line */
54 COUNTRY_CODE_MAX
55};
56
57int rtl_regd_init(struct ieee80211_hw *hw,
58 int (*reg_notifier) (struct wiphy *wiphy,
59 struct regulatory_request *request));
60int rtl_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request);
61#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
new file mode 100644
index 000000000000..0f0be7c763b8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/Makefile
@@ -0,0 +1,12 @@
1rtl8192ce-objs := \
2 dm.o \
3 fw.o \
4 hw.o \
5 led.o \
6 phy.o \
7 rf.o \
8 sw.o \
9 table.o \
10 trx.o
11
12obj-$(CONFIG_RTL8192CE) += rtl8192ce.o
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
new file mode 100644
index 000000000000..83cd64895292
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -0,0 +1,257 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_DEF_H__
31#define __RTL92C_DEF_H__
32
33#define HAL_RETRY_LIMIT_INFRA 48
34#define HAL_RETRY_LIMIT_AP_ADHOC 7
35
36#define PHY_RSSI_SLID_WIN_MAX 100
37#define PHY_LINKQUALITY_SLID_WIN_MAX 20
38#define PHY_BEACON_RSSI_SLID_WIN_MAX 10
39
40#define RESET_DELAY_8185 20
41
42#define RT_IBSS_INT_MASKS (IMR_BCNINT | IMR_TBDOK | IMR_TBDER)
43#define RT_AC_INT_MASKS (IMR_VIDOK | IMR_VODOK | IMR_BEDOK|IMR_BKDOK)
44
45#define NUM_OF_FIRMWARE_QUEUE 10
46#define NUM_OF_PAGES_IN_FW 0x100
47#define NUM_OF_PAGE_IN_FW_QUEUE_BK 0x07
48#define NUM_OF_PAGE_IN_FW_QUEUE_BE 0x07
49#define NUM_OF_PAGE_IN_FW_QUEUE_VI 0x07
50#define NUM_OF_PAGE_IN_FW_QUEUE_VO 0x07
51#define NUM_OF_PAGE_IN_FW_QUEUE_HCCA 0x0
52#define NUM_OF_PAGE_IN_FW_QUEUE_CMD 0x0
53#define NUM_OF_PAGE_IN_FW_QUEUE_MGNT 0x02
54#define NUM_OF_PAGE_IN_FW_QUEUE_HIGH 0x02
55#define NUM_OF_PAGE_IN_FW_QUEUE_BCN 0x2
56#define NUM_OF_PAGE_IN_FW_QUEUE_PUB 0xA1
57
58#define NUM_OF_PAGE_IN_FW_QUEUE_BK_DTM 0x026
59#define NUM_OF_PAGE_IN_FW_QUEUE_BE_DTM 0x048
60#define NUM_OF_PAGE_IN_FW_QUEUE_VI_DTM 0x048
61#define NUM_OF_PAGE_IN_FW_QUEUE_VO_DTM 0x026
62#define NUM_OF_PAGE_IN_FW_QUEUE_PUB_DTM 0x00
63
64#define MAX_LINES_HWCONFIG_TXT 1000
65#define MAX_BYTES_LINE_HWCONFIG_TXT 256
66
67#define SW_THREE_WIRE 0
68#define HW_THREE_WIRE 2
69
70#define BT_DEMO_BOARD 0
71#define BT_QA_BOARD 1
72#define BT_FPGA 2
73
74#define RX_SMOOTH_FACTOR 20
75
76#define HAL_PRIME_CHNL_OFFSET_DONT_CARE 0
77#define HAL_PRIME_CHNL_OFFSET_LOWER 1
78#define HAL_PRIME_CHNL_OFFSET_UPPER 2
79
80#define MAX_H2C_QUEUE_NUM 10
81
82#define RX_MPDU_QUEUE 0
83#define RX_CMD_QUEUE 1
84#define RX_MAX_QUEUE 2
85#define AC2QUEUEID(_AC) (_AC)
86
87#define C2H_RX_CMD_HDR_LEN 8
88#define GET_C2H_CMD_CMD_LEN(__prxhdr) \
89 LE_BITS_TO_4BYTE((__prxhdr), 0, 16)
90#define GET_C2H_CMD_ELEMENT_ID(__prxhdr) \
91 LE_BITS_TO_4BYTE((__prxhdr), 16, 8)
92#define GET_C2H_CMD_CMD_SEQ(__prxhdr) \
93 LE_BITS_TO_4BYTE((__prxhdr), 24, 7)
94#define GET_C2H_CMD_CONTINUE(__prxhdr) \
95 LE_BITS_TO_4BYTE((__prxhdr), 31, 1)
96#define GET_C2H_CMD_CONTENT(__prxhdr) \
97 ((u8 *)(__prxhdr) + C2H_RX_CMD_HDR_LEN)
98
99#define GET_C2H_CMD_FEEDBACK_ELEMENT_ID(__pcmdfbhdr) \
100 LE_BITS_TO_4BYTE((__pcmdfbhdr), 0, 8)
101#define GET_C2H_CMD_FEEDBACK_CCX_LEN(__pcmdfbhdr) \
102 LE_BITS_TO_4BYTE((__pcmdfbhdr), 8, 8)
103#define GET_C2H_CMD_FEEDBACK_CCX_CMD_CNT(__pcmdfbhdr) \
104 LE_BITS_TO_4BYTE((__pcmdfbhdr), 16, 16)
105#define GET_C2H_CMD_FEEDBACK_CCX_MAC_ID(__pcmdfbhdr) \
106 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 0, 5)
107#define GET_C2H_CMD_FEEDBACK_CCX_VALID(__pcmdfbhdr) \
108 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 7, 1)
109#define GET_C2H_CMD_FEEDBACK_CCX_RETRY_CNT(__pcmdfbhdr) \
110 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 8, 5)
111#define GET_C2H_CMD_FEEDBACK_CCX_TOK(__pcmdfbhdr) \
112 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 15, 1)
113#define GET_C2H_CMD_FEEDBACK_CCX_QSEL(__pcmdfbhdr) \
114 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
115#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
116 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
117
118#define CHIP_VER_B BIT(4)
119#define CHIP_92C_BITMASK BIT(0)
120#define CHIP_92C_1T2R 0x03
121#define CHIP_92C 0x01
122#define CHIP_88C 0x00
123
124enum version_8192c {
125 VERSION_A_CHIP_92C = 0x01,
126 VERSION_A_CHIP_88C = 0x00,
127 VERSION_B_CHIP_92C = 0x11,
128 VERSION_B_CHIP_88C = 0x10,
129 VERSION_UNKNOWN = 0x88,
130};
131
132#define IS_CHIP_VER_B(version) ((version & CHIP_VER_B) ? true : false)
133#define IS_92C_SERIAL(version) ((version & CHIP_92C_BITMASK) ? true : false)
134
135enum rtl819x_loopback_e {
136 RTL819X_NO_LOOPBACK = 0,
137 RTL819X_MAC_LOOPBACK = 1,
138 RTL819X_DMA_LOOPBACK = 2,
139 RTL819X_CCK_LOOPBACK = 3,
140};
141
142enum rf_optype {
143 RF_OP_BY_SW_3WIRE = 0,
144 RF_OP_BY_FW,
145 RF_OP_MAX
146};
147
148enum rf_power_state {
149 RF_ON,
150 RF_OFF,
151 RF_SLEEP,
152 RF_SHUT_DOWN,
153};
154
155enum power_save_mode {
156 POWER_SAVE_MODE_ACTIVE,
157 POWER_SAVE_MODE_SAVE,
158};
159
160enum power_polocy_config {
161 POWERCFG_MAX_POWER_SAVINGS,
162 POWERCFG_GLOBAL_POWER_SAVINGS,
163 POWERCFG_LOCAL_POWER_SAVINGS,
164 POWERCFG_LENOVO,
165};
166
167enum interface_select_pci {
168 INTF_SEL1_MINICARD = 0,
169 INTF_SEL0_PCIE = 1,
170 INTF_SEL2_RSV = 2,
171 INTF_SEL3_RSV = 3,
172};
173
174enum hal_fw_c2h_cmd_id {
175 HAL_FW_C2H_CMD_Read_MACREG = 0,
176 HAL_FW_C2H_CMD_Read_BBREG = 1,
177 HAL_FW_C2H_CMD_Read_RFREG = 2,
178 HAL_FW_C2H_CMD_Read_EEPROM = 3,
179 HAL_FW_C2H_CMD_Read_EFUSE = 4,
180 HAL_FW_C2H_CMD_Read_CAM = 5,
181 HAL_FW_C2H_CMD_Get_BasicRate = 6,
182 HAL_FW_C2H_CMD_Get_DataRate = 7,
183 HAL_FW_C2H_CMD_Survey = 8,
184 HAL_FW_C2H_CMD_SurveyDone = 9,
185 HAL_FW_C2H_CMD_JoinBss = 10,
186 HAL_FW_C2H_CMD_AddSTA = 11,
187 HAL_FW_C2H_CMD_DelSTA = 12,
188 HAL_FW_C2H_CMD_AtimDone = 13,
189 HAL_FW_C2H_CMD_TX_Report = 14,
190 HAL_FW_C2H_CMD_CCX_Report = 15,
191 HAL_FW_C2H_CMD_DTM_Report = 16,
192 HAL_FW_C2H_CMD_TX_Rate_Statistics = 17,
193 HAL_FW_C2H_CMD_C2HLBK = 18,
194 HAL_FW_C2H_CMD_C2HDBG = 19,
195 HAL_FW_C2H_CMD_C2HFEEDBACK = 20,
196 HAL_FW_C2H_CMD_MAX
197};
198
199enum rtl_desc_qsel {
200 QSLT_BK = 0x2,
201 QSLT_BE = 0x0,
202 QSLT_VI = 0x5,
203 QSLT_VO = 0x7,
204 QSLT_BEACON = 0x10,
205 QSLT_HIGH = 0x11,
206 QSLT_MGNT = 0x12,
207 QSLT_CMD = 0x13,
208};
209
210enum rtl_desc92c_rate {
211 DESC92C_RATE1M = 0x00,
212 DESC92C_RATE2M = 0x01,
213 DESC92C_RATE5_5M = 0x02,
214 DESC92C_RATE11M = 0x03,
215
216 DESC92C_RATE6M = 0x04,
217 DESC92C_RATE9M = 0x05,
218 DESC92C_RATE12M = 0x06,
219 DESC92C_RATE18M = 0x07,
220 DESC92C_RATE24M = 0x08,
221 DESC92C_RATE36M = 0x09,
222 DESC92C_RATE48M = 0x0a,
223 DESC92C_RATE54M = 0x0b,
224
225 DESC92C_RATEMCS0 = 0x0c,
226 DESC92C_RATEMCS1 = 0x0d,
227 DESC92C_RATEMCS2 = 0x0e,
228 DESC92C_RATEMCS3 = 0x0f,
229 DESC92C_RATEMCS4 = 0x10,
230 DESC92C_RATEMCS5 = 0x11,
231 DESC92C_RATEMCS6 = 0x12,
232 DESC92C_RATEMCS7 = 0x13,
233 DESC92C_RATEMCS8 = 0x14,
234 DESC92C_RATEMCS9 = 0x15,
235 DESC92C_RATEMCS10 = 0x16,
236 DESC92C_RATEMCS11 = 0x17,
237 DESC92C_RATEMCS12 = 0x18,
238 DESC92C_RATEMCS13 = 0x19,
239 DESC92C_RATEMCS14 = 0x1a,
240 DESC92C_RATEMCS15 = 0x1b,
241 DESC92C_RATEMCS15_SG = 0x1c,
242 DESC92C_RATEMCS32 = 0x20,
243};
244
245struct phy_sts_cck_8192s_t {
246 u8 adc_pwdb_X[4];
247 u8 sq_rpt;
248 u8 cck_agc_rpt;
249};
250
251struct h2c_cmd_8192c {
252 u8 element_id;
253 u32 cmd_len;
254 u8 *p_cmdbuffer;
255};
256
257#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
new file mode 100644
index 000000000000..62e7c64e087b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.c
@@ -0,0 +1,1473 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../base.h"
32#include "reg.h"
33#include "def.h"
34#include "phy.h"
35#include "dm.h"
36#include "fw.h"
37
38struct dig_t dm_digtable;
39static struct ps_t dm_pstable;
40
41static const u32 ofdmswing_table[OFDM_TABLE_SIZE] = {
42 0x7f8001fe,
43 0x788001e2,
44 0x71c001c7,
45 0x6b8001ae,
46 0x65400195,
47 0x5fc0017f,
48 0x5a400169,
49 0x55400155,
50 0x50800142,
51 0x4c000130,
52 0x47c0011f,
53 0x43c0010f,
54 0x40000100,
55 0x3c8000f2,
56 0x390000e4,
57 0x35c000d7,
58 0x32c000cb,
59 0x300000c0,
60 0x2d4000b5,
61 0x2ac000ab,
62 0x288000a2,
63 0x26000098,
64 0x24000090,
65 0x22000088,
66 0x20000080,
67 0x1e400079,
68 0x1c800072,
69 0x1b00006c,
70 0x19800066,
71 0x18000060,
72 0x16c0005b,
73 0x15800056,
74 0x14400051,
75 0x1300004c,
76 0x12000048,
77 0x11000044,
78 0x10000040,
79};
80
81static const u8 cckswing_table_ch1ch13[CCK_TABLE_SIZE][8] = {
82 {0x36, 0x35, 0x2e, 0x25, 0x1c, 0x12, 0x09, 0x04},
83 {0x33, 0x32, 0x2b, 0x23, 0x1a, 0x11, 0x08, 0x04},
84 {0x30, 0x2f, 0x29, 0x21, 0x19, 0x10, 0x08, 0x03},
85 {0x2d, 0x2d, 0x27, 0x1f, 0x18, 0x0f, 0x08, 0x03},
86 {0x2b, 0x2a, 0x25, 0x1e, 0x16, 0x0e, 0x07, 0x03},
87 {0x28, 0x28, 0x22, 0x1c, 0x15, 0x0d, 0x07, 0x03},
88 {0x26, 0x25, 0x21, 0x1b, 0x14, 0x0d, 0x06, 0x03},
89 {0x24, 0x23, 0x1f, 0x19, 0x13, 0x0c, 0x06, 0x03},
90 {0x22, 0x21, 0x1d, 0x18, 0x11, 0x0b, 0x06, 0x02},
91 {0x20, 0x20, 0x1b, 0x16, 0x11, 0x08, 0x05, 0x02},
92 {0x1f, 0x1e, 0x1a, 0x15, 0x10, 0x0a, 0x05, 0x02},
93 {0x1d, 0x1c, 0x18, 0x14, 0x0f, 0x0a, 0x05, 0x02},
94 {0x1b, 0x1a, 0x17, 0x13, 0x0e, 0x09, 0x04, 0x02},
95 {0x1a, 0x19, 0x16, 0x12, 0x0d, 0x09, 0x04, 0x02},
96 {0x18, 0x17, 0x15, 0x11, 0x0c, 0x08, 0x04, 0x02},
97 {0x17, 0x16, 0x13, 0x10, 0x0c, 0x08, 0x04, 0x02},
98 {0x16, 0x15, 0x12, 0x0f, 0x0b, 0x07, 0x04, 0x01},
99 {0x14, 0x14, 0x11, 0x0e, 0x0b, 0x07, 0x03, 0x02},
100 {0x13, 0x13, 0x10, 0x0d, 0x0a, 0x06, 0x03, 0x01},
101 {0x12, 0x12, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
102 {0x11, 0x11, 0x0f, 0x0c, 0x09, 0x06, 0x03, 0x01},
103 {0x10, 0x10, 0x0e, 0x0b, 0x08, 0x05, 0x03, 0x01},
104 {0x0f, 0x0f, 0x0d, 0x0b, 0x08, 0x05, 0x03, 0x01},
105 {0x0e, 0x0e, 0x0c, 0x0a, 0x08, 0x05, 0x02, 0x01},
106 {0x0d, 0x0d, 0x0c, 0x0a, 0x07, 0x05, 0x02, 0x01},
107 {0x0d, 0x0c, 0x0b, 0x09, 0x07, 0x04, 0x02, 0x01},
108 {0x0c, 0x0c, 0x0a, 0x09, 0x06, 0x04, 0x02, 0x01},
109 {0x0b, 0x0b, 0x0a, 0x08, 0x06, 0x04, 0x02, 0x01},
110 {0x0b, 0x0a, 0x09, 0x08, 0x06, 0x04, 0x02, 0x01},
111 {0x0a, 0x0a, 0x09, 0x07, 0x05, 0x03, 0x02, 0x01},
112 {0x0a, 0x09, 0x08, 0x07, 0x05, 0x03, 0x02, 0x01},
113 {0x09, 0x09, 0x08, 0x06, 0x05, 0x03, 0x01, 0x01},
114 {0x09, 0x08, 0x07, 0x06, 0x04, 0x03, 0x01, 0x01}
115};
116
117static const u8 cckswing_table_ch14[CCK_TABLE_SIZE][8] = {
118 {0x36, 0x35, 0x2e, 0x1b, 0x00, 0x00, 0x00, 0x00},
119 {0x33, 0x32, 0x2b, 0x19, 0x00, 0x00, 0x00, 0x00},
120 {0x30, 0x2f, 0x29, 0x18, 0x00, 0x00, 0x00, 0x00},
121 {0x2d, 0x2d, 0x17, 0x17, 0x00, 0x00, 0x00, 0x00},
122 {0x2b, 0x2a, 0x25, 0x15, 0x00, 0x00, 0x00, 0x00},
123 {0x28, 0x28, 0x24, 0x14, 0x00, 0x00, 0x00, 0x00},
124 {0x26, 0x25, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00},
125 {0x24, 0x23, 0x1f, 0x12, 0x00, 0x00, 0x00, 0x00},
126 {0x22, 0x21, 0x1d, 0x11, 0x00, 0x00, 0x00, 0x00},
127 {0x20, 0x20, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x00},
128 {0x1f, 0x1e, 0x1a, 0x0f, 0x00, 0x00, 0x00, 0x00},
129 {0x1d, 0x1c, 0x18, 0x0e, 0x00, 0x00, 0x00, 0x00},
130 {0x1b, 0x1a, 0x17, 0x0e, 0x00, 0x00, 0x00, 0x00},
131 {0x1a, 0x19, 0x16, 0x0d, 0x00, 0x00, 0x00, 0x00},
132 {0x18, 0x17, 0x15, 0x0c, 0x00, 0x00, 0x00, 0x00},
133 {0x17, 0x16, 0x13, 0x0b, 0x00, 0x00, 0x00, 0x00},
134 {0x16, 0x15, 0x12, 0x0b, 0x00, 0x00, 0x00, 0x00},
135 {0x14, 0x14, 0x11, 0x0a, 0x00, 0x00, 0x00, 0x00},
136 {0x13, 0x13, 0x10, 0x0a, 0x00, 0x00, 0x00, 0x00},
137 {0x12, 0x12, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
138 {0x11, 0x11, 0x0f, 0x09, 0x00, 0x00, 0x00, 0x00},
139 {0x10, 0x10, 0x0e, 0x08, 0x00, 0x00, 0x00, 0x00},
140 {0x0f, 0x0f, 0x0d, 0x08, 0x00, 0x00, 0x00, 0x00},
141 {0x0e, 0x0e, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
142 {0x0d, 0x0d, 0x0c, 0x07, 0x00, 0x00, 0x00, 0x00},
143 {0x0d, 0x0c, 0x0b, 0x06, 0x00, 0x00, 0x00, 0x00},
144 {0x0c, 0x0c, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
145 {0x0b, 0x0b, 0x0a, 0x06, 0x00, 0x00, 0x00, 0x00},
146 {0x0b, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
147 {0x0a, 0x0a, 0x09, 0x05, 0x00, 0x00, 0x00, 0x00},
148 {0x0a, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
149 {0x09, 0x09, 0x08, 0x05, 0x00, 0x00, 0x00, 0x00},
150 {0x09, 0x08, 0x07, 0x04, 0x00, 0x00, 0x00, 0x00}
151};
152
153static void rtl92c_dm_diginit(struct ieee80211_hw *hw)
154{
155 dm_digtable.dig_enable_flag = true;
156 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
157 dm_digtable.cur_igvalue = 0x20;
158 dm_digtable.pre_igvalue = 0x0;
159 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
160 dm_digtable.presta_connectstate = DIG_STA_DISCONNECT;
161 dm_digtable.curmultista_connectstate = DIG_MULTISTA_DISCONNECT;
162 dm_digtable.rssi_lowthresh = DM_DIG_THRESH_LOW;
163 dm_digtable.rssi_highthresh = DM_DIG_THRESH_HIGH;
164 dm_digtable.fa_lowthresh = DM_FALSEALARM_THRESH_LOW;
165 dm_digtable.fa_highthresh = DM_FALSEALARM_THRESH_HIGH;
166 dm_digtable.rx_gain_range_max = DM_DIG_MAX;
167 dm_digtable.rx_gain_range_min = DM_DIG_MIN;
168 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
169 dm_digtable.backoff_val_range_max = DM_DIG_BACKOFF_MAX;
170 dm_digtable.backoff_val_range_min = DM_DIG_BACKOFF_MIN;
171 dm_digtable.pre_cck_pd_state = CCK_PD_STAGE_MAX;
172 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
173}
174
175static u8 rtl92c_dm_initial_gain_min_pwdb(struct ieee80211_hw *hw)
176{
177 struct rtl_priv *rtlpriv = rtl_priv(hw);
178 long rssi_val_min = 0;
179
180 if ((dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) &&
181 (dm_digtable.cursta_connectctate == DIG_STA_CONNECT)) {
182 if (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb != 0)
183 rssi_val_min =
184 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb >
185 rtlpriv->dm.undecorated_smoothed_pwdb) ?
186 rtlpriv->dm.undecorated_smoothed_pwdb :
187 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
188 else
189 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
190 } else if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT ||
191 dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT) {
192 rssi_val_min = rtlpriv->dm.undecorated_smoothed_pwdb;
193 } else if (dm_digtable.curmultista_connectstate ==
194 DIG_MULTISTA_CONNECT) {
195 rssi_val_min = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
196 }
197
198 return (u8) rssi_val_min;
199}
200
201static void rtl92c_dm_false_alarm_counter_statistics(struct ieee80211_hw *hw)
202{
203 u32 ret_value;
204 struct rtl_priv *rtlpriv = rtl_priv(hw);
205 struct false_alarm_statistics *falsealm_cnt = &(rtlpriv->falsealm_cnt);
206
207 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER1, MASKDWORD);
208 falsealm_cnt->cnt_parity_fail = ((ret_value & 0xffff0000) >> 16);
209
210 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER2, MASKDWORD);
211 falsealm_cnt->cnt_rate_illegal = (ret_value & 0xffff);
212 falsealm_cnt->cnt_crc8_fail = ((ret_value & 0xffff0000) >> 16);
213
214 ret_value = rtl_get_bbreg(hw, ROFDM_PHYCOUNTER3, MASKDWORD);
215 falsealm_cnt->cnt_mcs_fail = (ret_value & 0xffff);
216 falsealm_cnt->cnt_ofdm_fail = falsealm_cnt->cnt_parity_fail +
217 falsealm_cnt->cnt_rate_illegal +
218 falsealm_cnt->cnt_crc8_fail + falsealm_cnt->cnt_mcs_fail;
219
220 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, BIT(14), 1);
221 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERLOWER, MASKBYTE0);
222 falsealm_cnt->cnt_cck_fail = ret_value;
223
224 ret_value = rtl_get_bbreg(hw, RCCK0_FACOUNTERUPPER, MASKBYTE3);
225 falsealm_cnt->cnt_cck_fail += (ret_value & 0xff) << 8;
226 falsealm_cnt->cnt_all = (falsealm_cnt->cnt_parity_fail +
227 falsealm_cnt->cnt_rate_illegal +
228 falsealm_cnt->cnt_crc8_fail +
229 falsealm_cnt->cnt_mcs_fail +
230 falsealm_cnt->cnt_cck_fail);
231
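	/*
	 *Toggle the hold/reset bits so the OFDM and CCK false-alarm
	 *counters start fresh for the next measurement period.
	 */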
232 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 1);
233 rtl_set_bbreg(hw, ROFDM1_LSTF, 0x08000000, 0);
234 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 0);
235 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT, 0x0000c000, 2);
236
237 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
238 ("cnt_parity_fail = %d, cnt_rate_illegal = %d, "
239 "cnt_crc8_fail = %d, cnt_mcs_fail = %d\n",
240 falsealm_cnt->cnt_parity_fail,
241 falsealm_cnt->cnt_rate_illegal,
242 falsealm_cnt->cnt_crc8_fail, falsealm_cnt->cnt_mcs_fail));
243
244 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
245 ("cnt_ofdm_fail = %x, cnt_cck_fail = %x, cnt_all = %x\n",
246 falsealm_cnt->cnt_ofdm_fail,
247 falsealm_cnt->cnt_cck_fail, falsealm_cnt->cnt_all));
248}
249
250static void rtl92c_dm_ctrl_initgain_by_fa(struct ieee80211_hw *hw)
251{
252 struct rtl_priv *rtlpriv = rtl_priv(hw);
253 u8 value_igi = dm_digtable.cur_igvalue;
254
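	/*
	 *Step the initial gain index (IGI) according to the total
	 *false-alarm count, then clamp it to the allowed range.
	 */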
255 if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH0)
256 value_igi--;
257 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH1)
258 value_igi += 0;
259 else if (rtlpriv->falsealm_cnt.cnt_all < DM_DIG_FA_TH2)
260 value_igi++;
261 else if (rtlpriv->falsealm_cnt.cnt_all >= DM_DIG_FA_TH2)
262 value_igi += 2;
263 if (value_igi > DM_DIG_FA_UPPER)
264 value_igi = DM_DIG_FA_UPPER;
265 else if (value_igi < DM_DIG_FA_LOWER)
266 value_igi = DM_DIG_FA_LOWER;
267 if (rtlpriv->falsealm_cnt.cnt_all > 10000)
268 value_igi = 0x32;
269
270 dm_digtable.cur_igvalue = value_igi;
271 rtl92c_dm_write_dig(hw);
272}
273
274static void rtl92c_dm_ctrl_initgain_by_rssi(struct ieee80211_hw *hw)
275{
276 struct rtl_priv *rtlpriv = rtl_priv(hw);
277
278 if (rtlpriv->falsealm_cnt.cnt_all > dm_digtable.fa_highthresh) {
279 if ((dm_digtable.backoff_val - 2) <
280 dm_digtable.backoff_val_range_min)
281 dm_digtable.backoff_val =
282 dm_digtable.backoff_val_range_min;
283 else
284 dm_digtable.backoff_val -= 2;
285 } else if (rtlpriv->falsealm_cnt.cnt_all < dm_digtable.fa_lowthresh) {
286 if ((dm_digtable.backoff_val + 2) >
287 dm_digtable.backoff_val_range_max)
288 dm_digtable.backoff_val =
289 dm_digtable.backoff_val_range_max;
290 else
291 dm_digtable.backoff_val += 2;
292 }
293
294 if ((dm_digtable.rssi_val_min + 10 - dm_digtable.backoff_val) >
295 dm_digtable.rx_gain_range_max)
296 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_max;
297 else if ((dm_digtable.rssi_val_min + 10 -
298 dm_digtable.backoff_val) < dm_digtable.rx_gain_range_min)
299 dm_digtable.cur_igvalue = dm_digtable.rx_gain_range_min;
300 else
301 dm_digtable.cur_igvalue = dm_digtable.rssi_val_min + 10 -
302 dm_digtable.backoff_val;
303
304 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
305 ("rssi_val_min = %x backoff_val %x\n",
306 dm_digtable.rssi_val_min, dm_digtable.backoff_val));
307
308 rtl92c_dm_write_dig(hw);
309}
310
311static void rtl92c_dm_initial_gain_multi_sta(struct ieee80211_hw *hw)
312{
313 static u8 binitialized; /* initialized to false */
314 struct rtl_priv *rtlpriv = rtl_priv(hw);
315 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
316 long rssi_strength = rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
317 bool b_multi_sta = false;
318
319 if (mac->opmode == NL80211_IFTYPE_ADHOC)
320 b_multi_sta = true;
321
322 if ((b_multi_sta == false) || (dm_digtable.cursta_connectctate !=
323 DIG_STA_DISCONNECT)) {
324 binitialized = false;
325 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
326 return;
327 } else if (binitialized == false) {
328 binitialized = true;
329 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
330 dm_digtable.cur_igvalue = 0x20;
331 rtl92c_dm_write_dig(hw);
332 }
333
334 if (dm_digtable.curmultista_connectstate == DIG_MULTISTA_CONNECT) {
335 if ((rssi_strength < dm_digtable.rssi_lowthresh) &&
336 (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_1)) {
337
338 if (dm_digtable.dig_ext_port_stage ==
339 DIG_EXT_PORT_STAGE_2) {
340 dm_digtable.cur_igvalue = 0x20;
341 rtl92c_dm_write_dig(hw);
342 }
343
344 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_1;
345 } else if (rssi_strength > dm_digtable.rssi_highthresh) {
346 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_2;
347 rtl92c_dm_ctrl_initgain_by_fa(hw);
348 }
349 } else if (dm_digtable.dig_ext_port_stage != DIG_EXT_PORT_STAGE_0) {
350 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_0;
351 dm_digtable.cur_igvalue = 0x20;
352 rtl92c_dm_write_dig(hw);
353 }
354
355 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
356 ("curmultista_connectstate = "
357 "%x dig_ext_port_stage %x\n",
358 dm_digtable.curmultista_connectstate,
359 dm_digtable.dig_ext_port_stage));
360}
361
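/*
 * Single-station DIG: while the connection state is stable, or in the
 * before-connect/connected states, the IGI tracks the minimum PWDB through
 * rtl92c_dm_ctrl_initgain_by_rssi(); on any other transition the DIG
 * bookkeeping is reset to its defaults and written back to the hardware.
 */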
362static void rtl92c_dm_initial_gain_sta(struct ieee80211_hw *hw)
363{
364 struct rtl_priv *rtlpriv = rtl_priv(hw);
365
366 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
367 ("presta_connectstate = %x,"
368 " cursta_connectctate = %x\n",
369 dm_digtable.presta_connectstate,
370 dm_digtable.cursta_connectctate));
371
372 if (dm_digtable.presta_connectstate == dm_digtable.cursta_connectctate
373 || dm_digtable.cursta_connectctate == DIG_STA_BEFORE_CONNECT
374 || dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
375
376 if (dm_digtable.cursta_connectctate != DIG_STA_DISCONNECT) {
377 dm_digtable.rssi_val_min =
378 rtl92c_dm_initial_gain_min_pwdb(hw);
379 rtl92c_dm_ctrl_initgain_by_rssi(hw);
380 }
381 } else {
382 dm_digtable.rssi_val_min = 0;
383 dm_digtable.dig_ext_port_stage = DIG_EXT_PORT_STAGE_MAX;
384 dm_digtable.backoff_val = DM_DIG_BACKOFF_DEFAULT;
385 dm_digtable.cur_igvalue = 0x20;
386 dm_digtable.pre_igvalue = 0;
387 rtl92c_dm_write_dig(hw);
388 }
389}
390
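/*
 * CCK packet-detection threshold: when connected, rssi_val_min selects the
 * low or high RSSI stage with hysteresis around 20/25, and a stage change
 * reprograms the CCK CCA, system and false-alarm report registers (the
 * low-RSSI path also tracks a CCK false-alarm state from cnt_cck_fail).
 */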
391static void rtl92c_dm_cck_packet_detection_thresh(struct ieee80211_hw *hw)
392{
393 struct rtl_priv *rtlpriv = rtl_priv(hw);
394 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
395
396 if (dm_digtable.cursta_connectctate == DIG_STA_CONNECT) {
397 dm_digtable.rssi_val_min = rtl92c_dm_initial_gain_min_pwdb(hw);
398
399 if (dm_digtable.pre_cck_pd_state == CCK_PD_STAGE_LowRssi) {
400 if (dm_digtable.rssi_val_min <= 25)
401 dm_digtable.cur_cck_pd_state =
402 CCK_PD_STAGE_LowRssi;
403 else
404 dm_digtable.cur_cck_pd_state =
405 CCK_PD_STAGE_HighRssi;
406 } else {
407 if (dm_digtable.rssi_val_min <= 20)
408 dm_digtable.cur_cck_pd_state =
409 CCK_PD_STAGE_LowRssi;
410 else
411 dm_digtable.cur_cck_pd_state =
412 CCK_PD_STAGE_HighRssi;
413 }
414 } else {
415 dm_digtable.cur_cck_pd_state = CCK_PD_STAGE_MAX;
416 }
417
418 if (dm_digtable.pre_cck_pd_state != dm_digtable.cur_cck_pd_state) {
419 if (dm_digtable.cur_cck_pd_state == CCK_PD_STAGE_LowRssi) {
420 if (rtlpriv->falsealm_cnt.cnt_cck_fail > 800)
421 dm_digtable.cur_cck_fa_state =
422 CCK_FA_STAGE_High;
423 else
424 dm_digtable.cur_cck_fa_state = CCK_FA_STAGE_Low;
425
426 if (dm_digtable.pre_cck_fa_state !=
427 dm_digtable.cur_cck_fa_state) {
428 if (dm_digtable.cur_cck_fa_state ==
429 CCK_FA_STAGE_Low)
430 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
431 0x83);
432 else
433 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2,
434 0xcd);
435
436 dm_digtable.pre_cck_fa_state =
437 dm_digtable.cur_cck_fa_state;
438 }
439
440 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x40);
441
442 if (IS_92C_SERIAL(rtlhal->version))
443 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
444 MASKBYTE2, 0xd7);
445 } else {
446 rtl_set_bbreg(hw, RCCK0_CCA, MASKBYTE2, 0xcd);
447 rtl_set_bbreg(hw, RCCK0_SYSTEM, MASKBYTE1, 0x47);
448
449 if (IS_92C_SERIAL(rtlhal->version))
450 rtl_set_bbreg(hw, RCCK0_FALSEALARMREPORT,
451 MASKBYTE2, 0xd3);
452 }
453 dm_digtable.pre_cck_pd_state = dm_digtable.cur_cck_pd_state;
454 }
455
456 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
457 ("CCKPDStage=%x\n", dm_digtable.cur_cck_pd_state));
458
459 RT_TRACE(rtlpriv, COMP_DIG, DBG_TRACE,
460 ("is92C=%x\n", IS_92C_SERIAL(rtlhal->version)));
461}
462
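/*
 * Top-level DIG dispatcher: skipped while scanning.  The connection state
 * is derived from mac->link_state, then the single-station, multi-station
 * and CCK packet-detection handlers run in turn.
 */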
463static void rtl92c_dm_ctrl_initgain_by_twoport(struct ieee80211_hw *hw)
464{
465 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
466
467 if (mac->act_scanning == true)
468 return;
469
470 if ((mac->link_state > MAC80211_NOLINK) &&
471 (mac->link_state < MAC80211_LINKED))
472 dm_digtable.cursta_connectctate = DIG_STA_BEFORE_CONNECT;
473 else if (mac->link_state >= MAC80211_LINKED)
474 dm_digtable.cursta_connectctate = DIG_STA_CONNECT;
475 else
476 dm_digtable.cursta_connectctate = DIG_STA_DISCONNECT;
477
478 rtl92c_dm_initial_gain_sta(hw);
479 rtl92c_dm_initial_gain_multi_sta(hw);
480 rtl92c_dm_cck_packet_detection_thresh(hw);
481
482 dm_digtable.presta_connectstate = dm_digtable.cursta_connectctate;
483
484}
485
486static void rtl92c_dm_dig(struct ieee80211_hw *hw)
487{
488 struct rtl_priv *rtlpriv = rtl_priv(hw);
489
490 if (rtlpriv->dm.b_dm_initialgain_enable == false)
491 return;
492 if (dm_digtable.dig_enable_flag == false)
493 return;
494
495 rtl92c_dm_ctrl_initgain_by_twoport(hw);
496
497}
498
499static void rtl92c_dm_init_dynamic_txpower(struct ieee80211_hw *hw)
500{
501 struct rtl_priv *rtlpriv = rtl_priv(hw);
502
503 rtlpriv->dm.bdynamic_txpower_enable = false;
504
505 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
506 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
507}
508
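/*
 * Dynamic TX power: the smoothed PWDB of the peer selects a near-field
 * high-power level above TX_POWER_NEAR_FIELD_THRESH_LVL1/LVL2 and falls
 * back to TXHIGHPWRLEVEL_NORMAL below LVL1 - 5; whenever the level changes
 * the TX power for the current channel is rewritten.
 */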
509static void rtl92c_dm_dynamic_txpower(struct ieee80211_hw *hw)
510{
511 struct rtl_priv *rtlpriv = rtl_priv(hw);
512 struct rtl_phy *rtlphy = &(rtlpriv->phy);
513 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
514 long undecorated_smoothed_pwdb;
515
516 if (!rtlpriv->dm.bdynamic_txpower_enable)
517 return;
518
519 if (rtlpriv->dm.dm_flag & HAL_DM_HIPWR_DISABLE) {
520 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
521 return;
522 }
523
524 if ((mac->link_state < MAC80211_LINKED) &&
525 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
526 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
 527			 ("Not connected to any AP\n"));
528
529 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
530
531 rtlpriv->dm.last_dtp_lvl = TXHIGHPWRLEVEL_NORMAL;
532 return;
533 }
534
535 if (mac->link_state >= MAC80211_LINKED) {
536 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
537 undecorated_smoothed_pwdb =
538 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
539 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
540 ("AP Client PWDB = 0x%lx\n",
541 undecorated_smoothed_pwdb));
542 } else {
543 undecorated_smoothed_pwdb =
544 rtlpriv->dm.undecorated_smoothed_pwdb;
545 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
546 ("STA Default Port PWDB = 0x%lx\n",
547 undecorated_smoothed_pwdb));
548 }
549 } else {
550 undecorated_smoothed_pwdb =
551 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
552
553 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
554 ("AP Ext Port PWDB = 0x%lx\n",
555 undecorated_smoothed_pwdb));
556 }
557
558 if (undecorated_smoothed_pwdb >= TX_POWER_NEAR_FIELD_THRESH_LVL2) {
559 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
560 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
561 ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x0)\n"));
562 } else if ((undecorated_smoothed_pwdb <
563 (TX_POWER_NEAR_FIELD_THRESH_LVL2 - 3)) &&
564 (undecorated_smoothed_pwdb >=
565 TX_POWER_NEAR_FIELD_THRESH_LVL1)) {
566
567 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_LEVEL1;
568 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
569 ("TXHIGHPWRLEVEL_LEVEL1 (TxPwr=0x10)\n"));
570 } else if (undecorated_smoothed_pwdb <
571 (TX_POWER_NEAR_FIELD_THRESH_LVL1 - 5)) {
572 rtlpriv->dm.dynamic_txhighpower_lvl = TXHIGHPWRLEVEL_NORMAL;
573 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
574 ("TXHIGHPWRLEVEL_NORMAL\n"));
575 }
576
577 if ((rtlpriv->dm.dynamic_txhighpower_lvl != rtlpriv->dm.last_dtp_lvl)) {
578 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
579 ("PHY_SetTxPowerLevel8192S() Channel = %d\n",
580 rtlphy->current_channel));
581 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
582 }
583
584 rtlpriv->dm.last_dtp_lvl = rtlpriv->dm.dynamic_txhighpower_lvl;
585}
586
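/*
 * Write the current IGI to both OFDM AGC cores, but only when it differs
 * from the value last programmed.
 */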
587void rtl92c_dm_write_dig(struct ieee80211_hw *hw)
588{
589 struct rtl_priv *rtlpriv = rtl_priv(hw);
590
591 RT_TRACE(rtlpriv, COMP_DIG, DBG_LOUD,
592 ("cur_igvalue = 0x%x, "
593 "pre_igvalue = 0x%x, backoff_val = %d\n",
594 dm_digtable.cur_igvalue, dm_digtable.pre_igvalue,
595 dm_digtable.backoff_val));
596
597 if (dm_digtable.pre_igvalue != dm_digtable.cur_igvalue) {
598 rtl_set_bbreg(hw, ROFDM0_XAAGCCORE1, 0x7f,
599 dm_digtable.cur_igvalue);
600 rtl_set_bbreg(hw, ROFDM0_XBAGCCORE1, 0x7f,
601 dm_digtable.cur_igvalue);
602
603 dm_digtable.pre_igvalue = dm_digtable.cur_igvalue;
604 }
605}
606
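/*
 * PWDB monitor stub: the unconditional return below leaves the min/max
 * PWDB bookkeeping and the H2C RSSI report that follow it unreachable.
 */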
607static void rtl92c_dm_pwdb_monitor(struct ieee80211_hw *hw)
608{
609 struct rtl_priv *rtlpriv = rtl_priv(hw);
610 long tmpentry_max_pwdb = 0, tmpentry_min_pwdb = 0xff;
611
612 u8 h2c_parameter[3] = { 0 };
613
614 return;
615
616 if (tmpentry_max_pwdb != 0) {
617 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb =
618 tmpentry_max_pwdb;
619 } else {
620 rtlpriv->dm.entry_max_undecoratedsmoothed_pwdb = 0;
621 }
622
623 if (tmpentry_min_pwdb != 0xff) {
624 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb =
625 tmpentry_min_pwdb;
626 } else {
627 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb = 0;
628 }
629
630 h2c_parameter[2] = (u8) (rtlpriv->dm.undecorated_smoothed_pwdb & 0xFF);
631 h2c_parameter[0] = 0;
632
633 rtl92c_fill_h2c_cmd(hw, H2C_RSSI_REPORT, 3, h2c_parameter);
634}
635
636void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw)
637{
638 struct rtl_priv *rtlpriv = rtl_priv(hw);
639 rtlpriv->dm.bcurrent_turbo_edca = false;
640 rtlpriv->dm.bis_any_nonbepkts = false;
641 rtlpriv->dm.bis_cur_rdlstate = false;
642}
643
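/*
 * EDCA turbo: when only BE traffic is seen and frame bursting is allowed,
 * REG_EDCA_BE_PARAM is switched between a download-biased and an
 * upload-biased value depending on whether RX bytes exceed four times the
 * TX bytes; otherwise the normal per-AC parameters are restored.
 */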
644static void rtl92c_dm_check_edca_turbo(struct ieee80211_hw *hw)
645{
646 struct rtl_priv *rtlpriv = rtl_priv(hw);
647 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
648 static u64 last_txok_cnt;
649 static u64 last_rxok_cnt;
650 u64 cur_txok_cnt;
651 u64 cur_rxok_cnt;
652 u32 edca_be_ul = 0x5ea42b;
653 u32 edca_be_dl = 0x5ea42b;
654
655 if (mac->opmode == NL80211_IFTYPE_ADHOC)
656 goto dm_checkedcaturbo_exit;
657
658 if (mac->link_state != MAC80211_LINKED) {
659 rtlpriv->dm.bcurrent_turbo_edca = false;
660 return;
661 }
662
663 if (!mac->ht_enable) { /*FIX MERGE */
664 if (!(edca_be_ul & 0xffff0000))
665 edca_be_ul |= 0x005e0000;
666
667 if (!(edca_be_dl & 0xffff0000))
668 edca_be_dl |= 0x005e0000;
669 }
670
671 if ((!rtlpriv->dm.bis_any_nonbepkts) &&
672 (!rtlpriv->dm.b_disable_framebursting)) {
673 cur_txok_cnt = rtlpriv->stats.txbytesunicast - last_txok_cnt;
674 cur_rxok_cnt = rtlpriv->stats.rxbytesunicast - last_rxok_cnt;
675 if (cur_rxok_cnt > 4 * cur_txok_cnt) {
676 if (!rtlpriv->dm.bis_cur_rdlstate ||
677 !rtlpriv->dm.bcurrent_turbo_edca) {
678 rtl_write_dword(rtlpriv,
679 REG_EDCA_BE_PARAM,
680 edca_be_dl);
681 rtlpriv->dm.bis_cur_rdlstate = true;
682 }
683 } else {
684 if (rtlpriv->dm.bis_cur_rdlstate ||
685 !rtlpriv->dm.bcurrent_turbo_edca) {
686 rtl_write_dword(rtlpriv,
687 REG_EDCA_BE_PARAM,
688 edca_be_ul);
689 rtlpriv->dm.bis_cur_rdlstate = false;
690 }
691 }
692 rtlpriv->dm.bcurrent_turbo_edca = true;
693 } else {
694 if (rtlpriv->dm.bcurrent_turbo_edca) {
695 u8 tmp = AC0_BE;
696 rtlpriv->cfg->ops->set_hw_reg(hw,
697 HW_VAR_AC_PARAM,
698 (u8 *) (&tmp));
699 rtlpriv->dm.bcurrent_turbo_edca = false;
700 }
701 }
702
703dm_checkedcaturbo_exit:
704 rtlpriv->dm.bis_any_nonbepkts = false;
705 last_txok_cnt = rtlpriv->stats.txbytesunicast;
706 last_rxok_cnt = rtlpriv->stats.rxbytesunicast;
707}
708
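/*
 * Thermal-meter TX power tracking: read the RF thermal value, compare it
 * with the EFUSE calibration value, and walk the OFDM/CCK swing tables by
 * the temperature delta.  A delta_lck > 1 also retriggers LC calibration
 * and a delta_iqk > 3 retriggers IQ calibration.
 */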
709static void rtl92c_dm_txpower_tracking_callback_thermalmeter(struct ieee80211_hw
710 *hw)
711{
712 struct rtl_priv *rtlpriv = rtl_priv(hw);
713 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
714 struct rtl_phy *rtlphy = &(rtlpriv->phy);
715 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
716 u8 thermalvalue, delta, delta_lck, delta_iqk;
717 long ele_a, ele_d, temp_cck, val_x, value32;
718 long val_y, ele_c;
719 u8 ofdm_index[2], cck_index, ofdm_index_old[2], cck_index_old;
720 int i;
721 bool is2t = IS_92C_SERIAL(rtlhal->version);
722 u8 txpwr_level[2] = {0, 0};
723 u8 ofdm_min_index = 6, rf;
724
725 rtlpriv->dm.btxpower_trackingInit = true;
726 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
727 ("rtl92c_dm_txpower_tracking_callback_thermalmeter\n"));
728
729 thermalvalue = (u8) rtl_get_rfreg(hw, RF90_PATH_A, RF_T_METER, 0x1f);
730
731 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
732 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
733 "eeprom_thermalmeter 0x%x\n",
734 thermalvalue, rtlpriv->dm.thermalvalue,
735 rtlefuse->eeprom_thermalmeter));
736
737 rtl92c_phy_ap_calibrate(hw, (thermalvalue -
738 rtlefuse->eeprom_thermalmeter));
739 if (is2t)
740 rf = 2;
741 else
742 rf = 1;
743
744 if (thermalvalue) {
745 ele_d = rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
746 MASKDWORD) & MASKOFDM_D;
747
748 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
749 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
750 ofdm_index_old[0] = (u8) i;
751
752 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
753 ("Initial pathA ele_d reg0x%x = 0x%lx, "
754 "ofdm_index=0x%x\n",
755 ROFDM0_XATXIQIMBALANCE,
756 ele_d, ofdm_index_old[0]));
757 break;
758 }
759 }
760
761 if (is2t) {
762 ele_d = rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
763 MASKDWORD) & MASKOFDM_D;
764
765 for (i = 0; i < OFDM_TABLE_LENGTH; i++) {
766 if (ele_d == (ofdmswing_table[i] & MASKOFDM_D)) {
767 ofdm_index_old[1] = (u8) i;
768
769 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
770 DBG_LOUD,
771 ("Initial pathB ele_d reg0x%x = "
772 "0x%lx, ofdm_index=0x%x\n",
773 ROFDM0_XBTXIQIMBALANCE, ele_d,
774 ofdm_index_old[1]));
775 break;
776 }
777 }
778 }
779
780 temp_cck =
781 rtl_get_bbreg(hw, RCCK0_TXFILTER2, MASKDWORD) & MASKCCK;
782
783 for (i = 0; i < CCK_TABLE_LENGTH; i++) {
784 if (rtlpriv->dm.b_cck_inch14) {
785 if (memcmp((void *)&temp_cck,
786 (void *)&cckswing_table_ch14[i][2],
787 4) == 0) {
788 cck_index_old = (u8) i;
789
790 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
791 DBG_LOUD,
792 ("Initial reg0x%x = 0x%lx, "
793 "cck_index=0x%x, ch 14 %d\n",
794 RCCK0_TXFILTER2, temp_cck,
795 cck_index_old,
796 rtlpriv->dm.b_cck_inch14));
797 break;
798 }
799 } else {
800 if (memcmp((void *)&temp_cck,
801 (void *)
802 &cckswing_table_ch1ch13[i][2],
803 4) == 0) {
804 cck_index_old = (u8) i;
805
806 RT_TRACE(rtlpriv, COMP_POWER_TRACKING,
807 DBG_LOUD,
808 ("Initial reg0x%x = 0x%lx, "
809 "cck_index=0x%x, ch14 %d\n",
810 RCCK0_TXFILTER2, temp_cck,
811 cck_index_old,
812 rtlpriv->dm.b_cck_inch14));
813 break;
814 }
815 }
816 }
817
818 if (!rtlpriv->dm.thermalvalue) {
819 rtlpriv->dm.thermalvalue =
820 rtlefuse->eeprom_thermalmeter;
821 rtlpriv->dm.thermalvalue_lck = thermalvalue;
822 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
823 for (i = 0; i < rf; i++)
824 rtlpriv->dm.ofdm_index[i] = ofdm_index_old[i];
825 rtlpriv->dm.cck_index = cck_index_old;
826 }
827
828 delta = (thermalvalue > rtlpriv->dm.thermalvalue) ?
829 (thermalvalue - rtlpriv->dm.thermalvalue) :
830 (rtlpriv->dm.thermalvalue - thermalvalue);
831
832 delta_lck = (thermalvalue > rtlpriv->dm.thermalvalue_lck) ?
833 (thermalvalue - rtlpriv->dm.thermalvalue_lck) :
834 (rtlpriv->dm.thermalvalue_lck - thermalvalue);
835
836 delta_iqk = (thermalvalue > rtlpriv->dm.thermalvalue_iqk) ?
837 (thermalvalue - rtlpriv->dm.thermalvalue_iqk) :
838 (rtlpriv->dm.thermalvalue_iqk - thermalvalue);
839
840 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
841 ("Readback Thermal Meter = 0x%x pre thermal meter 0x%x "
842 "eeprom_thermalmeter 0x%x delta 0x%x "
843 "delta_lck 0x%x delta_iqk 0x%x\n",
844 thermalvalue, rtlpriv->dm.thermalvalue,
845 rtlefuse->eeprom_thermalmeter, delta, delta_lck,
846 delta_iqk));
847
848 if (delta_lck > 1) {
849 rtlpriv->dm.thermalvalue_lck = thermalvalue;
850 rtl92c_phy_lc_calibrate(hw);
851 }
852
853 if (delta > 0 && rtlpriv->dm.txpower_track_control) {
854 if (thermalvalue > rtlpriv->dm.thermalvalue) {
855 for (i = 0; i < rf; i++)
856 rtlpriv->dm.ofdm_index[i] -= delta;
857 rtlpriv->dm.cck_index -= delta;
858 } else {
859 for (i = 0; i < rf; i++)
860 rtlpriv->dm.ofdm_index[i] += delta;
861 rtlpriv->dm.cck_index += delta;
862 }
863
864 if (is2t) {
865 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
866 ("temp OFDM_A_index=0x%x, "
867 "OFDM_B_index=0x%x,"
868 "cck_index=0x%x\n",
869 rtlpriv->dm.ofdm_index[0],
870 rtlpriv->dm.ofdm_index[1],
871 rtlpriv->dm.cck_index));
872 } else {
873 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
874 ("temp OFDM_A_index=0x%x,"
875 "cck_index=0x%x\n",
876 rtlpriv->dm.ofdm_index[0],
877 rtlpriv->dm.cck_index));
878 }
879
880 if (thermalvalue > rtlefuse->eeprom_thermalmeter) {
881 for (i = 0; i < rf; i++)
882 ofdm_index[i] =
883 rtlpriv->dm.ofdm_index[i]
884 + 1;
885 cck_index = rtlpriv->dm.cck_index + 1;
886 } else {
887 for (i = 0; i < rf; i++)
888 ofdm_index[i] =
889 rtlpriv->dm.ofdm_index[i];
890 cck_index = rtlpriv->dm.cck_index;
891 }
892
893 for (i = 0; i < rf; i++) {
894 if (txpwr_level[i] >= 0 &&
895 txpwr_level[i] <= 26) {
896 if (thermalvalue >
897 rtlefuse->eeprom_thermalmeter) {
898 if (delta < 5)
899 ofdm_index[i] -= 1;
900
901 else
902 ofdm_index[i] -= 2;
903 } else if (delta > 5 && thermalvalue <
904 rtlefuse->
905 eeprom_thermalmeter) {
906 ofdm_index[i] += 1;
907 }
908 } else if (txpwr_level[i] >= 27 &&
909 txpwr_level[i] <= 32
910 && thermalvalue >
911 rtlefuse->eeprom_thermalmeter) {
912 if (delta < 5)
913 ofdm_index[i] -= 1;
914
915 else
916 ofdm_index[i] -= 2;
917 } else if (txpwr_level[i] >= 32 &&
918 txpwr_level[i] <= 38 &&
919 thermalvalue >
920 rtlefuse->eeprom_thermalmeter
921 && delta > 5) {
922 ofdm_index[i] -= 1;
923 }
924 }
925
926 if (txpwr_level[i] >= 0 && txpwr_level[i] <= 26) {
927 if (thermalvalue >
928 rtlefuse->eeprom_thermalmeter) {
929 if (delta < 5)
930 cck_index -= 1;
931
932 else
933 cck_index -= 2;
934 } else if (delta > 5 && thermalvalue <
935 rtlefuse->eeprom_thermalmeter) {
936 cck_index += 1;
937 }
938 } else if (txpwr_level[i] >= 27 &&
939 txpwr_level[i] <= 32 &&
940 thermalvalue >
941 rtlefuse->eeprom_thermalmeter) {
942 if (delta < 5)
943 cck_index -= 1;
944
945 else
946 cck_index -= 2;
947 } else if (txpwr_level[i] >= 32 &&
948 txpwr_level[i] <= 38 &&
949 thermalvalue > rtlefuse->eeprom_thermalmeter
950 && delta > 5) {
951 cck_index -= 1;
952 }
953
954 for (i = 0; i < rf; i++) {
955 if (ofdm_index[i] > OFDM_TABLE_SIZE - 1)
956 ofdm_index[i] = OFDM_TABLE_SIZE - 1;
957
958 else if (ofdm_index[i] < ofdm_min_index)
959 ofdm_index[i] = ofdm_min_index;
960 }
961
962 if (cck_index > CCK_TABLE_SIZE - 1)
963 cck_index = CCK_TABLE_SIZE - 1;
964 else if (cck_index < 0)
965 cck_index = 0;
966
967 if (is2t) {
968 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
969 ("new OFDM_A_index=0x%x, "
970 "OFDM_B_index=0x%x,"
971 "cck_index=0x%x\n",
972 ofdm_index[0], ofdm_index[1],
973 cck_index));
974 } else {
975 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
976 ("new OFDM_A_index=0x%x,"
977 "cck_index=0x%x\n",
978 ofdm_index[0], cck_index));
979 }
980 }
981
982 if (rtlpriv->dm.txpower_track_control && delta != 0) {
983 ele_d =
984 (ofdmswing_table[ofdm_index[0]] & 0xFFC00000) >> 22;
985 val_x = rtlphy->reg_e94;
986 val_y = rtlphy->reg_e9c;
987
988 if (val_x != 0) {
989 if ((val_x & 0x00000200) != 0)
990 val_x = val_x | 0xFFFFFC00;
991 ele_a = ((val_x * ele_d) >> 8) & 0x000003FF;
992
993 if ((val_y & 0x00000200) != 0)
994 val_y = val_y | 0xFFFFFC00;
995 ele_c = ((val_y * ele_d) >> 8) & 0x000003FF;
996
997 value32 = (ele_d << 22) |
998 ((ele_c & 0x3F) << 16) | ele_a;
999
1000 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1001 MASKDWORD, value32);
1002
1003 value32 = (ele_c & 0x000003C0) >> 6;
1004 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
1005 value32);
1006
1007 value32 = ((val_x * ele_d) >> 7) & 0x01;
1008 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1009 BIT(31), value32);
1010
1011 value32 = ((val_y * ele_d) >> 7) & 0x01;
1012 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1013 BIT(29), value32);
1014 } else {
1015 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1016 MASKDWORD,
1017 ofdmswing_table[ofdm_index[0]]);
1018
1019 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, MASKH4BITS,
1020 0x00);
1021 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1022 BIT(31) | BIT(29), 0x00);
1023 }
1024
1025 if (!rtlpriv->dm.b_cck_inch14) {
1026 rtl_write_byte(rtlpriv, 0xa22,
1027 cckswing_table_ch1ch13[cck_index]
1028 [0]);
1029 rtl_write_byte(rtlpriv, 0xa23,
1030 cckswing_table_ch1ch13[cck_index]
1031 [1]);
1032 rtl_write_byte(rtlpriv, 0xa24,
1033 cckswing_table_ch1ch13[cck_index]
1034 [2]);
1035 rtl_write_byte(rtlpriv, 0xa25,
1036 cckswing_table_ch1ch13[cck_index]
1037 [3]);
1038 rtl_write_byte(rtlpriv, 0xa26,
1039 cckswing_table_ch1ch13[cck_index]
1040 [4]);
1041 rtl_write_byte(rtlpriv, 0xa27,
1042 cckswing_table_ch1ch13[cck_index]
1043 [5]);
1044 rtl_write_byte(rtlpriv, 0xa28,
1045 cckswing_table_ch1ch13[cck_index]
1046 [6]);
1047 rtl_write_byte(rtlpriv, 0xa29,
1048 cckswing_table_ch1ch13[cck_index]
1049 [7]);
1050 } else {
1051 rtl_write_byte(rtlpriv, 0xa22,
1052 cckswing_table_ch14[cck_index]
1053 [0]);
1054 rtl_write_byte(rtlpriv, 0xa23,
1055 cckswing_table_ch14[cck_index]
1056 [1]);
1057 rtl_write_byte(rtlpriv, 0xa24,
1058 cckswing_table_ch14[cck_index]
1059 [2]);
1060 rtl_write_byte(rtlpriv, 0xa25,
1061 cckswing_table_ch14[cck_index]
1062 [3]);
1063 rtl_write_byte(rtlpriv, 0xa26,
1064 cckswing_table_ch14[cck_index]
1065 [4]);
1066 rtl_write_byte(rtlpriv, 0xa27,
1067 cckswing_table_ch14[cck_index]
1068 [5]);
1069 rtl_write_byte(rtlpriv, 0xa28,
1070 cckswing_table_ch14[cck_index]
1071 [6]);
1072 rtl_write_byte(rtlpriv, 0xa29,
1073 cckswing_table_ch14[cck_index]
1074 [7]);
1075 }
1076
1077 if (is2t) {
1078 ele_d = (ofdmswing_table[ofdm_index[1]] &
1079 0xFFC00000) >> 22;
1080
1081 val_x = rtlphy->reg_eb4;
1082 val_y = rtlphy->reg_ebc;
1083
1084 if (val_x != 0) {
1085 if ((val_x & 0x00000200) != 0)
1086 val_x = val_x | 0xFFFFFC00;
1087 ele_a = ((val_x * ele_d) >> 8) &
1088 0x000003FF;
1089
1090 if ((val_y & 0x00000200) != 0)
1091 val_y = val_y | 0xFFFFFC00;
1092 ele_c = ((val_y * ele_d) >> 8) &
1093 0x00003FF;
1094
1095 value32 = (ele_d << 22) |
1096 ((ele_c & 0x3F) << 16) | ele_a;
1097 rtl_set_bbreg(hw,
1098 ROFDM0_XBTXIQIMBALANCE,
1099 MASKDWORD, value32);
1100
1101 value32 = (ele_c & 0x000003C0) >> 6;
1102 rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
1103 MASKH4BITS, value32);
1104
1105 value32 = ((val_x * ele_d) >> 7) & 0x01;
1106 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1107 BIT(27), value32);
1108
1109 value32 = ((val_y * ele_d) >> 7) & 0x01;
1110 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1111 BIT(25), value32);
1112 } else {
1113 rtl_set_bbreg(hw,
1114 ROFDM0_XBTXIQIMBALANCE,
1115 MASKDWORD,
1116 ofdmswing_table[ofdm_index
1117 [1]]);
1118 rtl_set_bbreg(hw, ROFDM0_XDTXAFE,
1119 MASKH4BITS, 0x00);
1120 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD,
1121 BIT(27) | BIT(25), 0x00);
1122 }
1123
1124 }
1125 }
1126
1127 if (delta_iqk > 3) {
1128 rtlpriv->dm.thermalvalue_iqk = thermalvalue;
1129 rtl92c_phy_iq_calibrate(hw, false);
1130 }
1131
1132 if (rtlpriv->dm.txpower_track_control)
1133 rtlpriv->dm.thermalvalue = thermalvalue;
1134 }
1135
1136 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD, ("<===\n"));
1137
1138}
1139
1140static void rtl92c_dm_initialize_txpower_tracking_thermalmeter(
1141 struct ieee80211_hw *hw)
1142{
1143 struct rtl_priv *rtlpriv = rtl_priv(hw);
1144
1145 rtlpriv->dm.btxpower_tracking = true;
1146 rtlpriv->dm.btxpower_trackingInit = false;
1147
1148 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1149 ("pMgntInfo->btxpower_tracking = %d\n",
1150 rtlpriv->dm.btxpower_tracking));
1151}
1152
1153static void rtl92c_dm_initialize_txpower_tracking(struct ieee80211_hw *hw)
1154{
1155 rtl92c_dm_initialize_txpower_tracking_thermalmeter(hw);
1156}
1157
1158static void rtl92c_dm_txpower_tracking_directcall(struct ieee80211_hw *hw)
1159{
1160 rtl92c_dm_txpower_tracking_callback_thermalmeter(hw);
1161}
1162
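/*
 * Two-phase tracking: the first watchdog pass only triggers the RF thermal
 * meter; the following pass reads it back and runs the tracking callback.
 */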
1163static void rtl92c_dm_check_txpower_tracking_thermal_meter(
1164 struct ieee80211_hw *hw)
1165{
1166 struct rtl_priv *rtlpriv = rtl_priv(hw);
1167 static u8 tm_trigger;
1168
1169 if (!rtlpriv->dm.btxpower_tracking)
1170 return;
1171
1172 if (!tm_trigger) {
1173 rtl_set_rfreg(hw, RF90_PATH_A, RF_T_METER, RFREG_OFFSET_MASK,
1174 0x60);
1175 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1176			 ("Trigger 92C Thermal Meter!!\n"));
1177 tm_trigger = 1;
1178 return;
1179 } else {
1180 RT_TRACE(rtlpriv, COMP_POWER_TRACKING, DBG_LOUD,
1181 ("Schedule TxPowerTracking direct call!!\n"));
1182 rtl92c_dm_txpower_tracking_directcall(hw);
1183 tm_trigger = 0;
1184 }
1185}
1186
1187void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw)
1188{
1189 rtl92c_dm_check_txpower_tracking_thermal_meter(hw);
1190}
1191
1192void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw)
1193{
1194 struct rtl_priv *rtlpriv = rtl_priv(hw);
1195 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1196
1197 p_ra->ratr_state = DM_RATR_STA_INIT;
1198 p_ra->pre_ratr_state = DM_RATR_STA_INIT;
1199
1200 if (rtlpriv->dm.dm_type == DM_TYPE_BYDRIVER)
1201 rtlpriv->dm.b_useramask = true;
1202 else
1203 rtlpriv->dm.b_useramask = false;
1204
1205}
1206
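/*
 * Rate-adaptive mask refresh: the smoothed PWDB is compared against
 * hysteresis thresholds chosen from the previous RA state and, when the
 * resulting level changes, the new mask is pushed via update_rate_mask().
 */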
1207static void rtl92c_dm_refresh_rate_adaptive_mask(struct ieee80211_hw *hw)
1208{
1209 struct rtl_priv *rtlpriv = rtl_priv(hw);
1210 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1211 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1212 struct rate_adaptive *p_ra = &(rtlpriv->ra);
1213 u32 low_rssithresh_for_ra, high_rssithresh_for_ra;
1214
1215 if (is_hal_stop(rtlhal)) {
1216 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1217 ("<---- driver is going to unload\n"));
1218 return;
1219 }
1220
1221 if (!rtlpriv->dm.b_useramask) {
1222 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1223 ("<---- driver does not control rate adaptive mask\n"));
1224 return;
1225 }
1226
1227 if (mac->link_state == MAC80211_LINKED) {
1228
1229 switch (p_ra->pre_ratr_state) {
1230 case DM_RATR_STA_HIGH:
1231 high_rssithresh_for_ra = 50;
1232 low_rssithresh_for_ra = 20;
1233 break;
1234 case DM_RATR_STA_MIDDLE:
1235 high_rssithresh_for_ra = 55;
1236 low_rssithresh_for_ra = 20;
1237 break;
1238 case DM_RATR_STA_LOW:
1239 high_rssithresh_for_ra = 50;
1240 low_rssithresh_for_ra = 25;
1241 break;
1242 default:
1243 high_rssithresh_for_ra = 50;
1244 low_rssithresh_for_ra = 20;
1245 break;
1246 }
1247
1248 if (rtlpriv->dm.undecorated_smoothed_pwdb >
1249 (long)high_rssithresh_for_ra)
1250 p_ra->ratr_state = DM_RATR_STA_HIGH;
1251 else if (rtlpriv->dm.undecorated_smoothed_pwdb >
1252 (long)low_rssithresh_for_ra)
1253 p_ra->ratr_state = DM_RATR_STA_MIDDLE;
1254 else
1255 p_ra->ratr_state = DM_RATR_STA_LOW;
1256
1257 if (p_ra->pre_ratr_state != p_ra->ratr_state) {
1258 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1259 ("RSSI = %ld\n",
1260 rtlpriv->dm.undecorated_smoothed_pwdb));
1261 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1262 ("RSSI_LEVEL = %d\n", p_ra->ratr_state));
1263 RT_TRACE(rtlpriv, COMP_RATE, DBG_LOUD,
1264 ("PreState = %d, CurState = %d\n",
1265 p_ra->pre_ratr_state, p_ra->ratr_state));
1266
1267 rtlpriv->cfg->ops->update_rate_mask(hw,
1268 p_ra->ratr_state);
1269
1270 p_ra->pre_ratr_state = p_ra->ratr_state;
1271 }
1272 }
1273}
1274
1275static void rtl92c_dm_init_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1276{
1277 dm_pstable.pre_ccastate = CCA_MAX;
1278 dm_pstable.cur_ccasate = CCA_MAX;
1279 dm_pstable.pre_rfstate = RF_MAX;
1280 dm_pstable.cur_rfstate = RF_MAX;
1281 dm_pstable.rssi_val_min = 0;
1282}
1283
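/*
 * 1R/2R CCA selection: strong signals (rssi_val_min >= 35) allow CCA on a
 * single receive path, weak ones (<= 30) switch back to both paths, with
 * the register writes issued only on a state change.
 */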
1284static void rtl92c_dm_1r_cca(struct ieee80211_hw *hw)
1285{
1286 struct rtl_priv *rtlpriv = rtl_priv(hw);
1287 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1288
1289 if (dm_pstable.rssi_val_min != 0) {
1290 if (dm_pstable.pre_ccastate == CCA_2R) {
1291 if (dm_pstable.rssi_val_min >= 35)
1292 dm_pstable.cur_ccasate = CCA_1R;
1293 else
1294 dm_pstable.cur_ccasate = CCA_2R;
1295 } else {
1296 if (dm_pstable.rssi_val_min <= 30)
1297 dm_pstable.cur_ccasate = CCA_2R;
1298 else
1299 dm_pstable.cur_ccasate = CCA_1R;
1300 }
1301 } else {
1302 dm_pstable.cur_ccasate = CCA_MAX;
1303 }
1304
1305 if (dm_pstable.pre_ccastate != dm_pstable.cur_ccasate) {
1306 if (dm_pstable.cur_ccasate == CCA_1R) {
1307 if (get_rf_type(rtlphy) == RF_2T2R) {
1308 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1309 MASKBYTE0, 0x13);
1310 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x20);
1311 } else {
1312 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE,
1313 MASKBYTE0, 0x23);
1314 rtl_set_bbreg(hw, 0xe70, 0x7fc00000, 0x10c);
1315 }
1316 } else {
1317 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0,
1318 0x33);
1319 rtl_set_bbreg(hw, 0xe70, MASKBYTE3, 0x63);
1320 }
1321 dm_pstable.pre_ccastate = dm_pstable.cur_ccasate;
1322 }
1323
1324 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD, ("CCAStage = %s\n",
1325 (dm_pstable.cur_ccasate ==
1326 0) ? "1RCCA" : "2RCCA"));
1327}
1328
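/*
 * RF power saving: the affected register fields are latched on the first
 * call, then the RF state toggles between RF_SAVE and RF_NORMAL around
 * rssi_val_min 25/30, restoring the saved values when leaving save mode.
 */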
1329void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal)
1330{
1331 static u8 initialize;
1332 static u32 reg_874, reg_c70, reg_85c, reg_a74;
1333
1334 if (initialize == 0) {
1335 reg_874 = (rtl_get_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1336 MASKDWORD) & 0x1CC000) >> 14;
1337
1338 reg_c70 = (rtl_get_bbreg(hw, ROFDM0_AGCPARAMETER1,
1339 MASKDWORD) & BIT(3)) >> 3;
1340
1341 reg_85c = (rtl_get_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
1342 MASKDWORD) & 0xFF000000) >> 24;
1343
1344 reg_a74 = (rtl_get_bbreg(hw, 0xa74, MASKDWORD) & 0xF000) >> 12;
1345
1346 initialize = 1;
1347 }
1348
1349 if (!bforce_in_normal) {
1350 if (dm_pstable.rssi_val_min != 0) {
1351 if (dm_pstable.pre_rfstate == RF_NORMAL) {
1352 if (dm_pstable.rssi_val_min >= 30)
1353 dm_pstable.cur_rfstate = RF_SAVE;
1354 else
1355 dm_pstable.cur_rfstate = RF_NORMAL;
1356 } else {
1357 if (dm_pstable.rssi_val_min <= 25)
1358 dm_pstable.cur_rfstate = RF_NORMAL;
1359 else
1360 dm_pstable.cur_rfstate = RF_SAVE;
1361 }
1362 } else {
1363 dm_pstable.cur_rfstate = RF_MAX;
1364 }
1365 } else {
1366 dm_pstable.cur_rfstate = RF_NORMAL;
1367 }
1368
1369 if (dm_pstable.pre_rfstate != dm_pstable.cur_rfstate) {
1370 if (dm_pstable.cur_rfstate == RF_SAVE) {
1371 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1372 0x1C0000, 0x2);
1373 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3), 0);
1374 rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL,
1375 0xFF000000, 0x63);
1376 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1377 0xC000, 0x2);
1378 rtl_set_bbreg(hw, 0xa74, 0xF000, 0x3);
1379 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1380 rtl_set_bbreg(hw, 0x818, BIT(28), 0x1);
1381 } else {
1382 rtl_set_bbreg(hw, RFPGA0_XCD_RFINTERFACESW,
1383 0x1CC000, reg_874);
1384 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, BIT(3),
1385 reg_c70);
1386 rtl_set_bbreg(hw, RFPGA0_XCD_SWITCHCONTROL, 0xFF000000,
1387 reg_85c);
1388 rtl_set_bbreg(hw, 0xa74, 0xF000, reg_a74);
1389 rtl_set_bbreg(hw, 0x818, BIT(28), 0x0);
1390 }
1391
1392 dm_pstable.pre_rfstate = dm_pstable.cur_rfstate;
1393 }
1394}
1395
1396static void rtl92c_dm_dynamic_bb_powersaving(struct ieee80211_hw *hw)
1397{
1398 struct rtl_priv *rtlpriv = rtl_priv(hw);
1399 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1400 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1401
1402 if (((mac->link_state == MAC80211_NOLINK)) &&
1403 (rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb == 0)) {
1404 dm_pstable.rssi_val_min = 0;
1405 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1406			 ("Not connected to any AP\n"));
1407 }
1408
1409 if (mac->link_state == MAC80211_LINKED) {
1410 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
1411 dm_pstable.rssi_val_min =
1412 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1413 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1414 ("AP Client PWDB = 0x%lx\n",
1415 dm_pstable.rssi_val_min));
1416 } else {
1417 dm_pstable.rssi_val_min =
1418 rtlpriv->dm.undecorated_smoothed_pwdb;
1419 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1420 ("STA Default Port PWDB = 0x%lx\n",
1421 dm_pstable.rssi_val_min));
1422 }
1423 } else {
1424 dm_pstable.rssi_val_min =
1425 rtlpriv->dm.entry_min_undecoratedsmoothed_pwdb;
1426
1427 RT_TRACE(rtlpriv, DBG_LOUD, DBG_LOUD,
1428 ("AP Ext Port PWDB = 0x%lx\n",
1429 dm_pstable.rssi_val_min));
1430 }
1431
1432 if (IS_92C_SERIAL(rtlhal->version))
1433 rtl92c_dm_1r_cca(hw);
1434}
1435
1436void rtl92c_dm_init(struct ieee80211_hw *hw)
1437{
1438 struct rtl_priv *rtlpriv = rtl_priv(hw);
1439
1440 rtlpriv->dm.dm_type = DM_TYPE_BYDRIVER;
1441 rtl92c_dm_diginit(hw);
1442 rtl92c_dm_init_dynamic_txpower(hw);
1443 rtl92c_dm_init_edca_turbo(hw);
1444 rtl92c_dm_init_rate_adaptive_mask(hw);
1445 rtl92c_dm_initialize_txpower_tracking(hw);
1446 rtl92c_dm_init_dynamic_bb_powersaving(hw);
1447}
1448
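/*
 * Periodic dynamic-mechanism watchdog: runs only while the RF is on, the
 * firmware is awake and no RF change is in progress, then steps through
 * PWDB monitoring, DIG, false-alarm statistics, BB power saving, dynamic
 * TX power, power tracking, rate adaptation and EDCA turbo.
 */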
1449void rtl92c_dm_watchdog(struct ieee80211_hw *hw)
1450{
1451 struct rtl_priv *rtlpriv = rtl_priv(hw);
1452 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1453 bool b_fw_current_inpsmode = false;
1454 bool b_fw_ps_awake = true;
1455
1456 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FW_PSMODE_STATUS,
1457 (u8 *) (&b_fw_current_inpsmode));
1458 rtlpriv->cfg->ops->get_hw_reg(hw, HW_VAR_FWLPS_RF_ON,
1459 (u8 *) (&b_fw_ps_awake));
1460
1461 if ((ppsc->rfpwr_state == ERFON) && ((!b_fw_current_inpsmode) &&
1462 b_fw_ps_awake)
1463 && (!ppsc->rfchange_inprogress)) {
1464 rtl92c_dm_pwdb_monitor(hw);
1465 rtl92c_dm_dig(hw);
1466 rtl92c_dm_false_alarm_counter_statistics(hw);
1467 rtl92c_dm_dynamic_bb_powersaving(hw);
1468 rtl92c_dm_dynamic_txpower(hw);
1469 rtl92c_dm_check_txpower_tracking(hw);
1470 rtl92c_dm_refresh_rate_adaptive_mask(hw);
1471 rtl92c_dm_check_edca_turbo(hw);
1472 }
1473}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
new file mode 100644
index 000000000000..463439e4074c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/dm.h
@@ -0,0 +1,196 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_DM_H__
31#define __RTL92C_DM_H__
32
33#define HAL_DM_DIG_DISABLE BIT(0)
34#define HAL_DM_HIPWR_DISABLE BIT(1)
35
36#define OFDM_TABLE_LENGTH 37
37#define CCK_TABLE_LENGTH 33
38
39#define OFDM_TABLE_SIZE 37
40#define CCK_TABLE_SIZE 33
41
42#define BW_AUTO_SWITCH_HIGH_LOW 25
43#define BW_AUTO_SWITCH_LOW_HIGH 30
44
45#define DM_DIG_THRESH_HIGH 40
46#define DM_DIG_THRESH_LOW 35
47
48#define DM_FALSEALARM_THRESH_LOW 400
49#define DM_FALSEALARM_THRESH_HIGH 1000
50
51#define DM_DIG_MAX 0x3e
52#define DM_DIG_MIN 0x1e
53
54#define DM_DIG_FA_UPPER 0x32
55#define DM_DIG_FA_LOWER 0x20
56#define DM_DIG_FA_TH0 0x20
57#define DM_DIG_FA_TH1 0x100
58#define DM_DIG_FA_TH2 0x200
59
60#define DM_DIG_BACKOFF_MAX 12
61#define DM_DIG_BACKOFF_MIN -4
62#define DM_DIG_BACKOFF_DEFAULT 10
63
64#define RXPATHSELECTION_SS_TH_lOW 30
65#define RXPATHSELECTION_DIFF_TH 18
66
67#define DM_RATR_STA_INIT 0
68#define DM_RATR_STA_HIGH 1
69#define DM_RATR_STA_MIDDLE 2
70#define DM_RATR_STA_LOW 3
71
72#define CTS2SELF_THVAL 30
73#define REGC38_TH 20
74
75#define WAIOTTHVal 25
76
77#define TXHIGHPWRLEVEL_NORMAL 0
78#define TXHIGHPWRLEVEL_LEVEL1 1
79#define TXHIGHPWRLEVEL_LEVEL2 2
80#define TXHIGHPWRLEVEL_BT1 3
81#define TXHIGHPWRLEVEL_BT2 4
82
83#define DM_TYPE_BYFW 0
84#define DM_TYPE_BYDRIVER 1
85
86#define TX_POWER_NEAR_FIELD_THRESH_LVL2 74
87#define TX_POWER_NEAR_FIELD_THRESH_LVL1 67
88
89struct ps_t {
90 u8 pre_ccastate;
91 u8 cur_ccasate;
92 u8 pre_rfstate;
93 u8 cur_rfstate;
94 long rssi_val_min;
95};
96
97struct dig_t {
98 u8 dig_enable_flag;
99 u8 dig_ext_port_stage;
100 u32 rssi_lowthresh;
101 u32 rssi_highthresh;
102 u32 fa_lowthresh;
103 u32 fa_highthresh;
104 u8 cursta_connectctate;
105 u8 presta_connectstate;
106 u8 curmultista_connectstate;
107 u8 pre_igvalue;
108 u8 cur_igvalue;
109 char backoff_val;
110 char backoff_val_range_max;
111 char backoff_val_range_min;
112 u8 rx_gain_range_max;
113 u8 rx_gain_range_min;
114 u8 rssi_val_min;
115 u8 pre_cck_pd_state;
116 u8 cur_cck_pd_state;
117 u8 pre_cck_fa_state;
118 u8 cur_cck_fa_state;
119 u8 pre_ccastate;
120 u8 cur_ccasate;
121};
122
123struct swat_t {
124 u8 failure_cnt;
125 u8 try_flag;
126 u8 stop_trying;
127 long pre_rssi;
128 long trying_threshold;
129 u8 cur_antenna;
130 u8 pre_antenna;
131};
132
133enum tag_dynamic_init_gain_operation_type_definition {
134 DIG_TYPE_THRESH_HIGH = 0,
135 DIG_TYPE_THRESH_LOW = 1,
136 DIG_TYPE_BACKOFF = 2,
137 DIG_TYPE_RX_GAIN_MIN = 3,
138 DIG_TYPE_RX_GAIN_MAX = 4,
139 DIG_TYPE_ENABLE = 5,
140 DIG_TYPE_DISABLE = 6,
141 DIG_OP_TYPE_MAX
142};
143
144enum tag_cck_packet_detection_threshold_type_definition {
145 CCK_PD_STAGE_LowRssi = 0,
146 CCK_PD_STAGE_HighRssi = 1,
147 CCK_FA_STAGE_Low = 2,
148 CCK_FA_STAGE_High = 3,
149 CCK_PD_STAGE_MAX = 4,
150};
151
152enum dm_1r_cca_e {
153 CCA_1R = 0,
154 CCA_2R = 1,
155 CCA_MAX = 2,
156};
157
158enum dm_rf_e {
159 RF_SAVE = 0,
160 RF_NORMAL = 1,
161 RF_MAX = 2,
162};
163
164enum dm_sw_ant_switch_e {
165 ANS_ANTENNA_B = 1,
166 ANS_ANTENNA_A = 2,
167 ANS_ANTENNA_MAX = 3,
168};
169
170enum dm_dig_ext_port_alg_e {
171 DIG_EXT_PORT_STAGE_0 = 0,
172 DIG_EXT_PORT_STAGE_1 = 1,
173 DIG_EXT_PORT_STAGE_2 = 2,
174 DIG_EXT_PORT_STAGE_3 = 3,
175 DIG_EXT_PORT_STAGE_MAX = 4,
176};
177
178enum dm_dig_connect_e {
179 DIG_STA_DISCONNECT = 0,
180 DIG_STA_CONNECT = 1,
181 DIG_STA_BEFORE_CONNECT = 2,
182 DIG_MULTISTA_DISCONNECT = 3,
183 DIG_MULTISTA_CONNECT = 4,
184 DIG_CONNECT_MAX
185};
186
187extern struct dig_t dm_digtable;
188void rtl92c_dm_init(struct ieee80211_hw *hw);
189void rtl92c_dm_watchdog(struct ieee80211_hw *hw);
190void rtl92c_dm_write_dig(struct ieee80211_hw *hw);
191void rtl92c_dm_init_edca_turbo(struct ieee80211_hw *hw);
192void rtl92c_dm_check_txpower_tracking(struct ieee80211_hw *hw);
193void rtl92c_dm_init_rate_adaptive_mask(struct ieee80211_hw *hw);
194void rtl92c_dm_rf_saving(struct ieee80211_hw *hw, u8 bforce_in_normal);
195
196#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
new file mode 100644
index 000000000000..11dd22b987e7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/fw.c
@@ -0,0 +1,804 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include <linux/firmware.h>
31#include "../wifi.h"
32#include "../pci.h"
33#include "../base.h"
34#include "reg.h"
35#include "def.h"
36#include "fw.h"
37#include "table.h"
38
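/*
 * Gate the MCU firmware-download path: the USB part toggles MCUFWDL_EN in
 * REG_MCUFWDL, while the PCIe part enables the download by setting bits in
 * REG_SYS_FUNC_EN + 1 and REG_MCUFWDL and disables it by clearing the
 * MCUFWDL enable bit again.
 */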
39static void _rtl92c_enable_fw_download(struct ieee80211_hw *hw, bool enable)
40{
41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
43
44 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CU) {
45 u32 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
46 if (enable)
47 value32 |= MCUFWDL_EN;
48 else
49 value32 &= ~MCUFWDL_EN;
50 rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
51 } else if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE) {
52 u8 tmp;
53 if (enable) {
54
55 tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
56 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1,
57 tmp | 0x04);
58
59 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
60 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp | 0x01);
61
62 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL + 2);
63 rtl_write_byte(rtlpriv, REG_MCUFWDL + 2, tmp & 0xf7);
64 } else {
65
66 tmp = rtl_read_byte(rtlpriv, REG_MCUFWDL);
67 rtl_write_byte(rtlpriv, REG_MCUFWDL, tmp & 0xfe);
68
69 rtl_write_byte(rtlpriv, REG_MCUFWDL + 1, 0x00);
70 }
71 }
72}
73
74static void _rtl92c_fw_block_write(struct ieee80211_hw *hw,
75 const u8 *buffer, u32 size)
76{
77 struct rtl_priv *rtlpriv = rtl_priv(hw);
78 u32 blockSize = sizeof(u32);
79 u8 *bufferPtr = (u8 *) buffer;
80 u32 *pu4BytePtr = (u32 *) buffer;
81 u32 i, offset, blockCount, remainSize;
82
83 blockCount = size / blockSize;
84 remainSize = size % blockSize;
85
86 for (i = 0; i < blockCount; i++) {
87 offset = i * blockSize;
88 rtl_write_dword(rtlpriv, (FW_8192C_START_ADDRESS + offset),
89 *(pu4BytePtr + i));
90 }
91
92 if (remainSize) {
93 offset = blockCount * blockSize;
94 bufferPtr += offset;
95 for (i = 0; i < remainSize; i++) {
96 rtl_write_byte(rtlpriv, (FW_8192C_START_ADDRESS +
97 offset + i), *(bufferPtr + i));
98 }
99 }
100}
101
102static void _rtl92c_fw_page_write(struct ieee80211_hw *hw,
103 u32 page, const u8 *buffer, u32 size)
104{
105 struct rtl_priv *rtlpriv = rtl_priv(hw);
106 u8 value8;
107 u8 u8page = (u8) (page & 0x07);
108
109 value8 = (rtl_read_byte(rtlpriv, REG_MCUFWDL + 2) & 0xF8) | u8page;
110
111 rtl_write_byte(rtlpriv, (REG_MCUFWDL + 2), value8);
112 _rtl92c_fw_block_write(hw, buffer, size);
113}
114
115static void _rtl92c_fill_dummy(u8 *pfwbuf, u32 *pfwlen)
116{
117 u32 fwlen = *pfwlen;
118 u8 remain = (u8) (fwlen % 4);
119
120 remain = (remain == 0) ? 0 : (4 - remain);
121
122 while (remain > 0) {
123 pfwbuf[fwlen] = 0;
124 fwlen++;
125 remain--;
126 }
127
128 *pfwlen = fwlen;
129}
130
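/*
 * Push the firmware image to the MCU: B-cut parts are written page by page
 * (FW_8192C_PAGE_SIZE bytes each, dword-padded on the PCIe part), older
 * cuts as a single block write starting at FW_8192C_START_ADDRESS.
 */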
131static void _rtl92c_write_fw(struct ieee80211_hw *hw,
132 enum version_8192c version, u8 *buffer, u32 size)
133{
134 struct rtl_priv *rtlpriv = rtl_priv(hw);
135 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
136 bool is_version_b;
137 u8 *bufferPtr = (u8 *) buffer;
138
139 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE, ("FW size is %d bytes,\n", size));
140
141 is_version_b = IS_CHIP_VER_B(version);
142 if (is_version_b) {
143 u32 pageNums, remainSize;
144 u32 page, offset;
145
146 if (rtlhal->hw_type == HARDWARE_TYPE_RTL8192CE)
147 _rtl92c_fill_dummy(bufferPtr, &size);
148
149 pageNums = size / FW_8192C_PAGE_SIZE;
150 remainSize = size % FW_8192C_PAGE_SIZE;
151
152 if (pageNums > 4) {
153 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 154				 ("Page numbers should not be greater than 4\n"));
155 }
156
157 for (page = 0; page < pageNums; page++) {
158 offset = page * FW_8192C_PAGE_SIZE;
159 _rtl92c_fw_page_write(hw, page, (bufferPtr + offset),
160 FW_8192C_PAGE_SIZE);
161 }
162
163 if (remainSize) {
164 offset = pageNums * FW_8192C_PAGE_SIZE;
165 page = pageNums;
166 _rtl92c_fw_page_write(hw, page, (bufferPtr + offset),
167 remainSize);
168 }
169 } else {
170 _rtl92c_fw_block_write(hw, buffer, size);
171 }
172}
173
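/*
 * Release the MCU after download: poll for the checksum report, set
 * MCUFWDL_RDY and clear WINTINI_RDY, then poll until the firmware signals
 * WINTINI_RDY or the timeout expires.
 */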
174static int _rtl92c_fw_free_to_go(struct ieee80211_hw *hw)
175{
176 struct rtl_priv *rtlpriv = rtl_priv(hw);
177 int err = -EIO;
178 u32 counter = 0;
179 u32 value32;
180
181 do {
182 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
183 } while ((counter++ < FW_8192C_POLLING_TIMEOUT_COUNT) &&
184 (!(value32 & FWDL_ChkSum_rpt)));
185
186 if (counter >= FW_8192C_POLLING_TIMEOUT_COUNT) {
187 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 188			 ("chksum report fail! REG_MCUFWDL:0x%08x.\n",
189 value32));
190 goto exit;
191 }
192
193 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
 194		 ("Checksum report OK! REG_MCUFWDL:0x%08x.\n", value32));
195
196 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
197 value32 |= MCUFWDL_RDY;
198 value32 &= ~WINTINI_RDY;
199 rtl_write_dword(rtlpriv, REG_MCUFWDL, value32);
200
201 counter = 0;
202
203 do {
204 value32 = rtl_read_dword(rtlpriv, REG_MCUFWDL);
205 if (value32 & WINTINI_RDY) {
206 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
207 ("Polling FW ready success!!"
208 " REG_MCUFWDL:0x%08x .\n",
209 value32));
210 err = 0;
211 goto exit;
212 }
213
214 mdelay(FW_8192C_POLLING_DELAY);
215
216 } while (counter++ < FW_8192C_POLLING_TIMEOUT_COUNT);
217
218 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 219		 ("Polling FW ready failed! REG_MCUFWDL:0x%08x.\n", value32));
220
221exit:
222 return err;
223}
224
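/*
 * Request the firmware blob, strip the optional rtl92c_firmware_header and
 * download the image.  Note that the readiness result returned by
 * _rtl92c_fw_free_to_go() is only logged; this function returns 0 either way.
 */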
225int rtl92c_download_fw(struct ieee80211_hw *hw)
226{
227 struct rtl_priv *rtlpriv = rtl_priv(hw);
228 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
229 struct rtl92c_firmware_header *pfwheader;
230 u8 *pfwdata;
231 u32 fwsize;
232 int err;
233 enum version_8192c version = rtlhal->version;
234
235 const struct firmware *firmware = NULL;
236
237 err = request_firmware(&firmware, rtlpriv->cfg->fw_name,
238 rtlpriv->io.dev);
239 if (err) {
240 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
241 ("Failed to request firmware!\n"));
242 return 1;
243 }
244
245 if (firmware->size > 0x4000) {
246 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
247 ("Firmware is too big!\n"));
248 release_firmware(firmware);
249 return 1;
250 }
251
252 memcpy(rtlhal->pfirmware, firmware->data, firmware->size);
253 fwsize = firmware->size;
254 release_firmware(firmware);
255
256 pfwheader = (struct rtl92c_firmware_header *)rtlhal->pfirmware;
257 pfwdata = (u8 *) rtlhal->pfirmware;
258
259 if (IS_FW_HEADER_EXIST(pfwheader)) {
260 RT_TRACE(rtlpriv, COMP_FW, DBG_DMESG,
261 ("Firmware Version(%d), Signature(%#x),Size(%d)\n",
262 pfwheader->version, pfwheader->signature,
263 (uint)sizeof(struct rtl92c_firmware_header)));
264
265 pfwdata = pfwdata + sizeof(struct rtl92c_firmware_header);
266 fwsize = fwsize - sizeof(struct rtl92c_firmware_header);
267 }
268
269 _rtl92c_enable_fw_download(hw, true);
270 _rtl92c_write_fw(hw, version, pfwdata, fwsize);
271 _rtl92c_enable_fw_download(hw, false);
272
273 err = _rtl92c_fw_free_to_go(hw);
274 if (err) {
275 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
276 ("Firmware is not ready to run!\n"));
277 } else {
278 RT_TRACE(rtlpriv, COMP_FW, DBG_TRACE,
279 ("Firmware is ready to run!\n"));
280 }
281
282 return 0;
283}
284
285static bool _rtl92c_check_fw_read_last_h2c(struct ieee80211_hw *hw, u8 boxnum)
286{
287 struct rtl_priv *rtlpriv = rtl_priv(hw);
288 u8 val_hmetfr, val_mcutst_1;
289 bool result = false;
290
291 val_hmetfr = rtl_read_byte(rtlpriv, REG_HMETFR);
292 val_mcutst_1 = rtl_read_byte(rtlpriv, (REG_MCUTST_1 + boxnum));
293
294 if (((val_hmetfr >> boxnum) & BIT(0)) == 0 && val_mcutst_1 == 0)
295 result = true;
296 return result;
297}
298
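/*
 * Host-to-card mailbox write: serialize on h2c_lock, pick the next of the
 * four HMEBOX registers round-robin, wait for the firmware to consume the
 * previous message, then write the command.  Payloads of four or five
 * bytes use the extension box and set bit 7 of the first byte.
 */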
299static void _rtl92c_fill_h2c_command(struct ieee80211_hw *hw,
300 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
301{
302 struct rtl_priv *rtlpriv = rtl_priv(hw);
303 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
304 u8 boxnum;
305 u16 box_reg, box_extreg;
306 u8 u1b_tmp;
307 bool isfw_read = false;
 308	u8 buf_index = 0;
309 bool bwrite_sucess = false;
310 u8 wait_h2c_limmit = 100;
311 u8 wait_writeh2c_limmit = 100;
312 u8 boxcontent[4], boxextcontent[2];
313 u32 h2c_waitcounter = 0;
314 unsigned long flag;
315 u8 idx;
316
317 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("come in\n"));
318
319 while (true) {
320 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
321 if (rtlhal->b_h2c_setinprogress) {
322 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
 323				 ("H2C set in progress! Waiting to set "
324 "element_id(%d).\n", element_id));
325
326 while (rtlhal->b_h2c_setinprogress) {
327 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock,
328 flag);
329 h2c_waitcounter++;
330 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
331 ("Wait 100 us (%d times)...\n",
332 h2c_waitcounter));
333 udelay(100);
334
335 if (h2c_waitcounter > 1000)
336 return;
337 spin_lock_irqsave(&rtlpriv->locks.h2c_lock,
338 flag);
339 }
340 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
341 } else {
342 rtlhal->b_h2c_setinprogress = true;
343 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
344 break;
345 }
346 }
347
348 while (!bwrite_sucess) {
349 wait_writeh2c_limmit--;
350 if (wait_writeh2c_limmit == 0) {
351 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
352 ("Write H2C fail because no trigger "
353 "for FW INT!\n"));
354 break;
355 }
356
357 boxnum = rtlhal->last_hmeboxnum;
358 switch (boxnum) {
359 case 0:
360 box_reg = REG_HMEBOX_0;
361 box_extreg = REG_HMEBOX_EXT_0;
362 break;
363 case 1:
364 box_reg = REG_HMEBOX_1;
365 box_extreg = REG_HMEBOX_EXT_1;
366 break;
367 case 2:
368 box_reg = REG_HMEBOX_2;
369 box_extreg = REG_HMEBOX_EXT_2;
370 break;
371 case 3:
372 box_reg = REG_HMEBOX_3;
373 box_extreg = REG_HMEBOX_EXT_3;
374 break;
375 default:
376 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 377				 ("switch case not processed\n"));
378 break;
379 }
380
381 isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
382 while (!isfw_read) {
383
384 wait_h2c_limmit--;
385 if (wait_h2c_limmit == 0) {
386 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
 387					 ("Waiting too long for FW to read and "
 388					  "clear HMEBox(%d)!\n", boxnum));
389 break;
390 }
391
392 udelay(10);
393
394 isfw_read = _rtl92c_check_fw_read_last_h2c(hw, boxnum);
395 u1b_tmp = rtl_read_byte(rtlpriv, 0x1BF);
396 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
 397				 ("Waiting for FW to read and clear HMEBox(%d)!!! "
398 "0x1BF = %2x\n", boxnum, u1b_tmp));
399 }
400
401 if (!isfw_read) {
402 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
403 ("Write H2C register BOX[%d] fail!!!!! "
 404				 "FW did not read it.\n", boxnum));
405 break;
406 }
407
408 memset(boxcontent, 0, sizeof(boxcontent));
409 memset(boxextcontent, 0, sizeof(boxextcontent));
410 boxcontent[0] = element_id;
411 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
412 ("Write element_id box_reg(%4x) = %2x\n",
413 box_reg, element_id));
414
415 switch (cmd_len) {
416 case 1:
417 boxcontent[0] &= ~(BIT(7));
418 memcpy((u8 *) (boxcontent) + 1,
419 p_cmdbuffer + buf_index, 1);
420
421 for (idx = 0; idx < 4; idx++) {
422 rtl_write_byte(rtlpriv, box_reg + idx,
423 boxcontent[idx]);
424 }
425 break;
426 case 2:
427 boxcontent[0] &= ~(BIT(7));
428 memcpy((u8 *) (boxcontent) + 1,
429 p_cmdbuffer + buf_index, 2);
430
431 for (idx = 0; idx < 4; idx++) {
432 rtl_write_byte(rtlpriv, box_reg + idx,
433 boxcontent[idx]);
434 }
435 break;
436 case 3:
437 boxcontent[0] &= ~(BIT(7));
438 memcpy((u8 *) (boxcontent) + 1,
439 p_cmdbuffer + buf_index, 3);
440
441 for (idx = 0; idx < 4; idx++) {
442 rtl_write_byte(rtlpriv, box_reg + idx,
443 boxcontent[idx]);
444 }
445 break;
446 case 4:
447 boxcontent[0] |= (BIT(7));
448 memcpy((u8 *) (boxextcontent),
449 p_cmdbuffer + buf_index, 2);
450 memcpy((u8 *) (boxcontent) + 1,
451 p_cmdbuffer + buf_index + 2, 2);
452
453 for (idx = 0; idx < 2; idx++) {
454 rtl_write_byte(rtlpriv, box_extreg + idx,
455 boxextcontent[idx]);
456 }
457
458 for (idx = 0; idx < 4; idx++) {
459 rtl_write_byte(rtlpriv, box_reg + idx,
460 boxcontent[idx]);
461 }
462 break;
463 case 5:
464 boxcontent[0] |= (BIT(7));
465 memcpy((u8 *) (boxextcontent),
466 p_cmdbuffer + buf_index, 2);
467 memcpy((u8 *) (boxcontent) + 1,
468 p_cmdbuffer + buf_index + 2, 3);
469
470 for (idx = 0; idx < 2; idx++) {
471 rtl_write_byte(rtlpriv, box_extreg + idx,
472 boxextcontent[idx]);
473 }
474
475 for (idx = 0; idx < 4; idx++) {
476 rtl_write_byte(rtlpriv, box_reg + idx,
477 boxcontent[idx]);
478 }
479 break;
480 default:
481 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
 482				 ("switch case not processed\n"));
483 break;
484 }
485
486 bwrite_sucess = true;
487
488 rtlhal->last_hmeboxnum = boxnum + 1;
489 if (rtlhal->last_hmeboxnum == 4)
490 rtlhal->last_hmeboxnum = 0;
491
492 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD,
493 ("pHalData->last_hmeboxnum = %d\n",
494 rtlhal->last_hmeboxnum));
495 }
496
497 spin_lock_irqsave(&rtlpriv->locks.h2c_lock, flag);
498 rtlhal->b_h2c_setinprogress = false;
499 spin_unlock_irqrestore(&rtlpriv->locks.h2c_lock, flag);
500
501 RT_TRACE(rtlpriv, COMP_CMD, DBG_LOUD, ("go out\n"));
502}
503
504void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
505 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer)
506{
507 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
508 u32 tmp_cmdbuf[2];
509
510 if (rtlhal->bfw_ready == false) {
511 RT_ASSERT(false, ("return H2C cmd because of Fw "
512 "download fail!!!\n"));
513 return;
514 }
515
516 memset(tmp_cmdbuf, 0, 8);
517 memcpy(tmp_cmdbuf, p_cmdbuffer, cmd_len);
518 _rtl92c_fill_h2c_command(hw, element_id, cmd_len, (u8 *)&tmp_cmdbuf);
519
520 return;
521}
522
523void rtl92c_firmware_selfreset(struct ieee80211_hw *hw)
524{
525 u8 u1b_tmp;
526 u8 delay = 100;
527 struct rtl_priv *rtlpriv = rtl_priv(hw);
528
529 rtl_write_byte(rtlpriv, REG_HMETFR + 3, 0x20);
530 u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
531
532 while (u1b_tmp & BIT(2)) {
533 delay--;
534 if (delay == 0) {
535 RT_ASSERT(false, ("8051 reset fail.\n"));
536 break;
537 }
538 udelay(50);
539 u1b_tmp = rtl_read_byte(rtlpriv, REG_SYS_FUNC_EN + 1);
540 }
541}
542
543void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode)
544{
545 struct rtl_priv *rtlpriv = rtl_priv(hw);
546 u8 u1_h2c_set_pwrmode[3] = {0};
547 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
548
549 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD, ("FW LPS mode = %d\n", mode));
550
551 SET_H2CCMD_PWRMODE_PARM_MODE(u1_h2c_set_pwrmode, mode);
552 SET_H2CCMD_PWRMODE_PARM_SMART_PS(u1_h2c_set_pwrmode, 1);
553 SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(u1_h2c_set_pwrmode,
554 ppsc->reg_max_lps_awakeintvl);
555
556 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
 557		      "rtl92c_set_fw_pwrmode_cmd(): u1_h2c_set_pwrmode\n",
558 u1_h2c_set_pwrmode, 3);
559 rtl92c_fill_h2c_cmd(hw, H2C_SETPWRMODE, 3, u1_h2c_set_pwrmode);
560
561}
562
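/*
 * Send a command packet on the beacon queue: any skb still queued there is
 * freed first, the first descriptor is filled as a command descriptor and
 * TX polling is kicked.
 */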
563static bool _rtl92c_cmd_send_packet(struct ieee80211_hw *hw,
564 struct sk_buff *skb)
565{
566 struct rtl_priv *rtlpriv = rtl_priv(hw);
567 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
568 struct rtl8192_tx_ring *ring;
569 struct rtl_tx_desc *pdesc;
570 u8 own;
571 unsigned long flags;
572 struct sk_buff *pskb = NULL;
573
574 ring = &rtlpci->tx_ring[BEACON_QUEUE];
575
576 pskb = __skb_dequeue(&ring->queue);
577 if (pskb)
578 kfree_skb(pskb);
579
580 spin_lock_irqsave(&rtlpriv->locks.irq_th_lock, flags);
581
582 pdesc = &ring->desc[0];
583 own = (u8) rtlpriv->cfg->ops->get_desc((u8 *) pdesc, true, HW_DESC_OWN);
584
585 rtlpriv->cfg->ops->fill_tx_cmddesc(hw, (u8 *) pdesc, 1, 1, skb);
586
587 __skb_queue_tail(&ring->queue, skb);
588
589 spin_unlock_irqrestore(&rtlpriv->locks.irq_th_lock, flags);
590
591 rtlpriv->cfg->ops->tx_polling(hw, BEACON_QUEUE);
592
593 return true;
594}
595
596#define BEACON_PG 0 /*->1*/
597#define PSPOLL_PG 2
598#define NULL_PG 3
599#define PROBERSP_PG 4 /*->5*/
600
601#define TOTAL_RESERVED_PKT_LEN 768
602
603static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
604 /* page 0 beacon */
605 0x80, 0x00, 0x00, 0x00, 0xFF, 0xFF, 0xFF, 0xFF,
606 0xFF, 0xFF, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
607 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x50, 0x08,
608 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
609 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
610 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
611 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
612 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
613 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
614 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
615 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
616 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
617 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
618 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
619 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
620 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
621
622 /* page 1 beacon */
623 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
624 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
625 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
626 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
627 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
628 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
629 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
630 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
631 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
632 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
633 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
634 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
635 0x10, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x10, 0x00,
636 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
637 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
638 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
639
640 /* page 2 ps-poll */
641 0xA4, 0x10, 0x01, 0xC0, 0x00, 0x40, 0x10, 0x10,
642 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
643 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
644 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
645 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
646 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
647 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
648 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
649 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
650 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
651 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
652 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
653 0x18, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
654 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
655 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
656 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
657
658 /* page 3 null */
659 0x48, 0x01, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
660 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
661 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
662 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
663 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
664 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
665 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
666 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
667 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
668 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
669 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
670 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
671 0x72, 0x00, 0x20, 0x8C, 0x00, 0x12, 0x00, 0x00,
672 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x80,
673 0x80, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
674 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
675
676 /* page 4 probe_resp */
677 0x50, 0x00, 0x00, 0x00, 0x00, 0x40, 0x10, 0x10,
678 0x00, 0x03, 0x00, 0xE0, 0x4C, 0x76, 0x00, 0x42,
679 0x00, 0x40, 0x10, 0x10, 0x00, 0x03, 0x00, 0x00,
680 0x9E, 0x46, 0x15, 0x32, 0x27, 0xF2, 0x2D, 0x00,
681 0x64, 0x00, 0x00, 0x04, 0x00, 0x0C, 0x6C, 0x69,
682 0x6E, 0x6B, 0x73, 0x79, 0x73, 0x5F, 0x77, 0x6C,
683 0x61, 0x6E, 0x01, 0x04, 0x82, 0x84, 0x8B, 0x96,
684 0x03, 0x01, 0x01, 0x06, 0x02, 0x00, 0x00, 0x2A,
685 0x01, 0x00, 0x32, 0x08, 0x24, 0x30, 0x48, 0x6C,
686 0x0C, 0x12, 0x18, 0x60, 0x2D, 0x1A, 0x6C, 0x18,
687 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
688 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
689 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
690 0x3D, 0x00, 0xDD, 0x06, 0x00, 0xE0, 0x4C, 0x02,
691 0x01, 0x70, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
692 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
693
694 /* page 5 probe_resp */
695 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
696 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
697 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
698 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
699 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
700 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
701 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
702 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
703 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
704 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
705 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
706 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
707 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
708 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
709 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
710 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
711};
712
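/*
 * Patch the current MAC address, BSSID and AID into the reserved-page
 * templates, download them through the beacon queue, and then tell the
 * firmware where each page lives with an H2C_RSVDPAGE command.
 */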
713void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
714{
715 struct rtl_priv *rtlpriv = rtl_priv(hw);
716 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
717 struct sk_buff *skb = NULL;
718
719 u32 totalpacketlen;
720 bool rtstatus;
721 u8 u1RsvdPageLoc[3] = {0};
722 bool b_dlok = false;
723
724 u8 *beacon;
725 u8 *p_pspoll;
726 u8 *nullfunc;
727 u8 *p_probersp;
728 /*---------------------------------------------------------
729 (1) beacon
730 ---------------------------------------------------------*/
731 beacon = &reserved_page_packet[BEACON_PG * 128];
732 SET_80211_HDR_ADDRESS2(beacon, mac->mac_addr);
733 SET_80211_HDR_ADDRESS3(beacon, mac->bssid);
734
735 /*-------------------------------------------------------
736 (2) ps-poll
737 --------------------------------------------------------*/
738 p_pspoll = &reserved_page_packet[PSPOLL_PG * 128];
739 SET_80211_PS_POLL_AID(p_pspoll, (mac->assoc_id | 0xc000));
740 SET_80211_PS_POLL_BSSID(p_pspoll, mac->bssid);
741 SET_80211_PS_POLL_TA(p_pspoll, mac->mac_addr);
742
743 SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(u1RsvdPageLoc, PSPOLL_PG);
744
745 /*--------------------------------------------------------
746 (3) null data
747 ---------------------------------------------------------*/
748 nullfunc = &reserved_page_packet[NULL_PG * 128];
749 SET_80211_HDR_ADDRESS1(nullfunc, mac->bssid);
750 SET_80211_HDR_ADDRESS2(nullfunc, mac->mac_addr);
751 SET_80211_HDR_ADDRESS3(nullfunc, mac->bssid);
752
753 SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(u1RsvdPageLoc, NULL_PG);
754
755 /*---------------------------------------------------------
756 (4) probe response
757 ----------------------------------------------------------*/
758 p_probersp = &reserved_page_packet[PROBERSP_PG * 128];
759 SET_80211_HDR_ADDRESS1(p_probersp, mac->bssid);
760 SET_80211_HDR_ADDRESS2(p_probersp, mac->mac_addr);
761 SET_80211_HDR_ADDRESS3(p_probersp, mac->bssid);
762
763 SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(u1RsvdPageLoc, PROBERSP_PG);
764
765 totalpacketlen = TOTAL_RESERVED_PKT_LEN;
766
767 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
768 "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
769 &reserved_page_packet[0], totalpacketlen);
770 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
771 "rtl92c_set_fw_rsvdpagepkt(): HW_VAR_SET_TX_CMD: ALL\n",
772 u1RsvdPageLoc, 3);
773
774
775	skb = dev_alloc_skb(totalpacketlen);
	if (!skb)
		return;
776	memcpy((u8 *) skb_put(skb, totalpacketlen),
777	       &reserved_page_packet, totalpacketlen);
778
779 rtstatus = _rtl92c_cmd_send_packet(hw, skb);
780
781 if (rtstatus)
782 b_dlok = true;
783
784 if (b_dlok) {
785 RT_TRACE(rtlpriv, COMP_POWER, DBG_LOUD,
786 ("Set RSVD page location to Fw.\n"));
787 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_DMESG,
788 "H2C_RSVDPAGE:\n",
789 u1RsvdPageLoc, 3);
790 rtl92c_fill_h2c_cmd(hw, H2C_RSVDPAGE,
791 sizeof(u1RsvdPageLoc), u1RsvdPageLoc);
792 } else
793 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
794			 ("Set RSVD page location to Fw FAIL!\n"));
795}
796
797void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus)
798{
799 u8 u1_joinbssrpt_parm[1] = {0};
800
801 SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(u1_joinbssrpt_parm, mstatus);
802
803 rtl92c_fill_h2c_cmd(hw, H2C_JOINBSSRPT, 1, u1_joinbssrpt_parm);
804}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h
new file mode 100644
index 000000000000..3db33bd14666
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/fw.h
@@ -0,0 +1,98 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C__FW__H__
31#define __RTL92C__FW__H__
32
33#define FW_8192C_SIZE 0x3000
34#define FW_8192C_START_ADDRESS 0x1000
35#define FW_8192C_END_ADDRESS 0x3FFF
36#define FW_8192C_PAGE_SIZE 4096
37#define FW_8192C_POLLING_DELAY 5
38#define FW_8192C_POLLING_TIMEOUT_COUNT 100
39
40#define IS_FW_HEADER_EXIST(_pfwhdr) \
41 ((_pfwhdr->signature&0xFFF0) == 0x92C0 ||\
42 (_pfwhdr->signature&0xFFF0) == 0x88C0)
43
44struct rtl92c_firmware_header {
45 u16 signature;
46 u8 category;
47 u8 function;
48 u16 version;
49 u8 subversion;
50 u8 rsvd1;
51 u8 month;
52 u8 date;
53 u8 hour;
54 u8 minute;
55 u16 ramcodeSize;
56 u16 rsvd2;
57 u32 svnindex;
58 u32 rsvd3;
59 u32 rsvd4;
60 u32 rsvd5;
61};
62
63enum rtl8192c_h2c_cmd {
64 H2C_AP_OFFLOAD = 0,
65 H2C_SETPWRMODE = 1,
66 H2C_JOINBSSRPT = 2,
67 H2C_RSVDPAGE = 3,
68 H2C_RSSI_REPORT = 5,
69 H2C_RA_MASK = 6,
70 MAX_H2CCMD
71};
72
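/* Number of 128-byte pages needed to hold a buffer of _len bytes. */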
73#define pagenum_128(_len) (u32)(((_len)>>7) + ((_len)&0x7F ? 1 : 0))
74
75#define SET_H2CCMD_PWRMODE_PARM_MODE(__ph2ccmd, __val) \
76 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
77#define SET_H2CCMD_PWRMODE_PARM_SMART_PS(__ph2ccmd, __val) \
78 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
79#define SET_H2CCMD_PWRMODE_PARM_BCN_PASS_TIME(__ph2ccmd, __val) \
80 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
81#define SET_H2CCMD_JOINBSSRPT_PARM_OPMODE(__ph2ccmd, __val) \
82 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
83#define SET_H2CCMD_RSVDPAGE_LOC_PROBE_RSP(__ph2ccmd, __val) \
84 SET_BITS_TO_LE_1BYTE(__ph2ccmd, 0, 8, __val)
85#define SET_H2CCMD_RSVDPAGE_LOC_PSPOLL(__ph2ccmd, __val) \
86 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+1, 0, 8, __val)
87#define SET_H2CCMD_RSVDPAGE_LOC_NULL_DATA(__ph2ccmd, __val) \
88 SET_BITS_TO_LE_1BYTE((__ph2ccmd)+2, 0, 8, __val)
89
90int rtl92c_download_fw(struct ieee80211_hw *hw);
91void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
92 u32 cmd_len, u8 *p_cmdbuffer);
93void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
94void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
95void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished);
96void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
97
98#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
new file mode 100644
index 000000000000..1c41a0c93506
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -0,0 +1,2162 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../efuse.h"
32#include "../base.h"
33#include "../cam.h"
34#include "../ps.h"
35#include "../pci.h"
36#include "reg.h"
37#include "def.h"
38#include "phy.h"
39#include "dm.h"
40#include "fw.h"
41#include "led.h"
42#include "hw.h"
43
44#define LLT_CONFIG 5
45
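/*
 * Apply set/clear masks to the cached copy of REG_BCN_CTRL
 * (reg_bcn_ctrl_val) and write the result back to the hardware.
 */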
46static void _rtl92ce_set_bcn_ctrl_reg(struct ieee80211_hw *hw,
47 u8 set_bits, u8 clear_bits)
48{
49 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
50 struct rtl_priv *rtlpriv = rtl_priv(hw);
51
52 rtlpci->reg_bcn_ctrl_val |= set_bits;
53 rtlpci->reg_bcn_ctrl_val &= ~clear_bits;
54
55 rtl_write_byte(rtlpriv, REG_BCN_CTRL, (u8) rtlpci->reg_bcn_ctrl_val);
56}
57
58static void _rtl92ce_stop_tx_beacon(struct ieee80211_hw *hw)
59{
60 struct rtl_priv *rtlpriv = rtl_priv(hw);
61 u8 tmp1byte;
62
63 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
64 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte & (~BIT(6)));
65 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0x64);
66 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
67 tmp1byte &= ~(BIT(0));
68 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
69}
70
71static void _rtl92ce_resume_tx_beacon(struct ieee80211_hw *hw)
72{
73 struct rtl_priv *rtlpriv = rtl_priv(hw);
74 u8 tmp1byte;
75
76 tmp1byte = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2);
77 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, tmp1byte | BIT(6));
78 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
79 tmp1byte = rtl_read_byte(rtlpriv, REG_TBTT_PROHIBIT + 2);
80 tmp1byte |= BIT(0);
81 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 2, tmp1byte);
82}
83
84static void _rtl92ce_enable_bcn_sub_func(struct ieee80211_hw *hw)
85{
86 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(1));
87}
88
89static void _rtl92ce_disable_bcn_sub_func(struct ieee80211_hw *hw)
90{
91 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(1), 0);
92}
93
94void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
95{
96 struct rtl_priv *rtlpriv = rtl_priv(hw);
97 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
98 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
99
100 switch (variable) {
101 case HW_VAR_RCR:
102 *((u32 *) (val)) = rtlpci->receive_config;
103 break;
104 case HW_VAR_RF_STATE:
105 *((enum rf_pwrstate *)(val)) = ppsc->rfpwr_state;
106 break;
107 case HW_VAR_FWLPS_RF_ON:{
108 enum rf_pwrstate rfState;
109 u32 val_rcr;
110
111 rtlpriv->cfg->ops->get_hw_reg(hw,
112 HW_VAR_RF_STATE,
113 (u8 *) (&rfState));
114 if (rfState == ERFOFF) {
115 *((bool *) (val)) = true;
116 } else {
117 val_rcr = rtl_read_dword(rtlpriv, REG_RCR);
118 val_rcr &= 0x00070000;
119 if (val_rcr)
120 *((bool *) (val)) = false;
121 else
122 *((bool *) (val)) = true;
123 }
124 break;
125 }
126 case HW_VAR_FW_PSMODE_STATUS:
127 *((bool *) (val)) = ppsc->b_fw_current_inpsmode;
128 break;
129 case HW_VAR_CORRECT_TSF:{
130 u64 tsf;
131 u32 *ptsf_low = (u32 *)&tsf;
132 u32 *ptsf_high = ((u32 *)&tsf) + 1;
133
134 *ptsf_high = rtl_read_dword(rtlpriv, (REG_TSFTR + 4));
135 *ptsf_low = rtl_read_dword(rtlpriv, REG_TSFTR);
136
137 *((u64 *) (val)) = tsf;
138
139 break;
140 }
141 case HW_VAR_MGT_FILTER:
142 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP0);
143 break;
144 case HW_VAR_CTRL_FILTER:
145 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP1);
146 break;
147 case HW_VAR_DATA_FILTER:
148 *((u16 *) (val)) = rtl_read_word(rtlpriv, REG_RXFLTMAP2);
149 break;
150 default:
151 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
152			 ("switch case not processed\n"));
153 break;
154 }
155}
156
157void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
158{
159 struct rtl_priv *rtlpriv = rtl_priv(hw);
160 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
161 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
162 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
163 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
164 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
165 u8 idx;
166
167 switch (variable) {
168 case HW_VAR_ETHER_ADDR:{
169 for (idx = 0; idx < ETH_ALEN; idx++) {
170 rtl_write_byte(rtlpriv, (REG_MACID + idx),
171 val[idx]);
172 }
173 break;
174 }
175 case HW_VAR_BASIC_RATE:{
176 u16 b_rate_cfg = ((u16 *) val)[0];
177 u8 rate_index = 0;
178 b_rate_cfg = b_rate_cfg & 0x15f;
179 b_rate_cfg |= 0x01;
180 rtl_write_byte(rtlpriv, REG_RRSR, b_rate_cfg & 0xff);
181 rtl_write_byte(rtlpriv, REG_RRSR + 1,
182 (b_rate_cfg >> 8)&0xff);
183 while (b_rate_cfg > 0x1) {
184 b_rate_cfg = (b_rate_cfg >> 1);
185 rate_index++;
186 }
187 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL,
188 rate_index);
189 break;
190 }
191 case HW_VAR_BSSID:{
192 for (idx = 0; idx < ETH_ALEN; idx++) {
193 rtl_write_byte(rtlpriv, (REG_BSSID + idx),
194 val[idx]);
195 }
196 break;
197 }
198 case HW_VAR_SIFS:{
199 rtl_write_byte(rtlpriv, REG_SIFS_CTX + 1, val[0]);
200 rtl_write_byte(rtlpriv, REG_SIFS_TRX + 1, val[1]);
201
202 rtl_write_byte(rtlpriv, REG_SPEC_SIFS + 1, val[0]);
203 rtl_write_byte(rtlpriv, REG_MAC_SPEC_SIFS + 1, val[0]);
204
205 if (!mac->ht_enable)
206 rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
207 0x0e0e);
208 else
209 rtl_write_word(rtlpriv, REG_RESP_SIFS_OFDM,
210 *((u16 *) val));
211 break;
212 }
213 case HW_VAR_SLOT_TIME:{
214 u8 e_aci;
215
216 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
217 ("HW_VAR_SLOT_TIME %x\n", val[0]));
218
219 rtl_write_byte(rtlpriv, REG_SLOT, val[0]);
220
221 for (e_aci = 0; e_aci < AC_MAX; e_aci++) {
222 rtlpriv->cfg->ops->set_hw_reg(hw,
223 HW_VAR_AC_PARAM,
224 (u8 *) (&e_aci));
225 }
226 break;
227 }
228 case HW_VAR_ACK_PREAMBLE:{
229 u8 reg_tmp;
230 u8 short_preamble = (bool) (*(u8 *) val);
231 reg_tmp = (mac->cur_40_prime_sc) << 5;
232 if (short_preamble)
233 reg_tmp |= 0x80;
234
235 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_tmp);
236 break;
237 }
238 case HW_VAR_AMPDU_MIN_SPACE:{
239 u8 min_spacing_to_set;
240 u8 sec_min_space;
241
242 min_spacing_to_set = *((u8 *) val);
243 if (min_spacing_to_set <= 7) {
244 sec_min_space = 0;
245
246 if (min_spacing_to_set < sec_min_space)
247 min_spacing_to_set = sec_min_space;
248
249 mac->min_space_cfg = ((mac->min_space_cfg &
250 0xf8) |
251 min_spacing_to_set);
252
253 *val = min_spacing_to_set;
254
255 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
256 ("Set HW_VAR_AMPDU_MIN_SPACE: %#x\n",
257 mac->min_space_cfg));
258
259 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
260 mac->min_space_cfg);
261 }
262 break;
263 }
264 case HW_VAR_SHORTGI_DENSITY:{
265 u8 density_to_set;
266
267 density_to_set = *((u8 *) val);
268 mac->min_space_cfg |= (density_to_set << 3);
269
270 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
271 ("Set HW_VAR_SHORTGI_DENSITY: %#x\n",
272 mac->min_space_cfg));
273
274 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE,
275 mac->min_space_cfg);
276
277 break;
278 }
279 case HW_VAR_AMPDU_FACTOR:{
280 u8 regtoset_normal[4] = { 0x41, 0xa8, 0x72, 0xb9 };
281
282 u8 factor_toset;
283 u8 *p_regtoset = NULL;
284 u8 index = 0;
285
286 p_regtoset = regtoset_normal;
287
288 factor_toset = *((u8 *) val);
289 if (factor_toset <= 3) {
290 factor_toset = (1 << (factor_toset + 2));
291 if (factor_toset > 0xf)
292 factor_toset = 0xf;
293
294 for (index = 0; index < 4; index++) {
295 if ((p_regtoset[index] & 0xf0) >
296 (factor_toset << 4))
297 p_regtoset[index] =
298 (p_regtoset[index] & 0x0f) |
299 (factor_toset << 4);
300
301 if ((p_regtoset[index] & 0x0f) >
302 factor_toset)
303 p_regtoset[index] =
304 (p_regtoset[index] & 0xf0) |
305 (factor_toset);
306
307 rtl_write_byte(rtlpriv,
308 (REG_AGGLEN_LMT + index),
309 p_regtoset[index]);
310
311 }
312
313 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
314 ("Set HW_VAR_AMPDU_FACTOR: %#x\n",
315 factor_toset));
316 }
317 break;
318 }
319 case HW_VAR_AC_PARAM:{
320 u8 e_aci = *((u8 *) val);
321 u32 u4b_ac_param = 0;
322
323 u4b_ac_param |= (u32) mac->ac[e_aci].aifs;
324 u4b_ac_param |= ((u32) mac->ac[e_aci].cw_min
325 & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
326 u4b_ac_param |= ((u32) mac->ac[e_aci].cw_max &
327 0xF) << AC_PARAM_ECW_MAX_OFFSET;
328 u4b_ac_param |= (u32) mac->ac[e_aci].tx_op
329 << AC_PARAM_TXOP_LIMIT_OFFSET;
330
331 RT_TRACE(rtlpriv, COMP_MLME, DBG_LOUD,
332 ("queue:%x, ac_param:%x\n", e_aci,
333 u4b_ac_param));
334
335 switch (e_aci) {
336 case AC1_BK:
337 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM,
338 u4b_ac_param);
339 break;
340 case AC0_BE:
341 rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM,
342 u4b_ac_param);
343 break;
344 case AC2_VI:
345 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM,
346 u4b_ac_param);
347 break;
348 case AC3_VO:
349 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM,
350 u4b_ac_param);
351 break;
352 default:
353 RT_ASSERT(false,
354 ("SetHwReg8185(): invalid aci: %d !\n",
355 e_aci));
356 break;
357 }
358
359 if (rtlpci->acm_method != eAcmWay2_SW)
360 rtlpriv->cfg->ops->set_hw_reg(hw,
361 HW_VAR_ACM_CTRL,
362 (u8 *) (&e_aci));
363 break;
364 }
365 case HW_VAR_ACM_CTRL:{
366 u8 e_aci = *((u8 *) val);
367 union aci_aifsn *p_aci_aifsn =
368 (union aci_aifsn *)(&(mac->ac[0].aifs));
369 u8 acm = p_aci_aifsn->f.acm;
370 u8 acm_ctrl = rtl_read_byte(rtlpriv, REG_ACMHWCTRL);
371
372 acm_ctrl =
373 acm_ctrl | ((rtlpci->acm_method == 2) ? 0x0 : 0x1);
374
375 if (acm) {
376 switch (e_aci) {
377 case AC0_BE:
378 acm_ctrl |= AcmHw_BeqEn;
379 break;
380 case AC2_VI:
381 acm_ctrl |= AcmHw_ViqEn;
382 break;
383 case AC3_VO:
384 acm_ctrl |= AcmHw_VoqEn;
385 break;
386 default:
387 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
388 ("HW_VAR_ACM_CTRL acm set "
389					 "failed: eACI is %d\n", e_aci));
390 break;
391 }
392 } else {
393 switch (e_aci) {
394 case AC0_BE:
395 acm_ctrl &= (~AcmHw_BeqEn);
396 break;
397 case AC2_VI:
398 acm_ctrl &= (~AcmHw_ViqEn);
399 break;
400 case AC3_VO:
401					acm_ctrl &= (~AcmHw_VoqEn);
402 break;
403 default:
404 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
405					 ("switch case not processed\n"));
406 break;
407 }
408 }
409
410 RT_TRACE(rtlpriv, COMP_QOS, DBG_TRACE,
411 ("SetHwReg8190pci(): [HW_VAR_ACM_CTRL] "
412 "Write 0x%X\n", acm_ctrl));
413 rtl_write_byte(rtlpriv, REG_ACMHWCTRL, acm_ctrl);
414 break;
415 }
416 case HW_VAR_RCR:{
417 rtl_write_dword(rtlpriv, REG_RCR, ((u32 *) (val))[0]);
418 rtlpci->receive_config = ((u32 *) (val))[0];
419 break;
420 }
421 case HW_VAR_RETRY_LIMIT:{
422 u8 retry_limit = ((u8 *) (val))[0];
423
424 rtl_write_word(rtlpriv, REG_RL,
425 retry_limit << RETRY_LIMIT_SHORT_SHIFT |
426 retry_limit << RETRY_LIMIT_LONG_SHIFT);
427 break;
428 }
429 case HW_VAR_DUAL_TSF_RST:
430 rtl_write_byte(rtlpriv, REG_DUAL_TSF_RST, (BIT(0) | BIT(1)));
431 break;
432 case HW_VAR_EFUSE_BYTES:
433 rtlefuse->efuse_usedbytes = *((u16 *) val);
434 break;
435 case HW_VAR_EFUSE_USAGE:
436 rtlefuse->efuse_usedpercentage = *((u8 *) val);
437 break;
438 case HW_VAR_IO_CMD:
439 rtl92c_phy_set_io_cmd(hw, (*(enum io_type *)val));
440 break;
441 case HW_VAR_WPA_CONFIG:
442 rtl_write_byte(rtlpriv, REG_SECCFG, *((u8 *) val));
443 break;
444 case HW_VAR_SET_RPWM:{
445 u8 rpwm_val;
446
447 rpwm_val = rtl_read_byte(rtlpriv, REG_PCIE_HRPWM);
448 udelay(1);
449
450 if (rpwm_val & BIT(7)) {
451 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
452 (*(u8 *) val));
453 } else {
454 rtl_write_byte(rtlpriv, REG_PCIE_HRPWM,
455 ((*(u8 *) val) | BIT(7)));
456 }
457
458 break;
459 }
460 case HW_VAR_H2C_FW_PWRMODE:{
461 u8 psmode = (*(u8 *) val);
462
463 if ((psmode != FW_PS_ACTIVE_MODE) &&
464 (!IS_92C_SERIAL(rtlhal->version))) {
465 rtl92c_dm_rf_saving(hw, true);
466 }
467
468 rtl92c_set_fw_pwrmode_cmd(hw, (*(u8 *) val));
469 break;
470 }
471 case HW_VAR_FW_PSMODE_STATUS:
472 ppsc->b_fw_current_inpsmode = *((bool *) val);
473 break;
474 case HW_VAR_H2C_FW_JOINBSSRPT:{
475 u8 mstatus = (*(u8 *) val);
476 u8 tmp_regcr, tmp_reg422;
477 bool b_recover = false;
478
479 if (mstatus == RT_MEDIA_CONNECT) {
480 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_AID,
481 NULL);
482
483 tmp_regcr = rtl_read_byte(rtlpriv, REG_CR + 1);
484 rtl_write_byte(rtlpriv, REG_CR + 1,
485 (tmp_regcr | BIT(0)));
486
487 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(3));
488 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0);
489
490 tmp_reg422 =
491 rtl_read_byte(rtlpriv,
492 REG_FWHW_TXQ_CTRL + 2);
493 if (tmp_reg422 & BIT(6))
494 b_recover = true;
495 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
496 tmp_reg422 & (~BIT(6)));
497
498 rtl92c_set_fw_rsvdpagepkt(hw, 0);
499
500 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
501 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
502
503 if (b_recover) {
504 rtl_write_byte(rtlpriv,
505 REG_FWHW_TXQ_CTRL + 2,
506 tmp_reg422);
507 }
508
509 rtl_write_byte(rtlpriv, REG_CR + 1,
510 (tmp_regcr & ~(BIT(0))));
511 }
512 rtl92c_set_fw_joinbss_report_cmd(hw, (*(u8 *) val));
513
514 break;
515 }
516 case HW_VAR_AID:{
517 u16 u2btmp;
518 u2btmp = rtl_read_word(rtlpriv, REG_BCN_PSR_RPT);
519 u2btmp &= 0xC000;
520 rtl_write_word(rtlpriv, REG_BCN_PSR_RPT, (u2btmp |
521 mac->assoc_id));
522
523 break;
524 }
525 case HW_VAR_CORRECT_TSF:{
526 u8 btype_ibss = ((u8 *) (val))[0];
527
528 /*btype_ibss = (mac->opmode == NL80211_IFTYPE_ADHOC) ?
529 1 : 0;*/
530
531 if (btype_ibss == true)
532 _rtl92ce_stop_tx_beacon(hw);
533
534 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(3));
535
536 rtl_write_dword(rtlpriv, REG_TSFTR,
537 (u32) (mac->tsf & 0xffffffff));
538 rtl_write_dword(rtlpriv, REG_TSFTR + 4,
539 (u32) ((mac->tsf >> 32)&0xffffffff));
540
541 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
542
543 if (btype_ibss == true)
544 _rtl92ce_resume_tx_beacon(hw);
545
546 break;
547
548 }
549 case HW_VAR_MGT_FILTER:
550 rtl_write_word(rtlpriv, REG_RXFLTMAP0, *(u16 *) val);
551 break;
552 case HW_VAR_CTRL_FILTER:
553 rtl_write_word(rtlpriv, REG_RXFLTMAP1, *(u16 *) val);
554 break;
555 case HW_VAR_DATA_FILTER:
556 rtl_write_word(rtlpriv, REG_RXFLTMAP2, *(u16 *) val);
557 break;
558 default:
559 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
560						     "not processed\n"));
561 break;
562 }
563}
564
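/*
 * Write one LLT (link list table) entry and poll REG_LLT_INIT until the
 * operation completes or POLLING_LLT_THRESHOLD iterations have passed.
 */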
565static bool _rtl92ce_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
566{
567 struct rtl_priv *rtlpriv = rtl_priv(hw);
568 bool status = true;
569 long count = 0;
570 u32 value = _LLT_INIT_ADDR(address) |
571 _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);
572
573 rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
574
575 do {
576 value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
577 if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
578 break;
579
580 if (count > POLLING_LLT_THRESHOLD) {
581 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
582 ("Failed to polling write LLT done at "
583 "address %d!\n", address));
584 status = false;
585 break;
586 }
587 } while (++count);
588
589 return status;
590}
591
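/*
 * Initialize the TX packet buffer link list: pages below txpktbuf_bndy
 * are chained for normal TX and the last of them is terminated with
 * 0xFF, while the pages from txpktbuf_bndy up to maxPage are linked
 * into a ring used for the beacon/reserved-page area.
 */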
592static bool _rtl92ce_llt_table_init(struct ieee80211_hw *hw)
593{
594 struct rtl_priv *rtlpriv = rtl_priv(hw);
595 unsigned short i;
596 u8 txpktbuf_bndy;
597 u8 maxPage;
598 bool status;
599
600#if LLT_CONFIG == 1
601 maxPage = 255;
602 txpktbuf_bndy = 252;
603#elif LLT_CONFIG == 2
604 maxPage = 127;
605 txpktbuf_bndy = 124;
606#elif LLT_CONFIG == 3
607 maxPage = 255;
608 txpktbuf_bndy = 174;
609#elif LLT_CONFIG == 4
610 maxPage = 255;
611 txpktbuf_bndy = 246;
612#elif LLT_CONFIG == 5
613 maxPage = 255;
614 txpktbuf_bndy = 246;
615#endif
616
617#if LLT_CONFIG == 1
618 rtl_write_byte(rtlpriv, REG_RQPN_NPQ, 0x1c);
619 rtl_write_dword(rtlpriv, REG_RQPN, 0x80a71c1c);
620#elif LLT_CONFIG == 2
621 rtl_write_dword(rtlpriv, REG_RQPN, 0x845B1010);
622#elif LLT_CONFIG == 3
623 rtl_write_dword(rtlpriv, REG_RQPN, 0x84838484);
624#elif LLT_CONFIG == 4
625 rtl_write_dword(rtlpriv, REG_RQPN, 0x80bd1c1c);
626#elif LLT_CONFIG == 5
627 rtl_write_word(rtlpriv, REG_RQPN_NPQ, 0x0000);
628
629 rtl_write_dword(rtlpriv, REG_RQPN, 0x80b01c29);
630#endif
631
632 rtl_write_dword(rtlpriv, REG_TRXFF_BNDY, (0x27FF0000 | txpktbuf_bndy));
633 rtl_write_byte(rtlpriv, REG_TDECTRL + 1, txpktbuf_bndy);
634
635 rtl_write_byte(rtlpriv, REG_TXPKTBUF_BCNQ_BDNY, txpktbuf_bndy);
636 rtl_write_byte(rtlpriv, REG_TXPKTBUF_MGQ_BDNY, txpktbuf_bndy);
637
638 rtl_write_byte(rtlpriv, 0x45D, txpktbuf_bndy);
639 rtl_write_byte(rtlpriv, REG_PBP, 0x11);
640 rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, 0x4);
641
642 for (i = 0; i < (txpktbuf_bndy - 1); i++) {
643 status = _rtl92ce_llt_write(hw, i, i + 1);
644 if (true != status)
645 return status;
646 }
647
648 status = _rtl92ce_llt_write(hw, (txpktbuf_bndy - 1), 0xFF);
649 if (true != status)
650 return status;
651
652 for (i = txpktbuf_bndy; i < maxPage; i++) {
653 status = _rtl92ce_llt_write(hw, i, (i + 1));
654 if (true != status)
655 return status;
656 }
657
658 status = _rtl92ce_llt_write(hw, maxPage, txpktbuf_bndy);
659 if (true != status)
660 return status;
661
662 return true;
663}
664
665static void _rtl92ce_gen_refresh_led_state(struct ieee80211_hw *hw)
666{
667 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
668 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
669 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
670 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
671
672 if (rtlpci->up_first_time)
673 return;
674
675 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS)
676 rtl92ce_sw_led_on(hw, pLed0);
677 else if (ppsc->rfoff_reason == RF_CHANGE_BY_INIT)
678 rtl92ce_sw_led_on(hw, pLed0);
679 else
680 rtl92ce_sw_led_off(hw, pLed0);
681
682}
683
684static bool _rtl92ce_init_mac(struct ieee80211_hw *hw)
685{
686 struct rtl_priv *rtlpriv = rtl_priv(hw);
687 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
688 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
689
690 unsigned char bytetmp;
691 unsigned short wordtmp;
692 u16 retry;
693
694 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x00);
695 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
696 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0F);
697
698 bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1) | BIT(0);
699 udelay(2);
700
701 rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, bytetmp);
702 udelay(2);
703
704 bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
705 udelay(2);
706
707 retry = 0;
708 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("reg0xec:%x:%x\n",
709 rtl_read_dword(rtlpriv, 0xEC),
710 bytetmp));
711
712 while ((bytetmp & BIT(0)) && retry < 1000) {
713 retry++;
714 udelay(50);
715 bytetmp = rtl_read_byte(rtlpriv, REG_APS_FSMCO + 1);
716 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("reg0xec:%x:%x\n",
717 rtl_read_dword(rtlpriv,
718 0xEC),
719 bytetmp));
720 udelay(50);
721 }
722
723 rtl_write_word(rtlpriv, REG_APS_FSMCO, 0x1012);
724
725 rtl_write_byte(rtlpriv, REG_SYS_ISO_CTRL + 1, 0x82);
726 udelay(2);
727
728 rtl_write_word(rtlpriv, REG_CR, 0x2ff);
729
730 if (_rtl92ce_llt_table_init(hw) == false)
731		return false;
732
733 rtl_write_dword(rtlpriv, REG_HISR, 0xffffffff);
734 rtl_write_byte(rtlpriv, REG_HISRE, 0xff);
735
736 rtl_write_word(rtlpriv, REG_TRXFF_BNDY + 2, 0x27ff);
737
738 wordtmp = rtl_read_word(rtlpriv, REG_TRXDMA_CTRL);
739 wordtmp &= 0xf;
740 wordtmp |= 0xF771;
741 rtl_write_word(rtlpriv, REG_TRXDMA_CTRL, wordtmp);
742
743 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 1, 0x1F);
744 rtl_write_dword(rtlpriv, REG_RCR, rtlpci->receive_config);
745 rtl_write_dword(rtlpriv, REG_TCR, rtlpci->transmit_config);
746
747 rtl_write_byte(rtlpriv, 0x4d0, 0x0);
748
749 rtl_write_dword(rtlpriv, REG_BCNQ_DESA,
750 ((u64) rtlpci->tx_ring[BEACON_QUEUE].dma) &
751 DMA_BIT_MASK(32));
752 rtl_write_dword(rtlpriv, REG_MGQ_DESA,
753 (u64) rtlpci->tx_ring[MGNT_QUEUE].dma &
754 DMA_BIT_MASK(32));
755 rtl_write_dword(rtlpriv, REG_VOQ_DESA,
756 (u64) rtlpci->tx_ring[VO_QUEUE].dma & DMA_BIT_MASK(32));
757 rtl_write_dword(rtlpriv, REG_VIQ_DESA,
758 (u64) rtlpci->tx_ring[VI_QUEUE].dma & DMA_BIT_MASK(32));
759 rtl_write_dword(rtlpriv, REG_BEQ_DESA,
760 (u64) rtlpci->tx_ring[BE_QUEUE].dma & DMA_BIT_MASK(32));
761 rtl_write_dword(rtlpriv, REG_BKQ_DESA,
762 (u64) rtlpci->tx_ring[BK_QUEUE].dma & DMA_BIT_MASK(32));
763 rtl_write_dword(rtlpriv, REG_HQ_DESA,
764 (u64) rtlpci->tx_ring[HIGH_QUEUE].dma &
765 DMA_BIT_MASK(32));
766 rtl_write_dword(rtlpriv, REG_RX_DESA,
767 (u64) rtlpci->rx_ring[RX_MPDU_QUEUE].dma &
768 DMA_BIT_MASK(32));
769
770 if (IS_92C_SERIAL(rtlhal->version))
771 rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x77);
772 else
773 rtl_write_byte(rtlpriv, REG_PCIE_CTRL_REG + 3, 0x22);
774
775 rtl_write_dword(rtlpriv, REG_INT_MIG, 0);
776
777 bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
778 rtl_write_byte(rtlpriv, REG_APSD_CTRL, bytetmp & ~BIT(6));
779 do {
780 retry++;
781 bytetmp = rtl_read_byte(rtlpriv, REG_APSD_CTRL);
782 } while ((retry < 200) && (bytetmp & BIT(7)));
783
784 _rtl92ce_gen_refresh_led_state(hw);
785
786 rtl_write_dword(rtlpriv, REG_MCUTST_1, 0x0);
787
788	return true;
789}
790
791static void _rtl92ce_hw_configure(struct ieee80211_hw *hw)
792{
793 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
794 struct rtl_priv *rtlpriv = rtl_priv(hw);
795 u8 reg_bw_opmode;
796 u32 reg_ratr, reg_prsr;
797
798 reg_bw_opmode = BW_OPMODE_20MHZ;
799 reg_ratr = RATE_ALL_CCK | RATE_ALL_OFDM_AG |
800 RATE_ALL_OFDM_1SS | RATE_ALL_OFDM_2SS;
801 reg_prsr = RATE_ALL_CCK | RATE_ALL_OFDM_AG;
802
803 rtl_write_byte(rtlpriv, REG_INIRTS_RATE_SEL, 0x8);
804
805 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
806
807 rtl_write_dword(rtlpriv, REG_RRSR, reg_prsr);
808
809 rtl_write_byte(rtlpriv, REG_SLOT, 0x09);
810
811 rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, 0x0);
812
813 rtl_write_word(rtlpriv, REG_FWHW_TXQ_CTRL, 0x1F80);
814
815 rtl_write_word(rtlpriv, REG_RL, 0x0707);
816
817 rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x02012802);
818
819 rtl_write_byte(rtlpriv, REG_HWSEQ_CTRL, 0xFF);
820
821 rtl_write_dword(rtlpriv, REG_DARFRC, 0x01000000);
822 rtl_write_dword(rtlpriv, REG_DARFRC + 4, 0x07060504);
823 rtl_write_dword(rtlpriv, REG_RARFRC, 0x01000000);
824 rtl_write_dword(rtlpriv, REG_RARFRC + 4, 0x07060504);
825
826 rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0xb972a841);
827
828 rtl_write_byte(rtlpriv, REG_ATIMWND, 0x2);
829
830 rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xff);
831
832 rtlpci->reg_bcn_ctrl_val = 0x1f;
833 rtl_write_byte(rtlpriv, REG_BCN_CTRL, rtlpci->reg_bcn_ctrl_val);
834
835 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
836
837 rtl_write_byte(rtlpriv, REG_TBTT_PROHIBIT + 1, 0xff);
838
839 rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);
840 rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);
841
842 rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
843
844 rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0020);
845
846 rtl_write_dword(rtlpriv, REG_FAST_EDCA_CTRL, 0x086666);
847
848 rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
849
850 rtl_write_word(rtlpriv, REG_SPEC_SIFS, 0x1010);
851 rtl_write_word(rtlpriv, REG_MAC_SPEC_SIFS, 0x1010);
852
853 rtl_write_word(rtlpriv, REG_SIFS_CTX, 0x1010);
854
855 rtl_write_word(rtlpriv, REG_SIFS_TRX, 0x1010);
856
857 rtl_write_dword(rtlpriv, REG_MAR, 0xffffffff);
858 rtl_write_dword(rtlpriv, REG_MAR + 4, 0xffffffff);
859
860}
861
862static void _rtl92ce_enable_aspm_back_door(struct ieee80211_hw *hw)
863{
864 struct rtl_priv *rtlpriv = rtl_priv(hw);
865 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
866
867 rtl_write_byte(rtlpriv, 0x34b, 0x93);
868 rtl_write_word(rtlpriv, 0x350, 0x870c);
869 rtl_write_byte(rtlpriv, 0x352, 0x1);
870
871 if (ppsc->b_support_backdoor)
872 rtl_write_byte(rtlpriv, 0x349, 0x1b);
873 else
874 rtl_write_byte(rtlpriv, 0x349, 0x03);
875
876 rtl_write_word(rtlpriv, 0x350, 0x2718);
877 rtl_write_byte(rtlpriv, 0x352, 0x1);
878}
879
880void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw)
881{
882 struct rtl_priv *rtlpriv = rtl_priv(hw);
883 u8 sec_reg_value;
884
885 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
886 ("PairwiseEncAlgorithm = %d GroupEncAlgorithm = %d\n",
887 rtlpriv->sec.pairwise_enc_algorithm,
888 rtlpriv->sec.group_enc_algorithm));
889
890 if (rtlpriv->cfg->mod_params->sw_crypto || rtlpriv->sec.use_sw_sec) {
891 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("not open "
892 "hw encryption\n"));
893 return;
894 }
895
896 sec_reg_value = SCR_TxEncEnable | SCR_RxDecEnable;
897
898 if (rtlpriv->sec.use_defaultkey) {
899 sec_reg_value |= SCR_TxUseDK;
900 sec_reg_value |= SCR_RxUseDK;
901 }
902
903 sec_reg_value |= (SCR_RXBCUSEDK | SCR_TXBCUSEDK);
904
905 rtl_write_byte(rtlpriv, REG_CR + 1, 0x02);
906
907 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
908 ("The SECR-value %x\n", sec_reg_value));
909
910 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_WPA_CONFIG, &sec_reg_value);
911
912}
913
914int rtl92ce_hw_init(struct ieee80211_hw *hw)
915{
916 struct rtl_priv *rtlpriv = rtl_priv(hw);
917 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
918 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
919 struct rtl_phy *rtlphy = &(rtlpriv->phy);
920 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
921 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
922 static bool iqk_initialized; /* initialized to false */
923 bool rtstatus = true;
924 bool is92c;
925 int err;
926 u8 tmp_u1b;
927
928 rtlpci->being_init_adapter = true;
929 rtlpriv->intf_ops->disable_aspm(hw);
930 rtstatus = _rtl92ce_init_mac(hw);
931 if (rtstatus != true) {
932 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Init MAC failed\n"));
933 err = 1;
934 return err;
935 }
936
937 err = rtl92c_download_fw(hw);
938 if (err) {
939 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
940 ("Failed to download FW. Init HW "
941 "without FW now..\n"));
942 err = 1;
943 rtlhal->bfw_ready = false;
944 return err;
945 } else {
946 rtlhal->bfw_ready = true;
947 }
948
949 rtlhal->last_hmeboxnum = 0;
950 rtl92c_phy_mac_config(hw);
951 rtl92c_phy_bb_config(hw);
952 rtlphy->rf_mode = RF_OP_BY_SW_3WIRE;
953 rtl92c_phy_rf_config(hw);
954 rtlphy->rfreg_chnlval[0] = rtl_get_rfreg(hw, (enum radio_path)0,
955 RF_CHNLBW, RFREG_OFFSET_MASK);
956 rtlphy->rfreg_chnlval[1] = rtl_get_rfreg(hw, (enum radio_path)1,
957 RF_CHNLBW, RFREG_OFFSET_MASK);
958 rtl_set_bbreg(hw, RFPGA0_RFMOD, BCCKEN, 0x1);
959 rtl_set_bbreg(hw, RFPGA0_RFMOD, BOFDMEN, 0x1);
960 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
961 _rtl92ce_hw_configure(hw);
962 rtl_cam_reset_all_entry(hw);
963 rtl92ce_enable_hw_security_config(hw);
964 ppsc->rfpwr_state = ERFON;
965 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_ETHER_ADDR, mac->mac_addr);
966 _rtl92ce_enable_aspm_back_door(hw);
967 rtlpriv->intf_ops->enable_aspm(hw);
968 if (ppsc->rfpwr_state == ERFON) {
969 rtl92c_phy_set_rfpath_switch(hw, 1);
970 if (iqk_initialized)
971 rtl92c_phy_iq_calibrate(hw, true);
972 else {
973 rtl92c_phy_iq_calibrate(hw, false);
974 iqk_initialized = true;
975 }
976
977 rtl92c_dm_check_txpower_tracking(hw);
978 rtl92c_phy_lc_calibrate(hw);
979 }
980
981 is92c = IS_92C_SERIAL(rtlhal->version);
982 tmp_u1b = efuse_read_1byte(hw, 0x1FA);
983 if (!(tmp_u1b & BIT(0))) {
984 rtl_set_rfreg(hw, RF90_PATH_A, 0x15, 0x0F, 0x05);
985 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("PA BIAS path A\n"));
986 }
987
988 if (!(tmp_u1b & BIT(1)) && is92c) {
989 rtl_set_rfreg(hw, RF90_PATH_B, 0x15, 0x0F, 0x05);
990 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("PA BIAS path B\n"));
991 }
992
993 if (!(tmp_u1b & BIT(4))) {
994 tmp_u1b = rtl_read_byte(rtlpriv, 0x16);
995 tmp_u1b &= 0x0F;
996 rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x80);
997 udelay(10);
998 rtl_write_byte(rtlpriv, 0x16, tmp_u1b | 0x90);
999 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("under 1.5V\n"));
1000 }
1001 rtl92c_dm_init(hw);
1002 rtlpci->being_init_adapter = false;
1003 return err;
1004}
1005
1006static enum version_8192c _rtl92ce_read_chip_version(struct ieee80211_hw *hw)
1007{
1008 struct rtl_priv *rtlpriv = rtl_priv(hw);
1009 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1010 enum version_8192c version = VERSION_UNKNOWN;
1011 u32 value32;
1012
1013 value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
1014 if (value32 & TRP_VAUX_EN) {
1015 version = (value32 & TYPE_ID) ? VERSION_A_CHIP_92C :
1016 VERSION_A_CHIP_88C;
1017 } else {
1018 version = (value32 & TYPE_ID) ? VERSION_B_CHIP_92C :
1019 VERSION_B_CHIP_88C;
1020 }
1021
1022 switch (version) {
1023 case VERSION_B_CHIP_92C:
1024 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1025 ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
1026 break;
1027 case VERSION_B_CHIP_88C:
1028 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1029 ("Chip Version ID: VERSION_B_CHIP_88C.\n"));
1030 break;
1031 case VERSION_A_CHIP_92C:
1032 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1033 ("Chip Version ID: VERSION_A_CHIP_92C.\n"));
1034 break;
1035 case VERSION_A_CHIP_88C:
1036 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1037 ("Chip Version ID: VERSION_A_CHIP_88C.\n"));
1038 break;
1039 default:
1040 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1041 ("Chip Version ID: Unknown. Bug?\n"));
1042 break;
1043 }
1044
1045 switch (version & 0x3) {
1046 case CHIP_88C:
1047 rtlphy->rf_type = RF_1T1R;
1048 break;
1049 case CHIP_92C:
1050 rtlphy->rf_type = RF_2T2R;
1051 break;
1052 case CHIP_92C_1T2R:
1053 rtlphy->rf_type = RF_1T2R;
1054 break;
1055 default:
1056 rtlphy->rf_type = RF_1T1R;
1057 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1058			 ("ERROR: unknown RF type!\n"));
1059 break;
1060 }
1061
1062 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1063 ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
1064 "RF_2T2R" : "RF_1T1R"));
1065
1066 return version;
1067}
1068
1069static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
1070 enum nl80211_iftype type)
1071{
1072 struct rtl_priv *rtlpriv = rtl_priv(hw);
1073 u8 bt_msr = rtl_read_byte(rtlpriv, MSR);
1074 enum led_ctl_mode ledaction = LED_CTL_NO_LINK;
1075 bt_msr &= 0xfc;
1076
1077 if (type == NL80211_IFTYPE_UNSPECIFIED ||
1078 type == NL80211_IFTYPE_STATION) {
1079 _rtl92ce_stop_tx_beacon(hw);
1080 _rtl92ce_enable_bcn_sub_func(hw);
1081 } else if (type == NL80211_IFTYPE_ADHOC || type == NL80211_IFTYPE_AP) {
1082 _rtl92ce_resume_tx_beacon(hw);
1083 _rtl92ce_disable_bcn_sub_func(hw);
1084 } else {
1085 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1086 ("Set HW_VAR_MEDIA_STATUS: "
1087 "No such media status(%x).\n", type));
1088 }
1089
1090 switch (type) {
1091 case NL80211_IFTYPE_UNSPECIFIED:
1092 bt_msr |= MSR_NOLINK;
1093 ledaction = LED_CTL_LINK;
1094 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1095 ("Set Network type to NO LINK!\n"));
1096 break;
1097 case NL80211_IFTYPE_ADHOC:
1098 bt_msr |= MSR_ADHOC;
1099 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1100 ("Set Network type to Ad Hoc!\n"));
1101 break;
1102 case NL80211_IFTYPE_STATION:
1103 bt_msr |= MSR_INFRA;
1104 ledaction = LED_CTL_LINK;
1105 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1106 ("Set Network type to STA!\n"));
1107 break;
1108 case NL80211_IFTYPE_AP:
1109 bt_msr |= MSR_AP;
1110 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
1111 ("Set Network type to AP!\n"));
1112 break;
1113 default:
1114 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1115			 ("Network type %d not supported!\n", type));
1116		return 1;
1119 }
1120
1121 rtl_write_byte(rtlpriv, (MSR), bt_msr);
1122 rtlpriv->cfg->ops->led_control(hw, ledaction);
1123 if ((bt_msr & 0xfc) == MSR_AP)
1124 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x00);
1125 else
1126 rtl_write_byte(rtlpriv, REG_BCNTCFG + 1, 0x66);
1127 return 0;
1128}
1129
1130static void _rtl92ce_set_check_bssid(struct ieee80211_hw *hw,
1131 enum nl80211_iftype type)
1132{
1133 struct rtl_priv *rtlpriv = rtl_priv(hw);
1134 u32 reg_rcr = rtl_read_dword(rtlpriv, REG_RCR);
1135 u8 filterout_non_associated_bssid = false;
1136
1137 switch (type) {
1138 case NL80211_IFTYPE_ADHOC:
1139 case NL80211_IFTYPE_STATION:
1140 filterout_non_associated_bssid = true;
1141 break;
1142 case NL80211_IFTYPE_UNSPECIFIED:
1143 case NL80211_IFTYPE_AP:
1144 default:
1145 break;
1146 }
1147
1148 if (filterout_non_associated_bssid == true) {
1149 reg_rcr |= (RCR_CBSSID_DATA | RCR_CBSSID_BCN);
1150 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_RCR,
1151 (u8 *) (&reg_rcr));
1152 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
1153 } else if (filterout_non_associated_bssid == false) {
1154 reg_rcr &= (~(RCR_CBSSID_DATA | RCR_CBSSID_BCN));
1155 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(4), 0);
1156 rtlpriv->cfg->ops->set_hw_reg(hw,
1157 HW_VAR_RCR, (u8 *) (&reg_rcr));
1158 }
1159}
1160
1161int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
1162{
1163 if (_rtl92ce_set_media_status(hw, type))
1164 return -EOPNOTSUPP;
1165 _rtl92ce_set_check_bssid(hw, type);
1166 return 0;
1167}
1168
1169void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci)
1170{
1171 struct rtl_priv *rtlpriv = rtl_priv(hw);
1172 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1173
1174 u32 u4b_ac_param;
1175
1176 rtl92c_dm_init_edca_turbo(hw);
1177
1178 u4b_ac_param = (u32) mac->ac[aci].aifs;
1179 u4b_ac_param |=
1180 ((u32) mac->ac[aci].cw_min & 0xF) << AC_PARAM_ECW_MIN_OFFSET;
1181 u4b_ac_param |=
1182 ((u32) mac->ac[aci].cw_max & 0xF) << AC_PARAM_ECW_MAX_OFFSET;
1183 u4b_ac_param |= (u32) mac->ac[aci].tx_op << AC_PARAM_TXOP_LIMIT_OFFSET;
1184 RT_TRACE(rtlpriv, COMP_QOS, DBG_DMESG,
1185 ("queue:%x, ac_param:%x aifs:%x cwmin:%x cwmax:%x txop:%x\n",
1186 aci, u4b_ac_param, mac->ac[aci].aifs, mac->ac[aci].cw_min,
1187 mac->ac[aci].cw_max, mac->ac[aci].tx_op));
1188 switch (aci) {
1189 case AC1_BK:
1190 rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
1191 break;
1192 case AC0_BE:
1193 rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
1194 break;
1195 case AC2_VI:
1196 rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
1197 break;
1198 case AC3_VO:
1199 rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
1200 break;
1201 default:
1202 RT_ASSERT(false, ("invalid aci: %d !\n", aci));
1203 break;
1204 }
1205}
1206
1207void rtl92ce_enable_interrupt(struct ieee80211_hw *hw)
1208{
1209 struct rtl_priv *rtlpriv = rtl_priv(hw);
1210 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1211
1212 rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] & 0xFFFFFFFF);
1213 rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] & 0xFFFFFFFF);
1214 rtlpci->irq_enabled = true;
1215}
1216
1217void rtl92ce_disable_interrupt(struct ieee80211_hw *hw)
1218{
1219 struct rtl_priv *rtlpriv = rtl_priv(hw);
1220 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1221
1222 rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
1223 rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
1224 rtlpci->irq_enabled = false;
1225}
1226
1227static void _rtl92ce_poweroff_adapter(struct ieee80211_hw *hw)
1228{
1229 struct rtl_priv *rtlpriv = rtl_priv(hw);
1230 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1231 u8 u1b_tmp;
1232
1233 rtlpriv->intf_ops->enable_aspm(hw);
1234 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
1235 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
1236 rtl_write_byte(rtlpriv, REG_RF_CTRL, 0x00);
1237 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
1238 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
1239 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE0);
1240 if ((rtl_read_byte(rtlpriv, REG_MCUFWDL) & BIT(7)) && rtlhal->bfw_ready)
1241 rtl92c_firmware_selfreset(hw);
1242 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN + 1, 0x51);
1243 rtl_write_byte(rtlpriv, REG_MCUFWDL, 0x00);
1244 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00000000);
1245 u1b_tmp = rtl_read_byte(rtlpriv, REG_GPIO_PIN_CTRL);
1246 rtl_write_dword(rtlpriv, REG_GPIO_PIN_CTRL, 0x00FF0000 |
1247 (u1b_tmp << 8));
1248 rtl_write_word(rtlpriv, REG_GPIO_IO_SEL, 0x0790);
1249 rtl_write_word(rtlpriv, REG_LEDCFG0, 0x8080);
1250 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x80);
1251 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x23);
1252 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL, 0x0e);
1253 rtl_write_byte(rtlpriv, REG_RSV_CTRL, 0x0e);
1254 rtl_write_byte(rtlpriv, REG_APS_FSMCO + 1, 0x10);
1255}
1256
1257void rtl92ce_card_disable(struct ieee80211_hw *hw)
1258{
1259 struct rtl_priv *rtlpriv = rtl_priv(hw);
1260 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1261 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1262 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1263 enum nl80211_iftype opmode;
1264
1265 mac->link_state = MAC80211_NOLINK;
1266 opmode = NL80211_IFTYPE_UNSPECIFIED;
1267 _rtl92ce_set_media_status(hw, opmode);
1268 if (rtlpci->driver_is_goingto_unload ||
1269 ppsc->rfoff_reason > RF_CHANGE_BY_PS)
1270 rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF);
1271 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
1272 _rtl92ce_poweroff_adapter(hw);
1273}
1274
1275void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
1276 u32 *p_inta, u32 *p_intb)
1277{
1278 struct rtl_priv *rtlpriv = rtl_priv(hw);
1279 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1280
1281 *p_inta = rtl_read_dword(rtlpriv, ISR) & rtlpci->irq_mask[0];
1282 rtl_write_dword(rtlpriv, ISR, *p_inta);
1283
1284 /*
1285 * *p_intb = rtl_read_dword(rtlpriv, REG_HISRE) & rtlpci->irq_mask[1];
1286 * rtl_write_dword(rtlpriv, ISR + 4, *p_intb);
1287 */
1288}
1289
1290void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw)
1291{
1292
1293 struct rtl_priv *rtlpriv = rtl_priv(hw);
1294 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1295 u16 bcn_interval, atim_window;
1296
1297 bcn_interval = mac->beacon_interval;
1298 atim_window = 2; /*FIX MERGE */
1299 rtl92ce_disable_interrupt(hw);
1300 rtl_write_word(rtlpriv, REG_ATIMWND, atim_window);
1301 rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
1302 rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660f);
1303 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_CCK, 0x18);
1304 rtl_write_byte(rtlpriv, REG_RXTSF_OFFSET_OFDM, 0x18);
1305 rtl_write_byte(rtlpriv, 0x606, 0x30);
1306 rtl92ce_enable_interrupt(hw);
1307}
1308
1309void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw)
1310{
1311 struct rtl_priv *rtlpriv = rtl_priv(hw);
1312 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1313 u16 bcn_interval = mac->beacon_interval;
1314
1315 RT_TRACE(rtlpriv, COMP_BEACON, DBG_DMESG,
1316 ("beacon_interval:%d\n", bcn_interval));
1317 rtl92ce_disable_interrupt(hw);
1318 rtl_write_word(rtlpriv, REG_BCN_INTERVAL, bcn_interval);
1319 rtl92ce_enable_interrupt(hw);
1320}
1321
1322void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
1323 u32 add_msr, u32 rm_msr)
1324{
1325 struct rtl_priv *rtlpriv = rtl_priv(hw);
1326 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1327
1328 RT_TRACE(rtlpriv, COMP_INTR, DBG_LOUD,
1329 ("add_msr:%x, rm_msr:%x\n", add_msr, rm_msr));
1330 if (add_msr)
1331 rtlpci->irq_mask[0] |= add_msr;
1332 if (rm_msr)
1333 rtlpci->irq_mask[0] &= (~rm_msr);
1334 rtl92ce_disable_interrupt(hw);
1335 rtl92ce_enable_interrupt(hw);
1336}
1337
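/*
 * Map a zero-based channel index to one of the three EEPROM TX power
 * groups: channels 1-3, 4-9 and 10-14.
 */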
1338static u8 _rtl92c_get_chnl_group(u8 chnl)
1339{
1340 u8 group;
1341
1342 if (chnl < 3)
1343 group = 0;
1344 else if (chnl < 9)
1345 group = 1;
1346 else
1347 group = 2;
1348 return group;
1349}
1350
1351static void _rtl92ce_read_txpower_info_from_hwpg(struct ieee80211_hw *hw,
1352 bool autoload_fail,
1353 u8 *hwinfo)
1354{
1355 struct rtl_priv *rtlpriv = rtl_priv(hw);
1356 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1357 u8 rf_path, index, tempval;
1358 u16 i;
1359
1360 for (rf_path = 0; rf_path < 2; rf_path++) {
1361 for (i = 0; i < 3; i++) {
1362 if (!autoload_fail) {
1363 rtlefuse->
1364 eeprom_chnlarea_txpwr_cck[rf_path][i] =
1365 hwinfo[EEPROM_TXPOWERCCK + rf_path * 3 + i];
1366 rtlefuse->
1367 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
1368 hwinfo[EEPROM_TXPOWERHT40_1S + rf_path * 3 +
1369 i];
1370 } else {
1371 rtlefuse->
1372 eeprom_chnlarea_txpwr_cck[rf_path][i] =
1373 EEPROM_DEFAULT_TXPOWERLEVEL;
1374 rtlefuse->
1375 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i] =
1376 EEPROM_DEFAULT_TXPOWERLEVEL;
1377 }
1378 }
1379 }
1380
1381 for (i = 0; i < 3; i++) {
1382 if (!autoload_fail)
1383 tempval = hwinfo[EEPROM_TXPOWERHT40_2SDIFF + i];
1384 else
1385 tempval = EEPROM_DEFAULT_HT40_2SDIFF;
1386 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_A][i] =
1387 (tempval & 0xf);
1388 rtlefuse->eeprom_chnlarea_txpwr_ht40_2sdiif[RF90_PATH_B][i] =
1389 ((tempval & 0xf0) >> 4);
1390 }
1391
1392 for (rf_path = 0; rf_path < 2; rf_path++)
1393 for (i = 0; i < 3; i++)
1394 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
1395 ("RF(%d) EEPROM CCK Area(%d) = 0x%x\n", rf_path,
1396 i,
1397 rtlefuse->
1398 eeprom_chnlarea_txpwr_cck[rf_path][i]));
1399 for (rf_path = 0; rf_path < 2; rf_path++)
1400 for (i = 0; i < 3; i++)
1401 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
1402 ("RF(%d) EEPROM HT40 1S Area(%d) = 0x%x\n",
1403 rf_path, i,
1404 rtlefuse->
1405 eeprom_chnlarea_txpwr_ht40_1s[rf_path][i]));
1406 for (rf_path = 0; rf_path < 2; rf_path++)
1407 for (i = 0; i < 3; i++)
1408 RTPRINT(rtlpriv, FINIT, INIT_EEPROM,
1409 ("RF(%d) EEPROM HT40 2S Diff Area(%d) = 0x%x\n",
1410 rf_path, i,
1411 rtlefuse->
1412 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
1413 [i]));
1414
1415 for (rf_path = 0; rf_path < 2; rf_path++) {
1416 for (i = 0; i < 14; i++) {
1417 index = _rtl92c_get_chnl_group((u8) i);
1418
1419 rtlefuse->txpwrlevel_cck[rf_path][i] =
1420 rtlefuse->eeprom_chnlarea_txpwr_cck[rf_path][index];
1421 rtlefuse->txpwrlevel_ht40_1s[rf_path][i] =
1422 rtlefuse->
1423 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index];
1424
1425 if ((rtlefuse->
1426 eeprom_chnlarea_txpwr_ht40_1s[rf_path][index] -
1427 rtlefuse->
1428 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path][index])
1429 > 0) {
1430 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] =
1431 rtlefuse->
1432 eeprom_chnlarea_txpwr_ht40_1s[rf_path]
1433 [index] -
1434 rtlefuse->
1435 eeprom_chnlarea_txpwr_ht40_2sdiif[rf_path]
1436 [index];
1437 } else {
1438 rtlefuse->txpwrlevel_ht40_2s[rf_path][i] = 0;
1439 }
1440 }
1441
1442 for (i = 0; i < 14; i++) {
1443 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1444 ("RF(%d)-Ch(%d) [CCK / HT40_1S / HT40_2S] = "
1445 "[0x%x / 0x%x / 0x%x]\n", rf_path, i,
1446 rtlefuse->txpwrlevel_cck[rf_path][i],
1447 rtlefuse->txpwrlevel_ht40_1s[rf_path][i],
1448 rtlefuse->txpwrlevel_ht40_2s[rf_path][i]));
1449 }
1450 }
1451
1452 for (i = 0; i < 3; i++) {
1453 if (!autoload_fail) {
1454 rtlefuse->eeprom_pwrlimit_ht40[i] =
1455 hwinfo[EEPROM_TXPWR_GROUP + i];
1456 rtlefuse->eeprom_pwrlimit_ht20[i] =
1457 hwinfo[EEPROM_TXPWR_GROUP + 3 + i];
1458 } else {
1459 rtlefuse->eeprom_pwrlimit_ht40[i] = 0;
1460 rtlefuse->eeprom_pwrlimit_ht20[i] = 0;
1461 }
1462 }
1463
1464 for (rf_path = 0; rf_path < 2; rf_path++) {
1465 for (i = 0; i < 14; i++) {
1466 index = _rtl92c_get_chnl_group((u8) i);
1467
1468 if (rf_path == RF90_PATH_A) {
1469 rtlefuse->pwrgroup_ht20[rf_path][i] =
1470 (rtlefuse->eeprom_pwrlimit_ht20[index]
1471 & 0xf);
1472 rtlefuse->pwrgroup_ht40[rf_path][i] =
1473 (rtlefuse->eeprom_pwrlimit_ht40[index]
1474 & 0xf);
1475 } else if (rf_path == RF90_PATH_B) {
1476 rtlefuse->pwrgroup_ht20[rf_path][i] =
1477 ((rtlefuse->eeprom_pwrlimit_ht20[index]
1478 & 0xf0) >> 4);
1479 rtlefuse->pwrgroup_ht40[rf_path][i] =
1480 ((rtlefuse->eeprom_pwrlimit_ht40[index]
1481 & 0xf0) >> 4);
1482 }
1483
1484 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1485 ("RF-%d pwrgroup_ht20[%d] = 0x%x\n",
1486 rf_path, i,
1487 rtlefuse->pwrgroup_ht20[rf_path][i]));
1488 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1489 ("RF-%d pwrgroup_ht40[%d] = 0x%x\n",
1490 rf_path, i,
1491 rtlefuse->pwrgroup_ht40[rf_path][i]));
1492 }
1493 }
1494
1495 for (i = 0; i < 14; i++) {
1496 index = _rtl92c_get_chnl_group((u8) i);
1497
1498 if (!autoload_fail)
1499 tempval = hwinfo[EEPROM_TXPOWERHT20DIFF + index];
1500 else
1501 tempval = EEPROM_DEFAULT_HT20_DIFF;
1502
1503 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] = (tempval & 0xF);
1504 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] =
1505 ((tempval >> 4) & 0xF);
1506
1507 if (rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] & BIT(3))
1508 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i] |= 0xF0;
1509
1510 if (rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] & BIT(3))
1511 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i] |= 0xF0;
1512
1513 index = _rtl92c_get_chnl_group((u8) i);
1514
1515 if (!autoload_fail)
1516 tempval = hwinfo[EEPROM_TXPOWER_OFDMDIFF + index];
1517 else
1518 tempval = EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF;
1519
1520 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i] = (tempval & 0xF);
1521 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i] =
1522 ((tempval >> 4) & 0xF);
1523 }
1524
1525 rtlefuse->legacy_ht_txpowerdiff =
1526 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][7];
1527
1528 for (i = 0; i < 14; i++)
1529 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1530 ("RF-A Ht20 to HT40 Diff[%d] = 0x%x\n", i,
1531 rtlefuse->txpwr_ht20diff[RF90_PATH_A][i]));
1532 for (i = 0; i < 14; i++)
1533 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1534 ("RF-A Legacy to Ht40 Diff[%d] = 0x%x\n", i,
1535 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_A][i]));
1536 for (i = 0; i < 14; i++)
1537 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1538 ("RF-B Ht20 to HT40 Diff[%d] = 0x%x\n", i,
1539 rtlefuse->txpwr_ht20diff[RF90_PATH_B][i]));
1540 for (i = 0; i < 14; i++)
1541 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1542 ("RF-B Legacy to HT40 Diff[%d] = 0x%x\n", i,
1543 rtlefuse->txpwr_legacyhtdiff[RF90_PATH_B][i]));
1544
1545 if (!autoload_fail)
1546 rtlefuse->eeprom_regulatory = (hwinfo[RF_OPTION1] & 0x7);
1547 else
1548 rtlefuse->eeprom_regulatory = 0;
1549 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1550 ("eeprom_regulatory = 0x%x\n", rtlefuse->eeprom_regulatory));
1551
1552 if (!autoload_fail) {
1553 rtlefuse->eeprom_tssi[RF90_PATH_A] = hwinfo[EEPROM_TSSI_A];
1554 rtlefuse->eeprom_tssi[RF90_PATH_B] = hwinfo[EEPROM_TSSI_B];
1555 } else {
1556 rtlefuse->eeprom_tssi[RF90_PATH_A] = EEPROM_DEFAULT_TSSI;
1557 rtlefuse->eeprom_tssi[RF90_PATH_B] = EEPROM_DEFAULT_TSSI;
1558 }
1559 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1560 ("TSSI_A = 0x%x, TSSI_B = 0x%x\n",
1561 rtlefuse->eeprom_tssi[RF90_PATH_A],
1562 rtlefuse->eeprom_tssi[RF90_PATH_B]));
1563
1564 if (!autoload_fail)
1565 tempval = hwinfo[EEPROM_THERMAL_METER];
1566 else
1567 tempval = EEPROM_DEFAULT_THERMALMETER;
1568 rtlefuse->eeprom_thermalmeter = (tempval & 0x1f);
1569
1570 if (rtlefuse->eeprom_thermalmeter == 0x1f || autoload_fail)
1571 rtlefuse->b_apk_thermalmeterignore = true;
1572
1573 rtlefuse->thermalmeter[0] = rtlefuse->eeprom_thermalmeter;
1574 RTPRINT(rtlpriv, FINIT, INIT_TxPower,
1575 ("thermalmeter = 0x%x\n", rtlefuse->eeprom_thermalmeter));
1576}
1577
1578static void _rtl92ce_read_adapter_info(struct ieee80211_hw *hw)
1579{
1580 struct rtl_priv *rtlpriv = rtl_priv(hw);
1581 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1582 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1583 u16 i, usvalue;
1584 u8 hwinfo[HWSET_MAX_SIZE];
1585 u16 eeprom_id;
1586
1587 if (rtlefuse->epromtype == EEPROM_BOOT_EFUSE) {
1588 rtl_efuse_shadow_map_update(hw);
1589
1590 memcpy((void *)hwinfo,
1591 (void *)&rtlefuse->efuse_map[EFUSE_INIT_MAP][0],
1592 HWSET_MAX_SIZE);
1593 } else if (rtlefuse->epromtype == EEPROM_93C46) {
1594 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1595 ("RTL819X Not boot from eeprom, check it !!"));
1596 }
1597
1598 RT_PRINT_DATA(rtlpriv, COMP_INIT, DBG_LOUD, ("MAP\n"),
1599 hwinfo, HWSET_MAX_SIZE);
1600
1601 eeprom_id = *((u16 *)&hwinfo[0]);
1602 if (eeprom_id != RTL8190_EEPROM_ID) {
1603 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1604 ("EEPROM ID(%#x) is invalid!!\n", eeprom_id));
1605 rtlefuse->autoload_failflag = true;
1606 } else {
1607 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
1608 rtlefuse->autoload_failflag = false;
1609 }
1610
1611 if (rtlefuse->autoload_failflag == true)
1612 return;
1613
1614 for (i = 0; i < 6; i += 2) {
1615 usvalue = *(u16 *)&hwinfo[EEPROM_MAC_ADDR + i];
1616 *((u16 *) (&rtlefuse->dev_addr[i])) = usvalue;
1617 }
1618
1619 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1620 (MAC_FMT "\n", MAC_ARG(rtlefuse->dev_addr)));
1621
1622 _rtl92ce_read_txpower_info_from_hwpg(hw,
1623 rtlefuse->autoload_failflag,
1624 hwinfo);
1625
1626 rtlefuse->eeprom_channelplan = *(u8 *)&hwinfo[EEPROM_CHANNELPLAN];
1627 rtlefuse->eeprom_version = *(u16 *)&hwinfo[EEPROM_VERSION];
1628 rtlefuse->b_txpwr_fromeprom = true;
1629 rtlefuse->eeprom_oemid = *(u8 *)&hwinfo[EEPROM_CUSTOMER_ID];
1630
1631 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1632 ("EEPROM Customer ID: 0x%2x\n", rtlefuse->eeprom_oemid));
1633
1634 if (rtlhal->oem_id == RT_CID_DEFAULT) {
1635 switch (rtlefuse->eeprom_oemid) {
1636 case EEPROM_CID_DEFAULT:
1637 if (rtlefuse->eeprom_did == 0x8176) {
1638 if ((rtlefuse->eeprom_svid == 0x103C &&
1639 rtlefuse->eeprom_smid == 0x1629))
1640 rtlhal->oem_id = RT_CID_819x_HP;
1641 else
1642 rtlhal->oem_id = RT_CID_DEFAULT;
1643 } else {
1644 rtlhal->oem_id = RT_CID_DEFAULT;
1645 }
1646 break;
1647 case EEPROM_CID_TOSHIBA:
1648 rtlhal->oem_id = RT_CID_TOSHIBA;
1649 break;
1650 case EEPROM_CID_QMI:
1651 rtlhal->oem_id = RT_CID_819x_QMI;
1652 break;
1653 case EEPROM_CID_WHQL:
1654 default:
1655 rtlhal->oem_id = RT_CID_DEFAULT;
1656 break;
1657
1658 }
1659 }
1660
1661}
1662
1663static void _rtl92ce_hal_customized_behavior(struct ieee80211_hw *hw)
1664{
1665 struct rtl_priv *rtlpriv = rtl_priv(hw);
1666 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
1667 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1668
1669 switch (rtlhal->oem_id) {
1670 case RT_CID_819x_HP:
1671 pcipriv->ledctl.bled_opendrain = true;
1672 break;
1673 case RT_CID_819x_Lenovo:
1674 case RT_CID_DEFAULT:
1675 case RT_CID_TOSHIBA:
1676 case RT_CID_CCX:
1677 case RT_CID_819x_Acer:
1678 case RT_CID_WHQL:
1679 default:
1680 break;
1681 }
1682 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
1683 ("RT Customized ID: 0x%02X\n", rtlhal->oem_id));
1684}
1685
1686void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw)
1687{
1688 struct rtl_priv *rtlpriv = rtl_priv(hw);
1689 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1690 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1691 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1692 u8 tmp_u1b;
1693
1694 rtlhal->version = _rtl92ce_read_chip_version(hw);
1695 if (get_rf_type(rtlphy) == RF_1T1R)
1696 rtlpriv->dm.brfpath_rxenable[0] = true;
1697 else
1698 rtlpriv->dm.brfpath_rxenable[0] =
1699 rtlpriv->dm.brfpath_rxenable[1] = true;
1700 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
1701 rtlhal->version));
1702 tmp_u1b = rtl_read_byte(rtlpriv, REG_9346CR);
1703 if (tmp_u1b & BIT(4)) {
1704 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EEPROM\n"));
1705 rtlefuse->epromtype = EEPROM_93C46;
1706 } else {
1707 RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG, ("Boot from EFUSE\n"));
1708 rtlefuse->epromtype = EEPROM_BOOT_EFUSE;
1709 }
1710 if (tmp_u1b & BIT(5)) {
1711 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("Autoload OK\n"));
1712 rtlefuse->autoload_failflag = false;
1713 _rtl92ce_read_adapter_info(hw);
1714 } else {
1715 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Autoload ERR!!\n"));
1716 }
1717
1718 _rtl92ce_hal_customized_behavior(hw);
1719}
1720
1721void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw)
1722{
1723 struct rtl_priv *rtlpriv = rtl_priv(hw);
1724 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1725 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1726
1727 u32 ratr_value = (u32) mac->basic_rates;
1728 u8 *p_mcsrate = mac->mcs;
1729 u8 ratr_index = 0;
1730 u8 b_nmode = mac->ht_enable;
1731 u8 mimo_ps = 1;
1732 u16 shortgi_rate;
1733 u32 tmp_ratr_value;
1734 u8 b_curtxbw_40mhz = mac->bw_40;
1735 u8 b_curshortgi_40mhz = mac->sgi_40;
1736 u8 b_curshortgi_20mhz = mac->sgi_20;
1737 enum wireless_mode wirelessmode = mac->mode;
1738
1739 ratr_value |= EF2BYTE((*(u16 *) (p_mcsrate))) << 12;
1740
1741 switch (wirelessmode) {
1742 case WIRELESS_MODE_B:
1743 if (ratr_value & 0x0000000c)
1744 ratr_value &= 0x0000000d;
1745 else
1746 ratr_value &= 0x0000000f;
1747 break;
1748 case WIRELESS_MODE_G:
1749 ratr_value &= 0x00000FF5;
1750 break;
1751 case WIRELESS_MODE_N_24G:
1752 case WIRELESS_MODE_N_5G:
1753 b_nmode = 1;
1754 if (mimo_ps == 0) {
1755 ratr_value &= 0x0007F005;
1756 } else {
1757 u32 ratr_mask;
1758
1759 if (get_rf_type(rtlphy) == RF_1T2R ||
1760 get_rf_type(rtlphy) == RF_1T1R)
1761 ratr_mask = 0x000ff005;
1762 else
1763 ratr_mask = 0x0f0ff005;
1764
1765 ratr_value &= ratr_mask;
1766 }
1767 break;
1768 default:
1769 if (rtlphy->rf_type == RF_1T2R)
1770 ratr_value &= 0x000ff0ff;
1771 else
1772 ratr_value &= 0x0f0ff0ff;
1773
1774 break;
1775 }
1776
1777 ratr_value &= 0x0FFFFFFF;
1778
1779 if (b_nmode && ((b_curtxbw_40mhz &&
1780 b_curshortgi_40mhz) || (!b_curtxbw_40mhz &&
1781 b_curshortgi_20mhz))) {
1782
1783 ratr_value |= 0x10000000;
1784 tmp_ratr_value = (ratr_value >> 12);
1785
1786 for (shortgi_rate = 15; shortgi_rate > 0; shortgi_rate--) {
1787 if ((1 << shortgi_rate) & tmp_ratr_value)
1788 break;
1789 }
1790
1791 shortgi_rate = (shortgi_rate << 12) | (shortgi_rate << 8) |
1792 (shortgi_rate << 4) | (shortgi_rate);
1793 }
1794
1795 rtl_write_dword(rtlpriv, REG_ARFR0 + ratr_index * 4, ratr_value);
1796
1797 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1798 ("%x\n", rtl_read_dword(rtlpriv, REG_ARFR0)));
1799}
1800
1801void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level)
1802{
1803 struct rtl_priv *rtlpriv = rtl_priv(hw);
1804 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1805 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1806 u32 ratr_bitmap = (u32) mac->basic_rates;
1807 u8 *p_mcsrate = mac->mcs;
1808 u8 ratr_index;
1809 u8 b_curtxbw_40mhz = mac->bw_40;
1810 u8 b_curshortgi_40mhz = mac->sgi_40;
1811 u8 b_curshortgi_20mhz = mac->sgi_20;
1812 enum wireless_mode wirelessmode = mac->mode;
1813 bool b_shortgi = false;
1814 u8 rate_mask[5];
1815 u8 macid = 0;
1816 u8 mimops = 1;
1817
1818 ratr_bitmap |= (p_mcsrate[1] << 20) | (p_mcsrate[0] << 12);
1819 switch (wirelessmode) {
1820 case WIRELESS_MODE_B:
1821 ratr_index = RATR_INX_WIRELESS_B;
1822 if (ratr_bitmap & 0x0000000c)
1823 ratr_bitmap &= 0x0000000d;
1824 else
1825 ratr_bitmap &= 0x0000000f;
1826 break;
1827 case WIRELESS_MODE_G:
1828 ratr_index = RATR_INX_WIRELESS_GB;
1829
1830 if (rssi_level == 1)
1831 ratr_bitmap &= 0x00000f00;
1832 else if (rssi_level == 2)
1833 ratr_bitmap &= 0x00000ff0;
1834 else
1835 ratr_bitmap &= 0x00000ff5;
1836 break;
1837 case WIRELESS_MODE_A:
1838 ratr_index = RATR_INX_WIRELESS_A;
1839 ratr_bitmap &= 0x00000ff0;
1840 break;
1841 case WIRELESS_MODE_N_24G:
1842 case WIRELESS_MODE_N_5G:
1843 ratr_index = RATR_INX_WIRELESS_NGB;
1844
1845 if (mimops == 0) {
1846 if (rssi_level == 1)
1847 ratr_bitmap &= 0x00070000;
1848 else if (rssi_level == 2)
1849 ratr_bitmap &= 0x0007f000;
1850 else
1851 ratr_bitmap &= 0x0007f005;
1852 } else {
1853 if (rtlphy->rf_type == RF_1T2R ||
1854 rtlphy->rf_type == RF_1T1R) {
1855 if (b_curtxbw_40mhz) {
1856 if (rssi_level == 1)
1857 ratr_bitmap &= 0x000f0000;
1858 else if (rssi_level == 2)
1859 ratr_bitmap &= 0x000ff000;
1860 else
1861 ratr_bitmap &= 0x000ff015;
1862 } else {
1863 if (rssi_level == 1)
1864 ratr_bitmap &= 0x000f0000;
1865 else if (rssi_level == 2)
1866 ratr_bitmap &= 0x000ff000;
1867 else
1868 ratr_bitmap &= 0x000ff005;
1869 }
1870 } else {
1871 if (b_curtxbw_40mhz) {
1872 if (rssi_level == 1)
1873 ratr_bitmap &= 0x0f0f0000;
1874 else if (rssi_level == 2)
1875 ratr_bitmap &= 0x0f0ff000;
1876 else
1877 ratr_bitmap &= 0x0f0ff015;
1878 } else {
1879 if (rssi_level == 1)
1880 ratr_bitmap &= 0x0f0f0000;
1881 else if (rssi_level == 2)
1882 ratr_bitmap &= 0x0f0ff000;
1883 else
1884 ratr_bitmap &= 0x0f0ff005;
1885 }
1886 }
1887 }
1888
1889 if ((b_curtxbw_40mhz && b_curshortgi_40mhz) ||
1890 (!b_curtxbw_40mhz && b_curshortgi_20mhz)) {
1891
1892 if (macid == 0)
1893 b_shortgi = true;
1894 else if (macid == 1)
1895 b_shortgi = false;
1896 }
1897 break;
1898 default:
1899 ratr_index = RATR_INX_WIRELESS_NGB;
1900
1901 if (rtlphy->rf_type == RF_1T2R)
1902 ratr_bitmap &= 0x000ff0ff;
1903 else
1904 ratr_bitmap &= 0x0f0ff0ff;
1905 break;
1906 }
1907 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG,
1908 ("ratr_bitmap :%x\n", ratr_bitmap));
1909 *(u32 *)&rate_mask = EF4BYTE((ratr_bitmap & 0x0fffffff) |
1910 (ratr_index << 28));
1911 rate_mask[4] = macid | (b_shortgi ? 0x20 : 0x00) | 0x80;
1912 RT_TRACE(rtlpriv, COMP_RATR, DBG_DMESG, ("Rate_index:%x, "
1913 "ratr_val:%x, %x:%x:%x:%x:%x\n",
1914 ratr_index, ratr_bitmap,
1915 rate_mask[0], rate_mask[1],
1916 rate_mask[2], rate_mask[3],
1917 rate_mask[4]));
1918 rtl92c_fill_h2c_cmd(hw, H2C_RA_MASK, 5, rate_mask);
1919}
1920
1921void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw)
1922{
1923 struct rtl_priv *rtlpriv = rtl_priv(hw);
1924 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1925 u16 sifs_timer;
1926
1927 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SLOT_TIME,
1928 (u8 *)&mac->slot_time);
1929 if (!mac->ht_enable)
1930 sifs_timer = 0x0a0a;
1931 else
1932 sifs_timer = 0x1010;
1933 rtlpriv->cfg->ops->set_hw_reg(hw, HW_VAR_SIFS, (u8 *)&sifs_timer);
1934}
1935
1936 bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid)
1937{
1938 struct rtl_priv *rtlpriv = rtl_priv(hw);
1939 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
1940 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1941 enum rf_pwrstate e_rfpowerstate_toset, cur_rfstate;
1942 u8 u1tmp;
1943 bool b_actuallyset = false;
1944 unsigned long flag;
1945
1946 if ((rtlpci->up_first_time == 1) || (rtlpci->being_init_adapter))
1947 return false;
1948
1949 if (ppsc->b_swrf_processing)
1950 return false;
1951
1952 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
1953 if (ppsc->rfchange_inprogress) {
1954 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
1955 return false;
1956 } else {
1957 ppsc->rfchange_inprogress = true;
1958 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
1959 }
1960
1961 cur_rfstate = ppsc->rfpwr_state;
1962
1963 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
1964 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
1965 rtlpriv->intf_ops->disable_aspm(hw);
1966 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
1967 }
1968
1969 rtl_write_byte(rtlpriv, REG_MAC_PINMUX_CFG, rtl_read_byte(rtlpriv,
1970 REG_MAC_PINMUX_CFG)&~(BIT(3)));
1971
1972 u1tmp = rtl_read_byte(rtlpriv, REG_GPIO_IO_SEL);
1973 e_rfpowerstate_toset = (u1tmp & BIT(3)) ? ERFON : ERFOFF;
1974
1975 if ((ppsc->b_hwradiooff == true) && (e_rfpowerstate_toset == ERFON)) {
1976 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
1977 ("GPIOChangeRF - HW Radio ON, RF ON\n"));
1978
1979 e_rfpowerstate_toset = ERFON;
1980 ppsc->b_hwradiooff = false;
1981 b_actuallyset = true;
1982 } else if ((ppsc->b_hwradiooff == false)
1983 && (e_rfpowerstate_toset == ERFOFF)) {
1984 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
1985 ("GPIOChangeRF - HW Radio OFF, RF OFF\n"));
1986
1987 e_rfpowerstate_toset = ERFOFF;
1988 ppsc->b_hwradiooff = true;
1989 b_actuallyset = true;
1990 }
1991
1992 if (b_actuallyset) {
1993 if (e_rfpowerstate_toset == ERFON) {
1994 if ((ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) &&
1995 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM)) {
1996 rtlpriv->intf_ops->disable_aspm(hw);
1997 RT_CLEAR_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
1998 }
1999 }
2000
2001 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2002 ppsc->rfchange_inprogress = false;
2003 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2004
2005 if (e_rfpowerstate_toset == ERFOFF) {
2006 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) {
2007 rtlpriv->intf_ops->enable_aspm(hw);
2008 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
2009 }
2010 }
2011
2012 } else if (e_rfpowerstate_toset == ERFOFF || cur_rfstate == ERFOFF) {
2013 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC)
2014 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2015
2016 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_ASPM) {
2017 rtlpriv->intf_ops->enable_aspm(hw);
2018 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_ASPM);
2019 }
2020
2021 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2022 ppsc->rfchange_inprogress = false;
2023 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2024 } else {
2025 spin_lock_irqsave(&rtlpriv->locks.rf_ps_lock, flag);
2026 ppsc->rfchange_inprogress = false;
2027 spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flag);
2028 }
2029
2030 *valid = 1;
2031 return !ppsc->b_hwradiooff;
2032
2033}
2034
2035void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
2036 u8 *p_macaddr, bool is_group, u8 enc_algo,
2037 bool is_wepkey, bool clear_all)
2038{
2039 struct rtl_priv *rtlpriv = rtl_priv(hw);
2040 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2041 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
2042 u8 *macaddr = p_macaddr;
2043 u32 entry_id = 0;
2044 bool is_pairwise = false;
2045
2046 static u8 cam_const_addr[4][6] = {
2047 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
2048 {0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
2049 {0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
2050 {0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
2051 };
2052 static u8 cam_const_broad[] = {
2053 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
2054 };
2055
2056 if (clear_all) {
2057 u8 idx = 0;
2058 u8 cam_offset = 0;
2059 u8 clear_number = 5;
2060
2061 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
2062
2063 for (idx = 0; idx < clear_number; idx++) {
2064 rtl_cam_mark_invalid(hw, cam_offset + idx);
2065 rtl_cam_empty_entry(hw, cam_offset + idx);
2066
2067 if (idx < 5) {
2068 memset(rtlpriv->sec.key_buf[idx], 0,
2069 MAX_KEY_LEN);
2070 rtlpriv->sec.key_len[idx] = 0;
2071 }
2072 }
2073
2074 } else {
2075 switch (enc_algo) {
2076 case WEP40_ENCRYPTION:
2077 enc_algo = CAM_WEP40;
2078 break;
2079 case WEP104_ENCRYPTION:
2080 enc_algo = CAM_WEP104;
2081 break;
2082 case TKIP_ENCRYPTION:
2083 enc_algo = CAM_TKIP;
2084 break;
2085 case AESCCMP_ENCRYPTION:
2086 enc_algo = CAM_AES;
2087 break;
2088 default:
2089 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("switch case "
2090 "not process\n"));
2091 enc_algo = CAM_TKIP;
2092 break;
2093 }
2094
2095 if (is_wepkey || rtlpriv->sec.use_defaultkey) {
2096 macaddr = cam_const_addr[key_index];
2097 entry_id = key_index;
2098 } else {
2099 if (is_group) {
2100 macaddr = cam_const_broad;
2101 entry_id = key_index;
2102 } else {
2103 key_index = PAIRWISE_KEYIDX;
2104 entry_id = CAM_PAIRWISE_KEY_POSITION;
2105 is_pairwise = true;
2106 }
2107 }
2108
2109 if (rtlpriv->sec.key_len[key_index] == 0) {
2110 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2111 ("delete one entry\n"));
2112 rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
2113 } else {
2114 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
2115 ("The insert KEY length is %d\n",
2116 rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
2117 RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
2118 ("The insert KEY is %x %x\n",
2119 rtlpriv->sec.key_buf[0][0],
2120 rtlpriv->sec.key_buf[0][1]));
2121
2122 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2123 ("add one entry\n"));
2124 if (is_pairwise) {
2125 RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
2126 "Pairwiase Key content :",
2127 rtlpriv->sec.pairwise_key,
2128 rtlpriv->sec.
2129 key_len[PAIRWISE_KEYIDX]);
2130
2131 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2132 ("set Pairwiase key\n"));
2133
2134 rtl_cam_add_one_entry(hw, macaddr, key_index,
2135 entry_id, enc_algo,
2136 CAM_CONFIG_NO_USEDK,
2137 rtlpriv->sec.
2138 key_buf[key_index]);
2139 } else {
2140 RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
2141 ("set group key\n"));
2142
2143 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
2144 rtl_cam_add_one_entry(hw,
2145 rtlefuse->dev_addr,
2146 PAIRWISE_KEYIDX,
2147 CAM_PAIRWISE_KEY_POSITION,
2148 enc_algo,
2149 CAM_CONFIG_NO_USEDK,
2150 rtlpriv->sec.key_buf
2151 [entry_id]);
2152 }
2153
2154 rtl_cam_add_one_entry(hw, macaddr, key_index,
2155 entry_id, enc_algo,
2156 CAM_CONFIG_NO_USEDK,
2157 rtlpriv->sec.key_buf[entry_id]);
2158 }
2159
2160 }
2161 }
2162}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
new file mode 100644
index 000000000000..305c819c8c78
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.h
@@ -0,0 +1,57 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CE_HW_H__
31#define __RTL92CE_HW_H__
32
33void rtl92ce_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
34void rtl92ce_read_eeprom_info(struct ieee80211_hw *hw);
35void rtl92ce_interrupt_recognized(struct ieee80211_hw *hw,
36 u32 *p_inta, u32 *p_intb);
37int rtl92ce_hw_init(struct ieee80211_hw *hw);
38void rtl92ce_card_disable(struct ieee80211_hw *hw);
39void rtl92ce_enable_interrupt(struct ieee80211_hw *hw);
40void rtl92ce_disable_interrupt(struct ieee80211_hw *hw);
41int rtl92ce_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type);
42void rtl92ce_set_qos(struct ieee80211_hw *hw, int aci);
43void rtl92ce_set_beacon_related_registers(struct ieee80211_hw *hw);
44void rtl92ce_set_beacon_interval(struct ieee80211_hw *hw);
45void rtl92ce_update_interrupt_mask(struct ieee80211_hw *hw,
46 u32 add_msr, u32 rm_msr);
47void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val);
48void rtl92ce_update_hal_rate_table(struct ieee80211_hw *hw);
49void rtl92ce_update_hal_rate_mask(struct ieee80211_hw *hw, u8 rssi_level);
50void rtl92ce_update_channel_access_setting(struct ieee80211_hw *hw);
51bool rtl92ce_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 *valid);
52void rtl92ce_enable_hw_security_config(struct ieee80211_hw *hw);
53void rtl92ce_set_key(struct ieee80211_hw *hw, u32 key_index,
54 u8 *p_macaddr, bool is_group, u8 enc_algo,
55 bool is_wepkey, bool clear_all);
56
57#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.c b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
new file mode 100644
index 000000000000..78a0569208ea
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.c
@@ -0,0 +1,144 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "reg.h"
33#include "led.h"
34
35void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled)
36{
37 u8 ledcfg;
38 struct rtl_priv *rtlpriv = rtl_priv(hw);
39
40 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
41 ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
42
43 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
44
45 switch (pled->ledpin) {
46 case LED_PIN_GPIO0:
47 break;
48 case LED_PIN_LED0:
49 rtl_write_byte(rtlpriv,
50 REG_LEDCFG2, (ledcfg & 0xf0) | BIT(5) | BIT(6));
51 break;
52 case LED_PIN_LED1:
53 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg & 0x0f) | BIT(5));
54 break;
55 default:
56 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
57 ("switch case not process\n"));
58 break;
59 }
60 pled->b_ledon = true;
61}
62
63void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled)
64{
65 struct rtl_priv *rtlpriv = rtl_priv(hw);
66 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
67 u8 ledcfg;
68
69 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD,
70 ("LedAddr:%X ledpin=%d\n", REG_LEDCFG2, pled->ledpin));
71
72 ledcfg = rtl_read_byte(rtlpriv, REG_LEDCFG2);
73
74 switch (pled->ledpin) {
75 case LED_PIN_GPIO0:
76 break;
77 case LED_PIN_LED0:
78 ledcfg &= 0xf0;
79 if (pcipriv->ledctl.bled_opendrain == true)
80 rtl_write_byte(rtlpriv, REG_LEDCFG2,
81 (ledcfg | BIT(1) | BIT(5) | BIT(6)));
82 else
83 rtl_write_byte(rtlpriv, REG_LEDCFG2,
84 (ledcfg | BIT(3) | BIT(5) | BIT(6)));
85 break;
86 case LED_PIN_LED1:
87 ledcfg &= 0x0f;
88 rtl_write_byte(rtlpriv, REG_LEDCFG2, (ledcfg | BIT(3)));
89 break;
90 default:
91 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
92 ("switch case not process\n"));
93 break;
94 }
95 pled->b_ledon = false;
96}
97
98void rtl92ce_init_sw_leds(struct ieee80211_hw *hw)
99{
100}
101
102void rtl92ce_deinit_sw_leds(struct ieee80211_hw *hw)
103{
104}
105
106void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
107 enum led_ctl_mode ledaction)
108{
109 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
110 struct rtl_led *pLed0 = &(pcipriv->ledctl.sw_led0);
111 switch (ledaction) {
112 case LED_CTL_POWER_ON:
113 case LED_CTL_LINK:
114 case LED_CTL_NO_LINK:
115 rtl92ce_sw_led_on(hw, pLed0);
116 break;
117 case LED_CTL_POWER_OFF:
118 rtl92ce_sw_led_off(hw, pLed0);
119 break;
120 default:
121 break;
122 }
123}
124
125void rtl92ce_led_control(struct ieee80211_hw *hw,
126 enum led_ctl_mode ledaction)
127{
128 struct rtl_priv *rtlpriv = rtl_priv(hw);
129 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
130
131 if ((ppsc->rfoff_reason > RF_CHANGE_BY_PS) &&
132 (ledaction == LED_CTL_TX ||
133 ledaction == LED_CTL_RX ||
134 ledaction == LED_CTL_SITE_SURVEY ||
135 ledaction == LED_CTL_LINK ||
136 ledaction == LED_CTL_NO_LINK ||
137 ledaction == LED_CTL_START_TO_LINK ||
138 ledaction == LED_CTL_POWER_ON)) {
139 return;
140 }
141 RT_TRACE(rtlpriv, COMP_LED, DBG_LOUD, ("ledaction %d,\n",
142 ledaction));
143 _rtl92ce_sw_led_control(hw, ledaction);
144}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/led.h b/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
new file mode 100644
index 000000000000..10da3018f4b7
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/led.h
@@ -0,0 +1,41 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CE_LED_H__
31#define __RTL92CE_LED_H__
32
33void rtl92ce_init_sw_leds(struct ieee80211_hw *hw);
34void rtl92ce_deinit_sw_leds(struct ieee80211_hw *hw);
35void rtl92ce_sw_led_on(struct ieee80211_hw *hw, struct rtl_led *pled);
36void rtl92ce_sw_led_off(struct ieee80211_hw *hw, struct rtl_led *pled);
37void rtl92ce_led_control(struct ieee80211_hw *hw, enum led_ctl_mode ledaction);
38void _rtl92ce_sw_led_control(struct ieee80211_hw *hw,
39 enum led_ctl_mode ledaction);
40
41#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
new file mode 100644
index 000000000000..45044117139a
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.c
@@ -0,0 +1,2676 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../ps.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "rf.h"
37#include "dm.h"
38#include "table.h"
39
40static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
41 enum radio_path rfpath, u32 offset);
42static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
43 enum radio_path rfpath, u32 offset,
44 u32 data);
45static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
46 enum radio_path rfpath, u32 offset);
47static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
48 enum radio_path rfpath, u32 offset,
49 u32 data);
50static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask);
51static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw);
52static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw);
53static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
54 u8 configtype);
55static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
56 u8 configtype);
57static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw);
58static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
59 u32 cmdtableidx, u32 cmdtablesz,
60 enum swchnlcmd_id cmdid, u32 para1,
61 u32 para2, u32 msdelay);
62static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
63 u8 channel, u8 *stage, u8 *step,
64 u32 *delay);
65static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
66 enum wireless_mode wirelessmode,
67 long power_indbm);
68static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
69 enum radio_path rfpath);
70static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
71 enum wireless_mode wirelessmode,
72 u8 txpwridx);
73u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw, u32 regaddr, u32 bitmask)
74{
75 struct rtl_priv *rtlpriv = rtl_priv(hw);
76 u32 returnvalue, originalvalue, bitshift;
77
78 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
79 "bitmask(%#x)\n", regaddr,
80 bitmask));
81 originalvalue = rtl_read_dword(rtlpriv, regaddr);
82 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
83 returnvalue = (originalvalue & bitmask) >> bitshift;
84
85 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("BBR MASK=0x%x "
86 "Addr[0x%x]=0x%x\n", bitmask,
87 regaddr, originalvalue));
88
89 return returnvalue;
90
91}
92
93void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
94 u32 regaddr, u32 bitmask, u32 data)
95{
96 struct rtl_priv *rtlpriv = rtl_priv(hw);
97 u32 originalvalue, bitshift;
98
99 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
100 " data(%#x)\n", regaddr, bitmask,
101 data));
102
103 if (bitmask != MASKDWORD) {
104 originalvalue = rtl_read_dword(rtlpriv, regaddr);
105 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
106 data = ((originalvalue & (~bitmask)) | (data << bitshift));
107 }
108
109 rtl_write_dword(rtlpriv, regaddr, data);
110
111 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), bitmask(%#x),"
112 " data(%#x)\n", regaddr, bitmask,
113 data));
114
115}
116
117u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
118 enum radio_path rfpath, u32 regaddr, u32 bitmask)
119{
120 struct rtl_priv *rtlpriv = rtl_priv(hw);
121 u32 original_value, readback_value, bitshift;
122 struct rtl_phy *rtlphy = &(rtlpriv->phy);
123 unsigned long flags;
124
125 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
126 "rfpath(%#x), bitmask(%#x)\n",
127 regaddr, rfpath, bitmask));
128
129 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
130
131 if (rtlphy->rf_mode != RF_OP_BY_FW) {
132 original_value = _rtl92c_phy_rf_serial_read(hw,
133 rfpath, regaddr);
134 } else {
135 original_value = _rtl92c_phy_fw_rf_serial_read(hw,
136 rfpath, regaddr);
137 }
138
139 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
140 readback_value = (original_value & bitmask) >> bitshift;
141
142 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
143
144 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
145 ("regaddr(%#x), rfpath(%#x), "
146 "bitmask(%#x), original_value(%#x)\n",
147 regaddr, rfpath, bitmask, original_value));
148
149 return readback_value;
150}
151
152void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
153 enum radio_path rfpath,
154 u32 regaddr, u32 bitmask, u32 data)
155{
156 struct rtl_priv *rtlpriv = rtl_priv(hw);
157 struct rtl_phy *rtlphy = &(rtlpriv->phy);
158 u32 original_value, bitshift;
159 unsigned long flags;
160
161 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE,
162 ("regaddr(%#x), bitmask(%#x), data(%#x), rfpath(%#x)\n",
163 regaddr, bitmask, data, rfpath));
164
165 spin_lock_irqsave(&rtlpriv->locks.rf_lock, flags);
166
167 if (rtlphy->rf_mode != RF_OP_BY_FW) {
168 if (bitmask != RFREG_OFFSET_MASK) {
169 original_value = _rtl92c_phy_rf_serial_read(hw,
170 rfpath,
171 regaddr);
172 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
173 data =
174 ((original_value & (~bitmask)) |
175 (data << bitshift));
176 }
177
178 _rtl92c_phy_rf_serial_write(hw, rfpath, regaddr, data);
179 } else {
180 if (bitmask != RFREG_OFFSET_MASK) {
181 original_value = _rtl92c_phy_fw_rf_serial_read(hw,
182 rfpath,
183 regaddr);
184 bitshift = _rtl92c_phy_calculate_bit_shift(bitmask);
185 data =
186 ((original_value & (~bitmask)) |
187 (data << bitshift));
188 }
189 _rtl92c_phy_fw_rf_serial_write(hw, rfpath, regaddr, data);
190 }
191
192 spin_unlock_irqrestore(&rtlpriv->locks.rf_lock, flags);
193
194 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("regaddr(%#x), "
195 "bitmask(%#x), data(%#x), "
196 "rfpath(%#x)\n", regaddr,
197 bitmask, data, rfpath));
198}
199
200static u32 _rtl92c_phy_fw_rf_serial_read(struct ieee80211_hw *hw,
201 enum radio_path rfpath, u32 offset)
202{
203 RT_ASSERT(false, ("deprecated!\n"));
204 return 0;
205}
206
207static void _rtl92c_phy_fw_rf_serial_write(struct ieee80211_hw *hw,
208 enum radio_path rfpath, u32 offset,
209 u32 data)
210{
211 RT_ASSERT(false, ("deprecated!\n"));
212}
213
214static u32 _rtl92c_phy_rf_serial_read(struct ieee80211_hw *hw,
215 enum radio_path rfpath, u32 offset)
216{
217 struct rtl_priv *rtlpriv = rtl_priv(hw);
218 struct rtl_phy *rtlphy = &(rtlpriv->phy);
219 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
220 u32 newoffset;
221 u32 tmplong, tmplong2;
222 u8 rfpi_enable = 0;
223 u32 retvalue;
224
225 offset &= 0x3f;
226 newoffset = offset;
227 if (RT_CANNOT_IO(hw)) {
228 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("return all one\n"));
229 return 0xFFFFFFFF;
230 }
231 tmplong = rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD);
232 if (rfpath == RF90_PATH_A)
233 tmplong2 = tmplong;
234 else
235 tmplong2 = rtl_get_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD);
236 tmplong2 = (tmplong2 & (~BLSSIREADADDRESS)) |
237 (newoffset << 23) | BLSSIREADEDGE;
238 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
239 tmplong & (~BLSSIREADEDGE));
240 mdelay(1);
241 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, MASKDWORD, tmplong2);
242 mdelay(1);
243 rtl_set_bbreg(hw, RFPGA0_XA_HSSIPARAMETER2, MASKDWORD,
244 tmplong | BLSSIREADEDGE);
245 mdelay(1);
246 if (rfpath == RF90_PATH_A)
247 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XA_HSSIPARAMETER1,
248 BIT(8));
249 else if (rfpath == RF90_PATH_B)
250 rfpi_enable = (u8) rtl_get_bbreg(hw, RFPGA0_XB_HSSIPARAMETER1,
251 BIT(8));
252 if (rfpi_enable)
253 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readbackpi,
254 BLSSIREADBACKDATA);
255 else
256 retvalue = rtl_get_bbreg(hw, pphyreg->rflssi_readback,
257 BLSSIREADBACKDATA);
258 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFR-%d Addr[0x%x]=0x%x\n",
259 rfpath, pphyreg->rflssi_readback,
260 retvalue));
261 return retvalue;
262}
263
264static void _rtl92c_phy_rf_serial_write(struct ieee80211_hw *hw,
265 enum radio_path rfpath, u32 offset,
266 u32 data)
267{
268 u32 data_and_addr;
269 u32 newoffset;
270 struct rtl_priv *rtlpriv = rtl_priv(hw);
271 struct rtl_phy *rtlphy = &(rtlpriv->phy);
272 struct bb_reg_def *pphyreg = &rtlphy->phyreg_def[rfpath];
273
274 if (RT_CANNOT_IO(hw)) {
275 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("stop\n"));
276 return;
277 }
278 offset &= 0x3f;
279 newoffset = offset;
280 data_and_addr = ((newoffset << 20) | (data & 0x000fffff)) & 0x0fffffff;
281 rtl_set_bbreg(hw, pphyreg->rf3wire_offset, MASKDWORD, data_and_addr);
282 RT_TRACE(rtlpriv, COMP_RF, DBG_TRACE, ("RFW-%d Addr[0x%x]=0x%x\n",
283 rfpath, pphyreg->rf3wire_offset,
284 data_and_addr));
285}
286
287static u32 _rtl92c_phy_calculate_bit_shift(u32 bitmask)
288{
289 u32 i;
290
291 for (i = 0; i <= 31; i++) {
292 if (((bitmask >> i) & 0x1) == 1)
293 break;
294 }
295 return i;
296}
297
298static void _rtl92c_phy_bb_config_1t(struct ieee80211_hw *hw)
299{
300 rtl_set_bbreg(hw, RFPGA0_TXINFO, 0x3, 0x2);
301 rtl_set_bbreg(hw, RFPGA1_TXINFO, 0x300033, 0x200022);
302 rtl_set_bbreg(hw, RCCK0_AFESETTING, MASKBYTE3, 0x45);
303 rtl_set_bbreg(hw, ROFDM0_TRXPATHENABLE, MASKBYTE0, 0x23);
304 rtl_set_bbreg(hw, ROFDM0_AGCPARAMETER1, 0x30, 0x1);
305 rtl_set_bbreg(hw, 0xe74, 0x0c000000, 0x2);
306 rtl_set_bbreg(hw, 0xe78, 0x0c000000, 0x2);
307 rtl_set_bbreg(hw, 0xe7c, 0x0c000000, 0x2);
308 rtl_set_bbreg(hw, 0xe80, 0x0c000000, 0x2);
309 rtl_set_bbreg(hw, 0xe88, 0x0c000000, 0x2);
310}
311
312bool rtl92c_phy_mac_config(struct ieee80211_hw *hw)
313{
314 struct rtl_priv *rtlpriv = rtl_priv(hw);
315 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
316 bool is92c = IS_92C_SERIAL(rtlhal->version);
317 bool rtstatus = _rtl92c_phy_config_mac_with_headerfile(hw);
318
319 if (is92c)
320 rtl_write_byte(rtlpriv, 0x14, 0x71);
321 return rtstatus;
322}
323
324bool rtl92c_phy_bb_config(struct ieee80211_hw *hw)
325{
326 bool rtstatus = true;
327 struct rtl_priv *rtlpriv = rtl_priv(hw);
328 u16 regval;
329 u32 regvaldw;
330 u8 b_reg_hwparafile = 1;
331
332 _rtl92c_phy_init_bb_rf_register_definition(hw);
333 regval = rtl_read_word(rtlpriv, REG_SYS_FUNC_EN);
334 rtl_write_word(rtlpriv, REG_SYS_FUNC_EN,
335 regval | BIT(13) | BIT(0) | BIT(1));
336 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL, 0x83);
337 rtl_write_byte(rtlpriv, REG_AFE_PLL_CTRL + 1, 0xdb);
338 rtl_write_byte(rtlpriv, REG_RF_CTRL, RF_EN | RF_RSTB | RF_SDMRSTB);
339 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN,
340 FEN_PPLL | FEN_PCIEA | FEN_DIO_PCIE |
341 FEN_BB_GLB_RSTn | FEN_BBRSTB);
342 rtl_write_byte(rtlpriv, REG_AFE_XTAL_CTRL + 1, 0x80);
343 regvaldw = rtl_read_dword(rtlpriv, REG_LEDCFG0);
344 rtl_write_dword(rtlpriv, REG_LEDCFG0, regvaldw | BIT(23));
345 if (b_reg_hwparafile == 1)
346 rtstatus = _rtl92c_phy_bb8192c_config_parafile(hw);
347 return rtstatus;
348}
349
350bool rtl92c_phy_rf_config(struct ieee80211_hw *hw)
351{
352 return rtl92c_phy_rf6052_config(hw);
353}
354
355static bool _rtl92c_phy_bb8192c_config_parafile(struct ieee80211_hw *hw)
356{
357 struct rtl_priv *rtlpriv = rtl_priv(hw);
358 struct rtl_phy *rtlphy = &(rtlpriv->phy);
359 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
360 bool rtstatus;
361
362 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("==>\n"));
363 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
364 BASEBAND_CONFIG_PHY_REG);
365 if (rtstatus != true) {
366 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("Write BB Reg Fail!!"));
367 return false;
368 }
369 if (rtlphy->rf_type == RF_1T2R) {
370 _rtl92c_phy_bb_config_1t(hw);
371 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Config to 1T!!\n"));
372 }
373 if (rtlefuse->autoload_failflag == false) {
374 rtlphy->pwrgroup_cnt = 0;
375 rtstatus = _rtl92c_phy_config_bb_with_pgheaderfile(hw,
376 BASEBAND_CONFIG_PHY_REG);
377 }
378 if (rtstatus != true) {
379 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("BB_PG Reg Fail!!"));
380 return false;
381 }
382 rtstatus = _rtl92c_phy_config_bb_with_headerfile(hw,
383 BASEBAND_CONFIG_AGC_TAB);
384 if (rtstatus != true) {
385 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, ("AGC Table Fail\n"));
386 return false;
387 }
388 rtlphy->bcck_high_power = (bool) (rtl_get_bbreg(hw,
389 RFPGA0_XA_HSSIPARAMETER2,
390 0x200));
391 return true;
392}
393
394static bool _rtl92c_phy_config_mac_with_headerfile(struct ieee80211_hw *hw)
395{
396 struct rtl_priv *rtlpriv = rtl_priv(hw);
397 u32 i;
398 u32 arraylength;
399 u32 *ptrarray;
400
401 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Read Rtl819XMACPHY_Array\n"));
402 arraylength = MAC_2T_ARRAYLENGTH;
403 ptrarray = RTL8192CEMAC_2T_ARRAY;
404 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
405 ("Img:RTL8192CEMAC_2T_ARRAY\n"));
406 for (i = 0; i < arraylength; i = i + 2)
407 rtl_write_byte(rtlpriv, ptrarray[i], (u8) ptrarray[i + 1]);
408 return true;
409}
410
411void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw)
412{
413}
414
415static bool _rtl92c_phy_config_bb_with_headerfile(struct ieee80211_hw *hw,
416 u8 configtype)
417{
418 int i;
419 u32 *phy_regarray_table;
420 u32 *agctab_array_table;
421 u16 phy_reg_arraylen, agctab_arraylen;
422 struct rtl_priv *rtlpriv = rtl_priv(hw);
423 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
424
425 if (IS_92C_SERIAL(rtlhal->version)) {
426 agctab_arraylen = AGCTAB_2TARRAYLENGTH;
427 agctab_array_table = RTL8192CEAGCTAB_2TARRAY;
428 phy_reg_arraylen = PHY_REG_2TARRAY_LENGTH;
429 phy_regarray_table = RTL8192CEPHY_REG_2TARRAY;
430 } else {
431 agctab_arraylen = AGCTAB_1TARRAYLENGTH;
432 agctab_array_table = RTL8192CEAGCTAB_1TARRAY;
433 phy_reg_arraylen = PHY_REG_1TARRAY_LENGTH;
434 phy_regarray_table = RTL8192CEPHY_REG_1TARRAY;
435 }
436 if (configtype == BASEBAND_CONFIG_PHY_REG) {
437 for (i = 0; i < phy_reg_arraylen; i = i + 2) {
438 if (phy_regarray_table[i] == 0xfe)
439 mdelay(50);
440 else if (phy_regarray_table[i] == 0xfd)
441 mdelay(5);
442 else if (phy_regarray_table[i] == 0xfc)
443 mdelay(1);
444 else if (phy_regarray_table[i] == 0xfb)
445 udelay(50);
446 else if (phy_regarray_table[i] == 0xfa)
447 udelay(5);
448 else if (phy_regarray_table[i] == 0xf9)
449 udelay(1);
450 rtl_set_bbreg(hw, phy_regarray_table[i], MASKDWORD,
451 phy_regarray_table[i + 1]);
452 udelay(1);
453 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
454 ("The phy_regarray_table[0] is %x"
455 " Rtl819XPHY_REGArray[1] is %x\n",
456 phy_regarray_table[i],
457 phy_regarray_table[i + 1]));
458 }
459 rtl92c_phy_config_bb_external_pa(hw);
460 } else if (configtype == BASEBAND_CONFIG_AGC_TAB) {
461 for (i = 0; i < agctab_arraylen; i = i + 2) {
462 rtl_set_bbreg(hw, agctab_array_table[i], MASKDWORD,
463 agctab_array_table[i + 1]);
464 udelay(1);
465 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
466 ("The agctab_array_table[0] is "
467 "%x Rtl819XPHY_REGArray[1] is %x\n",
468 agctab_array_table[i],
469 agctab_array_table[i + 1]));
470 }
471 }
472 return true;
473}
474
475static void _rtl92c_store_pwrIndex_diffrate_offset(struct ieee80211_hw *hw,
476 u32 regaddr, u32 bitmask,
477 u32 data)
478{
479 struct rtl_priv *rtlpriv = rtl_priv(hw);
480 struct rtl_phy *rtlphy = &(rtlpriv->phy);
481
482 if (regaddr == RTXAGC_A_RATE18_06) {
483 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][0] =
484 data;
485 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
486 ("MCSTxPowerLevelOriginalOffset[%d][0] = 0x%x\n",
487 rtlphy->pwrgroup_cnt,
488 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
489 pwrgroup_cnt][0]));
490 }
491 if (regaddr == RTXAGC_A_RATE54_24) {
492 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][1] =
493 data;
494 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
495 ("MCSTxPowerLevelOriginalOffset[%d][1] = 0x%x\n",
496 rtlphy->pwrgroup_cnt,
497 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
498 pwrgroup_cnt][1]));
499 }
500 if (regaddr == RTXAGC_A_CCK1_MCS32) {
501 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][6] =
502 data;
503 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
504 ("MCSTxPowerLevelOriginalOffset[%d][6] = 0x%x\n",
505 rtlphy->pwrgroup_cnt,
506 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
507 pwrgroup_cnt][6]));
508 }
509 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0xffffff00) {
510 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][7] =
511 data;
512 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
513 ("MCSTxPowerLevelOriginalOffset[%d][7] = 0x%x\n",
514 rtlphy->pwrgroup_cnt,
515 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
516 pwrgroup_cnt][7]));
517 }
518 if (regaddr == RTXAGC_A_MCS03_MCS00) {
519 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][2] =
520 data;
521 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
522 ("MCSTxPowerLevelOriginalOffset[%d][2] = 0x%x\n",
523 rtlphy->pwrgroup_cnt,
524 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
525 pwrgroup_cnt][2]));
526 }
527 if (regaddr == RTXAGC_A_MCS07_MCS04) {
528 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][3] =
529 data;
530 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
531 ("MCSTxPowerLevelOriginalOffset[%d][3] = 0x%x\n",
532 rtlphy->pwrgroup_cnt,
533 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
534 pwrgroup_cnt][3]));
535 }
536 if (regaddr == RTXAGC_A_MCS11_MCS08) {
537 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][4] =
538 data;
539 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
540 ("MCSTxPowerLevelOriginalOffset[%d][4] = 0x%x\n",
541 rtlphy->pwrgroup_cnt,
542 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
543 pwrgroup_cnt][4]));
544 }
545 if (regaddr == RTXAGC_A_MCS15_MCS12) {
546 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][5] =
547 data;
548 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
549 ("MCSTxPowerLevelOriginalOffset[%d][5] = 0x%x\n",
550 rtlphy->pwrgroup_cnt,
551 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
552 pwrgroup_cnt][5]));
553 }
554 if (regaddr == RTXAGC_B_RATE18_06) {
555 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][8] =
556 data;
557 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
558 ("MCSTxPowerLevelOriginalOffset[%d][8] = 0x%x\n",
559 rtlphy->pwrgroup_cnt,
560 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
561 pwrgroup_cnt][8]));
562 }
563 if (regaddr == RTXAGC_B_RATE54_24) {
564 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][9] =
565 data;
566
567 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
568 ("MCSTxPowerLevelOriginalOffset[%d][9] = 0x%x\n",
569 rtlphy->pwrgroup_cnt,
570 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
571 pwrgroup_cnt][9]));
572 }
573
574 if (regaddr == RTXAGC_B_CCK1_55_MCS32) {
575 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][14] =
576 data;
577
578 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
579 ("MCSTxPowerLevelOriginalOffset[%d][14] = 0x%x\n",
580 rtlphy->pwrgroup_cnt,
581 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
582 pwrgroup_cnt][14]));
583 }
584
585 if (regaddr == RTXAGC_B_CCK11_A_CCK2_11 && bitmask == 0x000000ff) {
586 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][15] =
587 data;
588
589 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
590 ("MCSTxPowerLevelOriginalOffset[%d][15] = 0x%x\n",
591 rtlphy->pwrgroup_cnt,
592 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
593 pwrgroup_cnt][15]));
594 }
595
596 if (regaddr == RTXAGC_B_MCS03_MCS00) {
597 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][10] =
598 data;
599
600 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
601 ("MCSTxPowerLevelOriginalOffset[%d][10] = 0x%x\n",
602 rtlphy->pwrgroup_cnt,
603 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
604 pwrgroup_cnt][10]));
605 }
606
607 if (regaddr == RTXAGC_B_MCS07_MCS04) {
608 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][11] =
609 data;
610
611 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
612 ("MCSTxPowerLevelOriginalOffset[%d][11] = 0x%x\n",
613 rtlphy->pwrgroup_cnt,
614 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
615 pwrgroup_cnt][11]));
616 }
617
618 if (regaddr == RTXAGC_B_MCS11_MCS08) {
619 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][12] =
620 data;
621
622 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
623 ("MCSTxPowerLevelOriginalOffset[%d][12] = 0x%x\n",
624 rtlphy->pwrgroup_cnt,
625 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
626 pwrgroup_cnt][12]));
627 }
628
629 if (regaddr == RTXAGC_B_MCS15_MCS12) {
630 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->pwrgroup_cnt][13] =
631 data;
632
633 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
634 ("MCSTxPowerLevelOriginalOffset[%d][13] = 0x%x\n",
635 rtlphy->pwrgroup_cnt,
636 rtlphy->mcs_txpwrlevel_origoffset[rtlphy->
637 pwrgroup_cnt][13]));
638
639 rtlphy->pwrgroup_cnt++;
640 }
641}
642
643static bool _rtl92c_phy_config_bb_with_pgheaderfile(struct ieee80211_hw *hw,
644 u8 configtype)
645{
646 struct rtl_priv *rtlpriv = rtl_priv(hw);
647 int i;
648 u32 *phy_regarray_table_pg;
649 u16 phy_regarray_pg_len;
650
651 phy_regarray_pg_len = PHY_REG_ARRAY_PGLENGTH;
652 phy_regarray_table_pg = RTL8192CEPHY_REG_ARRAY_PG;
653
654 if (configtype == BASEBAND_CONFIG_PHY_REG) {
655 for (i = 0; i < phy_regarray_pg_len; i = i + 3) {
656 if (phy_regarray_table_pg[i] == 0xfe)
657 mdelay(50);
658 else if (phy_regarray_table_pg[i] == 0xfd)
659 mdelay(5);
660 else if (phy_regarray_table_pg[i] == 0xfc)
661 mdelay(1);
662 else if (phy_regarray_table_pg[i] == 0xfb)
663 udelay(50);
664 else if (phy_regarray_table_pg[i] == 0xfa)
665 udelay(5);
666 else if (phy_regarray_table_pg[i] == 0xf9)
667 udelay(1);
668
669 _rtl92c_store_pwrIndex_diffrate_offset(hw,
670 phy_regarray_table_pg[i],
671 phy_regarray_table_pg[i + 1],
672 phy_regarray_table_pg[i + 2]);
673 }
674 } else {
675
676 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
677 ("configtype != BaseBand_Config_PHY_REG\n"));
678 }
679 return true;
680}
681
682static bool _rtl92c_phy_config_rf_external_pa(struct ieee80211_hw *hw,
683 enum radio_path rfpath)
684{
685 return true;
686}
687
688bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
689 enum radio_path rfpath)
690{
691
692 int i;
693 bool rtstatus = true;
694 u32 *radioa_array_table;
695 u32 *radiob_array_table;
696 u16 radioa_arraylen, radiob_arraylen;
697 struct rtl_priv *rtlpriv = rtl_priv(hw);
698 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
699
700 if (IS_92C_SERIAL(rtlhal->version)) {
701 radioa_arraylen = RADIOA_2TARRAYLENGTH;
702 radioa_array_table = RTL8192CERADIOA_2TARRAY;
703 radiob_arraylen = RADIOB_2TARRAYLENGTH;
704 radiob_array_table = RTL8192CE_RADIOB_2TARRAY;
705 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
706 ("Radio_A:RTL8192CERADIOA_2TARRAY\n"));
707 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
708 ("Radio_B:RTL8192CE_RADIOB_2TARRAY\n"));
709 } else {
710 radioa_arraylen = RADIOA_1TARRAYLENGTH;
711 radioa_array_table = RTL8192CE_RADIOA_1TARRAY;
712 radiob_arraylen = RADIOB_1TARRAYLENGTH;
713 radiob_array_table = RTL8192CE_RADIOB_1TARRAY;
714 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
715 ("Radio_A:RTL8192CE_RADIOA_1TARRAY\n"));
716 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
717 ("Radio_B:RTL8192CE_RADIOB_1TARRAY\n"));
718 }
719 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("Radio No %x\n", rfpath));
720 rtstatus = true;
721 switch (rfpath) {
722 case RF90_PATH_A:
723 for (i = 0; i < radioa_arraylen; i = i + 2) {
724 if (radioa_array_table[i] == 0xfe)
725 mdelay(50);
726 else if (radioa_array_table[i] == 0xfd)
727 mdelay(5);
728 else if (radioa_array_table[i] == 0xfc)
729 mdelay(1);
730 else if (radioa_array_table[i] == 0xfb)
731 udelay(50);
732 else if (radioa_array_table[i] == 0xfa)
733 udelay(5);
734 else if (radioa_array_table[i] == 0xf9)
735 udelay(1);
736 else {
737 rtl_set_rfreg(hw, rfpath, radioa_array_table[i],
738 RFREG_OFFSET_MASK,
739 radioa_array_table[i + 1]);
740 udelay(1);
741 }
742 }
743 _rtl92c_phy_config_rf_external_pa(hw, rfpath);
744 break;
745 case RF90_PATH_B:
746 for (i = 0; i < radiob_arraylen; i = i + 2) {
747 if (radiob_array_table[i] == 0xfe) {
748 mdelay(50);
749 } else if (radiob_array_table[i] == 0xfd)
750 mdelay(5);
751 else if (radiob_array_table[i] == 0xfc)
752 mdelay(1);
753 else if (radiob_array_table[i] == 0xfb)
754 udelay(50);
755 else if (radiob_array_table[i] == 0xfa)
756 udelay(5);
757 else if (radiob_array_table[i] == 0xf9)
758 udelay(1);
759 else {
760 rtl_set_rfreg(hw, rfpath, radiob_array_table[i],
761 RFREG_OFFSET_MASK,
762 radiob_array_table[i + 1]);
763 udelay(1);
764 }
765 }
766 break;
767 case RF90_PATH_C:
768 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
769 ("switch case not process\n"));
770 break;
771 case RF90_PATH_D:
772 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
773 ("switch case not process\n"));
774 break;
775 }
776 return true;
777}
778
779void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw)
780{
781 struct rtl_priv *rtlpriv = rtl_priv(hw);
782 struct rtl_phy *rtlphy = &(rtlpriv->phy);
783
784 rtlphy->default_initialgain[0] =
785 (u8) rtl_get_bbreg(hw, ROFDM0_XAAGCCORE1, MASKBYTE0);
786 rtlphy->default_initialgain[1] =
787 (u8) rtl_get_bbreg(hw, ROFDM0_XBAGCCORE1, MASKBYTE0);
788 rtlphy->default_initialgain[2] =
789 (u8) rtl_get_bbreg(hw, ROFDM0_XCAGCCORE1, MASKBYTE0);
790 rtlphy->default_initialgain[3] =
791 (u8) rtl_get_bbreg(hw, ROFDM0_XDAGCCORE1, MASKBYTE0);
792
793 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
794 ("Default initial gain (c50=0x%x, "
795 "c58=0x%x, c60=0x%x, c68=0x%x\n",
796 rtlphy->default_initialgain[0],
797 rtlphy->default_initialgain[1],
798 rtlphy->default_initialgain[2],
799 rtlphy->default_initialgain[3]));
800
801 rtlphy->framesync = (u8) rtl_get_bbreg(hw,
802 ROFDM0_RXDETECTOR3, MASKBYTE0);
803 rtlphy->framesync_c34 = rtl_get_bbreg(hw,
804 ROFDM0_RXDETECTOR2, MASKDWORD);
805
806 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
807 ("Default framesync (0x%x) = 0x%x\n",
808 ROFDM0_RXDETECTOR3, rtlphy->framesync));
809}
810
811static void _rtl92c_phy_init_bb_rf_register_definition(struct ieee80211_hw *hw)
812{
813 struct rtl_priv *rtlpriv = rtl_priv(hw);
814 struct rtl_phy *rtlphy = &(rtlpriv->phy);
815
816 rtlphy->phyreg_def[RF90_PATH_A].rfintfs = RFPGA0_XAB_RFINTERFACESW;
817 rtlphy->phyreg_def[RF90_PATH_B].rfintfs = RFPGA0_XAB_RFINTERFACESW;
818 rtlphy->phyreg_def[RF90_PATH_C].rfintfs = RFPGA0_XCD_RFINTERFACESW;
819 rtlphy->phyreg_def[RF90_PATH_D].rfintfs = RFPGA0_XCD_RFINTERFACESW;
820
821 rtlphy->phyreg_def[RF90_PATH_A].rfintfi = RFPGA0_XAB_RFINTERFACERB;
822 rtlphy->phyreg_def[RF90_PATH_B].rfintfi = RFPGA0_XAB_RFINTERFACERB;
823 rtlphy->phyreg_def[RF90_PATH_C].rfintfi = RFPGA0_XCD_RFINTERFACERB;
824 rtlphy->phyreg_def[RF90_PATH_D].rfintfi = RFPGA0_XCD_RFINTERFACERB;
825
826 rtlphy->phyreg_def[RF90_PATH_A].rfintfo = RFPGA0_XA_RFINTERFACEOE;
827 rtlphy->phyreg_def[RF90_PATH_B].rfintfo = RFPGA0_XB_RFINTERFACEOE;
828
829 rtlphy->phyreg_def[RF90_PATH_A].rfintfe = RFPGA0_XA_RFINTERFACEOE;
830 rtlphy->phyreg_def[RF90_PATH_B].rfintfe = RFPGA0_XB_RFINTERFACEOE;
831
832 rtlphy->phyreg_def[RF90_PATH_A].rf3wire_offset =
833 RFPGA0_XA_LSSIPARAMETER;
834 rtlphy->phyreg_def[RF90_PATH_B].rf3wire_offset =
835 RFPGA0_XB_LSSIPARAMETER;
836
837 rtlphy->phyreg_def[RF90_PATH_A].rflssi_select = rFPGA0_XAB_RFPARAMETER;
838 rtlphy->phyreg_def[RF90_PATH_B].rflssi_select = rFPGA0_XAB_RFPARAMETER;
839 rtlphy->phyreg_def[RF90_PATH_C].rflssi_select = rFPGA0_XCD_RFPARAMETER;
840 rtlphy->phyreg_def[RF90_PATH_D].rflssi_select = rFPGA0_XCD_RFPARAMETER;
841
842 rtlphy->phyreg_def[RF90_PATH_A].rftxgain_stage = RFPGA0_TXGAINSTAGE;
843 rtlphy->phyreg_def[RF90_PATH_B].rftxgain_stage = RFPGA0_TXGAINSTAGE;
844 rtlphy->phyreg_def[RF90_PATH_C].rftxgain_stage = RFPGA0_TXGAINSTAGE;
845 rtlphy->phyreg_def[RF90_PATH_D].rftxgain_stage = RFPGA0_TXGAINSTAGE;
846
847 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para1 = RFPGA0_XA_HSSIPARAMETER1;
848 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para1 = RFPGA0_XB_HSSIPARAMETER1;
849
850 rtlphy->phyreg_def[RF90_PATH_A].rfhssi_para2 = RFPGA0_XA_HSSIPARAMETER2;
851 rtlphy->phyreg_def[RF90_PATH_B].rfhssi_para2 = RFPGA0_XB_HSSIPARAMETER2;
852
853 rtlphy->phyreg_def[RF90_PATH_A].rfswitch_control =
854 RFPGA0_XAB_SWITCHCONTROL;
855 rtlphy->phyreg_def[RF90_PATH_B].rfswitch_control =
856 RFPGA0_XAB_SWITCHCONTROL;
857 rtlphy->phyreg_def[RF90_PATH_C].rfswitch_control =
858 RFPGA0_XCD_SWITCHCONTROL;
859 rtlphy->phyreg_def[RF90_PATH_D].rfswitch_control =
860 RFPGA0_XCD_SWITCHCONTROL;
861
862 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control1 = ROFDM0_XAAGCCORE1;
863 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control1 = ROFDM0_XBAGCCORE1;
864 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control1 = ROFDM0_XCAGCCORE1;
865 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control1 = ROFDM0_XDAGCCORE1;
866
867 rtlphy->phyreg_def[RF90_PATH_A].rfagc_control2 = ROFDM0_XAAGCCORE2;
868 rtlphy->phyreg_def[RF90_PATH_B].rfagc_control2 = ROFDM0_XBAGCCORE2;
869 rtlphy->phyreg_def[RF90_PATH_C].rfagc_control2 = ROFDM0_XCAGCCORE2;
870 rtlphy->phyreg_def[RF90_PATH_D].rfagc_control2 = ROFDM0_XDAGCCORE2;
871
872 rtlphy->phyreg_def[RF90_PATH_A].rfrxiq_imbalance =
873 ROFDM0_XARXIQIMBALANCE;
874 rtlphy->phyreg_def[RF90_PATH_B].rfrxiq_imbalance =
875 ROFDM0_XBRXIQIMBALANCE;
876 rtlphy->phyreg_def[RF90_PATH_C].rfrxiq_imbalance =
877 ROFDM0_XCRXIQIMBANLANCE;
878 rtlphy->phyreg_def[RF90_PATH_D].rfrxiq_imbalance =
879 ROFDM0_XDRXIQIMBALANCE;
880
881 rtlphy->phyreg_def[RF90_PATH_A].rfrx_afe = ROFDM0_XARXAFE;
882 rtlphy->phyreg_def[RF90_PATH_B].rfrx_afe = ROFDM0_XBRXAFE;
883 rtlphy->phyreg_def[RF90_PATH_C].rfrx_afe = ROFDM0_XCRXAFE;
884 rtlphy->phyreg_def[RF90_PATH_D].rfrx_afe = ROFDM0_XDRXAFE;
885
886 rtlphy->phyreg_def[RF90_PATH_A].rftxiq_imbalance =
887 ROFDM0_XATXIQIMBALANCE;
888 rtlphy->phyreg_def[RF90_PATH_B].rftxiq_imbalance =
889 ROFDM0_XBTXIQIMBALANCE;
890 rtlphy->phyreg_def[RF90_PATH_C].rftxiq_imbalance =
891 ROFDM0_XCTXIQIMBALANCE;
892 rtlphy->phyreg_def[RF90_PATH_D].rftxiq_imbalance =
893 ROFDM0_XDTXIQIMBALANCE;
894
895 rtlphy->phyreg_def[RF90_PATH_A].rftx_afe = ROFDM0_XATXAFE;
896 rtlphy->phyreg_def[RF90_PATH_B].rftx_afe = ROFDM0_XBTXAFE;
897 rtlphy->phyreg_def[RF90_PATH_C].rftx_afe = ROFDM0_XCTXAFE;
898 rtlphy->phyreg_def[RF90_PATH_D].rftx_afe = ROFDM0_XDTXAFE;
899
900 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readback =
901 RFPGA0_XA_LSSIREADBACK;
902 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readback =
903 RFPGA0_XB_LSSIREADBACK;
904 rtlphy->phyreg_def[RF90_PATH_C].rflssi_readback =
905 RFPGA0_XC_LSSIREADBACK;
906 rtlphy->phyreg_def[RF90_PATH_D].rflssi_readback =
907 RFPGA0_XD_LSSIREADBACK;
908
909 rtlphy->phyreg_def[RF90_PATH_A].rflssi_readbackpi =
910 TRANSCEIVEA_HSPI_READBACK;
911 rtlphy->phyreg_def[RF90_PATH_B].rflssi_readbackpi =
912 TRANSCEIVEB_HSPI_READBACK;
913
914}
915
916void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw, long *powerlevel)
917{
918 struct rtl_priv *rtlpriv = rtl_priv(hw);
919 struct rtl_phy *rtlphy = &(rtlpriv->phy);
920 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
921 u8 txpwr_level;
922 long txpwr_dbm;
923
924 txpwr_level = rtlphy->cur_cck_txpwridx;
925 txpwr_dbm = _rtl92c_phy_txpwr_idx_to_dbm(hw,
926 WIRELESS_MODE_B, txpwr_level);
927 txpwr_level = rtlphy->cur_ofdm24g_txpwridx +
928 rtlefuse->legacy_ht_txpowerdiff;
929 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
930 WIRELESS_MODE_G,
931 txpwr_level) > txpwr_dbm)
932 txpwr_dbm =
933 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_G,
934 txpwr_level);
935 txpwr_level = rtlphy->cur_ofdm24g_txpwridx;
936 if (_rtl92c_phy_txpwr_idx_to_dbm(hw,
937 WIRELESS_MODE_N_24G,
938 txpwr_level) > txpwr_dbm)
939 txpwr_dbm =
940 _rtl92c_phy_txpwr_idx_to_dbm(hw, WIRELESS_MODE_N_24G,
941 txpwr_level);
942 *powerlevel = txpwr_dbm;
943}
944
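/*
 * Look up the per-path CCK and OFDM (HT40) TX power indices for a channel
 * from the efuse-derived tables; 1T parts use the 1S table, 2T2R parts the
 * 2S table.
 */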
945static void _rtl92c_get_txpower_index(struct ieee80211_hw *hw, u8 channel,
946 u8 *cckpowerlevel, u8 *ofdmpowerlevel)
947{
948 struct rtl_priv *rtlpriv = rtl_priv(hw);
949 struct rtl_phy *rtlphy = &(rtlpriv->phy);
950 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
951 u8 index = (channel - 1);
952
953 cckpowerlevel[RF90_PATH_A] =
954 rtlefuse->txpwrlevel_cck[RF90_PATH_A][index];
955 cckpowerlevel[RF90_PATH_B] =
956 rtlefuse->txpwrlevel_cck[RF90_PATH_B][index];
957 if (get_rf_type(rtlphy) == RF_1T2R || get_rf_type(rtlphy) == RF_1T1R) {
958 ofdmpowerlevel[RF90_PATH_A] =
959 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_A][index];
960 ofdmpowerlevel[RF90_PATH_B] =
961 rtlefuse->txpwrlevel_ht40_1s[RF90_PATH_B][index];
962 } else if (get_rf_type(rtlphy) == RF_2T2R) {
963 ofdmpowerlevel[RF90_PATH_A] =
964 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_A][index];
965 ofdmpowerlevel[RF90_PATH_B] =
966 rtlefuse->txpwrlevel_ht40_2s[RF90_PATH_B][index];
967 }
968}
969
970static void _rtl92c_ccxpower_index_check(struct ieee80211_hw *hw,
971 u8 channel, u8 *cckpowerlevel,
972 u8 *ofdmpowerlevel)
973{
974 struct rtl_priv *rtlpriv = rtl_priv(hw);
975 struct rtl_phy *rtlphy = &(rtlpriv->phy);
976
977 rtlphy->cur_cck_txpwridx = cckpowerlevel[0];
978 rtlphy->cur_ofdm24g_txpwridx = ofdmpowerlevel[0];
979}
980
981void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel)
982{
983 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
984 u8 cckpowerlevel[2], ofdmpowerlevel[2];
985
 986	if (!rtlefuse->b_txpwr_fromeprom)
987 return;
988 _rtl92c_get_txpower_index(hw, channel,
989 &cckpowerlevel[0], &ofdmpowerlevel[0]);
990 _rtl92c_ccxpower_index_check(hw,
991 channel, &cckpowerlevel[0],
992 &ofdmpowerlevel[0]);
993 rtl92c_phy_rf6052_set_cck_txpower(hw, &cckpowerlevel[0]);
994 rtl92c_phy_rf6052_set_ofdm_txpower(hw, &ofdmpowerlevel[0], channel);
995}
996
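/*
 * Convert a requested power in dBm into CCK and OFDM power indices, remove
 * the legacy-to-HT offset from the OFDM index, write the result into every
 * channel entry of the power tables and reprogram the current channel.
 */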
997bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw, long power_indbm)
998{
999 struct rtl_priv *rtlpriv = rtl_priv(hw);
1000 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1001 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1002 u8 idx;
1003 u8 rf_path;
1004
1005 u8 ccktxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
1006 WIRELESS_MODE_B,
1007 power_indbm);
1008 u8 ofdmtxpwridx = _rtl92c_phy_dbm_to_txpwr_Idx(hw,
1009 WIRELESS_MODE_N_24G,
1010 power_indbm);
1011 if (ofdmtxpwridx - rtlefuse->legacy_ht_txpowerdiff > 0)
1012 ofdmtxpwridx -= rtlefuse->legacy_ht_txpowerdiff;
1013 else
1014 ofdmtxpwridx = 0;
1015 RT_TRACE(rtlpriv, COMP_TXAGC, DBG_TRACE,
 1016		 ("%ld dBm, ccktxpwridx = %d, ofdmtxpwridx = %d\n",
1017 power_indbm, ccktxpwridx, ofdmtxpwridx));
1018 for (idx = 0; idx < 14; idx++) {
1019 for (rf_path = 0; rf_path < 2; rf_path++) {
1020 rtlefuse->txpwrlevel_cck[rf_path][idx] = ccktxpwridx;
1021 rtlefuse->txpwrlevel_ht40_1s[rf_path][idx] =
1022 ofdmtxpwridx;
1023 rtlefuse->txpwrlevel_ht40_2s[rf_path][idx] =
1024 ofdmtxpwridx;
1025 }
1026 }
1027 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
1028 return true;
1029}
1030
1031void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw, u16 beaconinterval)
1032{
1033}
1034
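/*
 * dBm -> TX power index: index = 2 * (dBm - offset), with an offset of
 * -7 dBm for CCK (802.11b) and -8 dBm otherwise, clamped to
 * MAX_TXPWR_IDX_NMODE_92S.
 */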
1035static u8 _rtl92c_phy_dbm_to_txpwr_Idx(struct ieee80211_hw *hw,
1036 enum wireless_mode wirelessmode,
1037 long power_indbm)
1038{
1039 u8 txpwridx;
1040 long offset;
1041
1042 switch (wirelessmode) {
1043 case WIRELESS_MODE_B:
1044 offset = -7;
1045 break;
1046 case WIRELESS_MODE_G:
1047 case WIRELESS_MODE_N_24G:
1048 offset = -8;
1049 break;
1050 default:
1051 offset = -8;
1052 break;
1053 }
1054
1055 if ((power_indbm - offset) > 0)
1056 txpwridx = (u8) ((power_indbm - offset) * 2);
1057 else
1058 txpwridx = 0;
1059
1060 if (txpwridx > MAX_TXPWR_IDX_NMODE_92S)
1061 txpwridx = MAX_TXPWR_IDX_NMODE_92S;
1062
1063 return txpwridx;
1064}
1065
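/* Inverse mapping: dBm = index / 2 + offset (-7 for CCK, -8 otherwise). */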
1066static long _rtl92c_phy_txpwr_idx_to_dbm(struct ieee80211_hw *hw,
1067 enum wireless_mode wirelessmode,
1068 u8 txpwridx)
1069{
1070 long offset;
1071 long pwrout_dbm;
1072
1073 switch (wirelessmode) {
1074 case WIRELESS_MODE_B:
1075 offset = -7;
1076 break;
1077 case WIRELESS_MODE_G:
1078 case WIRELESS_MODE_N_24G:
1079 offset = -8;
1080 break;
1081 default:
1082 offset = -8;
1083 break;
1084 }
1085 pwrout_dbm = txpwridx / 2 + offset;
1086 return pwrout_dbm;
1087}
1088
1089void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw, u8 operation)
1090{
1091 struct rtl_priv *rtlpriv = rtl_priv(hw);
1092 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1093 enum io_type iotype;
1094
1095 if (!is_hal_stop(rtlhal)) {
1096 switch (operation) {
1097 case SCAN_OPT_BACKUP:
1098 iotype = IO_CMD_PAUSE_DM_BY_SCAN;
1099 rtlpriv->cfg->ops->set_hw_reg(hw,
1100 HW_VAR_IO_CMD,
1101 (u8 *)&iotype);
1102
1103 break;
1104 case SCAN_OPT_RESTORE:
1105 iotype = IO_CMD_RESUME_DM_BY_SCAN;
1106 rtlpriv->cfg->ops->set_hw_reg(hw,
1107 HW_VAR_IO_CMD,
1108 (u8 *)&iotype);
1109 break;
1110 default:
1111 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1112 ("Unknown Scan Backup operation.\n"));
1113 break;
1114 }
1115 }
1116}
1117
1118void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw)
1119{
1120 struct rtl_priv *rtlpriv = rtl_priv(hw);
1121 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1122 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1123 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
1124 u8 reg_bw_opmode;
1125 u8 reg_prsr_rsc;
1126
1127 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
1128 ("Switch to %s bandwidth\n",
1129 rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20 ?
 1130		 "20MHz" : "40MHz"));
1131
1132 if (is_hal_stop(rtlhal))
1133 return;
1134
1135 reg_bw_opmode = rtl_read_byte(rtlpriv, REG_BWOPMODE);
1136 reg_prsr_rsc = rtl_read_byte(rtlpriv, REG_RRSR + 2);
1137
1138 switch (rtlphy->current_chan_bw) {
1139 case HT_CHANNEL_WIDTH_20:
1140 reg_bw_opmode |= BW_OPMODE_20MHZ;
1141 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
1142 break;
1143
1144 case HT_CHANNEL_WIDTH_20_40:
1145 reg_bw_opmode &= ~BW_OPMODE_20MHZ;
1146 rtl_write_byte(rtlpriv, REG_BWOPMODE, reg_bw_opmode);
1147
1148 reg_prsr_rsc =
1149 (reg_prsr_rsc & 0x90) | (mac->cur_40_prime_sc << 5);
1150 rtl_write_byte(rtlpriv, REG_RRSR + 2, reg_prsr_rsc);
1151 break;
1152
1153 default:
1154 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1155 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
1156 break;
1157 }
1158
1159 switch (rtlphy->current_chan_bw) {
1160 case HT_CHANNEL_WIDTH_20:
1161 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x0);
1162 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x0);
1163 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 1);
1164 break;
1165 case HT_CHANNEL_WIDTH_20_40:
1166 rtl_set_bbreg(hw, RFPGA0_RFMOD, BRFMOD, 0x1);
1167 rtl_set_bbreg(hw, RFPGA1_RFMOD, BRFMOD, 0x1);
1168 rtl_set_bbreg(hw, RCCK0_SYSTEM, BCCK_SIDEBAND,
1169 (mac->cur_40_prime_sc >> 1));
1170 rtl_set_bbreg(hw, ROFDM1_LSTF, 0xC00, mac->cur_40_prime_sc);
1171 rtl_set_bbreg(hw, RFPGA0_ANALOGPARAMETER2, BIT(10), 0);
1172 rtl_set_bbreg(hw, 0x818, (BIT(26) | BIT(27)),
1173 (mac->cur_40_prime_sc ==
1174 HAL_PRIME_CHNL_OFFSET_LOWER) ? 2 : 1);
1175 break;
1176 default:
1177 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1178 ("unknown bandwidth: %#X\n", rtlphy->current_chan_bw));
1179 break;
1180 }
1181 rtl92c_phy_rf6052_set_bandwidth(hw, rtlphy->current_chan_bw);
1182 rtlphy->set_bwmode_inprogress = false;
1183 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
1184}
1185
1186void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
1187 enum nl80211_channel_type ch_type)
1188{
1189 struct rtl_priv *rtlpriv = rtl_priv(hw);
1190 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1191 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1192 u8 tmp_bw = rtlphy->current_chan_bw;
1193
1194 if (rtlphy->set_bwmode_inprogress)
1195 return;
1196 rtlphy->set_bwmode_inprogress = true;
1197 if ((!is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw)))
1198 rtl92c_phy_set_bw_mode_callback(hw);
1199 else {
1200 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
1201 ("FALSE driver sleep or unload\n"));
1202 rtlphy->set_bwmode_inprogress = false;
1203 rtlphy->current_chan_bw = tmp_bw;
1204 }
1205}
1206
1207void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw)
1208{
1209 struct rtl_priv *rtlpriv = rtl_priv(hw);
1210 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1211 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1212 u32 delay;
1213
1214 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE,
1215 ("switch to channel%d\n", rtlphy->current_channel));
1216 if (is_hal_stop(rtlhal))
1217 return;
1218 do {
1219 if (!rtlphy->sw_chnl_inprogress)
1220 break;
1221 if (!_rtl92c_phy_sw_chnl_step_by_step
1222 (hw, rtlphy->current_channel, &rtlphy->sw_chnl_stage,
1223 &rtlphy->sw_chnl_step, &delay)) {
1224 if (delay > 0)
1225 mdelay(delay);
1226 else
1227 continue;
1228 } else
1229 rtlphy->sw_chnl_inprogress = false;
1230 break;
1231 } while (true);
1232 RT_TRACE(rtlpriv, COMP_SCAN, DBG_TRACE, ("<==\n"));
1233}
1234
1235u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw)
1236{
1237 struct rtl_priv *rtlpriv = rtl_priv(hw);
1238 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1239 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1240
1241 if (rtlphy->sw_chnl_inprogress)
1242 return 0;
1243 if (rtlphy->set_bwmode_inprogress)
1244 return 0;
1245 RT_ASSERT((rtlphy->current_channel <= 14),
1246 ("WIRELESS_MODE_G but channel>14"));
1247 rtlphy->sw_chnl_inprogress = true;
1248 rtlphy->sw_chnl_stage = 0;
1249 rtlphy->sw_chnl_step = 0;
1250 if (!(is_hal_stop(rtlhal)) && !(RT_CANNOT_IO(hw))) {
1251 rtl92c_phy_sw_chnl_callback(hw);
1252 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
 1253			 ("sw_chnl_inprogress false schedule workitem\n"));
1254 rtlphy->sw_chnl_inprogress = false;
1255 } else {
1256 RT_TRACE(rtlpriv, COMP_CHAN, DBG_LOUD,
1257 ("sw_chnl_inprogress false driver sleep or"
1258 " unload\n"));
1259 rtlphy->sw_chnl_inprogress = false;
1260 }
1261 return 1;
1262}
1263
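/*
 * Execute one step of the channel-switch sequence.  The sequence is built
 * from three small command tables (pre-common, RF-dependent, post-common);
 * each call runs the command selected by *stage/*step, advances *step and
 * returns false with *delay set to the milliseconds to wait, or returns
 * true once the CMDID_END of the post-common table is reached.
 */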
1264static bool _rtl92c_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
1265 u8 channel, u8 *stage, u8 *step,
1266 u32 *delay)
1267{
1268 struct rtl_priv *rtlpriv = rtl_priv(hw);
1269 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1270 struct swchnlcmd precommoncmd[MAX_PRECMD_CNT];
1271 u32 precommoncmdcnt;
1272 struct swchnlcmd postcommoncmd[MAX_POSTCMD_CNT];
1273 u32 postcommoncmdcnt;
1274 struct swchnlcmd rfdependcmd[MAX_RFDEPENDCMD_CNT];
1275 u32 rfdependcmdcnt;
1276 struct swchnlcmd *currentcmd = NULL;
1277 u8 rfpath;
1278 u8 num_total_rfpath = rtlphy->num_total_rfpath;
1279
1280 precommoncmdcnt = 0;
1281 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
1282 MAX_PRECMD_CNT,
1283 CMDID_SET_TXPOWEROWER_LEVEL, 0, 0, 0);
1284 _rtl92c_phy_set_sw_chnl_cmdarray(precommoncmd, precommoncmdcnt++,
1285 MAX_PRECMD_CNT, CMDID_END, 0, 0, 0);
1286
1287 postcommoncmdcnt = 0;
1288
1289 _rtl92c_phy_set_sw_chnl_cmdarray(postcommoncmd, postcommoncmdcnt++,
1290 MAX_POSTCMD_CNT, CMDID_END, 0, 0, 0);
1291
1292 rfdependcmdcnt = 0;
1293
1294 RT_ASSERT((channel >= 1 && channel <= 14),
1295 ("illegal channel for Zebra: %d\n", channel));
1296
1297 _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
1298 MAX_RFDEPENDCMD_CNT, CMDID_RF_WRITEREG,
1299 RF_CHNLBW, channel, 10);
1300
1301 _rtl92c_phy_set_sw_chnl_cmdarray(rfdependcmd, rfdependcmdcnt++,
1302 MAX_RFDEPENDCMD_CNT, CMDID_END, 0, 0,
1303 0);
1304
1305 do {
1306 switch (*stage) {
1307 case 0:
1308 currentcmd = &precommoncmd[*step];
1309 break;
1310 case 1:
1311 currentcmd = &rfdependcmd[*step];
1312 break;
1313 case 2:
1314 currentcmd = &postcommoncmd[*step];
1315 break;
1316 }
1317
1318 if (currentcmd->cmdid == CMDID_END) {
1319 if ((*stage) == 2) {
1320 return true;
1321 } else {
1322 (*stage)++;
1323 (*step) = 0;
1324 continue;
1325 }
1326 }
1327
1328 switch (currentcmd->cmdid) {
1329 case CMDID_SET_TXPOWEROWER_LEVEL:
1330 rtl92c_phy_set_txpower_level(hw, channel);
1331 break;
1332 case CMDID_WRITEPORT_ULONG:
1333 rtl_write_dword(rtlpriv, currentcmd->para1,
1334 currentcmd->para2);
1335 break;
1336 case CMDID_WRITEPORT_USHORT:
1337 rtl_write_word(rtlpriv, currentcmd->para1,
1338 (u16) currentcmd->para2);
1339 break;
1340 case CMDID_WRITEPORT_UCHAR:
1341 rtl_write_byte(rtlpriv, currentcmd->para1,
1342 (u8) currentcmd->para2);
1343 break;
1344 case CMDID_RF_WRITEREG:
1345 for (rfpath = 0; rfpath < num_total_rfpath; rfpath++) {
1346 rtlphy->rfreg_chnlval[rfpath] =
1347 ((rtlphy->rfreg_chnlval[rfpath] &
1348 0xfffffc00) | currentcmd->para2);
1349
1350 rtl_set_rfreg(hw, (enum radio_path)rfpath,
1351 currentcmd->para1,
1352 RFREG_OFFSET_MASK,
1353 rtlphy->rfreg_chnlval[rfpath]);
1354 }
1355 break;
1356 default:
1357 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1358 ("switch case not process\n"));
1359 break;
1360 }
1361
1362 break;
1363 } while (true);
1364
1365 (*delay) = currentcmd->msdelay;
1366 (*step)++;
1367 return false;
1368}
1369
1370static bool _rtl92c_phy_set_sw_chnl_cmdarray(struct swchnlcmd *cmdtable,
1371 u32 cmdtableidx, u32 cmdtablesz,
1372 enum swchnlcmd_id cmdid,
1373 u32 para1, u32 para2, u32 msdelay)
1374{
1375 struct swchnlcmd *pcmd;
1376
1377 if (cmdtable == NULL) {
1378 RT_ASSERT(false, ("cmdtable cannot be NULL.\n"));
1379 return false;
1380 }
1381
1382 if (cmdtableidx >= cmdtablesz)
1383 return false;
1384
1385 pcmd = cmdtable + cmdtableidx;
1386 pcmd->cmdid = cmdid;
1387 pcmd->para1 = para1;
1388 pcmd->para2 = para2;
1389 pcmd->msdelay = msdelay;
1390 return true;
1391}
1392
1393bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw, u32 rfpath)
1394{
1395 return true;
1396}
1397
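/*
 * One-shot TX/RX IQ calibration for path A.  Registers 0xe30..0xe4c set up
 * the IQK tones, the double write to 0xe48 starts the measurement, and the
 * e94/e9c/ea4/eac readbacks are then sanity-checked: bit 0 of the return
 * value means the TX part passed, bit 1 means the RX part passed.
 */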
1398static u8 _rtl92c_phy_path_a_iqk(struct ieee80211_hw *hw, bool config_pathb)
1399{
1400 u32 reg_eac, reg_e94, reg_e9c, reg_ea4;
1401 u8 result = 0x00;
1402
1403 rtl_set_bbreg(hw, 0xe30, MASKDWORD, 0x10008c1f);
1404 rtl_set_bbreg(hw, 0xe34, MASKDWORD, 0x10008c1f);
1405 rtl_set_bbreg(hw, 0xe38, MASKDWORD, 0x82140102);
1406 rtl_set_bbreg(hw, 0xe3c, MASKDWORD,
1407 config_pathb ? 0x28160202 : 0x28160502);
1408
1409 if (config_pathb) {
1410 rtl_set_bbreg(hw, 0xe50, MASKDWORD, 0x10008c22);
1411 rtl_set_bbreg(hw, 0xe54, MASKDWORD, 0x10008c22);
1412 rtl_set_bbreg(hw, 0xe58, MASKDWORD, 0x82140102);
1413 rtl_set_bbreg(hw, 0xe5c, MASKDWORD, 0x28160202);
1414 }
1415
1416 rtl_set_bbreg(hw, 0xe4c, MASKDWORD, 0x001028d1);
1417 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf9000000);
1418 rtl_set_bbreg(hw, 0xe48, MASKDWORD, 0xf8000000);
1419
1420 mdelay(IQK_DELAY_TIME);
1421
1422 reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1423 reg_e94 = rtl_get_bbreg(hw, 0xe94, MASKDWORD);
1424 reg_e9c = rtl_get_bbreg(hw, 0xe9c, MASKDWORD);
1425 reg_ea4 = rtl_get_bbreg(hw, 0xea4, MASKDWORD);
1426
1427 if (!(reg_eac & BIT(28)) &&
1428 (((reg_e94 & 0x03FF0000) >> 16) != 0x142) &&
1429 (((reg_e9c & 0x03FF0000) >> 16) != 0x42))
1430 result |= 0x01;
1431 else
1432 return result;
1433
1434 if (!(reg_eac & BIT(27)) &&
1435 (((reg_ea4 & 0x03FF0000) >> 16) != 0x132) &&
1436 (((reg_eac & 0x03FF0000) >> 16) != 0x36))
1437 result |= 0x02;
1438 return result;
1439}
1440
1441static u8 _rtl92c_phy_path_b_iqk(struct ieee80211_hw *hw)
1442{
1443 u32 reg_eac, reg_eb4, reg_ebc, reg_ec4, reg_ecc;
1444 u8 result = 0x00;
1445
1446 rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000002);
1447 rtl_set_bbreg(hw, 0xe60, MASKDWORD, 0x00000000);
1448 mdelay(IQK_DELAY_TIME);
1449 reg_eac = rtl_get_bbreg(hw, 0xeac, MASKDWORD);
1450 reg_eb4 = rtl_get_bbreg(hw, 0xeb4, MASKDWORD);
1451 reg_ebc = rtl_get_bbreg(hw, 0xebc, MASKDWORD);
1452 reg_ec4 = rtl_get_bbreg(hw, 0xec4, MASKDWORD);
1453 reg_ecc = rtl_get_bbreg(hw, 0xecc, MASKDWORD);
1454 if (!(reg_eac & BIT(31)) &&
1455 (((reg_eb4 & 0x03FF0000) >> 16) != 0x142) &&
1456 (((reg_ebc & 0x03FF0000) >> 16) != 0x42))
1457 result |= 0x01;
1458 else
1459 return result;
1460
1461 if (!(reg_eac & BIT(30)) &&
1462 (((reg_ec4 & 0x03FF0000) >> 16) != 0x132) &&
1463 (((reg_ecc & 0x03FF0000) >> 16) != 0x36))
1464 result |= 0x02;
1465 return result;
1466}
1467
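/*
 * Program the path A TX (and, unless btxonly, RX) IQ imbalance registers
 * from the chosen calibration candidate.  The 10-bit results are
 * sign-extended before being scaled against the current register value.
 */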
1468static void _rtl92c_phy_path_a_fill_iqk_matrix(struct ieee80211_hw *hw,
1469 bool b_iqk_ok, long result[][8],
1470 u8 final_candidate, bool btxonly)
1471{
1472 u32 oldval_0, x, tx0_a, reg;
1473 long y, tx0_c;
1474
1475 if (final_candidate == 0xFF)
1476 return;
1477 else if (b_iqk_ok) {
1478 oldval_0 = (rtl_get_bbreg(hw, ROFDM0_XATXIQIMBALANCE,
1479 MASKDWORD) >> 22) & 0x3FF;
1480 x = result[final_candidate][0];
1481 if ((x & 0x00000200) != 0)
1482 x = x | 0xFFFFFC00;
1483 tx0_a = (x * oldval_0) >> 8;
1484 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x3FF, tx0_a);
1485 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(31),
1486 ((x * oldval_0 >> 7) & 0x1));
1487 y = result[final_candidate][1];
1488 if ((y & 0x00000200) != 0)
1489 y = y | 0xFFFFFC00;
1490 tx0_c = (y * oldval_0) >> 8;
1491 rtl_set_bbreg(hw, ROFDM0_XCTXAFE, 0xF0000000,
1492 ((tx0_c & 0x3C0) >> 6));
1493 rtl_set_bbreg(hw, ROFDM0_XATXIQIMBALANCE, 0x003F0000,
1494 (tx0_c & 0x3F));
1495 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(29),
1496 ((y * oldval_0 >> 7) & 0x1));
1497 if (btxonly)
1498 return;
1499 reg = result[final_candidate][2];
1500 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0x3FF, reg);
1501 reg = result[final_candidate][3] & 0x3F;
1502 rtl_set_bbreg(hw, ROFDM0_XARXIQIMBALANCE, 0xFC00, reg);
1503 reg = (result[final_candidate][3] >> 6) & 0xF;
1504 rtl_set_bbreg(hw, 0xca0, 0xF0000000, reg);
1505 }
1506}
1507
1508static void _rtl92c_phy_path_b_fill_iqk_matrix(struct ieee80211_hw *hw,
1509 bool b_iqk_ok, long result[][8],
1510 u8 final_candidate, bool btxonly)
1511{
1512 u32 oldval_1, x, tx1_a, reg;
1513 long y, tx1_c;
1514
1515 if (final_candidate == 0xFF)
1516 return;
1517 else if (b_iqk_ok) {
1518 oldval_1 = (rtl_get_bbreg(hw, ROFDM0_XBTXIQIMBALANCE,
1519 MASKDWORD) >> 22) & 0x3FF;
1520 x = result[final_candidate][4];
1521 if ((x & 0x00000200) != 0)
1522 x = x | 0xFFFFFC00;
1523 tx1_a = (x * oldval_1) >> 8;
1524 rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x3FF, tx1_a);
1525 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(27),
1526 ((x * oldval_1 >> 7) & 0x1));
1527 y = result[final_candidate][5];
1528 if ((y & 0x00000200) != 0)
1529 y = y | 0xFFFFFC00;
1530 tx1_c = (y * oldval_1) >> 8;
1531 rtl_set_bbreg(hw, ROFDM0_XDTXAFE, 0xF0000000,
1532 ((tx1_c & 0x3C0) >> 6));
1533 rtl_set_bbreg(hw, ROFDM0_XBTXIQIMBALANCE, 0x003F0000,
1534 (tx1_c & 0x3F));
1535 rtl_set_bbreg(hw, ROFDM0_ECCATHRESHOLD, BIT(25),
1536 ((y * oldval_1 >> 7) & 0x1));
1537 if (btxonly)
1538 return;
1539 reg = result[final_candidate][6];
1540 rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0x3FF, reg);
1541 reg = result[final_candidate][7] & 0x3F;
1542 rtl_set_bbreg(hw, ROFDM0_XBRXIQIMBALANCE, 0xFC00, reg);
1543 reg = (result[final_candidate][7] >> 6) & 0xF;
1544 rtl_set_bbreg(hw, ROFDM0_AGCRSSITABLE, 0x0000F000, reg);
1545 }
1546}
1547
1548static void _rtl92c_phy_save_adda_registers(struct ieee80211_hw *hw,
1549 u32 *addareg, u32 *addabackup,
1550 u32 registernum)
1551{
1552 u32 i;
1553
1554 for (i = 0; i < registernum; i++)
1555 addabackup[i] = rtl_get_bbreg(hw, addareg[i], MASKDWORD);
1556}
1557
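/*
 * The first IQK_MAC_REG_NUM - 1 MAC registers are saved and restored as
 * bytes; the last entry (0x040) is handled as a dword, hence the extra
 * access outside the loop here and in the reload helper below.
 */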
1558static void _rtl92c_phy_save_mac_registers(struct ieee80211_hw *hw,
1559 u32 *macreg, u32 *macbackup)
1560{
1561 struct rtl_priv *rtlpriv = rtl_priv(hw);
1562 u32 i;
1563
1564 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1565 macbackup[i] = rtl_read_byte(rtlpriv, macreg[i]);
1566 macbackup[i] = rtl_read_dword(rtlpriv, macreg[i]);
1567}
1568
1569static void _rtl92c_phy_reload_adda_registers(struct ieee80211_hw *hw,
1570 u32 *addareg, u32 *addabackup,
 1571					      u32 registernum)
1572{
1573 u32 i;
1574
 1575	for (i = 0; i < registernum; i++)
1576 rtl_set_bbreg(hw, addareg[i], MASKDWORD, addabackup[i]);
1577}
1578
1579static void _rtl92c_phy_reload_mac_registers(struct ieee80211_hw *hw,
1580 u32 *macreg, u32 *macbackup)
1581{
1582 struct rtl_priv *rtlpriv = rtl_priv(hw);
1583 u32 i;
1584
1585 for (i = 0; i < (IQK_MAC_REG_NUM - 1); i++)
1586 rtl_write_byte(rtlpriv, macreg[i], (u8) macbackup[i]);
1587 rtl_write_dword(rtlpriv, macreg[i], macbackup[i]);
1588}
1589
1590static void _rtl92c_phy_path_adda_on(struct ieee80211_hw *hw,
1591 u32 *addareg, bool is_patha_on, bool is2t)
1592{
1593 u32 pathOn;
1594 u32 i;
1595
1596 pathOn = is_patha_on ? 0x04db25a4 : 0x0b1b25a4;
 1597	if (!is2t) {
1598 pathOn = 0x0bdb25a0;
1599 rtl_set_bbreg(hw, addareg[0], MASKDWORD, 0x0b1b25a0);
1600 } else {
1601 rtl_set_bbreg(hw, addareg[0], MASKDWORD, pathOn);
1602 }
1603
1604 for (i = 1; i < IQK_ADDA_REG_NUM; i++)
1605 rtl_set_bbreg(hw, addareg[i], MASKDWORD, pathOn);
1606}
1607
1608static void _rtl92c_phy_mac_setting_calibration(struct ieee80211_hw *hw,
1609 u32 *macreg, u32 *macbackup)
1610{
1611 struct rtl_priv *rtlpriv = rtl_priv(hw);
1612 u32 i;
1613
1614 rtl_write_byte(rtlpriv, macreg[0], 0x3F);
1615
1616 for (i = 1; i < (IQK_MAC_REG_NUM - 1); i++)
1617 rtl_write_byte(rtlpriv, macreg[i],
1618 (u8) (macbackup[i] & (~BIT(3))));
1619 rtl_write_byte(rtlpriv, macreg[i], (u8) (macbackup[i] & (~BIT(5))));
1620}
1621
1622static void _rtl92c_phy_path_a_standby(struct ieee80211_hw *hw)
1623{
1624 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x0);
1625 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1626 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1627}
1628
1629static void _rtl92c_phy_pi_mode_switch(struct ieee80211_hw *hw, bool pi_mode)
1630{
1631 u32 mode;
1632
1633 mode = pi_mode ? 0x01000100 : 0x01000000;
1634 rtl_set_bbreg(hw, 0x820, MASKDWORD, mode);
1635 rtl_set_bbreg(hw, 0x828, MASKDWORD, mode);
1636}
1637
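/*
 * Compare two IQK runs (c1 vs c2) entry by entry; entries differing by more
 * than MAX_TOLERANCE set a bit in the bitmap.  Returns true only when the
 * runs agree on every entry; on a partial match the usable half is copied
 * into result[3] and false is returned.
 */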
1638static bool _rtl92c_phy_simularity_compare(struct ieee80211_hw *hw,
1639 long result[][8], u8 c1, u8 c2)
1640{
1641 u32 i, j, diff, simularity_bitmap, bound;
1642 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
1643
1644 u8 final_candidate[2] = { 0xFF, 0xFF };
1645 bool bresult = true, is2t = IS_92C_SERIAL(rtlhal->version);
1646
1647 if (is2t)
1648 bound = 8;
1649 else
1650 bound = 4;
1651
1652 simularity_bitmap = 0;
1653
1654 for (i = 0; i < bound; i++) {
1655 diff = (result[c1][i] > result[c2][i]) ?
1656 (result[c1][i] - result[c2][i]) :
1657 (result[c2][i] - result[c1][i]);
1658
1659 if (diff > MAX_TOLERANCE) {
1660 if ((i == 2 || i == 6) && !simularity_bitmap) {
1661 if (result[c1][i] + result[c1][i + 1] == 0)
1662 final_candidate[(i / 4)] = c2;
1663 else if (result[c2][i] + result[c2][i + 1] == 0)
1664 final_candidate[(i / 4)] = c1;
1665 else
1666 simularity_bitmap = simularity_bitmap |
1667 (1 << i);
1668 } else
1669 simularity_bitmap =
1670 simularity_bitmap | (1 << i);
1671 }
1672 }
1673
1674 if (simularity_bitmap == 0) {
1675 for (i = 0; i < (bound / 4); i++) {
1676 if (final_candidate[i] != 0xFF) {
1677 for (j = i * 4; j < (i + 1) * 4 - 2; j++)
1678 result[3][j] =
1679 result[final_candidate[i]][j];
1680 bresult = false;
1681 }
1682 }
1683 return bresult;
1684 } else if (!(simularity_bitmap & 0x0F)) {
1685 for (i = 0; i < 4; i++)
1686 result[3][i] = result[c1][i];
1687 return false;
1688 } else if (!(simularity_bitmap & 0xF0) && is2t) {
1689 for (i = 4; i < 8; i++)
1690 result[3][i] = result[c1][i];
1691 return false;
1692 } else {
1693 return false;
1694 }
1695
1696}
1697
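/*
 * One IQ-calibration pass (pass number t).  The first pass saves the ADDA,
 * MAC and BB registers that the calibration clobbers; every pass turns the
 * ADDA path on, runs the path A (and, on 2T parts, path B) IQK with up to
 * two retries and stores the raw readbacks in result[t][0..7].  Passes
 * after the first restore the saved registers when done.
 */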
1698static void _rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw,
1699 long result[][8], u8 t, bool is2t)
1700{
1701 struct rtl_priv *rtlpriv = rtl_priv(hw);
1702 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1703 u32 i;
1704 u8 patha_ok, pathb_ok;
1705 u32 adda_reg[IQK_ADDA_REG_NUM] = {
1706 0x85c, 0xe6c, 0xe70, 0xe74,
1707 0xe78, 0xe7c, 0xe80, 0xe84,
1708 0xe88, 0xe8c, 0xed0, 0xed4,
1709 0xed8, 0xedc, 0xee0, 0xeec
1710 };
1711
1712 u32 iqk_mac_reg[IQK_MAC_REG_NUM] = {
1713 0x522, 0x550, 0x551, 0x040
1714 };
1715
1716 const u32 retrycount = 2;
1717
1718 u32 bbvalue;
1719
1720 if (t == 0) {
1721 bbvalue = rtl_get_bbreg(hw, 0x800, MASKDWORD);
1722
1723 _rtl92c_phy_save_adda_registers(hw, adda_reg,
1724 rtlphy->adda_backup, 16);
1725 _rtl92c_phy_save_mac_registers(hw, iqk_mac_reg,
1726 rtlphy->iqk_mac_backup);
1727 }
1728 _rtl92c_phy_path_adda_on(hw, adda_reg, true, is2t);
1729 if (t == 0) {
1730 rtlphy->b_rfpi_enable = (u8) rtl_get_bbreg(hw,
1731 RFPGA0_XA_HSSIPARAMETER1,
1732 BIT(8));
1733 }
1734 if (!rtlphy->b_rfpi_enable)
1735 _rtl92c_phy_pi_mode_switch(hw, true);
1736 if (t == 0) {
1737 rtlphy->reg_c04 = rtl_get_bbreg(hw, 0xc04, MASKDWORD);
1738 rtlphy->reg_c08 = rtl_get_bbreg(hw, 0xc08, MASKDWORD);
1739 rtlphy->reg_874 = rtl_get_bbreg(hw, 0x874, MASKDWORD);
1740 }
1741 rtl_set_bbreg(hw, 0xc04, MASKDWORD, 0x03a05600);
1742 rtl_set_bbreg(hw, 0xc08, MASKDWORD, 0x000800e4);
1743 rtl_set_bbreg(hw, 0x874, MASKDWORD, 0x22204000);
1744 if (is2t) {
1745 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00010000);
1746 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00010000);
1747 }
1748 _rtl92c_phy_mac_setting_calibration(hw, iqk_mac_reg,
1749 rtlphy->iqk_mac_backup);
1750 rtl_set_bbreg(hw, 0xb68, MASKDWORD, 0x00080000);
1751 if (is2t)
1752 rtl_set_bbreg(hw, 0xb6c, MASKDWORD, 0x00080000);
1753 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80800000);
1754 rtl_set_bbreg(hw, 0xe40, MASKDWORD, 0x01007c00);
1755 rtl_set_bbreg(hw, 0xe44, MASKDWORD, 0x01004800);
1756 for (i = 0; i < retrycount; i++) {
1757 patha_ok = _rtl92c_phy_path_a_iqk(hw, is2t);
1758 if (patha_ok == 0x03) {
1759 result[t][0] = (rtl_get_bbreg(hw, 0xe94, MASKDWORD) &
1760 0x3FF0000) >> 16;
1761 result[t][1] = (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) &
1762 0x3FF0000) >> 16;
1763 result[t][2] = (rtl_get_bbreg(hw, 0xea4, MASKDWORD) &
1764 0x3FF0000) >> 16;
1765 result[t][3] = (rtl_get_bbreg(hw, 0xeac, MASKDWORD) &
1766 0x3FF0000) >> 16;
1767 break;
 1768		} else if (i == (retrycount - 1) && patha_ok == 0x01) {
 1769			result[t][0] = (rtl_get_bbreg(hw, 0xe94,
 1770						      MASKDWORD) & 0x3FF0000) >>
 1771			    16;
 1772			result[t][1] =
 1773			    (rtl_get_bbreg(hw, 0xe9c, MASKDWORD) & 0x3FF0000) >> 16;
 1774		}
 1775	}
1776
1777 if (is2t) {
1778 _rtl92c_phy_path_a_standby(hw);
1779 _rtl92c_phy_path_adda_on(hw, adda_reg, false, is2t);
1780 for (i = 0; i < retrycount; i++) {
1781 pathb_ok = _rtl92c_phy_path_b_iqk(hw);
1782 if (pathb_ok == 0x03) {
1783 result[t][4] = (rtl_get_bbreg(hw,
1784 0xeb4,
1785 MASKDWORD) &
1786 0x3FF0000) >> 16;
1787 result[t][5] =
1788 (rtl_get_bbreg(hw, 0xebc, MASKDWORD) &
1789 0x3FF0000) >> 16;
1790 result[t][6] =
1791 (rtl_get_bbreg(hw, 0xec4, MASKDWORD) &
1792 0x3FF0000) >> 16;
1793 result[t][7] =
1794 (rtl_get_bbreg(hw, 0xecc, MASKDWORD) &
1795 0x3FF0000) >> 16;
1796 break;
1797 } else if (i == (retrycount - 1) && pathb_ok == 0x01) {
1798 result[t][4] = (rtl_get_bbreg(hw,
1799 0xeb4,
1800 MASKDWORD) &
1801 0x3FF0000) >> 16;
 1802				result[t][5] = (rtl_get_bbreg(hw, 0xebc,
 1803					MASKDWORD) & 0x3FF0000) >> 16;
 1804			}
1805 }
1806 }
1807 rtl_set_bbreg(hw, 0xc04, MASKDWORD, rtlphy->reg_c04);
1808 rtl_set_bbreg(hw, 0x874, MASKDWORD, rtlphy->reg_874);
1809 rtl_set_bbreg(hw, 0xc08, MASKDWORD, rtlphy->reg_c08);
1810 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0);
1811 rtl_set_bbreg(hw, 0x840, MASKDWORD, 0x00032ed3);
1812 if (is2t)
1813 rtl_set_bbreg(hw, 0x844, MASKDWORD, 0x00032ed3);
1814 if (t != 0) {
1815 if (!rtlphy->b_rfpi_enable)
1816 _rtl92c_phy_pi_mode_switch(hw, false);
1817 _rtl92c_phy_reload_adda_registers(hw, adda_reg,
1818 rtlphy->adda_backup, 16);
1819 _rtl92c_phy_reload_mac_registers(hw, iqk_mac_reg,
1820 rtlphy->iqk_mac_backup);
1821 }
1822}
1823
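/*
 * LC calibration: temporarily pause TX (or park the RF mode bits if a
 * transmission is in progress), start the calibration by setting bit
 * 0x08000 of RF register 0x18, wait 100 ms for it to complete, then
 * restore the previous TX/RF state.
 */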
1824static void _rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw, bool is2t)
1825{
1826 u8 tmpreg;
1827 u32 rf_a_mode = 0, rf_b_mode = 0, lc_cal;
1828 struct rtl_priv *rtlpriv = rtl_priv(hw);
1829
1830 tmpreg = rtl_read_byte(rtlpriv, 0xd03);
1831
1832 if ((tmpreg & 0x70) != 0)
1833 rtl_write_byte(rtlpriv, 0xd03, tmpreg & 0x8F);
1834 else
1835 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
1836
1837 if ((tmpreg & 0x70) != 0) {
1838 rf_a_mode = rtl_get_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS);
1839
1840 if (is2t)
1841 rf_b_mode = rtl_get_rfreg(hw, RF90_PATH_B, 0x00,
1842 MASK12BITS);
1843
1844 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS,
1845 (rf_a_mode & 0x8FFFF) | 0x10000);
1846
1847 if (is2t)
1848 rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
1849 (rf_b_mode & 0x8FFFF) | 0x10000);
1850 }
1851 lc_cal = rtl_get_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS);
1852
1853 rtl_set_rfreg(hw, RF90_PATH_A, 0x18, MASK12BITS, lc_cal | 0x08000);
1854
1855 mdelay(100);
1856
1857 if ((tmpreg & 0x70) != 0) {
1858 rtl_write_byte(rtlpriv, 0xd03, tmpreg);
1859 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASK12BITS, rf_a_mode);
1860
1861 if (is2t)
1862 rtl_set_rfreg(hw, RF90_PATH_B, 0x00, MASK12BITS,
1863 rf_b_mode);
1864 } else {
1865 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
1866 }
1867}
1868
1869static void _rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw,
1870 char delta, bool is2t)
1871{
1872 /* This routine is deliberately dummied out for later fixes */
1873#if 0
1874 struct rtl_priv *rtlpriv = rtl_priv(hw);
1875 struct rtl_phy *rtlphy = &(rtlpriv->phy);
1876 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
1877
1878 u32 reg_d[PATH_NUM];
1879 u32 tmpreg, index, offset, path, i, pathbound = PATH_NUM, apkbound;
1880
1881 u32 bb_backup[APK_BB_REG_NUM];
1882 u32 bb_reg[APK_BB_REG_NUM] = {
1883 0x904, 0xc04, 0x800, 0xc08, 0x874
1884 };
1885 u32 bb_ap_mode[APK_BB_REG_NUM] = {
1886 0x00000020, 0x00a05430, 0x02040000,
1887 0x000800e4, 0x00204000
1888 };
1889 u32 bb_normal_ap_mode[APK_BB_REG_NUM] = {
1890 0x00000020, 0x00a05430, 0x02040000,
1891 0x000800e4, 0x22204000
1892 };
1893
1894 u32 afe_backup[APK_AFE_REG_NUM];
1895 u32 afe_reg[APK_AFE_REG_NUM] = {
1896 0x85c, 0xe6c, 0xe70, 0xe74, 0xe78,
1897 0xe7c, 0xe80, 0xe84, 0xe88, 0xe8c,
1898 0xed0, 0xed4, 0xed8, 0xedc, 0xee0,
1899 0xeec
1900 };
1901
1902 u32 mac_backup[IQK_MAC_REG_NUM];
1903 u32 mac_reg[IQK_MAC_REG_NUM] = {
1904 0x522, 0x550, 0x551, 0x040
1905 };
1906
1907 u32 apk_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1908 {0x0852c, 0x1852c, 0x5852c, 0x1852c, 0x5852c},
1909 {0x2852e, 0x0852e, 0x3852e, 0x0852e, 0x0852e}
1910 };
1911
1912 u32 apk_normal_rf_init_value[PATH_NUM][APK_BB_REG_NUM] = {
1913 {0x0852c, 0x0a52c, 0x3a52c, 0x5a52c, 0x5a52c},
1914 {0x0852c, 0x0a52c, 0x5a52c, 0x5a52c, 0x5a52c}
1915 };
1916
1917 u32 apk_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1918 {0x52019, 0x52014, 0x52013, 0x5200f, 0x5208d},
1919 {0x5201a, 0x52019, 0x52016, 0x52033, 0x52050}
1920 };
1921
1922 u32 apk_normal_rf_value_0[PATH_NUM][APK_BB_REG_NUM] = {
1923 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a},
1924 {0x52019, 0x52017, 0x52010, 0x5200d, 0x5206a}
1925 };
1926
1927 u32 afe_on_off[PATH_NUM] = {
1928 0x04db25a4, 0x0b1b25a4
1929 };
1930
1931 u32 apk_offset[PATH_NUM] = { 0xb68, 0xb6c };
1932
1933 u32 apk_normal_offset[PATH_NUM] = { 0xb28, 0xb98 };
1934
1935 u32 apk_value[PATH_NUM] = { 0x92fc0000, 0x12fc0000 };
1936
1937 u32 apk_normal_value[PATH_NUM] = { 0x92680000, 0x12680000 };
1938
1939 const char apk_delta_mapping[APK_BB_REG_NUM][13] = {
1940 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1941 {-4, -3, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1942 {-6, -4, -2, -2, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1943 {-1, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4, 5, 6},
1944 {-11, -9, -7, -5, -3, -1, 0, 0, 0, 0, 0, 0, 0}
1945 };
1946
1947 const u32 apk_normal_setting_value_1[13] = {
1948 0x01017018, 0xf7ed8f84, 0x1b1a1816, 0x2522201e, 0x322e2b28,
1949 0x433f3a36, 0x5b544e49, 0x7b726a62, 0xa69a8f84, 0xdfcfc0b3,
1950 0x12680000, 0x00880000, 0x00880000
1951 };
1952
1953 const u32 apk_normal_setting_value_2[16] = {
1954 0x01c7021d, 0x01670183, 0x01000123, 0x00bf00e2, 0x008d00a3,
1955 0x0068007b, 0x004d0059, 0x003a0042, 0x002b0031, 0x001f0025,
1956 0x0017001b, 0x00110014, 0x000c000f, 0x0009000b, 0x00070008,
1957 0x00050006
1958 };
1959
 1960	u32 apk_result[PATH_NUM][APK_BB_REG_NUM];
1961
1962 long bb_offset, delta_v, delta_offset;
1963
1964 if (!is2t)
1965 pathbound = 1;
1966
1967 for (index = 0; index < PATH_NUM; index++) {
1968 apk_offset[index] = apk_normal_offset[index];
1969 apk_value[index] = apk_normal_value[index];
1970 afe_on_off[index] = 0x6fdb25a4;
1971 }
1972
1973 for (index = 0; index < APK_BB_REG_NUM; index++) {
1974 for (path = 0; path < pathbound; path++) {
1975 apk_rf_init_value[path][index] =
1976 apk_normal_rf_init_value[path][index];
1977 apk_rf_value_0[path][index] =
1978 apk_normal_rf_value_0[path][index];
1979 }
1980 bb_ap_mode[index] = bb_normal_ap_mode[index];
1981
1982 apkbound = 6;
1983 }
1984
1985 for (index = 0; index < APK_BB_REG_NUM; index++) {
1986 if (index == 0)
1987 continue;
1988 bb_backup[index] = rtl_get_bbreg(hw, bb_reg[index], MASKDWORD);
1989 }
1990
1991 _rtl92c_phy_save_mac_registers(hw, mac_reg, mac_backup);
1992
1993 _rtl92c_phy_save_adda_registers(hw, afe_reg, afe_backup, 16);
1994
1995 for (path = 0; path < pathbound; path++) {
1996 if (path == RF90_PATH_A) {
1997 offset = 0xb00;
1998 for (index = 0; index < 11; index++) {
1999 rtl_set_bbreg(hw, offset, MASKDWORD,
2000 apk_normal_setting_value_1
2001 [index]);
2002
2003 offset += 0x04;
2004 }
2005
2006 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
2007
2008 offset = 0xb68;
2009 for (; index < 13; index++) {
2010 rtl_set_bbreg(hw, offset, MASKDWORD,
2011 apk_normal_setting_value_1
2012 [index]);
2013
2014 offset += 0x04;
2015 }
2016
2017 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
2018
2019 offset = 0xb00;
2020 for (index = 0; index < 16; index++) {
2021 rtl_set_bbreg(hw, offset, MASKDWORD,
2022 apk_normal_setting_value_2
2023 [index]);
2024
2025 offset += 0x04;
2026 }
2027 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2028 } else if (path == RF90_PATH_B) {
2029 offset = 0xb70;
2030 for (index = 0; index < 10; index++) {
2031 rtl_set_bbreg(hw, offset, MASKDWORD,
2032 apk_normal_setting_value_1
2033 [index]);
2034
2035 offset += 0x04;
2036 }
2037 rtl_set_bbreg(hw, 0xb28, MASKDWORD, 0x12680000);
2038 rtl_set_bbreg(hw, 0xb98, MASKDWORD, 0x12680000);
2039
2040 offset = 0xb68;
2041 index = 11;
2042 for (; index < 13; index++) {
2043 rtl_set_bbreg(hw, offset, MASKDWORD,
2044 apk_normal_setting_value_1
2045 [index]);
2046
2047 offset += 0x04;
2048 }
2049
2050 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x40000000);
2051
2052 offset = 0xb60;
2053 for (index = 0; index < 16; index++) {
2054 rtl_set_bbreg(hw, offset, MASKDWORD,
2055 apk_normal_setting_value_2
2056 [index]);
2057
2058 offset += 0x04;
2059 }
2060 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2061 }
2062
2063 reg_d[path] = rtl_get_rfreg(hw, (enum radio_path)path,
2064 0xd, MASKDWORD);
2065
2066 for (index = 0; index < APK_AFE_REG_NUM; index++)
2067 rtl_set_bbreg(hw, afe_reg[index], MASKDWORD,
2068 afe_on_off[path]);
2069
2070 if (path == RF90_PATH_A) {
2071 for (index = 0; index < APK_BB_REG_NUM; index++) {
2072 if (index == 0)
2073 continue;
2074 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD,
2075 bb_ap_mode[index]);
2076 }
2077 }
2078
2079 _rtl92c_phy_mac_setting_calibration(hw, mac_reg, mac_backup);
2080
2081 if (path == 0) {
2082 rtl_set_rfreg(hw, RF90_PATH_B, 0x0, MASKDWORD, 0x10000);
2083 } else {
2084 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, MASKDWORD,
2085 0x10000);
2086 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
2087 0x1000f);
2088 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
2089 0x20103);
2090 }
2091
2092 delta_offset = ((delta + 14) / 2);
2093 if (delta_offset < 0)
2094 delta_offset = 0;
2095 else if (delta_offset > 12)
2096 delta_offset = 12;
2097
2098 for (index = 0; index < APK_BB_REG_NUM; index++) {
2099 if (index != 1)
2100 continue;
2101
2102 tmpreg = apk_rf_init_value[path][index];
2103
2104 if (!rtlefuse->b_apk_thermalmeterignore) {
2105 bb_offset = (tmpreg & 0xF0000) >> 16;
2106
2107 if (!(tmpreg & BIT(15)))
2108 bb_offset = -bb_offset;
2109
2110 delta_v =
2111 apk_delta_mapping[index][delta_offset];
2112
2113 bb_offset += delta_v;
2114
2115 if (bb_offset < 0) {
2116 tmpreg = tmpreg & (~BIT(15));
2117 bb_offset = -bb_offset;
2118 } else {
2119 tmpreg = tmpreg | BIT(15);
2120 }
2121
2122 tmpreg =
2123 (tmpreg & 0xFFF0FFFF) | (bb_offset << 16);
2124 }
2125
2126 rtl_set_rfreg(hw, (enum radio_path)path, 0xc,
2127 MASKDWORD, 0x8992e);
2128 rtl_set_rfreg(hw, (enum radio_path)path, 0x0,
2129 MASKDWORD, apk_rf_value_0[path][index]);
2130 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
2131 MASKDWORD, tmpreg);
2132
2133 i = 0;
2134 do {
2135 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x80000000);
2136 rtl_set_bbreg(hw, apk_offset[path],
2137 MASKDWORD, apk_value[0]);
2138 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2139 ("PHY_APCalibrate() offset 0x%x "
2140 "value 0x%x\n",
2141 apk_offset[path],
2142 rtl_get_bbreg(hw, apk_offset[path],
2143 MASKDWORD)));
2144
2145 mdelay(3);
2146
2147 rtl_set_bbreg(hw, apk_offset[path],
2148 MASKDWORD, apk_value[1]);
2149 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2150 ("PHY_APCalibrate() offset 0x%x "
2151 "value 0x%x\n",
2152 apk_offset[path],
2153 rtl_get_bbreg(hw, apk_offset[path],
2154 MASKDWORD)));
2155
2156 mdelay(20);
2157
2158 rtl_set_bbreg(hw, 0xe28, MASKDWORD, 0x00000000);
2159
2160 if (path == RF90_PATH_A)
2161 tmpreg = rtl_get_bbreg(hw, 0xbd8,
2162 0x03E00000);
2163 else
2164 tmpreg = rtl_get_bbreg(hw, 0xbd8,
2165 0xF8000000);
2166
2167 RTPRINT(rtlpriv, FINIT, INIT_IQK,
2168 ("PHY_APCalibrate() offset "
2169 "0xbd8[25:21] %x\n", tmpreg));
2170
2171 i++;
2172
2173 } while (tmpreg > apkbound && i < 4);
2174
2175 apk_result[path][index] = tmpreg;
2176 }
2177 }
2178
2179 _rtl92c_phy_reload_mac_registers(hw, mac_reg, mac_backup);
2180
2181 for (index = 0; index < APK_BB_REG_NUM; index++) {
2182 if (index == 0)
2183 continue;
2184 rtl_set_bbreg(hw, bb_reg[index], MASKDWORD, bb_backup[index]);
2185 }
2186
2187 _rtl92c_phy_reload_adda_registers(hw, afe_reg, afe_backup, 16);
2188
2189 for (path = 0; path < pathbound; path++) {
2190 rtl_set_rfreg(hw, (enum radio_path)path, 0xd,
2191 MASKDWORD, reg_d[path]);
2192
2193 if (path == RF90_PATH_B) {
2194 rtl_set_rfreg(hw, RF90_PATH_A, 0x10, MASKDWORD,
2195 0x1000f);
2196 rtl_set_rfreg(hw, RF90_PATH_A, 0x11, MASKDWORD,
2197 0x20101);
2198 }
2199
2200 if (apk_result[path][1] > 6)
2201 apk_result[path][1] = 6;
2202 }
2203
2204 for (path = 0; path < pathbound; path++) {
2205 rtl_set_rfreg(hw, (enum radio_path)path, 0x3, MASKDWORD,
2206 ((apk_result[path][1] << 15) |
2207 (apk_result[path][1] << 10) |
2208 (apk_result[path][1] << 5) |
2209 apk_result[path][1]));
2210
2211 if (path == RF90_PATH_A)
2212 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
2213 ((apk_result[path][1] << 15) |
2214 (apk_result[path][1] << 10) |
2215 (0x00 << 5) | 0x05));
2216 else
2217 rtl_set_rfreg(hw, (enum radio_path)path, 0x4, MASKDWORD,
2218 ((apk_result[path][1] << 15) |
2219 (apk_result[path][1] << 10) |
2220 (0x02 << 5) | 0x05));
2221
2222 rtl_set_rfreg(hw, (enum radio_path)path, 0xe, MASKDWORD,
2223 ((0x08 << 15) | (0x08 << 10) | (0x08 << 5) |
2224 0x08));
2225
2226 }
2227
2228 rtlphy->b_apk_done = true;
2229#endif
2230}
2231
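/*
 * Select the main or auxiliary antenna.  2T parts steer bits 5/6 of the
 * path B interface register, 1T parts bits 8/9 of the path A one; when the
 * HAL is stopped the LED/RF parameter bits are programmed first.
 */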
2232static void _rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw,
2233 bool bmain, bool is2t)
2234{
2235 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2236
2237 if (is_hal_stop(rtlhal)) {
2238 rtl_set_bbreg(hw, REG_LEDCFG0, BIT(23), 0x01);
2239 rtl_set_bbreg(hw, rFPGA0_XAB_RFPARAMETER, BIT(13), 0x01);
2240 }
2241 if (is2t) {
2242 if (bmain)
2243 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
2244 BIT(5) | BIT(6), 0x1);
2245 else
2246 rtl_set_bbreg(hw, RFPGA0_XB_RFINTERFACEOE,
2247 BIT(5) | BIT(6), 0x2);
2248 } else {
2249 if (bmain)
2250 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x2);
2251 else
2252 rtl_set_bbreg(hw, RFPGA0_XA_RFINTERFACEOE, 0x300, 0x1);
2253
2254 }
2255}
2256
2257#undef IQK_ADDA_REG_NUM
2258#undef IQK_DELAY_TIME
2259
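/*
 * Full IQ-calibration entry point.  With b_recovery the previously saved BB
 * registers are simply reloaded; otherwise up to three calibration passes
 * are run, a candidate whose results agree across passes is chosen, the IQK
 * matrices for path A (and path B on 92C parts) are programmed and the BB
 * registers are saved for later recovery.
 */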
2260void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery)
2261{
2262 struct rtl_priv *rtlpriv = rtl_priv(hw);
2263 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2264 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2265
2266 long result[4][8];
2267 u8 i, final_candidate;
2268 bool b_patha_ok, b_pathb_ok;
2269 long reg_e94, reg_e9c, reg_ea4, reg_eac, reg_eb4, reg_ebc, reg_ec4,
2270 reg_ecc, reg_tmp = 0;
2271 bool is12simular, is13simular, is23simular;
2272 bool b_start_conttx = false, b_singletone = false;
2273 u32 iqk_bb_reg[10] = {
2274 ROFDM0_XARXIQIMBALANCE,
2275 ROFDM0_XBRXIQIMBALANCE,
2276 ROFDM0_ECCATHRESHOLD,
2277 ROFDM0_AGCRSSITABLE,
2278 ROFDM0_XATXIQIMBALANCE,
2279 ROFDM0_XBTXIQIMBALANCE,
2280 ROFDM0_XCTXIQIMBALANCE,
2281 ROFDM0_XCTXAFE,
2282 ROFDM0_XDTXAFE,
2283 ROFDM0_RXIQEXTANTA
2284 };
2285
2286 if (b_recovery) {
2287 _rtl92c_phy_reload_adda_registers(hw,
2288 iqk_bb_reg,
2289 rtlphy->iqk_bb_backup, 10);
2290 return;
2291 }
2292 if (b_start_conttx || b_singletone)
2293 return;
2294 for (i = 0; i < 8; i++) {
2295 result[0][i] = 0;
2296 result[1][i] = 0;
2297 result[2][i] = 0;
2298 result[3][i] = 0;
2299 }
2300 final_candidate = 0xff;
2301 b_patha_ok = false;
2302 b_pathb_ok = false;
2303 is12simular = false;
2304 is23simular = false;
2305 is13simular = false;
2306 for (i = 0; i < 3; i++) {
2307 if (IS_92C_SERIAL(rtlhal->version))
2308 _rtl92c_phy_iq_calibrate(hw, result, i, true);
2309 else
2310 _rtl92c_phy_iq_calibrate(hw, result, i, false);
2311 if (i == 1) {
2312 is12simular = _rtl92c_phy_simularity_compare(hw,
2313 result, 0,
2314 1);
2315 if (is12simular) {
2316 final_candidate = 0;
2317 break;
2318 }
2319 }
2320 if (i == 2) {
2321 is13simular = _rtl92c_phy_simularity_compare(hw,
2322 result, 0,
2323 2);
2324 if (is13simular) {
2325 final_candidate = 0;
2326 break;
2327 }
2328 is23simular = _rtl92c_phy_simularity_compare(hw,
2329 result, 1,
2330 2);
2331 if (is23simular)
2332 final_candidate = 1;
2333 else {
2334 for (i = 0; i < 8; i++)
2335 reg_tmp += result[3][i];
2336
2337 if (reg_tmp != 0)
2338 final_candidate = 3;
2339 else
2340 final_candidate = 0xFF;
2341 }
2342 }
2343 }
2344 for (i = 0; i < 4; i++) {
2345 reg_e94 = result[i][0];
2346 reg_e9c = result[i][1];
2347 reg_ea4 = result[i][2];
2348 reg_eac = result[i][3];
2349 reg_eb4 = result[i][4];
2350 reg_ebc = result[i][5];
2351 reg_ec4 = result[i][6];
2352 reg_ecc = result[i][7];
2353 }
2354 if (final_candidate != 0xff) {
2355 rtlphy->reg_e94 = reg_e94 = result[final_candidate][0];
2356 rtlphy->reg_e9c = reg_e9c = result[final_candidate][1];
2357 reg_ea4 = result[final_candidate][2];
2358 reg_eac = result[final_candidate][3];
2359 rtlphy->reg_eb4 = reg_eb4 = result[final_candidate][4];
2360 rtlphy->reg_ebc = reg_ebc = result[final_candidate][5];
2361 reg_ec4 = result[final_candidate][6];
2362 reg_ecc = result[final_candidate][7];
2363 b_patha_ok = b_pathb_ok = true;
2364 } else {
2365 rtlphy->reg_e94 = rtlphy->reg_eb4 = 0x100;
2366 rtlphy->reg_e9c = rtlphy->reg_ebc = 0x0;
2367 }
2368 if (reg_e94 != 0) /*&&(reg_ea4 != 0) */
2369 _rtl92c_phy_path_a_fill_iqk_matrix(hw, b_patha_ok, result,
2370 final_candidate,
2371 (reg_ea4 == 0));
2372 if (IS_92C_SERIAL(rtlhal->version)) {
2373 if (reg_eb4 != 0) /*&&(reg_ec4 != 0) */
2374 _rtl92c_phy_path_b_fill_iqk_matrix(hw, b_pathb_ok,
2375 result,
2376 final_candidate,
2377 (reg_ec4 == 0));
2378 }
2379 _rtl92c_phy_save_adda_registers(hw, iqk_bb_reg,
2380 rtlphy->iqk_bb_backup, 10);
2381}
2382
2383void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw)
2384{
2385 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2386 bool b_start_conttx = false, b_singletone = false;
2387
2388 if (b_start_conttx || b_singletone)
2389 return;
2390 if (IS_92C_SERIAL(rtlhal->version))
2391 _rtl92c_phy_lc_calibrate(hw, true);
2392 else
2393 _rtl92c_phy_lc_calibrate(hw, false);
2394}
2395
2396void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta)
2397{
2398 struct rtl_priv *rtlpriv = rtl_priv(hw);
2399 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2400 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2401
2402 if (rtlphy->b_apk_done)
2403 return;
2404 if (IS_92C_SERIAL(rtlhal->version))
2405 _rtl92c_phy_ap_calibrate(hw, delta, true);
2406 else
2407 _rtl92c_phy_ap_calibrate(hw, delta, false);
2408}
2409
2410void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain)
2411{
2412 struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
2413
2414 if (IS_92C_SERIAL(rtlhal->version))
2415 _rtl92c_phy_set_rfpath_switch(hw, bmain, true);
2416 else
2417 _rtl92c_phy_set_rfpath_switch(hw, bmain, false);
2418}
2419
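/*
 * Accept a pause/resume-DM-by-scan request and service it immediately via
 * rtl92c_phy_set_io(); the request is rejected (false) while another one is
 * still marked as in progress.
 */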
2420bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype)
2421{
2422 struct rtl_priv *rtlpriv = rtl_priv(hw);
2423 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2424 bool b_postprocessing = false;
2425
2426 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2427 ("-->IO Cmd(%#x), set_io_inprogress(%d)\n",
2428 iotype, rtlphy->set_io_inprogress));
2429 do {
2430 switch (iotype) {
2431 case IO_CMD_RESUME_DM_BY_SCAN:
2432 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2433 ("[IO CMD] Resume DM after scan.\n"));
2434 b_postprocessing = true;
2435 break;
2436 case IO_CMD_PAUSE_DM_BY_SCAN:
2437 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2438 ("[IO CMD] Pause DM before scan.\n"));
2439 b_postprocessing = true;
2440 break;
2441 default:
2442 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2443 ("switch case not process\n"));
2444 break;
2445 }
2446 } while (false);
2447 if (b_postprocessing && !rtlphy->set_io_inprogress) {
2448 rtlphy->set_io_inprogress = true;
2449 rtlphy->current_io_type = iotype;
2450 } else {
2451 return false;
2452 }
2453 rtl92c_phy_set_io(hw);
2454 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE, ("<--IO Type(%#x)\n", iotype));
2455 return true;
2456}
2457
2458void rtl92c_phy_set_io(struct ieee80211_hw *hw)
2459{
2460 struct rtl_priv *rtlpriv = rtl_priv(hw);
2461 struct rtl_phy *rtlphy = &(rtlpriv->phy);
2462
2463 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2464 ("--->Cmd(%#x), set_io_inprogress(%d)\n",
2465 rtlphy->current_io_type, rtlphy->set_io_inprogress));
2466 switch (rtlphy->current_io_type) {
2467 case IO_CMD_RESUME_DM_BY_SCAN:
2468 dm_digtable.cur_igvalue = rtlphy->initgain_backup.xaagccore1;
2469 rtl92c_dm_write_dig(hw);
2470 rtl92c_phy_set_txpower_level(hw, rtlphy->current_channel);
2471 break;
2472 case IO_CMD_PAUSE_DM_BY_SCAN:
2473 rtlphy->initgain_backup.xaagccore1 = dm_digtable.cur_igvalue;
2474 dm_digtable.cur_igvalue = 0x17;
2475 rtl92c_dm_write_dig(hw);
2476 break;
2477 default:
2478 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2479 ("switch case not process\n"));
2480 break;
2481 }
2482 rtlphy->set_io_inprogress = false;
2483 RT_TRACE(rtlpriv, COMP_CMD, DBG_TRACE,
2484 ("<---(%#x)\n", rtlphy->current_io_type));
2485}
2486
2487void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw)
2488{
2489 struct rtl_priv *rtlpriv = rtl_priv(hw);
2490
2491 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x2b);
2492 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2493 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
2494 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2495 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2496 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2497}
2498
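/*
 * Put the RF side to sleep: pause TX, power down RF register 0 and poll (at
 * most five times) for it to read back zero before clearing a
 * REG_SYS_FUNC_EN bit and writing 0x22 to REG_SPS0_CTRL.  On timeout the
 * awake settings are restored instead and the function bails out.
 */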
2499static void _rtl92ce_phy_set_rf_sleep(struct ieee80211_hw *hw)
2500{
2501 u32 u4b_tmp;
2502 u8 delay = 5;
2503 struct rtl_priv *rtlpriv = rtl_priv(hw);
2504
2505 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0xFF);
2506 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
2507 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
2508 u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
2509 while (u4b_tmp != 0 && delay > 0) {
2510 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x0);
2511 rtl_set_rfreg(hw, RF90_PATH_A, 0x00, RFREG_OFFSET_MASK, 0x00);
2512 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x40);
2513 u4b_tmp = rtl_get_rfreg(hw, RF90_PATH_A, 0, RFREG_OFFSET_MASK);
2514 delay--;
2515 }
2516 if (delay == 0) {
2517 rtl_write_byte(rtlpriv, REG_APSD_CTRL, 0x00);
2518 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2519 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE3);
2520 rtl_write_byte(rtlpriv, REG_TXPAUSE, 0x00);
2521 RT_TRACE(rtlpriv, COMP_POWER, DBG_TRACE,
 2522			 ("Switch RF timeout!\n"));
2523 return;
2524 }
2525 rtl_write_byte(rtlpriv, REG_SYS_FUNC_EN, 0xE2);
2526 rtl_write_byte(rtlpriv, REG_SPS0_CTRL, 0x22);
2527}
2528
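/*
 * Core RF power-state switch.  ERFON either re-enables the whole NIC (when
 * it was halted for IPS) or just wakes the RF; ERFOFF and ERFSLEEP first
 * wait for the TX queues to drain (up to MAX_DOZE_WAITING_TIMES_9x polls)
 * before powering down.  ppsc->rfpwr_state is updated only on success.
 */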
2529static bool _rtl92ce_phy_set_rf_power_state(struct ieee80211_hw *hw,
2530 enum rf_pwrstate rfpwr_state)
2531{
2532 struct rtl_priv *rtlpriv = rtl_priv(hw);
2533 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
2534 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
2535 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2536 bool bresult = true;
2537 u8 i, queue_id;
2538 struct rtl8192_tx_ring *ring = NULL;
2539
2540 ppsc->set_rfpowerstate_inprogress = true;
2541 switch (rfpwr_state) {
2542 case ERFON:{
2543 if ((ppsc->rfpwr_state == ERFOFF) &&
2544 RT_IN_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC)) {
2545 bool rtstatus;
2546 u32 InitializeCount = 0;
2547 do {
2548 InitializeCount++;
2549 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2550 ("IPS Set eRf nic enable\n"));
2551 rtstatus = rtl_ps_enable_nic(hw);
2552 } while ((rtstatus != true)
2553 && (InitializeCount < 10));
2554 RT_CLEAR_PS_LEVEL(ppsc,
2555 RT_RF_OFF_LEVL_HALT_NIC);
2556 } else {
2557 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
 2558				 ("Set ERFON slept:%d ms\n",
2559 jiffies_to_msecs(jiffies -
2560 ppsc->
2561 last_sleep_jiffies)));
2562 ppsc->last_awake_jiffies = jiffies;
2563 rtl92ce_phy_set_rf_on(hw);
2564 }
2565 if (mac->link_state == MAC80211_LINKED) {
2566 rtlpriv->cfg->ops->led_control(hw,
2567 LED_CTL_LINK);
2568 } else {
2569 rtlpriv->cfg->ops->led_control(hw,
2570 LED_CTL_NO_LINK);
2571 }
2572 break;
2573 }
2574 case ERFOFF:{
2575 for (queue_id = 0, i = 0;
2576 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2577 ring = &pcipriv->dev.tx_ring[queue_id];
2578 if (skb_queue_len(&ring->queue) == 0 ||
2579 queue_id == BEACON_QUEUE) {
2580 queue_id++;
2581 continue;
2582 } else {
2583 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2584 ("eRf Off/Sleep: %d times "
2585 "TcbBusyQueue[%d] "
2586 "=%d before doze!\n", (i + 1),
2587 queue_id,
2588 skb_queue_len(&ring->queue)));
2589 udelay(10);
2590 i++;
2591 }
2592 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
2593 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2594 ("\nERFOFF: %d times "
2595 "TcbBusyQueue[%d] = %d !\n",
2596 MAX_DOZE_WAITING_TIMES_9x,
2597 queue_id,
2598 skb_queue_len(&ring->queue)));
2599 break;
2600 }
2601 }
2602 if (ppsc->reg_rfps_level & RT_RF_OFF_LEVL_HALT_NIC) {
2603 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
2604 ("IPS Set eRf nic disable\n"));
2605 rtl_ps_disable_nic(hw);
2606 RT_SET_PS_LEVEL(ppsc, RT_RF_OFF_LEVL_HALT_NIC);
2607 } else {
2608 if (ppsc->rfoff_reason == RF_CHANGE_BY_IPS) {
2609 rtlpriv->cfg->ops->led_control(hw,
2610 LED_CTL_NO_LINK);
2611 } else {
2612 rtlpriv->cfg->ops->led_control(hw,
2613 LED_CTL_POWER_OFF);
2614 }
2615 }
2616 break;
2617 }
2618 case ERFSLEEP:{
2619 if (ppsc->rfpwr_state == ERFOFF)
2620 break;
2621 for (queue_id = 0, i = 0;
2622 queue_id < RTL_PCI_MAX_TX_QUEUE_COUNT;) {
2623 ring = &pcipriv->dev.tx_ring[queue_id];
2624 if (skb_queue_len(&ring->queue) == 0) {
2625 queue_id++;
2626 continue;
2627 } else {
2628 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2629 ("eRf Off/Sleep: %d times "
2630 "TcbBusyQueue[%d] =%d before "
2631 "doze!\n", (i + 1), queue_id,
2632 skb_queue_len(&ring->queue)));
2633 udelay(10);
2634 i++;
2635 }
2636 if (i >= MAX_DOZE_WAITING_TIMES_9x) {
2637 RT_TRACE(rtlpriv, COMP_ERR, DBG_WARNING,
2638 ("\n ERFSLEEP: %d times "
2639 "TcbBusyQueue[%d] = %d !\n",
2640 MAX_DOZE_WAITING_TIMES_9x,
2641 queue_id,
2642 skb_queue_len(&ring->queue)));
2643 break;
2644 }
2645 }
2646 RT_TRACE(rtlpriv, COMP_RF, DBG_DMESG,
 2647			 ("Set ERFSLEEP awake:%d ms\n",
2648 jiffies_to_msecs(jiffies -
2649 ppsc->last_awake_jiffies)));
2650 ppsc->last_sleep_jiffies = jiffies;
2651 _rtl92ce_phy_set_rf_sleep(hw);
2652 break;
2653 }
2654 default:
2655 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
2656 ("switch case not process\n"));
2657 bresult = false;
2658 break;
2659 }
2660 if (bresult)
2661 ppsc->rfpwr_state = rfpwr_state;
2662 ppsc->set_rfpowerstate_inprogress = false;
2663 return bresult;
2664}
2665
2666bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
2667 enum rf_pwrstate rfpwr_state)
2668{
2669 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
2670 bool bresult = false;
2671
2672 if (rfpwr_state == ppsc->rfpwr_state)
2673 return bresult;
2674 bresult = _rtl92ce_phy_set_rf_power_state(hw, rfpwr_state);
2675 return bresult;
2676}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
new file mode 100644
index 000000000000..ca4daee6e9a8
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/phy.h
@@ -0,0 +1,237 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_PHY_H__
31#define __RTL92C_PHY_H__
32
33#define MAX_PRECMD_CNT 16
34#define MAX_RFDEPENDCMD_CNT 16
35#define MAX_POSTCMD_CNT 16
36
37#define MAX_DOZE_WAITING_TIMES_9x 64
38
39#define RT_CANNOT_IO(hw) false
40#define HIGHPOWER_RADIOA_ARRAYLEN 22
41
42#define MAX_TOLERANCE 5
43#define IQK_DELAY_TIME 1
44
45#define APK_BB_REG_NUM 5
46#define APK_AFE_REG_NUM 16
47#define APK_CURVE_REG_NUM 4
48#define PATH_NUM 2
49
50#define LOOP_LIMIT 5
51#define MAX_STALL_TIME 50
52#define AntennaDiversityValue 0x80
53#define MAX_TXPWR_IDX_NMODE_92S 63
54#define Reset_Cnt_Limit 3
55
56#define IQK_ADDA_REG_NUM 16
57#define IQK_MAC_REG_NUM 4
58
59#define RF90_PATH_MAX 2
60#define CHANNEL_MAX_NUMBER 14
61#define CHANNEL_GROUP_MAX 3
62
 63#define CT_OFFSET_MAC_ADDR 0x16
64
65#define CT_OFFSET_CCK_TX_PWR_IDX 0x5A
66#define CT_OFFSET_HT401S_TX_PWR_IDX 0x60
67#define CT_OFFSET_HT402S_TX_PWR_IDX_DIF 0x66
68#define CT_OFFSET_HT20_TX_PWR_IDX_DIFF 0x69
69#define CT_OFFSET_OFDM_TX_PWR_IDX_DIFF 0x6C
70
71#define CT_OFFSET_HT40_MAX_PWR_OFFSET 0x6F
72#define CT_OFFSET_HT20_MAX_PWR_OFFSET 0x72
73
74#define CT_OFFSET_CHANNEL_PLAH 0x75
75#define CT_OFFSET_THERMAL_METER 0x78
76#define CT_OFFSET_RF_OPTION 0x79
77#define CT_OFFSET_VERSION 0x7E
78#define CT_OFFSET_CUSTOMER_ID 0x7F
79
80#define RTL92C_MAX_PATH_NUM 2
81#define CHANNEL_MAX_NUMBER 14
82#define CHANNEL_GROUP_MAX 3
83
84enum swchnlcmd_id {
85 CMDID_END,
86 CMDID_SET_TXPOWEROWER_LEVEL,
87 CMDID_BBREGWRITE10,
88 CMDID_WRITEPORT_ULONG,
89 CMDID_WRITEPORT_USHORT,
90 CMDID_WRITEPORT_UCHAR,
91 CMDID_RF_WRITEREG,
92};
93
94struct swchnlcmd {
95 enum swchnlcmd_id cmdid;
96 u32 para1;
97 u32 para2;
98 u32 msdelay;
99};
100
101enum hw90_block_e {
102 HW90_BLOCK_MAC = 0,
103 HW90_BLOCK_PHY0 = 1,
104 HW90_BLOCK_PHY1 = 2,
105 HW90_BLOCK_RF = 3,
106 HW90_BLOCK_MAXIMUM = 4,
107};
108
109enum baseband_config_type {
110 BASEBAND_CONFIG_PHY_REG = 0,
111 BASEBAND_CONFIG_AGC_TAB = 1,
112};
113
114enum ra_offset_area {
115 RA_OFFSET_LEGACY_OFDM1,
116 RA_OFFSET_LEGACY_OFDM2,
117 RA_OFFSET_HT_OFDM1,
118 RA_OFFSET_HT_OFDM2,
119 RA_OFFSET_HT_OFDM3,
120 RA_OFFSET_HT_OFDM4,
121 RA_OFFSET_HT_CCK,
122};
123
124enum antenna_path {
125 ANTENNA_NONE,
126 ANTENNA_D,
127 ANTENNA_C,
128 ANTENNA_CD,
129 ANTENNA_B,
130 ANTENNA_BD,
131 ANTENNA_BC,
132 ANTENNA_BCD,
133 ANTENNA_A,
134 ANTENNA_AD,
135 ANTENNA_AC,
136 ANTENNA_ACD,
137 ANTENNA_AB,
138 ANTENNA_ABD,
139 ANTENNA_ABC,
140 ANTENNA_ABCD
141};
142
143struct r_antenna_select_ofdm {
144 u32 r_tx_antenna:4;
145 u32 r_ant_l:4;
146 u32 r_ant_non_ht:4;
147 u32 r_ant_ht1:4;
148 u32 r_ant_ht2:4;
149 u32 r_ant_ht_s1:4;
150 u32 r_ant_non_ht_s1:4;
151 u32 ofdm_txsc:2;
152 u32 reserved:2;
153};
154
155struct r_antenna_select_cck {
156 u8 r_cckrx_enable_2:2;
157 u8 r_cckrx_enable:2;
158 u8 r_ccktx_enable:4;
159};
160
161struct efuse_contents {
162 u8 mac_addr[ETH_ALEN];
163 u8 cck_tx_power_idx[6];
164 u8 ht40_1s_tx_power_idx[6];
165 u8 ht40_2s_tx_power_idx_diff[3];
166 u8 ht20_tx_power_idx_diff[3];
167 u8 ofdm_tx_power_idx_diff[3];
168 u8 ht40_max_power_offset[3];
169 u8 ht20_max_power_offset[3];
170 u8 channel_plan;
171 u8 thermal_meter;
172 u8 rf_option[5];
173 u8 version;
174 u8 oem_id;
175 u8 regulatory;
176};
177
178struct tx_power_struct {
179 u8 cck[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
180 u8 ht40_1s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
181 u8 ht40_2s[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
182 u8 ht20_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
183 u8 legacy_ht_diff[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
184 u8 legacy_ht_txpowerdiff;
185 u8 groupht20[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
186 u8 groupht40[RTL92C_MAX_PATH_NUM][CHANNEL_MAX_NUMBER];
187 u8 pwrgroup_cnt;
188 u32 mcs_original_offset[4][16];
189};
190
191extern u32 rtl92c_phy_query_bb_reg(struct ieee80211_hw *hw,
192 u32 regaddr, u32 bitmask);
193extern void rtl92c_phy_set_bb_reg(struct ieee80211_hw *hw,
194 u32 regaddr, u32 bitmask, u32 data);
195extern u32 rtl92c_phy_query_rf_reg(struct ieee80211_hw *hw,
196 enum radio_path rfpath, u32 regaddr,
197 u32 bitmask);
198extern void rtl92c_phy_set_rf_reg(struct ieee80211_hw *hw,
199 enum radio_path rfpath, u32 regaddr,
200 u32 bitmask, u32 data);
201extern bool rtl92c_phy_mac_config(struct ieee80211_hw *hw);
202extern bool rtl92c_phy_bb_config(struct ieee80211_hw *hw);
203extern bool rtl92c_phy_rf_config(struct ieee80211_hw *hw);
204extern bool rtl92c_phy_config_rf_with_feaderfile(struct ieee80211_hw *hw,
205 enum radio_path rfpath);
206extern void rtl92c_phy_get_hw_reg_originalvalue(struct ieee80211_hw *hw);
207extern void rtl92c_phy_get_txpower_level(struct ieee80211_hw *hw,
208 long *powerlevel);
209extern void rtl92c_phy_set_txpower_level(struct ieee80211_hw *hw, u8 channel);
210extern bool rtl92c_phy_update_txpower_dbm(struct ieee80211_hw *hw,
211 long power_indbm);
212extern void rtl92c_phy_scan_operation_backup(struct ieee80211_hw *hw,
213 u8 operation);
214extern void rtl92c_phy_set_bw_mode_callback(struct ieee80211_hw *hw);
215extern void rtl92c_phy_set_bw_mode(struct ieee80211_hw *hw,
216 enum nl80211_channel_type ch_type);
217extern void rtl92c_phy_sw_chnl_callback(struct ieee80211_hw *hw);
218extern u8 rtl92c_phy_sw_chnl(struct ieee80211_hw *hw);
219extern void rtl92c_phy_iq_calibrate(struct ieee80211_hw *hw, bool b_recovery);
220extern void rtl92c_phy_set_beacon_hw_reg(struct ieee80211_hw *hw,
221 u16 beaconinterval);
222void rtl92c_phy_ap_calibrate(struct ieee80211_hw *hw, char delta);
223void rtl92c_phy_lc_calibrate(struct ieee80211_hw *hw);
224void rtl92c_phy_set_rfpath_switch(struct ieee80211_hw *hw, bool bmain);
225bool rtl92c_phy_config_rf_with_headerfile(struct ieee80211_hw *hw,
226 enum radio_path rfpath);
227extern bool rtl8192_phy_check_is_legal_rfpath(struct ieee80211_hw *hw,
228 u32 rfpath);
229bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
230extern bool rtl92c_phy_set_rf_power_state(struct ieee80211_hw *hw,
231 enum rf_pwrstate rfpwr_state);
232void rtl92c_phy_config_bb_external_pa(struct ieee80211_hw *hw);
233void rtl92ce_phy_set_rf_on(struct ieee80211_hw *hw);
234bool rtl92c_phy_set_io_cmd(struct ieee80211_hw *hw, enum io_type iotype);
235void rtl92c_phy_set_io(struct ieee80211_hw *hw);
236
237#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
new file mode 100644
index 000000000000..875d51465225
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/reg.h
@@ -0,0 +1,2065 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_REG_H__
31#define __RTL92C_REG_H__
32
33#define REG_SYS_ISO_CTRL 0x0000
34#define REG_SYS_FUNC_EN 0x0002
35#define REG_APS_FSMCO 0x0004
36#define REG_SYS_CLKR 0x0008
37#define REG_9346CR 0x000A
38#define REG_EE_VPD 0x000C
39#define REG_AFE_MISC 0x0010
40#define REG_SPS0_CTRL 0x0011
41#define REG_SPS_OCP_CFG 0x0018
42#define REG_RSV_CTRL 0x001C
43#define REG_RF_CTRL 0x001F
44#define REG_LDOA15_CTRL 0x0020
45#define REG_LDOV12D_CTRL 0x0021
46#define REG_LDOHCI12_CTRL 0x0022
47#define REG_LPLDO_CTRL 0x0023
48#define REG_AFE_XTAL_CTRL 0x0024
49#define REG_AFE_PLL_CTRL 0x0028
50#define REG_EFUSE_CTRL 0x0030
51#define REG_EFUSE_TEST 0x0034
52#define REG_PWR_DATA 0x0038
53#define REG_CAL_TIMER 0x003C
54#define REG_ACLK_MON 0x003E
55#define REG_GPIO_MUXCFG 0x0040
56#define REG_GPIO_IO_SEL 0x0042
57#define REG_MAC_PINMUX_CFG 0x0043
58#define REG_GPIO_PIN_CTRL 0x0044
59#define REG_GPIO_INTM 0x0048
60#define REG_LEDCFG0 0x004C
61#define REG_LEDCFG1 0x004D
62#define REG_LEDCFG2 0x004E
63#define REG_LEDCFG3 0x004F
64#define REG_FSIMR 0x0050
65#define REG_FSISR 0x0054
66
67#define REG_MCUFWDL 0x0080
68
69#define REG_HMEBOX_EXT_0 0x0088
70#define REG_HMEBOX_EXT_1 0x008A
71#define REG_HMEBOX_EXT_2 0x008C
72#define REG_HMEBOX_EXT_3 0x008E
73
74#define REG_BIST_SCAN 0x00D0
75#define REG_BIST_RPT 0x00D4
76#define REG_BIST_ROM_RPT 0x00D8
77#define REG_USB_SIE_INTF 0x00E0
78#define REG_PCIE_MIO_INTF 0x00E4
79#define REG_PCIE_MIO_INTD 0x00E8
80#define REG_HPON_FSM 0x00EC
81#define REG_SYS_CFG 0x00F0
82
83#define REG_CR 0x0100
84#define REG_PBP 0x0104
85#define REG_TRXDMA_CTRL 0x010C
86#define REG_TRXFF_BNDY 0x0114
87#define REG_TRXFF_STATUS 0x0118
88#define REG_RXFF_PTR 0x011C
89#define REG_HIMR 0x0120
90#define REG_HISR 0x0124
91#define REG_HIMRE 0x0128
92#define REG_HISRE 0x012C
93#define REG_CPWM 0x012F
94#define REG_FWIMR 0x0130
95#define REG_FWISR 0x0134
96#define REG_PKTBUF_DBG_CTRL 0x0140
97#define REG_PKTBUF_DBG_DATA_L 0x0144
98#define REG_PKTBUF_DBG_DATA_H 0x0148
99
100#define REG_TC0_CTRL 0x0150
101#define REG_TC1_CTRL 0x0154
102#define REG_TC2_CTRL 0x0158
103#define REG_TC3_CTRL 0x015C
104#define REG_TC4_CTRL 0x0160
105#define REG_TCUNIT_BASE 0x0164
106#define REG_MBIST_START 0x0174
107#define REG_MBIST_DONE 0x0178
108#define REG_MBIST_FAIL 0x017C
109#define REG_C2HEVT_MSG_NORMAL 0x01A0
110#define REG_C2HEVT_MSG_TEST 0x01B8
111#define REG_C2HEVT_CLEAR 0x01BF
112#define REG_MCUTST_1 0x01c0
113#define REG_FMETHR 0x01C8
114#define REG_HMETFR 0x01CC
115#define REG_HMEBOX_0 0x01D0
116#define REG_HMEBOX_1 0x01D4
117#define REG_HMEBOX_2 0x01D8
118#define REG_HMEBOX_3 0x01DC
119
120#define REG_LLT_INIT 0x01E0
121#define REG_BB_ACCEESS_CTRL 0x01E8
122#define REG_BB_ACCESS_DATA 0x01EC
123
124#define REG_RQPN 0x0200
125#define REG_FIFOPAGE 0x0204
126#define REG_TDECTRL 0x0208
127#define REG_TXDMA_OFFSET_CHK 0x020C
128#define REG_TXDMA_STATUS 0x0210
129#define REG_RQPN_NPQ 0x0214
130
131#define REG_RXDMA_AGG_PG_TH 0x0280
132#define REG_RXPKT_NUM 0x0284
133#define REG_RXDMA_STATUS 0x0288
134
135#define REG_PCIE_CTRL_REG 0x0300
136#define REG_INT_MIG 0x0304
137#define REG_BCNQ_DESA 0x0308
138#define REG_HQ_DESA 0x0310
139#define REG_MGQ_DESA 0x0318
140#define REG_VOQ_DESA 0x0320
141#define REG_VIQ_DESA 0x0328
142#define REG_BEQ_DESA 0x0330
143#define REG_BKQ_DESA 0x0338
144#define REG_RX_DESA 0x0340
145#define REG_DBI 0x0348
146#define REG_MDIO 0x0354
147#define REG_DBG_SEL 0x0360
148#define REG_PCIE_HRPWM 0x0361
149#define REG_PCIE_HCPWM 0x0363
150#define REG_UART_CTRL 0x0364
151#define REG_UART_TX_DESA 0x0370
152#define REG_UART_RX_DESA 0x0378
153
154#define REG_HDAQ_DESA_NODEF 0x0000
155#define REG_CMDQ_DESA_NODEF 0x0000
156
157#define REG_VOQ_INFORMATION 0x0400
158#define REG_VIQ_INFORMATION 0x0404
159#define REG_BEQ_INFORMATION 0x0408
160#define REG_BKQ_INFORMATION 0x040C
161#define REG_MGQ_INFORMATION 0x0410
162#define REG_HGQ_INFORMATION 0x0414
163#define REG_BCNQ_INFORMATION 0x0418
164
165#define REG_CPU_MGQ_INFORMATION 0x041C
166#define REG_FWHW_TXQ_CTRL 0x0420
167#define REG_HWSEQ_CTRL 0x0423
168#define REG_TXPKTBUF_BCNQ_BDNY 0x0424
169#define REG_TXPKTBUF_MGQ_BDNY 0x0425
170#define REG_MULTI_BCNQ_EN 0x0426
171#define REG_MULTI_BCNQ_OFFSET 0x0427
172#define REG_SPEC_SIFS 0x0428
173#define REG_RL 0x042A
174#define REG_DARFRC 0x0430
175#define REG_RARFRC 0x0438
176#define REG_RRSR 0x0440
177#define REG_ARFR0 0x0444
178#define REG_ARFR1 0x0448
179#define REG_ARFR2 0x044C
180#define REG_ARFR3 0x0450
181#define REG_AGGLEN_LMT 0x0458
182#define REG_AMPDU_MIN_SPACE 0x045C
183#define REG_TXPKTBUF_WMAC_LBK_BF_HD 0x045D
184#define REG_FAST_EDCA_CTRL 0x0460
185#define REG_RD_RESP_PKT_TH 0x0463
186#define REG_INIRTS_RATE_SEL 0x0480
187#define REG_INIDATA_RATE_SEL 0x0484
188#define REG_POWER_STATUS 0x04A4
189#define REG_POWER_STAGE1 0x04B4
190#define REG_POWER_STAGE2 0x04B8
191#define REG_PKT_LIFE_TIME 0x04C0
192#define REG_STBC_SETTING 0x04C4
193#define REG_PROT_MODE_CTRL 0x04C8
194#define REG_BAR_MODE_CTRL 0x04CC
195#define REG_RA_TRY_RATE_AGG_LMT 0x04CF
196#define REG_NQOS_SEQ 0x04DC
197#define REG_QOS_SEQ 0x04DE
198#define REG_NEED_CPU_HANDLE 0x04E0
199#define REG_PKT_LOSE_RPT 0x04E1
200#define REG_PTCL_ERR_STATUS 0x04E2
201#define REG_DUMMY 0x04FC
202
203#define REG_EDCA_VO_PARAM 0x0500
204#define REG_EDCA_VI_PARAM 0x0504
205#define REG_EDCA_BE_PARAM 0x0508
206#define REG_EDCA_BK_PARAM 0x050C
207#define REG_BCNTCFG 0x0510
208#define REG_PIFS 0x0512
209#define REG_RDG_PIFS 0x0513
210#define REG_SIFS_CTX 0x0514
211#define REG_SIFS_TRX 0x0516
212#define REG_AGGR_BREAK_TIME 0x051A
213#define REG_SLOT 0x051B
214#define REG_TX_PTCL_CTRL 0x0520
215#define REG_TXPAUSE 0x0522
216#define REG_DIS_TXREQ_CLR 0x0523
217#define REG_RD_CTRL 0x0524
218#define REG_TBTT_PROHIBIT 0x0540
219#define REG_RD_NAV_NXT 0x0544
220#define REG_NAV_PROT_LEN 0x0546
221#define REG_BCN_CTRL 0x0550
222#define REG_USTIME_TSF 0x0551
223#define REG_MBID_NUM 0x0552
224#define REG_DUAL_TSF_RST 0x0553
225#define REG_BCN_INTERVAL 0x0554
226#define REG_MBSSID_BCN_SPACE 0x0554
227#define REG_DRVERLYINT 0x0558
228#define REG_BCNDMATIM 0x0559
229#define REG_ATIMWND 0x055A
230#define REG_BCN_MAX_ERR 0x055D
231#define REG_RXTSF_OFFSET_CCK 0x055E
232#define REG_RXTSF_OFFSET_OFDM 0x055F
233#define REG_TSFTR 0x0560
234#define REG_INIT_TSFTR 0x0564
235#define REG_PSTIMER 0x0580
236#define REG_TIMER0 0x0584
237#define REG_TIMER1 0x0588
238#define REG_ACMHWCTRL 0x05C0
239#define REG_ACMRSTCTRL 0x05C1
240#define REG_ACMAVG 0x05C2
241#define REG_VO_ADMTIME 0x05C4
242#define REG_VI_ADMTIME 0x05C6
243#define REG_BE_ADMTIME 0x05C8
244#define REG_EDCA_RANDOM_GEN 0x05CC
245#define REG_SCH_TXCMD 0x05D0
246
247#define REG_APSD_CTRL 0x0600
248#define REG_BWOPMODE 0x0603
249#define REG_TCR 0x0604
250#define REG_RCR 0x0608
251#define REG_RX_PKT_LIMIT 0x060C
252#define REG_RX_DLK_TIME 0x060D
253#define REG_RX_DRVINFO_SZ 0x060F
254
255#define REG_MACID 0x0610
256#define REG_BSSID 0x0618
257#define REG_MAR 0x0620
258#define REG_MBIDCAMCFG 0x0628
259
260#define REG_USTIME_EDCA 0x0638
261#define REG_MAC_SPEC_SIFS 0x063A
262#define REG_RESP_SIFS_CCK 0x063C
263#define REG_RESP_SIFS_OFDM 0x063E
264#define REG_ACKTO 0x0640
265#define REG_CTS2TO 0x0641
266#define REG_EIFS 0x0642
267
268#define REG_NAV_CTRL 0x0650
269#define REG_BACAMCMD 0x0654
270#define REG_BACAMCONTENT 0x0658
271#define REG_LBDLY 0x0660
272#define REG_FWDLY 0x0661
273#define REG_RXERR_RPT 0x0664
274#define REG_WMAC_TRXPTCL_CTL 0x0668
275
276#define REG_CAMCMD 0x0670
277#define REG_CAMWRITE 0x0674
278#define REG_CAMREAD 0x0678
279#define REG_CAMDBG 0x067C
280#define REG_SECCFG 0x0680
281
282#define REG_WOW_CTRL 0x0690
283#define REG_PSSTATUS 0x0691
284#define REG_PS_RX_INFO 0x0692
285#define REG_LPNAV_CTRL 0x0694
286#define REG_WKFMCAM_CMD 0x0698
287#define REG_WKFMCAM_RWD 0x069C
288#define REG_RXFLTMAP0 0x06A0
289#define REG_RXFLTMAP1 0x06A2
290#define REG_RXFLTMAP2 0x06A4
291#define REG_BCN_PSR_RPT 0x06A8
292#define REG_CALB32K_CTRL 0x06AC
293#define REG_PKT_MON_CTRL 0x06B4
294#define REG_BT_COEX_TABLE 0x06C0
295#define REG_WMAC_RESP_TXINFO 0x06D8
296
297#define REG_USB_INFO 0xFE17
298#define REG_USB_SPECIAL_OPTION 0xFE55
299#define REG_USB_DMA_AGG_TO 0xFE5B
300#define REG_USB_AGG_TO 0xFE5C
301#define REG_USB_AGG_TH 0xFE5D
302
303#define REG_TEST_USB_TXQS 0xFE48
304#define REG_TEST_SIE_VID 0xFE60
305#define REG_TEST_SIE_PID 0xFE62
306#define REG_TEST_SIE_OPTIONAL 0xFE64
307#define REG_TEST_SIE_CHIRP_K 0xFE65
308#define REG_TEST_SIE_PHY 0xFE66
309#define REG_TEST_SIE_MAC_ADDR 0xFE70
310#define REG_TEST_SIE_STRING 0xFE80
311
312#define REG_NORMAL_SIE_VID 0xFE60
313#define REG_NORMAL_SIE_PID 0xFE62
314#define REG_NORMAL_SIE_OPTIONAL 0xFE64
315#define REG_NORMAL_SIE_EP 0xFE65
316#define REG_NORMAL_SIE_PHY 0xFE68
317#define REG_NORMAL_SIE_MAC_ADDR 0xFE70
318#define REG_NORMAL_SIE_STRING 0xFE80
319
320#define CR9346 REG_9346CR
321#define MSR (REG_CR + 2)
322#define ISR REG_HISR
323#define TSFR REG_TSFTR
324
325#define MACIDR0 REG_MACID
326#define MACIDR4 (REG_MACID + 4)
327
328#define PBP REG_PBP
329
330#define IDR0 MACIDR0
331#define IDR4 MACIDR4
332
333#define UNUSED_REGISTER 0x1BF
334#define DCAM UNUSED_REGISTER
335#define PSR UNUSED_REGISTER
336#define BBADDR UNUSED_REGISTER
337#define PHYDATAR UNUSED_REGISTER
338
339#define INVALID_BBRF_VALUE 0x12345678
340
341#define MAX_MSS_DENSITY_2T 0x13
342#define MAX_MSS_DENSITY_1T 0x0A
343
344#define CMDEEPROM_EN BIT(5)
345#define CMDEEPROM_SEL BIT(4)
346#define CMD9346CR_9356SEL BIT(4)
347#define AUTOLOAD_EEPROM (CMDEEPROM_EN|CMDEEPROM_SEL)
348#define AUTOLOAD_EFUSE CMDEEPROM_EN
349
350#define GPIOSEL_GPIO 0
351#define GPIOSEL_ENBT BIT(5)
352
353#define GPIO_IN REG_GPIO_PIN_CTRL
354#define GPIO_OUT (REG_GPIO_PIN_CTRL+1)
355#define GPIO_IO_SEL (REG_GPIO_PIN_CTRL+2)
356#define GPIO_MOD (REG_GPIO_PIN_CTRL+3)
357
358#define MSR_NOLINK 0x00
359#define MSR_ADHOC 0x01
360#define MSR_INFRA 0x02
361#define MSR_AP 0x03
362
363#define RRSR_RSC_OFFSET 21
364#define RRSR_SHORT_OFFSET 23
365#define RRSR_RSC_BW_40M 0x600000
366#define RRSR_RSC_UPSUBCHNL 0x400000
367#define RRSR_RSC_LOWSUBCHNL 0x200000
368#define RRSR_SHORT 0x800000
369#define RRSR_1M BIT(0)
370#define RRSR_2M BIT(1)
371#define RRSR_5_5M BIT(2)
372#define RRSR_11M BIT(3)
373#define RRSR_6M BIT(4)
374#define RRSR_9M BIT(5)
375#define RRSR_12M BIT(6)
376#define RRSR_18M BIT(7)
377#define RRSR_24M BIT(8)
378#define RRSR_36M BIT(9)
379#define RRSR_48M BIT(10)
380#define RRSR_54M BIT(11)
381#define RRSR_MCS0 BIT(12)
382#define RRSR_MCS1 BIT(13)
383#define RRSR_MCS2 BIT(14)
384#define RRSR_MCS3 BIT(15)
385#define RRSR_MCS4 BIT(16)
386#define RRSR_MCS5 BIT(17)
387#define RRSR_MCS6 BIT(18)
388#define RRSR_MCS7 BIT(19)
389#define BRSR_ACKSHORTPMB BIT(23)
390
391#define RATR_1M 0x00000001
392#define RATR_2M 0x00000002
393#define RATR_55M 0x00000004
394#define RATR_11M 0x00000008
395#define RATR_6M 0x00000010
396#define RATR_9M 0x00000020
397#define RATR_12M 0x00000040
398#define RATR_18M 0x00000080
399#define RATR_24M 0x00000100
400#define RATR_36M 0x00000200
401#define RATR_48M 0x00000400
402#define RATR_54M 0x00000800
403#define RATR_MCS0 0x00001000
404#define RATR_MCS1 0x00002000
405#define RATR_MCS2 0x00004000
406#define RATR_MCS3 0x00008000
407#define RATR_MCS4 0x00010000
408#define RATR_MCS5 0x00020000
409#define RATR_MCS6 0x00040000
410#define RATR_MCS7 0x00080000
411#define RATR_MCS8 0x00100000
412#define RATR_MCS9 0x00200000
413#define RATR_MCS10 0x00400000
414#define RATR_MCS11 0x00800000
415#define RATR_MCS12 0x01000000
416#define RATR_MCS13 0x02000000
417#define RATR_MCS14 0x04000000
418#define RATR_MCS15 0x08000000
419
420#define RATE_1M BIT(0)
421#define RATE_2M BIT(1)
422#define RATE_5_5M BIT(2)
423#define RATE_11M BIT(3)
424#define RATE_6M BIT(4)
425#define RATE_9M BIT(5)
426#define RATE_12M BIT(6)
427#define RATE_18M BIT(7)
428#define RATE_24M BIT(8)
429#define RATE_36M BIT(9)
430#define RATE_48M BIT(10)
431#define RATE_54M BIT(11)
432#define RATE_MCS0 BIT(12)
433#define RATE_MCS1 BIT(13)
434#define RATE_MCS2 BIT(14)
435#define RATE_MCS3 BIT(15)
436#define RATE_MCS4 BIT(16)
437#define RATE_MCS5 BIT(17)
438#define RATE_MCS6 BIT(18)
439#define RATE_MCS7 BIT(19)
440#define RATE_MCS8 BIT(20)
441#define RATE_MCS9 BIT(21)
442#define RATE_MCS10 BIT(22)
443#define RATE_MCS11 BIT(23)
444#define RATE_MCS12 BIT(24)
445#define RATE_MCS13 BIT(25)
446#define RATE_MCS14 BIT(26)
447#define RATE_MCS15 BIT(27)
448
449#define RATE_ALL_CCK (RATR_1M | RATR_2M | RATR_55M | RATR_11M)
450#define RATE_ALL_OFDM_AG (RATR_6M | RATR_9M | RATR_12M | RATR_18M \
451 | RATR_24M | RATR_36M | RATR_48M | RATR_54M)
452#define RATE_ALL_OFDM_1SS (RATR_MCS0 | RATR_MCS1 | RATR_MCS2 | \
453 RATR_MCS3 | RATR_MCS4 | RATR_MCS5 | \
454 RATR_MCS6 | RATR_MCS7)
455#define RATE_ALL_OFDM_2SS (RATR_MCS8 | RATR_MCS9 | RATR_MCS10 | \
456 RATR_MCS11 | RATR_MCS12 | RATR_MCS13 | \
457 RATR_MCS14 | RATR_MCS15)
458
459#define BW_OPMODE_20MHZ BIT(2)
460#define BW_OPMODE_5G BIT(1)
461#define BW_OPMODE_11J BIT(0)
462
463#define CAM_VALID BIT(15)
464#define CAM_NOTVALID 0x0000
465#define CAM_USEDK BIT(5)
466
467#define CAM_NONE 0x0
468#define CAM_WEP40 0x01
469#define CAM_TKIP 0x02
470#define CAM_AES 0x04
471#define CAM_WEP104 0x05
472
473#define TOTAL_CAM_ENTRY 32
474#define HALF_CAM_ENTRY 16
475
476#define CAM_WRITE BIT(16)
477#define CAM_READ 0x00000000
478#define CAM_POLLINIG BIT(31)
479
480#define SCR_USEDK 0x01
481#define SCR_TXSEC_ENABLE 0x02
482#define SCR_RXSEC_ENABLE 0x04
483
484#define WOW_PMEN BIT(0)
485#define WOW_WOMEN BIT(1)
486#define WOW_MAGIC BIT(2)
487#define WOW_UWF BIT(3)
488
489#define IMR8190_DISABLED 0x0
490#define IMR_BCNDMAINT6 BIT(31)
491#define IMR_BCNDMAINT5 BIT(30)
492#define IMR_BCNDMAINT4 BIT(29)
493#define IMR_BCNDMAINT3 BIT(28)
494#define IMR_BCNDMAINT2 BIT(27)
495#define IMR_BCNDMAINT1 BIT(26)
496#define IMR_BCNDOK8 BIT(25)
497#define IMR_BCNDOK7 BIT(24)
498#define IMR_BCNDOK6 BIT(23)
499#define IMR_BCNDOK5 BIT(22)
500#define IMR_BCNDOK4 BIT(21)
501#define IMR_BCNDOK3 BIT(20)
502#define IMR_BCNDOK2 BIT(19)
503#define IMR_BCNDOK1 BIT(18)
504#define IMR_TIMEOUT2 BIT(17)
505#define IMR_TIMEOUT1 BIT(16)
506#define IMR_TXFOVW BIT(15)
507#define IMR_PSTIMEOUT BIT(14)
508#define IMR_BCNINT BIT(13)
509#define IMR_RXFOVW BIT(12)
510#define IMR_RDU BIT(11)
511#define IMR_ATIMEND BIT(10)
512#define IMR_BDOK BIT(9)
513#define IMR_HIGHDOK BIT(8)
514#define IMR_TBDOK BIT(7)
515#define IMR_MGNTDOK BIT(6)
516#define IMR_TBDER BIT(5)
517#define IMR_BKDOK BIT(4)
518#define IMR_BEDOK BIT(3)
519#define IMR_VIDOK BIT(2)
520#define IMR_VODOK BIT(1)
521#define IMR_ROK BIT(0)
522
523#define IMR_TXERR BIT(11)
524#define IMR_RXERR BIT(10)
525#define IMR_C2HCMD BIT(9)
526#define IMR_CPWM BIT(8)
527#define IMR_OCPINT BIT(1)
528#define IMR_WLANOFF BIT(0)
529
530#define HWSET_MAX_SIZE 128
531
532#define EEPROM_DEFAULT_TSSI 0x0
533#define EEPROM_DEFAULT_TXPOWERDIFF 0x0
534#define EEPROM_DEFAULT_CRYSTALCAP 0x5
535#define EEPROM_DEFAULT_BOARDTYPE 0x02
536#define EEPROM_DEFAULT_TXPOWER 0x1010
537#define EEPROM_DEFAULT_HT2T_TXPWR 0x10
538
539#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
540#define EEPROM_DEFAULT_THERMALMETER 0x12
541#define EEPROM_DEFAULT_ANTTXPOWERDIFF 0x0
542#define EEPROM_DEFAULT_TXPWDIFF_CRYSTALCAP 0x5
543#define EEPROM_DEFAULT_TXPOWERLEVEL 0x22
544#define EEPROM_DEFAULT_HT40_2SDIFF 0x0
545#define EEPROM_DEFAULT_HT20_DIFF 2
546#define EEPROM_DEFAULT_LEGACYHTTXPOWERDIFF 0x3
547#define EEPROM_DEFAULT_HT40_PWRMAXOFFSET 0
548#define EEPROM_DEFAULT_HT20_PWRMAXOFFSET 0
549
550#define RF_OPTION1 0x79
551#define RF_OPTION2 0x7A
552#define RF_OPTION3 0x7B
553#define RF_OPTION4 0x7C
554
555#define EEPROM_DEFAULT_PID 0x1234
556#define EEPROM_DEFAULT_VID 0x5678
557#define EEPROM_DEFAULT_CUSTOMERID 0xAB
558#define EEPROM_DEFAULT_SUBCUSTOMERID 0xCD
559#define EEPROM_DEFAULT_VERSION 0
560
561#define EEPROM_CHANNEL_PLAN_FCC 0x0
562#define EEPROM_CHANNEL_PLAN_IC 0x1
563#define EEPROM_CHANNEL_PLAN_ETSI 0x2
564#define EEPROM_CHANNEL_PLAN_SPAIN 0x3
565#define EEPROM_CHANNEL_PLAN_FRANCE 0x4
566#define EEPROM_CHANNEL_PLAN_MKK 0x5
567#define EEPROM_CHANNEL_PLAN_MKK1 0x6
568#define EEPROM_CHANNEL_PLAN_ISRAEL 0x7
569#define EEPROM_CHANNEL_PLAN_TELEC 0x8
570#define EEPROM_CHANNEL_PLAN_GLOBAL_DOMAIN 0x9
571#define EEPROM_CHANNEL_PLAN_WORLD_WIDE_13 0xA
572#define EEPROM_CHANNEL_PLAN_NCC 0xB
573#define EEPROM_CHANNEL_PLAN_BY_HW_MASK 0x80
574
575#define EEPROM_CID_DEFAULT 0x0
576#define EEPROM_CID_TOSHIBA 0x4
577#define EEPROM_CID_CCX 0x10
578#define EEPROM_CID_QMI 0x0D
579#define EEPROM_CID_WHQL 0xFE
580
581#define RTL8192_EEPROM_ID 0x8129
582
583#define RTL8190_EEPROM_ID 0x8129
584#define EEPROM_HPON 0x02
585#define EEPROM_CLK 0x06
586#define EEPROM_TESTR 0x08
587
588#define EEPROM_VID 0x0A
589#define EEPROM_DID 0x0C
590#define EEPROM_SVID 0x0E
591#define EEPROM_SMID 0x10
592
593#define EEPROM_MAC_ADDR 0x16
594
595#define EEPROM_CCK_TX_PWR_INX 0x5A
596#define EEPROM_HT40_1S_TX_PWR_INX 0x60
597#define EEPROM_HT40_2S_TX_PWR_INX_DIFF 0x66
598#define EEPROM_HT20_TX_PWR_INX_DIFF 0x69
599#define EEPROM_OFDM_TX_PWR_INX_DIFF 0x6C
600#define EEPROM_HT40_MAX_PWR_OFFSET 0x6F
601#define EEPROM_HT20_MAX_PWR_OFFSET 0x72
602
603#define EEPROM_TSSI_A 0x76
604#define EEPROM_TSSI_B 0x77
605#define EEPROM_THERMAL_METER 0x78
606#define EEPROM_XTAL_K 0x78
607#define EEPROM_RF_OPT1 0x79
608#define EEPROM_RF_OPT2 0x7A
609#define EEPROM_RF_OPT3 0x7B
610#define EEPROM_RF_OPT4 0x7C
611#define EEPROM_CHANNEL_PLAN 0x7D
612#define EEPROM_VERSION 0x7E
613#define EEPROM_CUSTOMER_ID 0x7F
614
615#define EEPROM_PWRDIFF 0x54
616
617#define EEPROM_TXPOWERCCK 0x5A
618#define EEPROM_TXPOWERHT40_1S 0x60
619#define EEPROM_TXPOWERHT40_2SDIFF 0x66
620#define EEPROM_TXPOWERHT20DIFF 0x69
621#define EEPROM_TXPOWER_OFDMDIFF 0x6C
622
623#define EEPROM_TXPWR_GROUP 0x6F
624
625#define EEPROM_TSSI_A 0x76
626#define EEPROM_TSSI_B 0x77
627#define EEPROM_THERMAL_METER 0x78
628
629#define EEPROM_CHANNELPLAN 0x75
630
631#define RF_OPTION1 0x79
632#define RF_OPTION2 0x7A
633#define RF_OPTION3 0x7B
634#define RF_OPTION4 0x7C
635
636#define STOPBECON BIT(6)
637#define STOPHIGHT BIT(5)
638#define STOPMGT BIT(4)
639#define STOPVO BIT(3)
640#define STOPVI BIT(2)
641#define STOPBE BIT(1)
642#define STOPBK BIT(0)
643
644#define RCR_APPFCS BIT(31)
645#define RCR_APP_MIC BIT(30)
646#define RCR_APP_ICV BIT(29)
647#define RCR_APP_PHYST_RXFF BIT(28)
648#define RCR_APP_BA_SSN BIT(27)
649#define RCR_ENMBID BIT(24)
650#define RCR_LSIGEN BIT(23)
651#define RCR_MFBEN BIT(22)
652#define RCR_HTC_LOC_CTRL BIT(14)
653#define RCR_AMF BIT(13)
654#define RCR_ACF BIT(12)
655#define RCR_ADF BIT(11)
656#define RCR_AICV BIT(9)
657#define RCR_ACRC32 BIT(8)
658#define RCR_CBSSID_BCN BIT(7)
659#define RCR_CBSSID_DATA BIT(6)
660#define RCR_CBSSID RCR_CBSSID_DATA
661#define RCR_APWRMGT BIT(5)
662#define RCR_ADD3 BIT(4)
663#define RCR_AB BIT(3)
664#define RCR_AM BIT(2)
665#define RCR_APM BIT(1)
666#define RCR_AAP BIT(0)
667#define RCR_MXDMA_OFFSET 8
668#define RCR_FIFO_OFFSET 13
669
670#define RSV_CTRL 0x001C
671#define RD_CTRL 0x0524
672
673#define REG_USB_INFO 0xFE17
674#define REG_USB_SPECIAL_OPTION 0xFE55
675#define REG_USB_DMA_AGG_TO 0xFE5B
676#define REG_USB_AGG_TO 0xFE5C
677#define REG_USB_AGG_TH 0xFE5D
678
679#define REG_USB_VID 0xFE60
680#define REG_USB_PID 0xFE62
681#define REG_USB_OPTIONAL 0xFE64
682#define REG_USB_CHIRP_K 0xFE65
683#define REG_USB_PHY 0xFE66
684#define REG_USB_MAC_ADDR 0xFE70
685#define REG_USB_HRPWM 0xFE58
686#define REG_USB_HCPWM 0xFE57
687
688#define SW18_FPWM BIT(3)
689
690#define ISO_MD2PP BIT(0)
691#define ISO_UA2USB BIT(1)
692#define ISO_UD2CORE BIT(2)
693#define ISO_PA2PCIE BIT(3)
694#define ISO_PD2CORE BIT(4)
695#define ISO_IP2MAC BIT(5)
696#define ISO_DIOP BIT(6)
697#define ISO_DIOE BIT(7)
698#define ISO_EB2CORE BIT(8)
699#define ISO_DIOR BIT(9)
700
701#define PWC_EV25V BIT(14)
702#define PWC_EV12V BIT(15)
703
704#define FEN_BBRSTB BIT(0)
705#define FEN_BB_GLB_RSTn BIT(1)
706#define FEN_USBA BIT(2)
707#define FEN_UPLL BIT(3)
708#define FEN_USBD BIT(4)
709#define FEN_DIO_PCIE BIT(5)
710#define FEN_PCIEA BIT(6)
711#define FEN_PPLL BIT(7)
712#define FEN_PCIED BIT(8)
713#define FEN_DIOE BIT(9)
714#define FEN_CPUEN BIT(10)
715#define FEN_DCORE BIT(11)
716#define FEN_ELDR BIT(12)
717#define FEN_DIO_RF BIT(13)
718#define FEN_HWPDN BIT(14)
719#define FEN_MREGEN BIT(15)
720
721#define PFM_LDALL BIT(0)
722#define PFM_ALDN BIT(1)
723#define PFM_LDKP BIT(2)
724#define PFM_WOWL BIT(3)
725#define EnPDN BIT(4)
726#define PDN_PL BIT(5)
727#define APFM_ONMAC BIT(8)
728#define APFM_OFF BIT(9)
729#define APFM_RSM BIT(10)
730#define AFSM_HSUS BIT(11)
731#define AFSM_PCIE BIT(12)
732#define APDM_MAC BIT(13)
733#define APDM_HOST BIT(14)
734#define APDM_HPDN BIT(15)
735#define RDY_MACON BIT(16)
736#define SUS_HOST BIT(17)
737#define ROP_ALD BIT(20)
738#define ROP_PWR BIT(21)
739#define ROP_SPS BIT(22)
740#define SOP_MRST BIT(25)
741#define SOP_FUSE BIT(26)
742#define SOP_ABG BIT(27)
743#define SOP_AMB BIT(28)
744#define SOP_RCK BIT(29)
745#define SOP_A8M BIT(30)
746#define XOP_BTCK BIT(31)
747
748#define ANAD16V_EN BIT(0)
749#define ANA8M BIT(1)
750#define MACSLP BIT(4)
751#define LOADER_CLK_EN BIT(5)
752#define _80M_SSC_DIS BIT(7)
753#define _80M_SSC_EN_HO BIT(8)
754#define PHY_SSC_RSTB BIT(9)
755#define SEC_CLK_EN BIT(10)
756#define MAC_CLK_EN BIT(11)
757#define SYS_CLK_EN BIT(12)
758#define RING_CLK_EN BIT(13)
759
760#define BOOT_FROM_EEPROM BIT(4)
761#define EEPROM_EN BIT(5)
762
763#define AFE_BGEN BIT(0)
764#define AFE_MBEN BIT(1)
765#define MAC_ID_EN BIT(7)
766
767#define WLOCK_ALL BIT(0)
768#define WLOCK_00 BIT(1)
769#define WLOCK_04 BIT(2)
770#define WLOCK_08 BIT(3)
771#define WLOCK_40 BIT(4)
772#define R_DIS_PRST_0 BIT(5)
773#define R_DIS_PRST_1 BIT(6)
774#define LOCK_ALL_EN BIT(7)
775
776#define RF_EN BIT(0)
777#define RF_RSTB BIT(1)
778#define RF_SDMRSTB BIT(2)
779
780#define LDA15_EN BIT(0)
781#define LDA15_STBY BIT(1)
782#define LDA15_OBUF BIT(2)
783#define LDA15_REG_VOS BIT(3)
784#define _LDA15_VOADJ(x) (((x) & 0x7) << 4)
785
786#define LDV12_EN BIT(0)
787#define LDV12_SDBY BIT(1)
788#define LPLDO_HSM BIT(2)
789#define LPLDO_LSM_DIS BIT(3)
790#define _LDV12_VADJ(x) (((x) & 0xF) << 4)
791
792#define XTAL_EN BIT(0)
793#define XTAL_BSEL BIT(1)
794#define _XTAL_BOSC(x) (((x) & 0x3) << 2)
795#define _XTAL_CADJ(x) (((x) & 0xF) << 4)
796#define XTAL_GATE_USB BIT(8)
797#define _XTAL_USB_DRV(x) (((x) & 0x3) << 9)
798#define XTAL_GATE_AFE BIT(11)
799#define _XTAL_AFE_DRV(x) (((x) & 0x3) << 12)
800#define XTAL_RF_GATE BIT(14)
801#define _XTAL_RF_DRV(x) (((x) & 0x3) << 15)
802#define XTAL_GATE_DIG BIT(17)
803#define _XTAL_DIG_DRV(x) (((x) & 0x3) << 18)
804#define XTAL_BT_GATE BIT(20)
805#define _XTAL_BT_DRV(x) (((x) & 0x3) << 21)
806#define _XTAL_GPIO(x) (((x) & 0x7) << 23)
807
808#define CKDLY_AFE BIT(26)
809#define CKDLY_USB BIT(27)
810#define CKDLY_DIG BIT(28)
811#define CKDLY_BT BIT(29)
812
813#define APLL_EN BIT(0)
814#define APLL_320_EN BIT(1)
815#define APLL_FREF_SEL BIT(2)
816#define APLL_EDGE_SEL BIT(3)
817#define APLL_WDOGB BIT(4)
818#define APLL_LPFEN BIT(5)
819
820#define APLL_REF_CLK_13MHZ 0x1
821#define APLL_REF_CLK_19_2MHZ 0x2
822#define APLL_REF_CLK_20MHZ 0x3
823#define APLL_REF_CLK_25MHZ 0x4
824#define APLL_REF_CLK_26MHZ 0x5
825#define APLL_REF_CLK_38_4MHZ 0x6
826#define APLL_REF_CLK_40MHZ 0x7
827
828#define APLL_320EN BIT(14)
829#define APLL_80EN BIT(15)
830#define APLL_1MEN BIT(24)
831
832#define ALD_EN BIT(18)
833#define EF_PD BIT(19)
834#define EF_FLAG BIT(31)
835
836#define EF_TRPT BIT(7)
837#define LDOE25_EN BIT(31)
838
839#define RSM_EN BIT(0)
840#define Timer_EN BIT(4)
841
842#define TRSW0EN BIT(2)
843#define TRSW1EN BIT(3)
844#define EROM_EN BIT(4)
845#define EnBT BIT(5)
846#define EnUart BIT(8)
847#define Uart_910 BIT(9)
848#define EnPMAC BIT(10)
849#define SIC_SWRST BIT(11)
850#define EnSIC BIT(12)
851#define SIC_23 BIT(13)
852#define EnHDP BIT(14)
853#define SIC_LBK BIT(15)
854
855#define LED0PL BIT(4)
856#define LED1PL BIT(12)
857#define LED0DIS BIT(7)
858
859#define MCUFWDL_EN BIT(0)
860#define MCUFWDL_RDY BIT(1)
861#define FWDL_ChkSum_rpt BIT(2)
862#define MACINI_RDY BIT(3)
863#define BBINI_RDY BIT(4)
864#define RFINI_RDY BIT(5)
865#define WINTINI_RDY BIT(6)
866#define CPRST BIT(23)
867
868#define XCLK_VLD BIT(0)
869#define ACLK_VLD BIT(1)
870#define UCLK_VLD BIT(2)
871#define PCLK_VLD BIT(3)
872#define PCIRSTB BIT(4)
873#define V15_VLD BIT(5)
874#define TRP_B15V_EN BIT(7)
875#define SIC_IDLE BIT(8)
876#define BD_MAC2 BIT(9)
877#define BD_MAC1 BIT(10)
878#define IC_MACPHY_MODE BIT(11)
879#define PAD_HWPD_IDN BIT(22)
880#define TRP_VAUX_EN BIT(23)
881#define TRP_BT_EN BIT(24)
882#define BD_PKG_SEL BIT(25)
883#define BD_HCI_SEL BIT(26)
884#define TYPE_ID BIT(27)
885
886#define CHIP_VER_RTL_MASK 0xF000
887#define CHIP_VER_RTL_SHIFT 12
888
889#define REG_LBMODE (REG_CR + 3)
890
891#define HCI_TXDMA_EN BIT(0)
892#define HCI_RXDMA_EN BIT(1)
893#define TXDMA_EN BIT(2)
894#define RXDMA_EN BIT(3)
895#define PROTOCOL_EN BIT(4)
896#define SCHEDULE_EN BIT(5)
897#define MACTXEN BIT(6)
898#define MACRXEN BIT(7)
899#define ENSWBCN BIT(8)
900#define ENSEC BIT(9)
901
902#define _NETTYPE(x) (((x) & 0x3) << 16)
903#define MASK_NETTYPE 0x30000
904#define NT_NO_LINK 0x0
905#define NT_LINK_AD_HOC 0x1
906#define NT_LINK_AP 0x2
907#define NT_AS_AP 0x3
908
909#define _LBMODE(x) (((x) & 0xF) << 24)
910#define MASK_LBMODE 0xF000000
911#define LOOPBACK_NORMAL 0x0
912#define LOOPBACK_IMMEDIATELY 0xB
913#define LOOPBACK_MAC_DELAY 0x3
914#define LOOPBACK_PHY 0x1
915#define LOOPBACK_DMA 0x7
916
917#define GET_RX_PAGE_SIZE(value) ((value) & 0xF)
918#define GET_TX_PAGE_SIZE(value) (((value) & 0xF0) >> 4)
919#define _PSRX_MASK 0xF
920#define _PSTX_MASK 0xF0
921#define _PSRX(x) (x)
922#define _PSTX(x) ((x) << 4)
923
924#define PBP_64 0x0
925#define PBP_128 0x1
926#define PBP_256 0x2
927#define PBP_512 0x3
928#define PBP_1024 0x4
929
930#define RXDMA_ARBBW_EN BIT(0)
931#define RXSHFT_EN BIT(1)
932#define RXDMA_AGG_EN BIT(2)
933#define QS_VO_QUEUE BIT(8)
934#define QS_VI_QUEUE BIT(9)
935#define QS_BE_QUEUE BIT(10)
936#define QS_BK_QUEUE BIT(11)
937#define QS_MANAGER_QUEUE BIT(12)
938#define QS_HIGH_QUEUE BIT(13)
939
940#define HQSEL_VOQ BIT(0)
941#define HQSEL_VIQ BIT(1)
942#define HQSEL_BEQ BIT(2)
943#define HQSEL_BKQ BIT(3)
944#define HQSEL_MGTQ BIT(4)
945#define HQSEL_HIQ BIT(5)
946
947#define _TXDMA_HIQ_MAP(x) (((x)&0x3) << 14)
948#define _TXDMA_MGQ_MAP(x) (((x)&0x3) << 12)
949#define _TXDMA_BKQ_MAP(x) (((x)&0x3) << 10)
950#define _TXDMA_BEQ_MAP(x) (((x)&0x3) << 8)
951#define _TXDMA_VIQ_MAP(x) (((x)&0x3) << 6)
952#define _TXDMA_VOQ_MAP(x) (((x)&0x3) << 4)
953
954#define QUEUE_LOW 1
955#define QUEUE_NORMAL 2
956#define QUEUE_HIGH 3
957
958#define _LLT_NO_ACTIVE 0x0
959#define _LLT_WRITE_ACCESS 0x1
960#define _LLT_READ_ACCESS 0x2
961
962#define _LLT_INIT_DATA(x) ((x) & 0xFF)
963#define _LLT_INIT_ADDR(x) (((x) & 0xFF) << 8)
964#define _LLT_OP(x) (((x) & 0x3) << 30)
965#define _LLT_OP_VALUE(x) (((x) >> 30) & 0x3)
966
967#define BB_WRITE_READ_MASK (BIT(31) | BIT(30))
968#define BB_WRITE_EN BIT(30)
969#define BB_READ_EN BIT(31)
970
971#define _HPQ(x) ((x) & 0xFF)
972#define _LPQ(x) (((x) & 0xFF) << 8)
973#define _PUBQ(x) (((x) & 0xFF) << 16)
974#define _NPQ(x) ((x) & 0xFF)
975
976#define HPQ_PUBLIC_DIS BIT(24)
977#define LPQ_PUBLIC_DIS BIT(25)
978#define LD_RQPN BIT(31)
979
980#define BCN_VALID BIT(16)
981#define BCN_HEAD(x) (((x) & 0xFF) << 8)
982#define BCN_HEAD_MASK 0xFF00
983
984#define BLK_DESC_NUM_SHIFT 4
985#define BLK_DESC_NUM_MASK 0xF
986
987#define DROP_DATA_EN BIT(9)
988
989#define EN_AMPDU_RTY_NEW BIT(7)
990
991#define _INIRTSMCS_SEL(x) ((x) & 0x3F)
992
993#define _SPEC_SIFS_CCK(x) ((x) & 0xFF)
994#define _SPEC_SIFS_OFDM(x) (((x) & 0xFF) << 8)
995
996#define RATE_REG_BITMAP_ALL 0xFFFFF
997
998#define _RRSC_BITMAP(x) ((x) & 0xFFFFF)
999
1000#define _RRSR_RSC(x) (((x) & 0x3) << 21)
1001#define RRSR_RSC_RESERVED 0x0
1002#define RRSR_RSC_UPPER_SUBCHANNEL 0x1
1003#define RRSR_RSC_LOWER_SUBCHANNEL 0x2
1004#define RRSR_RSC_DUPLICATE_MODE 0x3
1005
1006#define USE_SHORT_G1 BIT(20)
1007
1008#define _AGGLMT_MCS0(x) ((x) & 0xF)
1009#define _AGGLMT_MCS1(x) (((x) & 0xF) << 4)
1010#define _AGGLMT_MCS2(x) (((x) & 0xF) << 8)
1011#define _AGGLMT_MCS3(x) (((x) & 0xF) << 12)
1012#define _AGGLMT_MCS4(x) (((x) & 0xF) << 16)
1013#define _AGGLMT_MCS5(x) (((x) & 0xF) << 20)
1014#define _AGGLMT_MCS6(x) (((x) & 0xF) << 24)
1015#define _AGGLMT_MCS7(x) (((x) & 0xF) << 28)
1016
1017#define RETRY_LIMIT_SHORT_SHIFT 8
1018#define RETRY_LIMIT_LONG_SHIFT 0
1019
1020#define _DARF_RC1(x) ((x) & 0x1F)
1021#define _DARF_RC2(x) (((x) & 0x1F) << 8)
1022#define _DARF_RC3(x) (((x) & 0x1F) << 16)
1023#define _DARF_RC4(x) (((x) & 0x1F) << 24)
1024#define _DARF_RC5(x) ((x) & 0x1F)
1025#define _DARF_RC6(x) (((x) & 0x1F) << 8)
1026#define _DARF_RC7(x) (((x) & 0x1F) << 16)
1027#define _DARF_RC8(x) (((x) & 0x1F) << 24)
1028
1029#define _RARF_RC1(x) ((x) & 0x1F)
1030#define _RARF_RC2(x) (((x) & 0x1F) << 8)
1031#define _RARF_RC3(x) (((x) & 0x1F) << 16)
1032#define _RARF_RC4(x) (((x) & 0x1F) << 24)
1033#define _RARF_RC5(x) ((x) & 0x1F)
1034#define _RARF_RC6(x) (((x) & 0x1F) << 8)
1035#define _RARF_RC7(x) (((x) & 0x1F) << 16)
1036#define _RARF_RC8(x) (((x) & 0x1F) << 24)
1037
1038#define AC_PARAM_TXOP_LIMIT_OFFSET 16
1039#define AC_PARAM_ECW_MAX_OFFSET 12
1040#define AC_PARAM_ECW_MIN_OFFSET 8
1041#define AC_PARAM_AIFS_OFFSET 0
1042
1043#define _AIFS(x) (x)
1044#define _ECW_MAX_MIN(x) ((x) << 8)
1045#define _TXOP_LIMIT(x) ((x) << 16)
1046
1047#define _BCNIFS(x) ((x) & 0xFF)
1048#define _BCNECW(x) ((((x) & 0xF)) << 8)
1049
1050#define _LRL(x) ((x) & 0x3F)
1051#define _SRL(x) (((x) & 0x3F) << 8)
1052
1053#define _SIFS_CCK_CTX(x) ((x) & 0xFF)
1054#define _SIFS_CCK_TRX(x) (((x) & 0xFF) << 8)
1055
1056#define _SIFS_OFDM_CTX(x) ((x) & 0xFF)
1057#define _SIFS_OFDM_TRX(x) (((x) & 0xFF) << 8)
1058
1059#define _TBTT_PROHIBIT_HOLD(x) (((x) & 0xFF) << 8)
1060
1061#define DIS_EDCA_CNT_DWN BIT(11)
1062
1063#define EN_MBSSID BIT(1)
1064#define EN_TXBCN_RPT BIT(2)
1065#define EN_BCN_FUNCTION BIT(3)
1066
1067#define TSFTR_RST BIT(0)
1068#define TSFTR1_RST BIT(1)
1069
1070#define STOP_BCNQ BIT(6)
1071
1072#define DIS_TSF_UDT0_NORMAL_CHIP BIT(4)
1073#define DIS_TSF_UDT0_TEST_CHIP BIT(5)
1074
1075#define AcmHw_HwEn BIT(0)
1076#define AcmHw_BeqEn BIT(1)
1077#define AcmHw_ViqEn BIT(2)
1078#define AcmHw_VoqEn BIT(3)
1079#define AcmHw_BeqStatus BIT(4)
1080#define AcmHw_ViqStatus BIT(5)
1081#define AcmHw_VoqStatus BIT(6)
1082
1083#define APSDOFF BIT(6)
1084#define APSDOFF_STATUS BIT(7)
1085
1086#define BW_20MHZ BIT(2)
1087
1088#define RATE_BITMAP_ALL 0xFFFFF
1089
1090#define RATE_RRSR_CCK_ONLY_1M 0xFFFF1
1091
1092#define TSFRST BIT(0)
1093#define DIS_GCLK BIT(1)
1094#define PAD_SEL BIT(2)
1095#define PWR_ST BIT(6)
1096#define PWRBIT_OW_EN BIT(7)
1097#define ACRC BIT(8)
1098#define CFENDFORM BIT(9)
1099#define ICV BIT(10)
1100
1101#define AAP BIT(0)
1102#define APM BIT(1)
1103#define AM BIT(2)
1104#define AB BIT(3)
1105#define ADD3 BIT(4)
1106#define APWRMGT BIT(5)
1107#define CBSSID BIT(6)
1108#define CBSSID_DATA BIT(6)
1109#define CBSSID_BCN BIT(7)
1110#define ACRC32 BIT(8)
1111#define AICV BIT(9)
1112#define ADF BIT(11)
1113#define ACF BIT(12)
1114#define AMF BIT(13)
1115#define HTC_LOC_CTRL BIT(14)
1116#define UC_DATA_EN BIT(16)
1117#define BM_DATA_EN BIT(17)
1118#define MFBEN BIT(22)
1119#define LSIGEN BIT(23)
1120#define EnMBID BIT(24)
1121#define APP_BASSN BIT(27)
1122#define APP_PHYSTS BIT(28)
1123#define APP_ICV BIT(29)
1124#define APP_MIC BIT(30)
1125#define APP_FCS BIT(31)
1126
1127#define _MIN_SPACE(x) ((x) & 0x7)
1128#define _SHORT_GI_PADDING(x) (((x) & 0x1F) << 3)
1129
1130#define RXERR_TYPE_OFDM_PPDU 0
1131#define RXERR_TYPE_OFDM_FALSE_ALARM 1
1132#define RXERR_TYPE_OFDM_MPDU_OK 2
1133#define RXERR_TYPE_OFDM_MPDU_FAIL 3
1134#define RXERR_TYPE_CCK_PPDU 4
1135#define RXERR_TYPE_CCK_FALSE_ALARM 5
1136#define RXERR_TYPE_CCK_MPDU_OK 6
1137#define RXERR_TYPE_CCK_MPDU_FAIL 7
1138#define RXERR_TYPE_HT_PPDU 8
1139#define RXERR_TYPE_HT_FALSE_ALARM 9
1140#define RXERR_TYPE_HT_MPDU_TOTAL 10
1141#define RXERR_TYPE_HT_MPDU_OK 11
1142#define RXERR_TYPE_HT_MPDU_FAIL 12
1143#define RXERR_TYPE_RX_FULL_DROP 15
1144
1145#define RXERR_COUNTER_MASK 0xFFFFF
1146#define RXERR_RPT_RST BIT(27)
1147#define _RXERR_RPT_SEL(type) ((type) << 28)
1148
1149#define SCR_TxUseDK BIT(0)
1150#define SCR_RxUseDK BIT(1)
1151#define SCR_TxEncEnable BIT(2)
1152#define SCR_RxDecEnable BIT(3)
1153#define SCR_SKByA2 BIT(4)
1154#define SCR_NoSKMC BIT(5)
1155#define SCR_TXBCUSEDK BIT(6)
1156#define SCR_RXBCUSEDK BIT(7)
1157
1158#define USB_IS_HIGH_SPEED 0
1159#define USB_IS_FULL_SPEED 1
1160#define USB_SPEED_MASK BIT(5)
1161
1162#define USB_NORMAL_SIE_EP_MASK 0xF
1163#define USB_NORMAL_SIE_EP_SHIFT 4
1164
1165#define USB_TEST_EP_MASK 0x30
1166#define USB_TEST_EP_SHIFT 4
1167
1168#define USB_AGG_EN BIT(3)
1169
1170#define MAC_ADDR_LEN 6
1171#define LAST_ENTRY_OF_TX_PKT_BUFFER 255
1172
1173#define POLLING_LLT_THRESHOLD 20
1174#define POLLING_READY_TIMEOUT_COUNT 1000
1175
1176#define MAX_MSS_DENSITY_2T 0x13
1177#define MAX_MSS_DENSITY_1T 0x0A
1178
1179#define EPROM_CMD_OPERATING_MODE_MASK ((1<<7)|(1<<6))
1180#define EPROM_CMD_CONFIG 0x3
1181#define EPROM_CMD_LOAD 1
1182
1183#define HWSET_MAX_SIZE_92S HWSET_MAX_SIZE
1184
1185#define HAL_8192C_HW_GPIO_WPS_BIT BIT(2)
1186
1187#define RPMAC_RESET 0x100
1188#define RPMAC_TXSTART 0x104
1189#define RPMAC_TXLEGACYSIG 0x108
1190#define RPMAC_TXHTSIG1 0x10c
1191#define RPMAC_TXHTSIG2 0x110
1192#define RPMAC_PHYDEBUG 0x114
1193#define RPMAC_TXPACKETNUM 0x118
1194#define RPMAC_TXIDLE 0x11c
1195#define RPMAC_TXMACHEADER0 0x120
1196#define RPMAC_TXMACHEADER1 0x124
1197#define RPMAC_TXMACHEADER2 0x128
1198#define RPMAC_TXMACHEADER3 0x12c
1199#define RPMAC_TXMACHEADER4 0x130
1200#define RPMAC_TXMACHEADER5 0x134
1201#define RPMAC_TXDADATYPE 0x138
1202#define RPMAC_TXRANDOMSEED 0x13c
1203#define RPMAC_CCKPLCPPREAMBLE 0x140
1204#define RPMAC_CCKPLCPHEADER 0x144
1205#define RPMAC_CCKCRC16 0x148
1206#define RPMAC_OFDMRXCRC32OK 0x170
1207#define RPMAC_OFDMRXCRC32Er 0x174
1208#define RPMAC_OFDMRXPARITYER 0x178
1209#define RPMAC_OFDMRXCRC8ER 0x17c
1210#define RPMAC_CCKCRXRC16ER 0x180
1211#define RPMAC_CCKCRXRC32ER 0x184
1212#define RPMAC_CCKCRXRC32OK 0x188
1213#define RPMAC_TXSTATUS 0x18c
1214
1215#define RFPGA0_RFMOD 0x800
1216
1217#define RFPGA0_TXINFO 0x804
1218#define RFPGA0_PSDFUNCTION 0x808
1219
1220#define RFPGA0_TXGAINSTAGE 0x80c
1221
1222#define RFPGA0_RFTIMING1 0x810
1223#define RFPGA0_RFTIMING2 0x814
1224
1225#define RFPGA0_XA_HSSIPARAMETER1 0x820
1226#define RFPGA0_XA_HSSIPARAMETER2 0x824
1227#define RFPGA0_XB_HSSIPARAMETER1 0x828
1228#define RFPGA0_XB_HSSIPARAMETER2 0x82c
1229
1230#define RFPGA0_XA_LSSIPARAMETER 0x840
1231#define RFPGA0_XB_LSSIPARAMETER 0x844
1232
1233#define RFPGA0_RFWAKEUPPARAMETER 0x850
1234#define RFPGA0_RFSLEEPUPPARAMETER 0x854
1235
1236#define RFPGA0_XAB_SWITCHCONTROL 0x858
1237#define RFPGA0_XCD_SWITCHCONTROL 0x85c
1238
1239#define RFPGA0_XA_RFINTERFACEOE 0x860
1240#define RFPGA0_XB_RFINTERFACEOE 0x864
1241
1242#define RFPGA0_XAB_RFINTERFACESW 0x870
1243#define RFPGA0_XCD_RFINTERFACESW 0x874
1244
1245#define rFPGA0_XAB_RFPARAMETER 0x878
1246#define rFPGA0_XCD_RFPARAMETER 0x87c
1247
1248#define RFPGA0_ANALOGPARAMETER1 0x880
1249#define RFPGA0_ANALOGPARAMETER2 0x884
1250#define RFPGA0_ANALOGPARAMETER3 0x888
1251#define RFPGA0_ANALOGPARAMETER4 0x88c
1252
1253#define RFPGA0_XA_LSSIREADBACK 0x8a0
1254#define RFPGA0_XB_LSSIREADBACK 0x8a4
1255#define RFPGA0_XC_LSSIREADBACK 0x8a8
1256#define RFPGA0_XD_LSSIREADBACK 0x8ac
1257
1258#define RFPGA0_PSDREPORT 0x8b4
1259#define TRANSCEIVEA_HSPI_READBACK 0x8b8
1260#define TRANSCEIVEB_HSPI_READBACK 0x8bc
1261#define RFPGA0_XAB_RFINTERFACERB 0x8e0
1262#define RFPGA0_XCD_RFINTERFACERB 0x8e4
1263
1264#define RFPGA1_RFMOD 0x900
1265
1266#define RFPGA1_TXBLOCK 0x904
1267#define RFPGA1_DEBUGSELECT 0x908
1268#define RFPGA1_TXINFO 0x90c
1269
1270#define RCCK0_SYSTEM 0xa00
1271
1272#define RCCK0_AFESETTING 0xa04
1273#define RCCK0_CCA 0xa08
1274
1275#define RCCK0_RXAGC1 0xa0c
1276#define RCCK0_RXAGC2 0xa10
1277
1278#define RCCK0_RXHP 0xa14
1279
1280#define RCCK0_DSPPARAMETER1 0xa18
1281#define RCCK0_DSPPARAMETER2 0xa1c
1282
1283#define RCCK0_TXFILTER1 0xa20
1284#define RCCK0_TXFILTER2 0xa24
1285#define RCCK0_DEBUGPORT 0xa28
1286#define RCCK0_FALSEALARMREPORT 0xa2c
1287#define RCCK0_TRSSIREPORT 0xa50
1288#define RCCK0_RXREPORT 0xa54
1289#define RCCK0_FACOUNTERLOWER 0xa5c
1290#define RCCK0_FACOUNTERUPPER 0xa58
1291
1292#define ROFDM0_LSTF 0xc00
1293
1294#define ROFDM0_TRXPATHENABLE 0xc04
1295#define ROFDM0_TRMUXPAR 0xc08
1296#define ROFDM0_TRSWISOLATION 0xc0c
1297
1298#define ROFDM0_XARXAFE 0xc10
1299#define ROFDM0_XARXIQIMBALANCE 0xc14
1300#define ROFDM0_XBRXAFE 0xc18
1301#define ROFDM0_XBRXIQIMBALANCE 0xc1c
1302#define ROFDM0_XCRXAFE 0xc20
1303#define ROFDM0_XCRXIQIMBANLANCE 0xc24
1304#define ROFDM0_XDRXAFE 0xc28
1305#define ROFDM0_XDRXIQIMBALANCE 0xc2c
1306
1307#define ROFDM0_RXDETECTOR1 0xc30
1308#define ROFDM0_RXDETECTOR2 0xc34
1309#define ROFDM0_RXDETECTOR3 0xc38
1310#define ROFDM0_RXDETECTOR4 0xc3c
1311
1312#define ROFDM0_RXDSP 0xc40
1313#define ROFDM0_CFOANDDAGC 0xc44
1314#define ROFDM0_CCADROPTHRESHOLD 0xc48
1315#define ROFDM0_ECCATHRESHOLD 0xc4c
1316
1317#define ROFDM0_XAAGCCORE1 0xc50
1318#define ROFDM0_XAAGCCORE2 0xc54
1319#define ROFDM0_XBAGCCORE1 0xc58
1320#define ROFDM0_XBAGCCORE2 0xc5c
1321#define ROFDM0_XCAGCCORE1 0xc60
1322#define ROFDM0_XCAGCCORE2 0xc64
1323#define ROFDM0_XDAGCCORE1 0xc68
1324#define ROFDM0_XDAGCCORE2 0xc6c
1325
1326#define ROFDM0_AGCPARAMETER1 0xc70
1327#define ROFDM0_AGCPARAMETER2 0xc74
1328#define ROFDM0_AGCRSSITABLE 0xc78
1329#define ROFDM0_HTSTFAGC 0xc7c
1330
1331#define ROFDM0_XATXIQIMBALANCE 0xc80
1332#define ROFDM0_XATXAFE 0xc84
1333#define ROFDM0_XBTXIQIMBALANCE 0xc88
1334#define ROFDM0_XBTXAFE 0xc8c
1335#define ROFDM0_XCTXIQIMBALANCE 0xc90
1336#define ROFDM0_XCTXAFE 0xc94
1337#define ROFDM0_XDTXIQIMBALANCE 0xc98
1338#define ROFDM0_XDTXAFE 0xc9c
1339
1340#define ROFDM0_RXIQEXTANTA 0xca0
1341
1342#define ROFDM0_RXHPPARAMETER 0xce0
1343#define ROFDM0_TXPSEUDONOISEWGT 0xce4
1344#define ROFDM0_FRAMESYNC 0xcf0
1345#define ROFDM0_DFSREPORT 0xcf4
1346#define ROFDM0_TXCOEFF1 0xca4
1347#define ROFDM0_TXCOEFF2 0xca8
1348#define ROFDM0_TXCOEFF3 0xcac
1349#define ROFDM0_TXCOEFF4 0xcb0
1350#define ROFDM0_TXCOEFF5 0xcb4
1351#define ROFDM0_TXCOEFF6 0xcb8
1352
1353#define ROFDM1_LSTF 0xd00
1354#define ROFDM1_TRXPATHENABLE 0xd04
1355
1356#define ROFDM1_CF0 0xd08
1357#define ROFDM1_CSI1 0xd10
1358#define ROFDM1_SBD 0xd14
1359#define ROFDM1_CSI2 0xd18
1360#define ROFDM1_CFOTRACKING 0xd2c
1361#define ROFDM1_TRXMESAURE1 0xd34
1362#define ROFDM1_INTFDET 0xd3c
1363#define ROFDM1_PSEUDONOISESTATEAB 0xd50
1364#define ROFDM1_PSEUDONOISESTATECD 0xd54
1365#define ROFDM1_RXPSEUDONOISEWGT 0xd58
1366
1367#define ROFDM_PHYCOUNTER1 0xda0
1368#define ROFDM_PHYCOUNTER2 0xda4
1369#define ROFDM_PHYCOUNTER3 0xda8
1370
1371#define ROFDM_SHORTCFOAB 0xdac
1372#define ROFDM_SHORTCFOCD 0xdb0
1373#define ROFDM_LONGCFOAB 0xdb4
1374#define ROFDM_LONGCFOCD 0xdb8
1375#define ROFDM_TAILCF0AB 0xdbc
1376#define ROFDM_TAILCF0CD 0xdc0
1377#define ROFDM_PWMEASURE1 0xdc4
1378#define ROFDM_PWMEASURE2 0xdc8
1379#define ROFDM_BWREPORT 0xdcc
1380#define ROFDM_AGCREPORT 0xdd0
1381#define ROFDM_RXSNR 0xdd4
1382#define ROFDM_RXEVMCSI 0xdd8
1383#define ROFDM_SIGREPORT 0xddc
1384
1385#define RTXAGC_A_RATE18_06 0xe00
1386#define RTXAGC_A_RATE54_24 0xe04
1387#define RTXAGC_A_CCK1_MCS32 0xe08
1388#define RTXAGC_A_MCS03_MCS00 0xe10
1389#define RTXAGC_A_MCS07_MCS04 0xe14
1390#define RTXAGC_A_MCS11_MCS08 0xe18
1391#define RTXAGC_A_MCS15_MCS12 0xe1c
1392
1393#define RTXAGC_B_RATE18_06 0x830
1394#define RTXAGC_B_RATE54_24 0x834
1395#define RTXAGC_B_CCK1_55_MCS32 0x838
1396#define RTXAGC_B_MCS03_MCS00 0x83c
1397#define RTXAGC_B_MCS07_MCS04 0x848
1398#define RTXAGC_B_MCS11_MCS08 0x84c
1399#define RTXAGC_B_MCS15_MCS12 0x868
1400#define RTXAGC_B_CCK11_A_CCK2_11 0x86c
1401
1402#define RZEBRA1_HSSIENABLE 0x0
1403#define RZEBRA1_TRXENABLE1 0x1
1404#define RZEBRA1_TRXENABLE2 0x2
1405#define RZEBRA1_AGC 0x4
1406#define RZEBRA1_CHARGEPUMP 0x5
1407#define RZEBRA1_CHANNEL 0x7
1408
1409#define RZEBRA1_TXGAIN 0x8
1410#define RZEBRA1_TXLPF 0x9
1411#define RZEBRA1_RXLPF 0xb
1412#define RZEBRA1_RXHPFCORNER 0xc
1413
1414#define RGLOBALCTRL 0
1415#define RRTL8256_TXLPF 19
1416#define RRTL8256_RXLPF 11
1417#define RRTL8258_TXLPF 0x11
1418#define RRTL8258_RXLPF 0x13
1419#define RRTL8258_RSSILPF 0xa
1420
1421#define RF_AC 0x00
1422
1423#define RF_IQADJ_G1 0x01
1424#define RF_IQADJ_G2 0x02
1425#define RF_POW_TRSW 0x05
1426
1427#define RF_GAIN_RX 0x06
1428#define RF_GAIN_TX 0x07
1429
1430#define RF_TXM_IDAC 0x08
1431#define RF_BS_IQGEN 0x0F
1432
1433#define RF_MODE1 0x10
1434#define RF_MODE2 0x11
1435
1436#define RF_RX_AGC_HP 0x12
1437#define RF_TX_AGC 0x13
1438#define RF_BIAS 0x14
1439#define RF_IPA 0x15
1440#define RF_POW_ABILITY 0x17
1441#define RF_MODE_AG 0x18
1442#define RRFCHANNEL 0x18
1443#define RF_CHNLBW 0x18
1444#define RF_TOP 0x19
1445
1446#define RF_RX_G1 0x1A
1447#define RF_RX_G2 0x1B
1448
1449#define RF_RX_BB2 0x1C
1450#define RF_RX_BB1 0x1D
1451
1452#define RF_RCK1 0x1E
1453#define RF_RCK2 0x1F
1454
1455#define RF_TX_G1 0x20
1456#define RF_TX_G2 0x21
1457#define RF_TX_G3 0x22
1458
1459#define RF_TX_BB1 0x23
1460#define RF_T_METER 0x24
1461
1462#define RF_SYN_G1 0x25
1463#define RF_SYN_G2 0x26
1464#define RF_SYN_G3 0x27
1465#define RF_SYN_G4 0x28
1466#define RF_SYN_G5 0x29
1467#define RF_SYN_G6 0x2A
1468#define RF_SYN_G7 0x2B
1469#define RF_SYN_G8 0x2C
1470
1471#define RF_RCK_OS 0x30
1472#define RF_TXPA_G1 0x31
1473#define RF_TXPA_G2 0x32
1474#define RF_TXPA_G3 0x33
1475
1476#define BBBRESETB 0x100
1477#define BGLOBALRESETB 0x200
1478#define BOFDMTXSTART 0x4
1479#define BCCKTXSTART 0x8
1480#define BCRC32DEBUG 0x100
1481#define BPMACLOOPBACK 0x10
1482#define BTXLSIG 0xffffff
1483#define BOFDMTXRATE 0xf
1484#define BOFDMTXRESERVED 0x10
1485#define BOFDMTXLENGTH 0x1ffe0
1486#define BOFDMTXPARITY 0x20000
1487#define BTXHTSIG1 0xffffff
1488#define BTXHTMCSRATE 0x7f
1489#define BTXHTBW 0x80
1490#define BTXHTLENGTH 0xffff00
1491#define BTXHTSIG2 0xffffff
1492#define BTXHTSMOOTHING 0x1
1493#define BTXHTSOUNDING 0x2
1494#define BTXHTRESERVED 0x4
1495#define BTXHTAGGREATION 0x8
1496#define BTXHTSTBC 0x30
1497#define BTXHTADVANCECODING 0x40
1498#define BTXHTSHORTGI 0x80
1499#define BTXHTNUMBERHT_LTF 0x300
1500#define BTXHTCRC8 0x3fc00
1501#define BCOUNTERRESET 0x10000
1502#define BNUMOFOFDMTX 0xffff
1503#define BNUMOFCCKTX 0xffff0000
1504#define BTXIDLEINTERVAL 0xffff
1505#define BOFDMSERVICE 0xffff0000
1506#define BTXMACHEADER 0xffffffff
1507#define BTXDATAINIT 0xff
1508#define BTXHTMODE 0x100
1509#define BTXDATATYPE 0x30000
1510#define BTXRANDOMSEED 0xffffffff
1511#define BCCKTXPREAMBLE 0x1
1512#define BCCKTXSFD 0xffff0000
1513#define BCCKTXSIG 0xff
1514#define BCCKTXSERVICE 0xff00
1515#define BCCKLENGTHEXT 0x8000
1516#define BCCKTXLENGHT 0xffff0000
1517#define BCCKTXCRC16 0xffff
1518#define BCCKTXSTATUS 0x1
1519#define BOFDMTXSTATUS 0x2
1520#define IS_BB_REG_OFFSET_92S(_Offset) \
1521 ((_Offset >= 0x800) && (_Offset <= 0xfff))
1522
1523#define BRFMOD 0x1
1524#define BJAPANMODE 0x2
1525#define BCCKTXSC 0x30
1526#define BCCKEN 0x1000000
1527#define BOFDMEN 0x2000000
1528
1529#define BOFDMRXADCPHASE 0x10000
1530#define BOFDMTXDACPHASE 0x40000
1531#define BXATXAGC 0x3f
1532
1533#define BXBTXAGC 0xf00
1534#define BXCTXAGC 0xf000
1535#define BXDTXAGC 0xf0000
1536
1537#define BPASTART 0xf0000000
1538#define BTRSTART 0x00f00000
1539#define BRFSTART 0x0000f000
1540#define BBBSTART 0x000000f0
1541#define BBBCCKSTART 0x0000000f
1542#define BPAEND 0xf
1543#define BTREND 0x0f000000
1544#define BRFEND 0x000f0000
1545#define BCCAMASK 0x000000f0
1546#define BR2RCCAMASK 0x00000f00
1547#define BHSSI_R2TDELAY 0xf8000000
1548#define BHSSI_T2RDELAY 0xf80000
1549#define BCONTXHSSI 0x400
1550#define BIGFROMCCK 0x200
1551#define BAGCADDRESS 0x3f
1552#define BRXHPTX 0x7000
1553#define BRXHP2RX 0x38000
1554#define BRXHPCCKINI 0xc0000
1555#define BAGCTXCODE 0xc00000
1556#define BAGCRXCODE 0x300000
1557
1558#define B3WIREDATALENGTH 0x800
1559#define B3WIREADDREAALENGTH 0x400
1560
1561#define B3WIRERFPOWERDOWN 0x1
1562#define B5GPAPEPOLARITY 0x40000000
1563#define B2GPAPEPOLARITY 0x80000000
1564#define BRFSW_TXDEFAULTANT 0x3
1565#define BRFSW_TXOPTIONANT 0x30
1566#define BRFSW_RXDEFAULTANT 0x300
1567#define BRFSW_RXOPTIONANT 0x3000
1568#define BRFSI_3WIREDATA 0x1
1569#define BRFSI_3WIRECLOCK 0x2
1570#define BRFSI_3WIRELOAD 0x4
1571#define BRFSI_3WIRERW 0x8
1572#define BRFSI_3WIRE 0xf
1573
1574#define BRFSI_RFENV 0x10
1575
1576#define BRFSI_TRSW 0x20
1577#define BRFSI_TRSWB 0x40
1578#define BRFSI_ANTSW 0x100
1579#define BRFSI_ANTSWB 0x200
1580#define BRFSI_PAPE 0x400
1581#define BRFSI_PAPE5G 0x800
1582#define BBANDSELECT 0x1
1583#define BHTSIG2_GI 0x80
1584#define BHTSIG2_SMOOTHING 0x01
1585#define BHTSIG2_SOUNDING 0x02
1586#define BHTSIG2_AGGREATON 0x08
1587#define BHTSIG2_STBC 0x30
1588#define BHTSIG2_ADVCODING 0x40
1589#define BHTSIG2_NUMOFHTLTF 0x300
1590#define BHTSIG2_CRC8 0x3fc
1591#define BHTSIG1_MCS 0x7f
1592#define BHTSIG1_BANDWIDTH 0x80
1593#define BHTSIG1_HTLENGTH 0xffff
1594#define BLSIG_RATE 0xf
1595#define BLSIG_RESERVED 0x10
1596#define BLSIG_LENGTH 0x1fffe
1597#define BLSIG_PARITY 0x20
1598#define BCCKRXPHASE 0x4
1599
1600#define BLSSIREADADDRESS 0x7f800000
1601#define BLSSIREADEDGE 0x80000000
1602
1603#define BLSSIREADBACKDATA 0xfffff
1604
1605#define BLSSIREADOKFLAG 0x1000
1606#define BCCKSAMPLERATE 0x8
1607#define BREGULATOR0STANDBY 0x1
1608#define BREGULATORPLLSTANDBY 0x2
1609#define BREGULATOR1STANDBY 0x4
1610#define BPLLPOWERUP 0x8
1611#define BDPLLPOWERUP 0x10
1612#define BDA10POWERUP 0x20
1613#define BAD7POWERUP 0x200
1614#define BDA6POWERUP 0x2000
1615#define BXTALPOWERUP 0x4000
1616#define B40MDCLKPOWERUP 0x8000
1617#define BDA6DEBUGMODE 0x20000
1618#define BDA6SWING 0x380000
1619
1620#define BADCLKPHASE 0x4000000
1621#define B80MCLKDELAY 0x18000000
1622#define BAFEWATCHDOGENABLE 0x20000000
1623
1624#define BXTALCAP01 0xc0000000
1625#define BXTALCAP23 0x3
1626#define BXTALCAP92X 0x0f000000
1627#define BXTALCAP 0x0f000000
1628
1629#define BINTDIFCLKENABLE 0x400
1630#define BEXTSIGCLKENABLE 0x800
1631#define BBANDGAP_MBIAS_POWERUP 0x10000
1632#define BAD11SH_GAIN 0xc0000
1633#define BAD11NPUT_RANGE 0x700000
1634#define BAD110P_CURRENT 0x3800000
1635#define BLPATH_LOOPBACK 0x4000000
1636#define BQPATH_LOOPBACK 0x8000000
1637#define BAFE_LOOPBACK 0x10000000
1638#define BDA10_SWING 0x7e0
1639#define BDA10_REVERSE 0x800
1640#define BDA_CLK_SOURCE 0x1000
1641#define BDA7INPUT_RANGE 0x6000
1642#define BDA7_GAIN 0x38000
1643#define BDA7OUTPUT_CM_MODE 0x40000
1644#define BDA7INPUT_CM_MODE 0x380000
1645#define BDA7CURRENT 0xc00000
1646#define BREGULATOR_ADJUST 0x7000000
1647#define BAD11POWERUP_ATTX 0x1
1648#define BDA10PS_ATTX 0x10
1649#define BAD11POWERUP_ATRX 0x100
1650#define BDA10PS_ATRX 0x1000
1651#define BCCKRX_AGC_FORMAT 0x200
1652#define BPSDFFT_SAMPLE_POINT 0xc000
1653#define BPSD_AVERAGE_NUM 0x3000
1654#define BIQPATH_CONTROL 0xc00
1655#define BPSD_FREQ 0x3ff
1656#define BPSD_ANTENNA_PATH 0x30
1657#define BPSD_IQ_SWITCH 0x40
1658#define BPSD_RX_TRIGGER 0x400000
1659#define BPSD_TX_TRIGGER 0x80000000
1660#define BPSD_SINE_TONE_SCALE 0x7f000000
1661#define BPSD_REPORT 0xffff
1662
1663#define BOFDM_TXSC 0x30000000
1664#define BCCK_TXON 0x1
1665#define BOFDM_TXON 0x2
1666#define BDEBUG_PAGE 0xfff
1667#define BDEBUG_ITEM 0xff
1668#define BANTL 0x10
1669#define BANT_NONHT 0x100
1670#define BANT_HT1 0x1000
1671#define BANT_HT2 0x10000
1672#define BANT_HT1S1 0x100000
1673#define BANT_NONHTS1 0x1000000
1674
1675#define BCCK_BBMODE 0x3
1676#define BCCK_TXPOWERSAVING 0x80
1677#define BCCK_RXPOWERSAVING 0x40
1678
1679#define BCCK_SIDEBAND 0x10
1680
1681#define BCCK_SCRAMBLE 0x8
1682#define BCCK_ANTDIVERSITY 0x8000
1683#define BCCK_CARRIER_RECOVERY 0x4000
1684#define BCCK_TXRATE 0x3000
1685#define BCCK_DCCANCEL 0x0800
1686#define BCCK_ISICANCEL 0x0400
1687#define BCCK_MATCH_FILTER 0x0200
1688#define BCCK_EQUALIZER 0x0100
1689#define BCCK_PREAMBLE_DETECT 0x800000
1690#define BCCK_FAST_FALSECCA 0x400000
1691#define BCCK_CH_ESTSTART 0x300000
1692#define BCCK_CCA_COUNT 0x080000
1693#define BCCK_CS_LIM 0x070000
1694#define BCCK_BIST_MODE 0x80000000
1695#define BCCK_CCAMASK 0x40000000
1696#define BCCK_TX_DAC_PHASE 0x4
1697#define BCCK_RX_ADC_PHASE 0x20000000
1698#define BCCKR_CP_MODE 0x0100
1699#define BCCK_TXDC_OFFSET 0xf0
1700#define BCCK_RXDC_OFFSET 0xf
1701#define BCCK_CCA_MODE 0xc000
1702#define BCCK_FALSECS_LIM 0x3f00
1703#define BCCK_CS_RATIO 0xc00000
1704#define BCCK_CORGBIT_SEL 0x300000
1705#define BCCK_PD_LIM 0x0f0000
1706#define BCCK_NEWCCA 0x80000000
1707#define BCCK_RXHP_OF_IG 0x8000
1708#define BCCK_RXIG 0x7f00
1709#define BCCK_LNA_POLARITY 0x800000
1710#define BCCK_RX1ST_BAIN 0x7f0000
1711#define BCCK_RF_EXTEND 0x20000000
1712#define BCCK_RXAGC_SATLEVEL 0x1f000000
1713#define BCCK_RXAGC_SATCOUNT 0xe0
1714#define bCCKRxRFSettle 0x1f
1715#define BCCK_FIXED_RXAGC 0x8000
1716#define BCCK_ANTENNA_POLARITY 0x2000
1717#define BCCK_TXFILTER_TYPE 0x0c00
1718#define BCCK_RXAGC_REPORTTYPE 0x0300
1719#define BCCK_RXDAGC_EN 0x80000000
1720#define BCCK_RXDAGC_PERIOD 0x20000000
1721#define BCCK_RXDAGC_SATLEVEL 0x1f000000
1722#define BCCK_TIMING_RECOVERY 0x800000
1723#define BCCK_TXC0 0x3f0000
1724#define BCCK_TXC1 0x3f000000
1725#define BCCK_TXC2 0x3f
1726#define BCCK_TXC3 0x3f00
1727#define BCCK_TXC4 0x3f0000
1728#define BCCK_TXC5 0x3f000000
1729#define BCCK_TXC6 0x3f
1730#define BCCK_TXC7 0x3f00
1731#define BCCK_DEBUGPORT 0xff0000
1732#define BCCK_DAC_DEBUG 0x0f000000
1733#define BCCK_FALSEALARM_ENABLE 0x8000
1734#define BCCK_FALSEALARM_READ 0x4000
1735#define BCCK_TRSSI 0x7f
1736#define BCCK_RXAGC_REPORT 0xfe
1737#define BCCK_RXREPORT_ANTSEL 0x80000000
1738#define BCCK_RXREPORT_MFOFF 0x40000000
1739#define BCCK_RXREPORT_SQLOSS 0x20000000
1740#define BCCK_RXREPORT_PKTLOSS 0x10000000
1741#define BCCK_RXREPORT_LOCKEDBIT 0x08000000
1742#define BCCK_RXREPORT_RATEERROR 0x04000000
1743#define BCCK_RXREPORT_RXRATE 0x03000000
1744#define BCCK_RXFA_COUNTER_LOWER 0xff
1745#define BCCK_RXFA_COUNTER_UPPER 0xff000000
1746#define BCCK_RXHPAGC_START 0xe000
1747#define BCCK_RXHPAGC_FINAL 0x1c00
1748#define BCCK_RXFALSEALARM_ENABLE 0x8000
1749#define BCCK_FACOUNTER_FREEZE 0x4000
1750#define BCCK_TXPATH_SEL 0x10000000
1751#define BCCK_DEFAULT_RXPATH 0xc000000
1752#define BCCK_OPTION_RXPATH 0x3000000
1753
1754#define BNUM_OFSTF 0x3
1755#define BSHIFT_L 0xc0
1756#define BGI_TH 0xc
1757#define BRXPATH_A 0x1
1758#define BRXPATH_B 0x2
1759#define BRXPATH_C 0x4
1760#define BRXPATH_D 0x8
1761#define BTXPATH_A 0x1
1762#define BTXPATH_B 0x2
1763#define BTXPATH_C 0x4
1764#define BTXPATH_D 0x8
1765#define BTRSSI_FREQ 0x200
1766#define BADC_BACKOFF 0x3000
1767#define BDFIR_BACKOFF 0xc000
1768#define BTRSSI_LATCH_PHASE 0x10000
1769#define BRX_LDC_OFFSET 0xff
1770#define BRX_QDC_OFFSET 0xff00
1771#define BRX_DFIR_MODE 0x1800000
1772#define BRX_DCNF_TYPE 0xe000000
1773#define BRXIQIMB_A 0x3ff
1774#define BRXIQIMB_B 0xfc00
1775#define BRXIQIMB_C 0x3f0000
1776#define BRXIQIMB_D 0xffc00000
1777#define BDC_DC_NOTCH 0x60000
1778#define BRXNB_NOTCH 0x1f000000
1779#define BPD_TH 0xf
1780#define BPD_TH_OPT2 0xc000
1781#define BPWED_TH 0x700
1782#define BIFMF_WIN_L 0x800
1783#define BPD_OPTION 0x1000
1784#define BMF_WIN_L 0xe000
1785#define BBW_SEARCH_L 0x30000
1786#define BWIN_ENH_L 0xc0000
1787#define BBW_TH 0x700000
1788#define BED_TH2 0x3800000
1789#define BBW_OPTION 0x4000000
1790#define BRADIO_TH 0x18000000
1791#define BWINDOW_L 0xe0000000
1792#define BSBD_OPTION 0x1
1793#define BFRAME_TH 0x1c
1794#define BFS_OPTION 0x60
1795#define BDC_SLOPE_CHECK 0x80
1796#define BFGUARD_COUNTER_DC_L 0xe00
1797#define BFRAME_WEIGHT_SHORT 0x7000
1798#define BSUB_TUNE 0xe00000
1799#define BFRAME_DC_LENGTH 0xe000000
1800#define BSBD_START_OFFSET 0x30000000
1801#define BFRAME_TH_2 0x7
1802#define BFRAME_GI2_TH 0x38
1803#define BGI2_SYNC_EN 0x40
1804#define BSARCH_SHORT_EARLY 0x300
1805#define BSARCH_SHORT_LATE 0xc00
1806#define BSARCH_GI2_LATE 0x70000
1807#define BCFOANTSUM 0x1
1808#define BCFOACC 0x2
1809#define BCFOSTARTOFFSET 0xc
1810#define BCFOLOOPBACK 0x70
1811#define BCFOSUMWEIGHT 0x80
1812#define BDAGCENABLE 0x10000
1813#define BTXIQIMB_A 0x3ff
1814#define BTXIQIMB_b 0xfc00
1815#define BTXIQIMB_C 0x3f0000
1816#define BTXIQIMB_D 0xffc00000
1817#define BTXIDCOFFSET 0xff
1818#define BTXIQDCOFFSET 0xff00
1819#define BTXDFIRMODE 0x10000
1820#define BTXPESUDO_NOISEON 0x4000000
1821#define BTXPESUDO_NOISE_A 0xff
1822#define BTXPESUDO_NOISE_B 0xff00
1823#define BTXPESUDO_NOISE_C 0xff0000
1824#define BTXPESUDO_NOISE_D 0xff000000
1825#define BCCA_DROPOPTION 0x20000
1826#define BCCA_DROPTHRES 0xfff00000
1827#define BEDCCA_H 0xf
1828#define BEDCCA_L 0xf0
1829#define BLAMBDA_ED 0x300
1830#define BRX_INITIALGAIN 0x7f
1831#define BRX_ANTDIV_EN 0x80
1832#define BRX_AGC_ADDRESS_FOR_LNA 0x7f00
1833#define BRX_HIGHPOWER_FLOW 0x8000
1834#define BRX_AGC_FREEZE_THRES 0xc0000
1835#define BRX_FREEZESTEP_AGC1 0x300000
1836#define BRX_FREEZESTEP_AGC2 0xc00000
1837#define BRX_FREEZESTEP_AGC3 0x3000000
1838#define BRX_FREEZESTEP_AGC0 0xc000000
1839#define BRXRSSI_CMP_EN 0x10000000
1840#define BRXQUICK_AGCEN 0x20000000
1841#define BRXAGC_FREEZE_THRES_MODE 0x40000000
1842#define BRX_OVERFLOW_CHECKTYPE 0x80000000
1843#define BRX_AGCSHIFT 0x7f
1844#define BTRSW_TRI_ONLY 0x80
1845#define BPOWER_THRES 0x300
1846#define BRXAGC_EN 0x1
1847#define BRXAGC_TOGETHER_EN 0x2
1848#define BRXAGC_MIN 0x4
1849#define BRXHP_INI 0x7
1850#define BRXHP_TRLNA 0x70
1851#define BRXHP_RSSI 0x700
1852#define BRXHP_BBP1 0x7000
1853#define BRXHP_BBP2 0x70000
1854#define BRXHP_BBP3 0x700000
1855#define BRSSI_H 0x7f0000
1856#define BRSSI_GEN 0x7f000000
1857#define BRXSETTLE_TRSW 0x7
1858#define BRXSETTLE_LNA 0x38
1859#define BRXSETTLE_RSSI 0x1c0
1860#define BRXSETTLE_BBP 0xe00
1861#define BRXSETTLE_RXHP 0x7000
1862#define BRXSETTLE_ANTSW_RSSI 0x38000
1863#define BRXSETTLE_ANTSW 0xc0000
1864#define BRXPROCESS_TIME_DAGC 0x300000
1865#define BRXSETTLE_HSSI 0x400000
1866#define BRXPROCESS_TIME_BBPPW 0x800000
1867#define BRXANTENNA_POWER_SHIFT 0x3000000
1868#define BRSSI_TABLE_SELECT 0xc000000
1869#define BRXHP_FINAL 0x7000000
1870#define BRXHPSETTLE_BBP 0x7
1871#define BRXHTSETTLE_HSSI 0x8
1872#define BRXHTSETTLE_RXHP 0x70
1873#define BRXHTSETTLE_BBPPW 0x80
1874#define BRXHTSETTLE_IDLE 0x300
1875#define BRXHTSETTLE_RESERVED 0x1c00
1876#define BRXHT_RXHP_EN 0x8000
1877#define BRXAGC_FREEZE_THRES 0x30000
1878#define BRXAGC_TOGETHEREN 0x40000
1879#define BRXHTAGC_MIN 0x80000
1880#define BRXHTAGC_EN 0x100000
1881#define BRXHTDAGC_EN 0x200000
1882#define BRXHT_RXHP_BBP 0x1c00000
1883#define BRXHT_RXHP_FINAL 0xe0000000
1884#define BRXPW_RADIO_TH 0x3
1885#define BRXPW_RADIO_EN 0x4
1886#define BRXMF_HOLD 0x3800
1887#define BRXPD_DELAY_TH1 0x38
1888#define BRXPD_DELAY_TH2 0x1c0
1889#define BRXPD_DC_COUNT_MAX 0x600
1890#define BRXPD_DELAY_TH 0x8000
1891#define BRXPROCESS_DELAY 0xf0000
1892#define BRXSEARCHRANGE_GI2_EARLY 0x700000
1893#define BRXFRAME_FUARD_COUNTER_L 0x3800000
1894#define BRXSGI_GUARD_L 0xc000000
1895#define BRXSGI_SEARCH_L 0x30000000
1896#define BRXSGI_TH 0xc0000000
1897#define BDFSCNT0 0xff
1898#define BDFSCNT1 0xff00
1899#define BDFSFLAG 0xf0000
1900#define BMF_WEIGHT_SUM 0x300000
1901#define BMINIDX_TH 0x7f000000
1902#define BDAFORMAT 0x40000
1903#define BTXCH_EMU_ENABLE 0x01000000
1904#define BTRSW_ISOLATION_A 0x7f
1905#define BTRSW_ISOLATION_B 0x7f00
1906#define BTRSW_ISOLATION_C 0x7f0000
1907#define BTRSW_ISOLATION_D 0x7f000000
1908#define BEXT_LNA_GAIN 0x7c00
1909
1910#define BSTBC_EN 0x4
1911#define BANTENNA_MAPPING 0x10
1912#define BNSS 0x20
1913#define BCFO_ANTSUM_ID 0x200
1914#define BPHY_COUNTER_RESET 0x8000000
1915#define BCFO_REPORT_GET 0x4000000
1916#define BOFDM_CONTINUE_TX 0x10000000
1917#define BOFDM_SINGLE_CARRIER 0x20000000
1918#define BOFDM_SINGLE_TONE 0x40000000
1919#define BHT_DETECT 0x100
1920#define BCFOEN 0x10000
1921#define BCFOVALUE 0xfff00000
1922#define BSIGTONE_RE 0x3f
1923#define BSIGTONE_IM 0x7f00
1924#define BCOUNTER_CCA 0xffff
1925#define BCOUNTER_PARITYFAIL 0xffff0000
1926#define BCOUNTER_RATEILLEGAL 0xffff
1927#define BCOUNTER_CRC8FAIL 0xffff0000
1928#define BCOUNTER_MCSNOSUPPORT 0xffff
1929#define BCOUNTER_FASTSYNC 0xffff
1930#define BSHORTCFO 0xfff
1931#define BSHORTCFOT_LENGTH 12
1932#define BSHORTCFOF_LENGTH 11
1933#define BLONGCFO 0x7ff
1934#define BLONGCFOT_LENGTH 11
1935#define BLONGCFOF_LENGTH 11
1936#define BTAILCFO 0x1fff
1937#define BTAILCFOT_LENGTH 13
1938#define BTAILCFOF_LENGTH 12
1939#define BNOISE_EN_PWDB 0xffff
1940#define BCC_POWER_DB 0xffff0000
1941#define BMOISE_PWDB 0xffff
1942#define BPOWERMEAST_LENGTH 10
1943#define BPOWERMEASF_LENGTH 3
1944#define BRX_HT_BW 0x1
1945#define BRXSC 0x6
1946#define BRX_HT 0x8
1947#define BNB_INTF_DET_ON 0x1
1948#define BINTF_WIN_LEN_CFG 0x30
1949#define BNB_INTF_TH_CFG 0x1c0
1950#define BRFGAIN 0x3f
1951#define BTABLESEL 0x40
1952#define BTRSW 0x80
1953#define BRXSNR_A 0xff
1954#define BRXSNR_B 0xff00
1955#define BRXSNR_C 0xff0000
1956#define BRXSNR_D 0xff000000
1957#define BSNR_EVMT_LENGTH 8
1958#define BSNR_EVMF_LENGTH 1
1959#define BCSI1ST 0xff
1960#define BCSI2ND 0xff00
1961#define BRXEVM1ST 0xff0000
1962#define BRXEVM2ND 0xff000000
1963#define BSIGEVM 0xff
1964#define BPWDB 0xff00
1965#define BSGIEN 0x10000
1966
1967#define BSFACTOR_QMA1 0xf
1968#define BSFACTOR_QMA2 0xf0
1969#define BSFACTOR_QMA3 0xf00
1970#define BSFACTOR_QMA4 0xf000
1971#define BSFACTOR_QMA5 0xf0000
1972#define BSFACTOR_QMA6 0xf0000
1973#define BSFACTOR_QMA7 0xf00000
1974#define BSFACTOR_QMA8 0xf000000
1975#define BSFACTOR_QMA9 0xf0000000
1976#define BCSI_SCHEME 0x100000
1977
1978#define BNOISE_LVL_TOP_SET 0x3
1979#define BCHSMOOTH 0x4
1980#define BCHSMOOTH_CFG1 0x38
1981#define BCHSMOOTH_CFG2 0x1c0
1982#define BCHSMOOTH_CFG3 0xe00
1983#define BCHSMOOTH_CFG4 0x7000
1984#define BMRCMODE 0x800000
1985#define BTHEVMCFG 0x7000000
1986
1987#define BLOOP_FIT_TYPE 0x1
1988#define BUPD_CFO 0x40
1989#define BUPD_CFO_OFFDATA 0x80
1990#define BADV_UPD_CFO 0x100
1991#define BADV_TIME_CTRL 0x800
1992#define BUPD_CLKO 0x1000
1993#define BFC 0x6000
1994#define BTRACKING_MODE 0x8000
1995#define BPHCMP_ENABLE 0x10000
1996#define BUPD_CLKO_LTF 0x20000
1997#define BCOM_CH_CFO 0x40000
1998#define BCSI_ESTI_MODE 0x80000
1999#define BADV_UPD_EQZ 0x100000
2000#define BUCHCFG 0x7000000
2001#define BUPDEQZ 0x8000000
2002
2003#define BRX_PESUDO_NOISE_ON 0x20000000
2004#define BRX_PESUDO_NOISE_A 0xff
2005#define BRX_PESUDO_NOISE_B 0xff00
2006#define BRX_PESUDO_NOISE_C 0xff0000
2007#define BRX_PESUDO_NOISE_D 0xff000000
2008#define BRX_PESUDO_NOISESTATE_A 0xffff
2009#define BRX_PESUDO_NOISESTATE_B 0xffff0000
2010#define BRX_PESUDO_NOISESTATE_C 0xffff
2011#define BRX_PESUDO_NOISESTATE_D 0xffff0000
2012
2013#define BZEBRA1_HSSIENABLE 0x8
2014#define BZEBRA1_TRXCONTROL 0xc00
2015#define BZEBRA1_TRXGAINSETTING 0x07f
2016#define BZEBRA1_RXCOUNTER 0xc00
2017#define BZEBRA1_TXCHANGEPUMP 0x38
2018#define BZEBRA1_RXCHANGEPUMP 0x7
2019#define BZEBRA1_CHANNEL_NUM 0xf80
2020#define BZEBRA1_TXLPFBW 0x400
2021#define BZEBRA1_RXLPFBW 0x600
2022
2023#define BRTL8256REG_MODE_CTRL1 0x100
2024#define BRTL8256REG_MODE_CTRL0 0x40
2025#define BRTL8256REG_TXLPFBW 0x18
2026#define BRTL8256REG_RXLPFBW 0x600
2027
2028#define BRTL8258_TXLPFBW 0xc
2029#define BRTL8258_RXLPFBW 0xc00
2030#define BRTL8258_RSSILPFBW 0xc0
2031
2032#define BBYTE0 0x1
2033#define BBYTE1 0x2
2034#define BBYTE2 0x4
2035#define BBYTE3 0x8
2036#define BWORD0 0x3
2037#define BWORD1 0xc
2038#define BWORD 0xf
2039
2040#define MASKBYTE0 0xff
2041#define MASKBYTE1 0xff00
2042#define MASKBYTE2 0xff0000
2043#define MASKBYTE3 0xff000000
2044#define MASKHWORD 0xffff0000
2045#define MASKLWORD 0x0000ffff
2046#define MASKDWORD 0xffffffff
2047#define MASK12BITS 0xfff
2048#define MASKH4BITS 0xf0000000
2049#define MASKOFDM_D 0xffc00000
2050#define MASKCCK 0x3f3f3f3f
2051
2052#define MASK4BITS 0x0f
2053#define MASK20BITS 0xfffff
2054#define RFREG_OFFSET_MASK 0xfffff
2055
2056#define BENABLE 0x1
2057#define BDISABLE 0x0
2058
2059#define LEFT_ANTENNA 0x0
2060#define RIGHT_ANTENNA 0x1
2061
2062#define TCHECK_TXSTATUS 500
2063#define TUPDATE_RXCOUNTER 100
2064
2065#endif
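The B*/MASK* values above are bit-field masks for 32-bit baseband registers. In the code that follows they are passed to rtl_set_bbreg(hw, regaddr, bitmask, data) and rtl_get_bbreg(hw, regaddr, bitmask), which update or read only the masked field. A minimal standalone sketch of the shift-by-mask arithmetic those accessors imply (field_get/field_set are illustrative helpers, not part of this patch):

#include <stdint.h>

/* Illustrative helpers, not from the patch: locate the mask's lowest set
 * bit, then move data into (or out of) that position. This mirrors the
 * read-modify-write behaviour expected from rtl_set_bbreg()/rtl_get_bbreg()
 * when called with masks such as MASKBYTE1 (0xff00) or MASKDWORD. */
static uint32_t field_get(uint32_t reg, uint32_t mask)
{
	uint32_t shift = 0;

	while (!(mask & (1u << shift)))
		shift++;
	return (reg & mask) >> shift;
}

static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t data)
{
	uint32_t shift = 0;

	while (!(mask & (1u << shift)))
		shift++;
	return (reg & ~mask) | ((data << shift) & mask);
}

/* Example: field_set(0x12345678, 0xff00, 0x3f) changes only bits 15:8. */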
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
new file mode 100644
index 000000000000..ffd8e04c4028
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.c
@@ -0,0 +1,523 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "reg.h"
32#include "def.h"
33#include "phy.h"
34#include "rf.h"
35#include "dm.h"
36
37static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw);
38
39void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw, u8 bandwidth)
40{
41 struct rtl_priv *rtlpriv = rtl_priv(hw);
42 struct rtl_phy *rtlphy = &(rtlpriv->phy);
43
44 switch (bandwidth) {
45 case HT_CHANNEL_WIDTH_20:
46 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
47 0xfffff3ff) | 0x0400);
48 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
49 rtlphy->rfreg_chnlval[0]);
50 break;
51 case HT_CHANNEL_WIDTH_20_40:
52 rtlphy->rfreg_chnlval[0] = ((rtlphy->rfreg_chnlval[0] &
53 0xfffff3ff));
54 rtl_set_rfreg(hw, RF90_PATH_A, RF_CHNLBW, RFREG_OFFSET_MASK,
55 rtlphy->rfreg_chnlval[0]);
56 break;
57 default:
58 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
59 ("unknown bandwidth: %#X\n", bandwidth));
60 break;
61 }
62}
63
64void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
65 u8 *ppowerlevel)
66{
67 struct rtl_priv *rtlpriv = rtl_priv(hw);
68 struct rtl_phy *rtlphy = &(rtlpriv->phy);
69 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
70 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
71 u32 tx_agc[2] = {0, 0}, tmpval;
72 bool turbo_scanoff = false;
73 u8 idx1, idx2;
74 u8 *ptr;
75
76 if (rtlefuse->eeprom_regulatory != 0)
77 turbo_scanoff = true;
78
79	if (mac->act_scanning) {
80 tx_agc[RF90_PATH_A] = 0x3f3f3f3f;
81 tx_agc[RF90_PATH_B] = 0x3f3f3f3f;
82
83 if (turbo_scanoff) {
84 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
85 tx_agc[idx1] = ppowerlevel[idx1] |
86 (ppowerlevel[idx1] << 8) |
87 (ppowerlevel[idx1] << 16) |
88 (ppowerlevel[idx1] << 24);
89 }
90 }
91 } else {
92 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
93 tx_agc[idx1] = ppowerlevel[idx1] |
94 (ppowerlevel[idx1] << 8) |
95 (ppowerlevel[idx1] << 16) |
96 (ppowerlevel[idx1] << 24);
97 }
98
99 if (rtlefuse->eeprom_regulatory == 0) {
100 tmpval =
101 (rtlphy->mcs_txpwrlevel_origoffset[0][6]) +
102 (rtlphy->mcs_txpwrlevel_origoffset[0][7] <<
103 8);
104 tx_agc[RF90_PATH_A] += tmpval;
105
106 tmpval = (rtlphy->mcs_txpwrlevel_origoffset[0][14]) +
107 (rtlphy->mcs_txpwrlevel_origoffset[0][15] <<
108 24);
109 tx_agc[RF90_PATH_B] += tmpval;
110 }
111 }
112
113 for (idx1 = RF90_PATH_A; idx1 <= RF90_PATH_B; idx1++) {
114 ptr = (u8 *) (&(tx_agc[idx1]));
115 for (idx2 = 0; idx2 < 4; idx2++) {
116 if (*ptr > RF6052_MAX_TX_PWR)
117 *ptr = RF6052_MAX_TX_PWR;
118 ptr++;
119 }
120 }
121
122 tmpval = tx_agc[RF90_PATH_A] & 0xff;
123 rtl_set_bbreg(hw, RTXAGC_A_CCK1_MCS32, MASKBYTE1, tmpval);
124
125 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
126 ("CCK PWR 1M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
127 RTXAGC_A_CCK1_MCS32));
128
129 tmpval = tx_agc[RF90_PATH_A] >> 8;
130
131 if (mac->mode == WIRELESS_MODE_B)
132 tmpval = tmpval & 0xff00ffff;
133
134 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, 0xffffff00, tmpval);
135
136 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
137 ("CCK PWR 2~11M (rf-A) = 0x%x (reg 0x%x)\n", tmpval,
138 RTXAGC_B_CCK11_A_CCK2_11));
139
140 tmpval = tx_agc[RF90_PATH_B] >> 24;
141 rtl_set_bbreg(hw, RTXAGC_B_CCK11_A_CCK2_11, MASKBYTE0, tmpval);
142
143 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
144 ("CCK PWR 11M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
145 RTXAGC_B_CCK11_A_CCK2_11));
146
147 tmpval = tx_agc[RF90_PATH_B] & 0x00ffffff;
148 rtl_set_bbreg(hw, RTXAGC_B_CCK1_55_MCS32, 0xffffff00, tmpval);
149
150 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
151 ("CCK PWR 1~5.5M (rf-B) = 0x%x (reg 0x%x)\n", tmpval,
152 RTXAGC_B_CCK1_55_MCS32));
153}
154
155static void rtl92c_phy_get_power_base(struct ieee80211_hw *hw,
156 u8 *ppowerlevel, u8 channel,
157 u32 *ofdmbase, u32 *mcsbase)
158{
159 struct rtl_priv *rtlpriv = rtl_priv(hw);
160 struct rtl_phy *rtlphy = &(rtlpriv->phy);
161 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
162 u32 powerBase0, powerBase1;
163 u8 legacy_pwrdiff, ht20_pwrdiff;
164 u8 i, powerlevel[2];
165
166 for (i = 0; i < 2; i++) {
167 powerlevel[i] = ppowerlevel[i];
168 legacy_pwrdiff = rtlefuse->txpwr_legacyhtdiff[i][channel - 1];
169 powerBase0 = powerlevel[i] + legacy_pwrdiff;
170
171 powerBase0 = (powerBase0 << 24) | (powerBase0 << 16) |
172 (powerBase0 << 8) | powerBase0;
173 *(ofdmbase + i) = powerBase0;
174 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
175 (" [OFDM power base index rf(%c) = 0x%x]\n",
176 ((i == 0) ? 'A' : 'B'), *(ofdmbase + i)));
177 }
178
179 for (i = 0; i < 2; i++) {
180 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20) {
181 ht20_pwrdiff = rtlefuse->txpwr_ht20diff[i][channel - 1];
182 powerlevel[i] += ht20_pwrdiff;
183 }
184 powerBase1 = powerlevel[i];
185 powerBase1 = (powerBase1 << 24) |
186 (powerBase1 << 16) | (powerBase1 << 8) | powerBase1;
187
188 *(mcsbase + i) = powerBase1;
189
190 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
191 (" [MCS power base index rf(%c) = 0x%x]\n",
192 ((i == 0) ? 'A' : 'B'), *(mcsbase + i)));
193 }
194}
195
196static void _rtl92c_get_txpower_writeval_by_regulatory(struct ieee80211_hw *hw,
197 u8 channel, u8 index,
198 u32 *powerBase0,
199 u32 *powerBase1,
200 u32 *p_outwriteval)
201{
202 struct rtl_priv *rtlpriv = rtl_priv(hw);
203 struct rtl_phy *rtlphy = &(rtlpriv->phy);
204 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
205 u8 i, chnlgroup, pwr_diff_limit[4];
206 u32 writeVal, customer_limit, rf;
207
208 for (rf = 0; rf < 2; rf++) {
209 switch (rtlefuse->eeprom_regulatory) {
210 case 0:
211 chnlgroup = 0;
212
213 writeVal =
214 rtlphy->mcs_txpwrlevel_origoffset[chnlgroup][index +
215 (rf ? 8 : 0)]
216 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
217
218 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
219 ("RTK better performance, "
220 "writeVal(%c) = 0x%x\n",
221 ((rf == 0) ? 'A' : 'B'), writeVal));
222 break;
223 case 1:
224 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
225 writeVal = ((index < 2) ? powerBase0[rf] :
226 powerBase1[rf]);
227
228 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
229 ("Realtek regulatory, 40MHz, "
230 "writeVal(%c) = 0x%x\n",
231 ((rf == 0) ? 'A' : 'B'), writeVal));
232 } else {
233 if (rtlphy->pwrgroup_cnt == 1)
234 chnlgroup = 0;
235 if (rtlphy->pwrgroup_cnt >= 3) {
236 if (channel <= 3)
237 chnlgroup = 0;
238 else if (channel >= 4 && channel <= 9)
239 chnlgroup = 1;
240 else if (channel > 9)
241 chnlgroup = 2;
242 if (rtlphy->pwrgroup_cnt == 4)
243 chnlgroup++;
244 }
245
246 writeVal =
247 rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
248 [index + (rf ? 8 : 0)] + ((index < 2) ?
249 powerBase0[rf] :
250 powerBase1[rf]);
251
252 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
253 ("Realtek regulatory, 20MHz, "
254 "writeVal(%c) = 0x%x\n",
255 ((rf == 0) ? 'A' : 'B'), writeVal));
256 }
257 break;
258 case 2:
259 writeVal =
260 ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
261
262 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
263 ("Better regulatory, "
264 "writeVal(%c) = 0x%x\n",
265 ((rf == 0) ? 'A' : 'B'), writeVal));
266 break;
267 case 3:
268 chnlgroup = 0;
269
270 if (rtlphy->current_chan_bw == HT_CHANNEL_WIDTH_20_40) {
271 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
272 ("customer's limit, 40MHz "
273 "rf(%c) = 0x%x\n",
274 ((rf == 0) ? 'A' : 'B'),
275 rtlefuse->pwrgroup_ht40[rf][channel -
276 1]));
277 } else {
278 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
279 ("customer's limit, 20MHz "
280 "rf(%c) = 0x%x\n",
281 ((rf == 0) ? 'A' : 'B'),
282 rtlefuse->pwrgroup_ht20[rf][channel -
283 1]));
284 }
285 for (i = 0; i < 4; i++) {
286 pwr_diff_limit[i] =
287 (u8) ((rtlphy->mcs_txpwrlevel_origoffset
288 [chnlgroup][index +
289 (rf ? 8 : 0)] & (0x7f << (i * 8))) >>
290 (i * 8));
291
292 if (rtlphy->current_chan_bw ==
293 HT_CHANNEL_WIDTH_20_40) {
294 if (pwr_diff_limit[i] >
295 rtlefuse->
296 pwrgroup_ht40[rf][channel - 1])
297 pwr_diff_limit[i] =
298 rtlefuse->pwrgroup_ht40[rf]
299 [channel - 1];
300 } else {
301 if (pwr_diff_limit[i] >
302 rtlefuse->
303 pwrgroup_ht20[rf][channel - 1])
304 pwr_diff_limit[i] =
305 rtlefuse->pwrgroup_ht20[rf]
306 [channel - 1];
307 }
308 }
309
310 customer_limit = (pwr_diff_limit[3] << 24) |
311 (pwr_diff_limit[2] << 16) |
312 (pwr_diff_limit[1] << 8) | (pwr_diff_limit[0]);
313
314 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
315 ("Customer's limit rf(%c) = 0x%x\n",
316 ((rf == 0) ? 'A' : 'B'), customer_limit));
317
318 writeVal = customer_limit +
319 ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
320
321 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
322 ("Customer, writeVal rf(%c)= 0x%x\n",
323 ((rf == 0) ? 'A' : 'B'), writeVal));
324 break;
325 default:
326 chnlgroup = 0;
327 writeVal =
328 rtlphy->mcs_txpwrlevel_origoffset[chnlgroup]
329 [index + (rf ? 8 : 0)]
330 + ((index < 2) ? powerBase0[rf] : powerBase1[rf]);
331
332 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
333 ("RTK better performance, writeVal "
334 "rf(%c) = 0x%x\n",
335 ((rf == 0) ? 'A' : 'B'), writeVal));
336 break;
337 }
338
339 if (rtlpriv->dm.dynamic_txhighpower_lvl == TXHIGHPWRLEVEL_BT1)
340 writeVal = writeVal - 0x06060606;
341 else if (rtlpriv->dm.dynamic_txhighpower_lvl ==
342 TXHIGHPWRLEVEL_BT2)
343 writeVal = writeVal - 0x0c0c0c0c;
344 *(p_outwriteval + rf) = writeVal;
345 }
346}
347
348static void _rtl92c_write_ofdm_power_reg(struct ieee80211_hw *hw,
349 u8 index, u32 *pValue)
350{
351 struct rtl_priv *rtlpriv = rtl_priv(hw);
352 struct rtl_phy *rtlphy = &(rtlpriv->phy);
353
354 u16 regoffset_a[6] = {
355 RTXAGC_A_RATE18_06, RTXAGC_A_RATE54_24,
356 RTXAGC_A_MCS03_MCS00, RTXAGC_A_MCS07_MCS04,
357 RTXAGC_A_MCS11_MCS08, RTXAGC_A_MCS15_MCS12
358 };
359 u16 regoffset_b[6] = {
360 RTXAGC_B_RATE18_06, RTXAGC_B_RATE54_24,
361 RTXAGC_B_MCS03_MCS00, RTXAGC_B_MCS07_MCS04,
362 RTXAGC_B_MCS11_MCS08, RTXAGC_B_MCS15_MCS12
363 };
364 u8 i, rf, pwr_val[4];
365 u32 writeVal;
366 u16 regoffset;
367
368 for (rf = 0; rf < 2; rf++) {
369 writeVal = pValue[rf];
370 for (i = 0; i < 4; i++) {
371 pwr_val[i] = (u8) ((writeVal & (0x7f <<
372 (i * 8))) >> (i * 8));
373
374 if (pwr_val[i] > RF6052_MAX_TX_PWR)
375 pwr_val[i] = RF6052_MAX_TX_PWR;
376 }
377 writeVal = (pwr_val[3] << 24) | (pwr_val[2] << 16) |
378 (pwr_val[1] << 8) | pwr_val[0];
379
380 if (rf == 0)
381 regoffset = regoffset_a[index];
382 else
383 regoffset = regoffset_b[index];
384 rtl_set_bbreg(hw, regoffset, MASKDWORD, writeVal);
385
386 RTPRINT(rtlpriv, FPHY, PHY_TXPWR,
387 ("Set 0x%x = %08x\n", regoffset, writeVal));
388
389 if (((get_rf_type(rtlphy) == RF_2T2R) &&
390 (regoffset == RTXAGC_A_MCS15_MCS12 ||
391 regoffset == RTXAGC_B_MCS15_MCS12)) ||
392 ((get_rf_type(rtlphy) != RF_2T2R) &&
393 (regoffset == RTXAGC_A_MCS07_MCS04 ||
394 regoffset == RTXAGC_B_MCS07_MCS04))) {
395
396 writeVal = pwr_val[3];
397 if (regoffset == RTXAGC_A_MCS15_MCS12 ||
398 regoffset == RTXAGC_A_MCS07_MCS04)
399 regoffset = 0xc90;
400 if (regoffset == RTXAGC_B_MCS15_MCS12 ||
401 regoffset == RTXAGC_B_MCS07_MCS04)
402 regoffset = 0xc98;
403
404 for (i = 0; i < 3; i++) {
405 writeVal = (writeVal > 6) ? (writeVal - 6) : 0;
406 rtl_write_byte(rtlpriv, (u32) (regoffset + i),
407 (u8) writeVal);
408 }
409 }
410 }
411}
412
413void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
414 u8 *ppowerlevel, u8 channel)
415{
416 u32 writeVal[2], powerBase0[2], powerBase1[2];
417 u8 index;
418
419 rtl92c_phy_get_power_base(hw, ppowerlevel,
420 channel, &powerBase0[0], &powerBase1[0]);
421
422 for (index = 0; index < 6; index++) {
423 _rtl92c_get_txpower_writeval_by_regulatory(hw,
424 channel, index,
425 &powerBase0[0],
426 &powerBase1[0],
427 &writeVal[0]);
428
429 _rtl92c_write_ofdm_power_reg(hw, index, &writeVal[0]);
430 }
431}
432
433bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw)
434{
435 struct rtl_priv *rtlpriv = rtl_priv(hw);
436 struct rtl_phy *rtlphy = &(rtlpriv->phy);
437
438 if (rtlphy->rf_type == RF_1T1R)
439 rtlphy->num_total_rfpath = 1;
440 else
441 rtlphy->num_total_rfpath = 2;
442
443 return _rtl92c_phy_rf6052_config_parafile(hw);
444}
445
446static bool _rtl92c_phy_rf6052_config_parafile(struct ieee80211_hw *hw)
447{
448 struct rtl_priv *rtlpriv = rtl_priv(hw);
449 struct rtl_phy *rtlphy = &(rtlpriv->phy);
450 u32 u4_regvalue;
451 u8 rfpath;
452 bool rtstatus;
453 struct bb_reg_def *pphyreg;
454
455 for (rfpath = 0; rfpath < rtlphy->num_total_rfpath; rfpath++) {
456
457 pphyreg = &rtlphy->phyreg_def[rfpath];
458
459 switch (rfpath) {
460 case RF90_PATH_A:
461 case RF90_PATH_C:
462 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
463 BRFSI_RFENV);
464 break;
465 case RF90_PATH_B:
466 case RF90_PATH_D:
467 u4_regvalue = rtl_get_bbreg(hw, pphyreg->rfintfs,
468 BRFSI_RFENV << 16);
469 break;
470 }
471
472 rtl_set_bbreg(hw, pphyreg->rfintfe, BRFSI_RFENV << 16, 0x1);
473 udelay(1);
474
475 rtl_set_bbreg(hw, pphyreg->rfintfo, BRFSI_RFENV, 0x1);
476 udelay(1);
477
478 rtl_set_bbreg(hw, pphyreg->rfhssi_para2,
479 B3WIREADDREAALENGTH, 0x0);
480 udelay(1);
481
482 rtl_set_bbreg(hw, pphyreg->rfhssi_para2, B3WIREDATALENGTH, 0x0);
483 udelay(1);
484
485 switch (rfpath) {
486 case RF90_PATH_A:
487 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
488 (enum radio_path) rfpath);
489 break;
490 case RF90_PATH_B:
491 rtstatus = rtl92c_phy_config_rf_with_headerfile(hw,
492 (enum radio_path) rfpath);
493 break;
494 case RF90_PATH_C:
495 break;
496 case RF90_PATH_D:
497 break;
498 }
499
500 switch (rfpath) {
501 case RF90_PATH_A:
502 case RF90_PATH_C:
503 rtl_set_bbreg(hw, pphyreg->rfintfs,
504 BRFSI_RFENV, u4_regvalue);
505 break;
506 case RF90_PATH_B:
507 case RF90_PATH_D:
508 rtl_set_bbreg(hw, pphyreg->rfintfs,
509 BRFSI_RFENV << 16, u4_regvalue);
510 break;
511 }
512
513		if (!rtstatus) {
514 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
515 ("Radio[%d] Fail!!", rfpath));
516 return false;
517 }
518
519 }
520
521 RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE, ("<---\n"));
522 return rtstatus;
523}
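Both rtl92c_phy_rf6052_set_cck_txpower() and _rtl92c_write_ofdm_power_reg() above pack four per-rate power indexes into one 32-bit TXAGC word and clamp every byte to RF6052_MAX_TX_PWR before writing it with rtl_set_bbreg(). A standalone sketch of that clamp-and-pack step (pack_txagc is an illustrative helper, not part of the patch):

#include <stdint.h>

#define RF6052_MAX_TX_PWR 0x3F	/* same limit defined in rf.h below */

/* Illustrative only: clamp each of the four power indexes to the RF6052
 * maximum and pack them byte-by-byte into one register word, mirroring
 * the per-byte loops in the functions above. */
static uint32_t pack_txagc(const uint8_t level[4])
{
	uint32_t word = 0;
	int i;

	for (i = 0; i < 4; i++) {
		uint8_t v = level[i] > RF6052_MAX_TX_PWR ?
			    RF6052_MAX_TX_PWR : level[i];
		word |= (uint32_t)v << (i * 8);
	}
	return word;
}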
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
new file mode 100644
index 000000000000..d3014f99bb7b
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/rf.h
@@ -0,0 +1,44 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92C_RF_H__
31#define __RTL92C_RF_H__
32
33#define RF6052_MAX_TX_PWR 0x3F
34#define RF6052_MAX_REG 0x3F
35#define RF6052_MAX_PATH 2
36
37extern void rtl92c_phy_rf6052_set_bandwidth(struct ieee80211_hw *hw,
38 u8 bandwidth);
39extern void rtl92c_phy_rf6052_set_cck_txpower(struct ieee80211_hw *hw,
40 u8 *ppowerlevel);
41extern void rtl92c_phy_rf6052_set_ofdm_txpower(struct ieee80211_hw *hw,
42 u8 *ppowerlevel, u8 channel);
43extern bool rtl92c_phy_rf6052_config(struct ieee80211_hw *hw);
44#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
new file mode 100644
index 000000000000..b366e8862929
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -0,0 +1,282 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include <linux/vmalloc.h>
31
32#include "../wifi.h"
33#include "../core.h"
34#include "../pci.h"
35#include "reg.h"
36#include "def.h"
37#include "phy.h"
38#include "dm.h"
39#include "hw.h"
40#include "sw.h"
41#include "trx.h"
42#include "led.h"
43
44int rtl92c_init_sw_vars(struct ieee80211_hw *hw)
45{
46 struct rtl_priv *rtlpriv = rtl_priv(hw);
47 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
48
49 rtlpriv->dm.b_dm_initialgain_enable = 1;
50 rtlpriv->dm.dm_flag = 0;
51	rtlpriv->dm.b_disable_framebursting = 0;
52 rtlpriv->dm.thermalvalue = 0;
53 rtlpci->transmit_config = CFENDFORM | BIT(12) | BIT(13);
54
55 rtlpci->receive_config = (RCR_APPFCS |
56 RCR_AMF |
57 RCR_ADF |
58 RCR_APP_MIC |
59 RCR_APP_ICV |
60 RCR_AICV |
61 RCR_ACRC32 |
62 RCR_AB |
63 RCR_AM |
64 RCR_APM |
65 RCR_APP_PHYST_RXFF | RCR_HTC_LOC_CTRL | 0);
66
67 rtlpci->irq_mask[0] =
68 (u32) (IMR_ROK |
69 IMR_VODOK |
70 IMR_VIDOK |
71 IMR_BEDOK |
72 IMR_BKDOK |
73 IMR_MGNTDOK |
74 IMR_HIGHDOK | IMR_BDOK | IMR_RDU | IMR_RXFOVW | 0);
75
76 rtlpci->irq_mask[1] = (u32) (IMR_CPWM | IMR_C2HCMD | 0);
77
78 rtlpriv->rtlhal.pfirmware = (u8 *) vmalloc(0x4000);
79 if (!rtlpriv->rtlhal.pfirmware) {
80 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
81 ("Can't alloc buffer for fw.\n"));
82 return 1;
83 }
84
85 return 0;
86}
87
88void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw)
89{
90 struct rtl_priv *rtlpriv = rtl_priv(hw);
91
92 if (rtlpriv->rtlhal.pfirmware) {
93 vfree(rtlpriv->rtlhal.pfirmware);
94 rtlpriv->rtlhal.pfirmware = NULL;
95 }
96}
97
98static struct rtl_hal_ops rtl8192ce_hal_ops = {
99 .init_sw_vars = rtl92c_init_sw_vars,
100 .deinit_sw_vars = rtl92c_deinit_sw_vars,
101 .read_eeprom_info = rtl92ce_read_eeprom_info,
102 .interrupt_recognized = rtl92ce_interrupt_recognized,
103 .hw_init = rtl92ce_hw_init,
104 .hw_disable = rtl92ce_card_disable,
105 .enable_interrupt = rtl92ce_enable_interrupt,
106 .disable_interrupt = rtl92ce_disable_interrupt,
107 .set_network_type = rtl92ce_set_network_type,
108 .set_qos = rtl92ce_set_qos,
109 .set_bcn_reg = rtl92ce_set_beacon_related_registers,
110 .set_bcn_intv = rtl92ce_set_beacon_interval,
111 .update_interrupt_mask = rtl92ce_update_interrupt_mask,
112 .get_hw_reg = rtl92ce_get_hw_reg,
113 .set_hw_reg = rtl92ce_set_hw_reg,
114 .update_rate_table = rtl92ce_update_hal_rate_table,
115 .update_rate_mask = rtl92ce_update_hal_rate_mask,
116 .fill_tx_desc = rtl92ce_tx_fill_desc,
117 .fill_tx_cmddesc = rtl92ce_tx_fill_cmddesc,
118 .query_rx_desc = rtl92ce_rx_query_desc,
119 .set_channel_access = rtl92ce_update_channel_access_setting,
120 .radio_onoff_checking = rtl92ce_gpio_radio_on_off_checking,
121 .set_bw_mode = rtl92c_phy_set_bw_mode,
122 .switch_channel = rtl92c_phy_sw_chnl,
123 .dm_watchdog = rtl92c_dm_watchdog,
124 .scan_operation_backup = rtl92c_phy_scan_operation_backup,
125 .set_rf_power_state = rtl92c_phy_set_rf_power_state,
126 .led_control = rtl92ce_led_control,
127 .set_desc = rtl92ce_set_desc,
128 .get_desc = rtl92ce_get_desc,
129 .tx_polling = rtl92ce_tx_polling,
130 .enable_hw_sec = rtl92ce_enable_hw_security_config,
131 .set_key = rtl92ce_set_key,
132 .init_sw_leds = rtl92ce_init_sw_leds,
133 .deinit_sw_leds = rtl92ce_deinit_sw_leds,
134 .get_bbreg = rtl92c_phy_query_bb_reg,
135 .set_bbreg = rtl92c_phy_set_bb_reg,
136 .get_rfreg = rtl92c_phy_query_rf_reg,
137 .set_rfreg = rtl92c_phy_set_rf_reg,
138};
139
140static struct rtl_mod_params rtl92ce_mod_params = {
141 .sw_crypto = 0,
142};
143
144static struct rtl_hal_cfg rtl92ce_hal_cfg = {
145 .name = "rtl92c_pci",
146 .fw_name = "rtlwifi/rtl8192cfw.bin",
147 .ops = &rtl8192ce_hal_ops,
148 .mod_params = &rtl92ce_mod_params,
149
150 .maps[SYS_ISO_CTRL] = REG_SYS_ISO_CTRL,
151 .maps[SYS_FUNC_EN] = REG_SYS_FUNC_EN,
152 .maps[SYS_CLK] = REG_SYS_CLKR,
153 .maps[MAC_RCR_AM] = AM,
154 .maps[MAC_RCR_AB] = AB,
155 .maps[MAC_RCR_ACRC32] = ACRC32,
156 .maps[MAC_RCR_ACF] = ACF,
157 .maps[MAC_RCR_AAP] = AAP,
158
159 .maps[EFUSE_TEST] = REG_EFUSE_TEST,
160 .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
161 .maps[EFUSE_CLK] = 0,
162 .maps[EFUSE_CLK_CTRL] = REG_EFUSE_CTRL,
163 .maps[EFUSE_PWC_EV12V] = PWC_EV12V,
164 .maps[EFUSE_FEN_ELDR] = FEN_ELDR,
165 .maps[EFUSE_LOADER_CLK_EN] = LOADER_CLK_EN,
166 .maps[EFUSE_ANA8M] = EFUSE_ANA8M,
167 .maps[EFUSE_HWSET_MAX_SIZE] = HWSET_MAX_SIZE,
168
169 .maps[RWCAM] = REG_CAMCMD,
170 .maps[WCAMI] = REG_CAMWRITE,
171 .maps[RCAMO] = REG_CAMREAD,
172 .maps[CAMDBG] = REG_CAMDBG,
173 .maps[SECR] = REG_SECCFG,
174 .maps[SEC_CAM_NONE] = CAM_NONE,
175 .maps[SEC_CAM_WEP40] = CAM_WEP40,
176 .maps[SEC_CAM_TKIP] = CAM_TKIP,
177 .maps[SEC_CAM_AES] = CAM_AES,
178 .maps[SEC_CAM_WEP104] = CAM_WEP104,
179
180 .maps[RTL_IMR_BCNDMAINT6] = IMR_BCNDMAINT6,
181 .maps[RTL_IMR_BCNDMAINT5] = IMR_BCNDMAINT5,
182 .maps[RTL_IMR_BCNDMAINT4] = IMR_BCNDMAINT4,
183 .maps[RTL_IMR_BCNDMAINT3] = IMR_BCNDMAINT3,
184 .maps[RTL_IMR_BCNDMAINT2] = IMR_BCNDMAINT2,
185 .maps[RTL_IMR_BCNDMAINT1] = IMR_BCNDMAINT1,
186 .maps[RTL_IMR_BCNDOK8] = IMR_BCNDOK8,
187 .maps[RTL_IMR_BCNDOK7] = IMR_BCNDOK7,
188 .maps[RTL_IMR_BCNDOK6] = IMR_BCNDOK6,
189 .maps[RTL_IMR_BCNDOK5] = IMR_BCNDOK5,
190 .maps[RTL_IMR_BCNDOK4] = IMR_BCNDOK4,
191 .maps[RTL_IMR_BCNDOK3] = IMR_BCNDOK3,
192 .maps[RTL_IMR_BCNDOK2] = IMR_BCNDOK2,
193 .maps[RTL_IMR_BCNDOK1] = IMR_BCNDOK1,
194 .maps[RTL_IMR_TIMEOUT2] = IMR_TIMEOUT2,
195 .maps[RTL_IMR_TIMEOUT1] = IMR_TIMEOUT1,
196
197 .maps[RTL_IMR_TXFOVW] = IMR_TXFOVW,
198 .maps[RTL_IMR_PSTIMEOUT] = IMR_PSTIMEOUT,
199 .maps[RTL_IMR_BcnInt] = IMR_BCNINT,
200 .maps[RTL_IMR_RXFOVW] = IMR_RXFOVW,
201 .maps[RTL_IMR_RDU] = IMR_RDU,
202 .maps[RTL_IMR_ATIMEND] = IMR_ATIMEND,
203 .maps[RTL_IMR_BDOK] = IMR_BDOK,
204 .maps[RTL_IMR_MGNTDOK] = IMR_MGNTDOK,
205 .maps[RTL_IMR_TBDER] = IMR_TBDER,
206 .maps[RTL_IMR_HIGHDOK] = IMR_HIGHDOK,
207 .maps[RTL_IMR_TBDOK] = IMR_TBDOK,
208 .maps[RTL_IMR_BKDOK] = IMR_BKDOK,
209 .maps[RTL_IMR_BEDOK] = IMR_BEDOK,
210 .maps[RTL_IMR_VIDOK] = IMR_VIDOK,
211 .maps[RTL_IMR_VODOK] = IMR_VODOK,
212 .maps[RTL_IMR_ROK] = IMR_ROK,
213 .maps[RTL_IBSS_INT_MASKS] = (IMR_BCNINT | IMR_TBDOK | IMR_TBDER),
214
215 .maps[RTL_RC_CCK_RATE1M] = DESC92C_RATE1M,
216 .maps[RTL_RC_CCK_RATE2M] = DESC92C_RATE2M,
217 .maps[RTL_RC_CCK_RATE5_5M] = DESC92C_RATE5_5M,
218 .maps[RTL_RC_CCK_RATE11M] = DESC92C_RATE11M,
219 .maps[RTL_RC_OFDM_RATE6M] = DESC92C_RATE6M,
220 .maps[RTL_RC_OFDM_RATE9M] = DESC92C_RATE9M,
221 .maps[RTL_RC_OFDM_RATE12M] = DESC92C_RATE12M,
222 .maps[RTL_RC_OFDM_RATE18M] = DESC92C_RATE18M,
223 .maps[RTL_RC_OFDM_RATE24M] = DESC92C_RATE24M,
224 .maps[RTL_RC_OFDM_RATE36M] = DESC92C_RATE36M,
225 .maps[RTL_RC_OFDM_RATE48M] = DESC92C_RATE48M,
226 .maps[RTL_RC_OFDM_RATE54M] = DESC92C_RATE54M,
227
228 .maps[RTL_RC_HT_RATEMCS7] = DESC92C_RATEMCS7,
229 .maps[RTL_RC_HT_RATEMCS15] = DESC92C_RATEMCS15,
230};
231
232static struct pci_device_id rtl92ce_pci_ids[] __devinitdata = {
233 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8191, rtl92ce_hal_cfg)},
234 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8178, rtl92ce_hal_cfg)},
235 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8177, rtl92ce_hal_cfg)},
236 {RTL_PCI_DEVICE(PCI_VENDOR_ID_REALTEK, 0x8176, rtl92ce_hal_cfg)},
237 {},
238};
239
240MODULE_DEVICE_TABLE(pci, rtl92ce_pci_ids);
241
242MODULE_AUTHOR("lizhaoming <chaoming_li@realsil.com.cn>");
243MODULE_AUTHOR("Realtek WlanFAE <wlanfae@realtek.com>");
244MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
245MODULE_LICENSE("GPL");
246MODULE_DESCRIPTION("Realtek 8192C/8188C 802.11n PCI wireless");
247MODULE_FIRMWARE("rtlwifi/rtl8192cfw.bin");
248
249module_param_named(swenc, rtl92ce_mod_params.sw_crypto, bool, 0444);
250MODULE_PARM_DESC(swenc, "Set to 1 to use software crypto (default 0: hardware crypto)");
251
252static struct pci_driver rtl92ce_driver = {
253 .name = KBUILD_MODNAME,
254 .id_table = rtl92ce_pci_ids,
255 .probe = rtl_pci_probe,
256 .remove = rtl_pci_disconnect,
257
258#ifdef CONFIG_PM
259 .suspend = rtl_pci_suspend,
260 .resume = rtl_pci_resume,
261#endif
262
263};
264
265static int __init rtl92ce_module_init(void)
266{
267 int ret;
268
269 ret = pci_register_driver(&rtl92ce_driver);
270 if (ret)
271 RT_ASSERT(false, (": No device found\n"));
272
273 return ret;
274}
275
276static void __exit rtl92ce_module_exit(void)
277{
278 pci_unregister_driver(&rtl92ce_driver);
279}
280
281module_init(rtl92ce_module_init);
282module_exit(rtl92ce_module_exit);
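rtl8192ce_hal_ops and rtl92ce_hal_cfg form the per-chip vtable and register map handed to the shared rtlwifi core through rtl_pci_probe(). A sketch of the dispatch pattern this implies; the field path rtlpriv->cfg->ops is assumed here, since the common code is not part of this hunk:

/* Assumed dispatch shape: the chip-independent core keeps a pointer to the
 * rtl_hal_cfg registered above and reaches the rtl8192ce handlers through
 * its ops table rather than calling them directly. */
static void example_dispatch(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtlpriv->cfg->ops->hw_init(hw);			/* -> rtl92ce_hw_init() */
	rtlpriv->cfg->ops->enable_interrupt(hw);	/* -> rtl92ce_enable_interrupt() */
}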
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
new file mode 100644
index 000000000000..de1198c38d4e
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.h
@@ -0,0 +1,37 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CE_SW_H__
31#define __RTL92CE_SW_H__
32
33int rtl92c_init_sw_vars(struct ieee80211_hw *hw);
34void rtl92c_deinit_sw_vars(struct ieee80211_hw *hw);
35void rtl92c_init_var_map(struct ieee80211_hw *hw);
36
37#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.c b/drivers/net/wireless/rtlwifi/rtl8192ce/table.c
new file mode 100644
index 000000000000..ba938b91aa6f
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/table.c
@@ -0,0 +1,1224 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Created on 2010/ 5/18, 1:41
27 *
28 * Larry Finger <Larry.Finger@lwfinger.net>
29 *
30 *****************************************************************************/
31
32#include "table.h"
33
34
35u32 RTL8192CEPHY_REG_2TARRAY[PHY_REG_2TARRAY_LENGTH] = {
36 0x024, 0x0011800f,
37 0x028, 0x00ffdb83,
38 0x800, 0x80040002,
39 0x804, 0x00000003,
40 0x808, 0x0000fc00,
41 0x80c, 0x0000000a,
42 0x810, 0x10005388,
43 0x814, 0x020c3d10,
44 0x818, 0x02200385,
45 0x81c, 0x00000000,
46 0x820, 0x01000100,
47 0x824, 0x00390004,
48 0x828, 0x01000100,
49 0x82c, 0x00390004,
50 0x830, 0x27272727,
51 0x834, 0x27272727,
52 0x838, 0x27272727,
53 0x83c, 0x27272727,
54 0x840, 0x00010000,
55 0x844, 0x00010000,
56 0x848, 0x27272727,
57 0x84c, 0x27272727,
58 0x850, 0x00000000,
59 0x854, 0x00000000,
60 0x858, 0x569a569a,
61 0x85c, 0x0c1b25a4,
62 0x860, 0x66e60230,
63 0x864, 0x061f0130,
64 0x868, 0x27272727,
65 0x86c, 0x2b2b2b27,
66 0x870, 0x07000700,
67 0x874, 0x22184000,
68 0x878, 0x08080808,
69 0x87c, 0x00000000,
70 0x880, 0xc0083070,
71 0x884, 0x000004d5,
72 0x888, 0x00000000,
73 0x88c, 0xcc0000c0,
74 0x890, 0x00000800,
75 0x894, 0xfffffffe,
76 0x898, 0x40302010,
77 0x89c, 0x00706050,
78 0x900, 0x00000000,
79 0x904, 0x00000023,
80 0x908, 0x00000000,
81 0x90c, 0x81121313,
82 0xa00, 0x00d047c8,
83 0xa04, 0x80ff000c,
84 0xa08, 0x8c838300,
85 0xa0c, 0x2e68120f,
86 0xa10, 0x9500bb78,
87 0xa14, 0x11144028,
88 0xa18, 0x00881117,
89 0xa1c, 0x89140f00,
90 0xa20, 0x1a1b0000,
91 0xa24, 0x090e1317,
92 0xa28, 0x00000204,
93 0xa2c, 0x00d30000,
94 0xa70, 0x101fbf00,
95 0xa74, 0x00000007,
96 0xc00, 0x48071d40,
97 0xc04, 0x03a05633,
98 0xc08, 0x000000e4,
99 0xc0c, 0x6c6c6c6c,
100 0xc10, 0x08800000,
101 0xc14, 0x40000100,
102 0xc18, 0x08800000,
103 0xc1c, 0x40000100,
104 0xc20, 0x00000000,
105 0xc24, 0x00000000,
106 0xc28, 0x00000000,
107 0xc2c, 0x00000000,
108 0xc30, 0x69e9ac44,
109 0xc34, 0x469652cf,
110 0xc38, 0x49795994,
111 0xc3c, 0x0a97971c,
112 0xc40, 0x1f7c403f,
113 0xc44, 0x000100b7,
114 0xc48, 0xec020107,
115 0xc4c, 0x007f037f,
116 0xc50, 0x69543420,
117 0xc54, 0x43bc0094,
118 0xc58, 0x69543420,
119 0xc5c, 0x433c0094,
120 0xc60, 0x00000000,
121 0xc64, 0x5116848b,
122 0xc68, 0x47c00bff,
123 0xc6c, 0x00000036,
124 0xc70, 0x2c7f000d,
125 0xc74, 0x018610db,
126 0xc78, 0x0000001f,
127 0xc7c, 0x00b91612,
128 0xc80, 0x40000100,
129 0xc84, 0x20f60000,
130 0xc88, 0x40000100,
131 0xc8c, 0x20200000,
132 0xc90, 0x00121820,
133 0xc94, 0x00000000,
134 0xc98, 0x00121820,
135 0xc9c, 0x00007f7f,
136 0xca0, 0x00000000,
137 0xca4, 0x00000080,
138 0xca8, 0x00000000,
139 0xcac, 0x00000000,
140 0xcb0, 0x00000000,
141 0xcb4, 0x00000000,
142 0xcb8, 0x00000000,
143 0xcbc, 0x28000000,
144 0xcc0, 0x00000000,
145 0xcc4, 0x00000000,
146 0xcc8, 0x00000000,
147 0xccc, 0x00000000,
148 0xcd0, 0x00000000,
149 0xcd4, 0x00000000,
150 0xcd8, 0x64b22427,
151 0xcdc, 0x00766932,
152 0xce0, 0x00222222,
153 0xce4, 0x00000000,
154 0xce8, 0x37644302,
155 0xcec, 0x2f97d40c,
156 0xd00, 0x00080740,
157 0xd04, 0x00020403,
158 0xd08, 0x0000907f,
159 0xd0c, 0x20010201,
160 0xd10, 0xa0633333,
161 0xd14, 0x3333bc43,
162 0xd18, 0x7a8f5b6b,
163 0xd2c, 0xcc979975,
164 0xd30, 0x00000000,
165 0xd34, 0x80608000,
166 0xd38, 0x00000000,
167 0xd3c, 0x00027293,
168 0xd40, 0x00000000,
169 0xd44, 0x00000000,
170 0xd48, 0x00000000,
171 0xd4c, 0x00000000,
172 0xd50, 0x6437140a,
173 0xd54, 0x00000000,
174 0xd58, 0x00000000,
175 0xd5c, 0x30032064,
176 0xd60, 0x4653de68,
177 0xd64, 0x04518a3c,
178 0xd68, 0x00002101,
179 0xd6c, 0x2a201c16,
180 0xd70, 0x1812362e,
181 0xd74, 0x322c2220,
182 0xd78, 0x000e3c24,
183 0xe00, 0x2a2a2a2a,
184 0xe04, 0x2a2a2a2a,
185 0xe08, 0x03902a2a,
186 0xe10, 0x2a2a2a2a,
187 0xe14, 0x2a2a2a2a,
188 0xe18, 0x2a2a2a2a,
189 0xe1c, 0x2a2a2a2a,
190 0xe28, 0x00000000,
191 0xe30, 0x1000dc1f,
192 0xe34, 0x10008c1f,
193 0xe38, 0x02140102,
194 0xe3c, 0x681604c2,
195 0xe40, 0x01007c00,
196 0xe44, 0x01004800,
197 0xe48, 0xfb000000,
198 0xe4c, 0x000028d1,
199 0xe50, 0x1000dc1f,
200 0xe54, 0x10008c1f,
201 0xe58, 0x02140102,
202 0xe5c, 0x28160d05,
203 0xe60, 0x00000010,
204 0xe68, 0x001b25a4,
205 0xe6c, 0x63db25a4,
206 0xe70, 0x63db25a4,
207 0xe74, 0x0c1b25a4,
208 0xe78, 0x0c1b25a4,
209 0xe7c, 0x0c1b25a4,
210 0xe80, 0x0c1b25a4,
211 0xe84, 0x63db25a4,
212 0xe88, 0x0c1b25a4,
213 0xe8c, 0x63db25a4,
214 0xed0, 0x63db25a4,
215 0xed4, 0x63db25a4,
216 0xed8, 0x63db25a4,
217 0xedc, 0x001b25a4,
218 0xee0, 0x001b25a4,
219 0xeec, 0x6fdb25a4,
220 0xf14, 0x00000003,
221 0xf4c, 0x00000000,
222 0xf00, 0x00000300,
223};
224
225u32 RTL8192CEPHY_REG_1TARRAY[PHY_REG_1TARRAY_LENGTH] = {
226 0x024, 0x0011800f,
227 0x028, 0x00ffdb83,
228 0x800, 0x80040000,
229 0x804, 0x00000001,
230 0x808, 0x0000fc00,
231 0x80c, 0x0000000a,
232 0x810, 0x10005388,
233 0x814, 0x020c3d10,
234 0x818, 0x02200385,
235 0x81c, 0x00000000,
236 0x820, 0x01000100,
237 0x824, 0x00390004,
238 0x828, 0x00000000,
239 0x82c, 0x00000000,
240 0x830, 0x00000000,
241 0x834, 0x00000000,
242 0x838, 0x00000000,
243 0x83c, 0x00000000,
244 0x840, 0x00010000,
245 0x844, 0x00000000,
246 0x848, 0x00000000,
247 0x84c, 0x00000000,
248 0x850, 0x00000000,
249 0x854, 0x00000000,
250 0x858, 0x569a569a,
251 0x85c, 0x001b25a4,
252 0x860, 0x66e60230,
253 0x864, 0x061f0130,
254 0x868, 0x00000000,
255 0x86c, 0x32323200,
256 0x870, 0x07000700,
257 0x874, 0x22004000,
258 0x878, 0x00000808,
259 0x87c, 0x00000000,
260 0x880, 0xc0083070,
261 0x884, 0x000004d5,
262 0x888, 0x00000000,
263 0x88c, 0xccc000c0,
264 0x890, 0x00000800,
265 0x894, 0xfffffffe,
266 0x898, 0x40302010,
267 0x89c, 0x00706050,
268 0x900, 0x00000000,
269 0x904, 0x00000023,
270 0x908, 0x00000000,
271 0x90c, 0x81121111,
272 0xa00, 0x00d047c8,
273 0xa04, 0x80ff000c,
274 0xa08, 0x8c838300,
275 0xa0c, 0x2e68120f,
276 0xa10, 0x9500bb78,
277 0xa14, 0x11144028,
278 0xa18, 0x00881117,
279 0xa1c, 0x89140f00,
280 0xa20, 0x1a1b0000,
281 0xa24, 0x090e1317,
282 0xa28, 0x00000204,
283 0xa2c, 0x00d30000,
284 0xa70, 0x101fbf00,
285 0xa74, 0x00000007,
286 0xc00, 0x48071d40,
287 0xc04, 0x03a05611,
288 0xc08, 0x000000e4,
289 0xc0c, 0x6c6c6c6c,
290 0xc10, 0x08800000,
291 0xc14, 0x40000100,
292 0xc18, 0x08800000,
293 0xc1c, 0x40000100,
294 0xc20, 0x00000000,
295 0xc24, 0x00000000,
296 0xc28, 0x00000000,
297 0xc2c, 0x00000000,
298 0xc30, 0x69e9ac44,
299 0xc34, 0x469652cf,
300 0xc38, 0x49795994,
301 0xc3c, 0x0a97971c,
302 0xc40, 0x1f7c403f,
303 0xc44, 0x000100b7,
304 0xc48, 0xec020107,
305 0xc4c, 0x007f037f,
306 0xc50, 0x69543420,
307 0xc54, 0x43bc0094,
308 0xc58, 0x69543420,
309 0xc5c, 0x433c0094,
310 0xc60, 0x00000000,
311 0xc64, 0x5116848b,
312 0xc68, 0x47c00bff,
313 0xc6c, 0x00000036,
314 0xc70, 0x2c7f000d,
315 0xc74, 0x018610db,
316 0xc78, 0x0000001f,
317 0xc7c, 0x00b91612,
318 0xc80, 0x40000100,
319 0xc84, 0x20f60000,
320 0xc88, 0x40000100,
321 0xc8c, 0x20200000,
322 0xc90, 0x00121820,
323 0xc94, 0x00000000,
324 0xc98, 0x00121820,
325 0xc9c, 0x00007f7f,
326 0xca0, 0x00000000,
327 0xca4, 0x00000080,
328 0xca8, 0x00000000,
329 0xcac, 0x00000000,
330 0xcb0, 0x00000000,
331 0xcb4, 0x00000000,
332 0xcb8, 0x00000000,
333 0xcbc, 0x28000000,
334 0xcc0, 0x00000000,
335 0xcc4, 0x00000000,
336 0xcc8, 0x00000000,
337 0xccc, 0x00000000,
338 0xcd0, 0x00000000,
339 0xcd4, 0x00000000,
340 0xcd8, 0x64b22427,
341 0xcdc, 0x00766932,
342 0xce0, 0x00222222,
343 0xce4, 0x00000000,
344 0xce8, 0x37644302,
345 0xcec, 0x2f97d40c,
346 0xd00, 0x00080740,
347 0xd04, 0x00020401,
348 0xd08, 0x0000907f,
349 0xd0c, 0x20010201,
350 0xd10, 0xa0633333,
351 0xd14, 0x3333bc43,
352 0xd18, 0x7a8f5b6b,
353 0xd2c, 0xcc979975,
354 0xd30, 0x00000000,
355 0xd34, 0x80608000,
356 0xd38, 0x00000000,
357 0xd3c, 0x00027293,
358 0xd40, 0x00000000,
359 0xd44, 0x00000000,
360 0xd48, 0x00000000,
361 0xd4c, 0x00000000,
362 0xd50, 0x6437140a,
363 0xd54, 0x00000000,
364 0xd58, 0x00000000,
365 0xd5c, 0x30032064,
366 0xd60, 0x4653de68,
367 0xd64, 0x04518a3c,
368 0xd68, 0x00002101,
369 0xd6c, 0x2a201c16,
370 0xd70, 0x1812362e,
371 0xd74, 0x322c2220,
372 0xd78, 0x000e3c24,
373 0xe00, 0x2a2a2a2a,
374 0xe04, 0x2a2a2a2a,
375 0xe08, 0x03902a2a,
376 0xe10, 0x2a2a2a2a,
377 0xe14, 0x2a2a2a2a,
378 0xe18, 0x2a2a2a2a,
379 0xe1c, 0x2a2a2a2a,
380 0xe28, 0x00000000,
381 0xe30, 0x1000dc1f,
382 0xe34, 0x10008c1f,
383 0xe38, 0x02140102,
384 0xe3c, 0x681604c2,
385 0xe40, 0x01007c00,
386 0xe44, 0x01004800,
387 0xe48, 0xfb000000,
388 0xe4c, 0x000028d1,
389 0xe50, 0x1000dc1f,
390 0xe54, 0x10008c1f,
391 0xe58, 0x02140102,
392 0xe5c, 0x28160d05,
393 0xe60, 0x00000010,
394 0xe68, 0x001b25a4,
395 0xe6c, 0x631b25a0,
396 0xe70, 0x631b25a0,
397 0xe74, 0x081b25a0,
398 0xe78, 0x081b25a0,
399 0xe7c, 0x081b25a0,
400 0xe80, 0x081b25a0,
401 0xe84, 0x631b25a0,
402 0xe88, 0x081b25a0,
403 0xe8c, 0x631b25a0,
404 0xed0, 0x631b25a0,
405 0xed4, 0x631b25a0,
406 0xed8, 0x631b25a0,
407 0xedc, 0x001b25a0,
408 0xee0, 0x001b25a0,
409 0xeec, 0x6b1b25a0,
410 0xf14, 0x00000003,
411 0xf4c, 0x00000000,
412 0xf00, 0x00000300,
413};
414
415u32 RTL8192CEPHY_REG_ARRAY_PG[PHY_REG_ARRAY_PGLENGTH] = {
416 0xe00, 0xffffffff, 0x0a0c0c0c,
417 0xe04, 0xffffffff, 0x02040608,
418 0xe08, 0x0000ff00, 0x00000000,
419 0x86c, 0xffffff00, 0x00000000,
420 0xe10, 0xffffffff, 0x0a0c0d0e,
421 0xe14, 0xffffffff, 0x02040608,
422 0xe18, 0xffffffff, 0x0a0c0d0e,
423 0xe1c, 0xffffffff, 0x02040608,
424 0x830, 0xffffffff, 0x0a0c0c0c,
425 0x834, 0xffffffff, 0x02040608,
426 0x838, 0xffffff00, 0x00000000,
427 0x86c, 0x000000ff, 0x00000000,
428 0x83c, 0xffffffff, 0x0a0c0d0e,
429 0x848, 0xffffffff, 0x02040608,
430 0x84c, 0xffffffff, 0x0a0c0d0e,
431 0x868, 0xffffffff, 0x02040608,
432 0xe00, 0xffffffff, 0x00000000,
433 0xe04, 0xffffffff, 0x00000000,
434 0xe08, 0x0000ff00, 0x00000000,
435 0x86c, 0xffffff00, 0x00000000,
436 0xe10, 0xffffffff, 0x00000000,
437 0xe14, 0xffffffff, 0x00000000,
438 0xe18, 0xffffffff, 0x00000000,
439 0xe1c, 0xffffffff, 0x00000000,
440 0x830, 0xffffffff, 0x00000000,
441 0x834, 0xffffffff, 0x00000000,
442 0x838, 0xffffff00, 0x00000000,
443 0x86c, 0x000000ff, 0x00000000,
444 0x83c, 0xffffffff, 0x00000000,
445 0x848, 0xffffffff, 0x00000000,
446 0x84c, 0xffffffff, 0x00000000,
447 0x868, 0xffffffff, 0x00000000,
448 0xe00, 0xffffffff, 0x04040404,
449 0xe04, 0xffffffff, 0x00020204,
450 0xe08, 0x0000ff00, 0x00000000,
451 0x86c, 0xffffff00, 0x00000000,
452 0xe10, 0xffffffff, 0x06060606,
453 0xe14, 0xffffffff, 0x00020406,
454 0xe18, 0xffffffff, 0x06060606,
455 0xe1c, 0xffffffff, 0x00020406,
456 0x830, 0xffffffff, 0x04040404,
457 0x834, 0xffffffff, 0x00020204,
458 0x838, 0xffffff00, 0x00000000,
459 0x86c, 0x000000ff, 0x00000000,
460 0x83c, 0xffffffff, 0x06060606,
461 0x848, 0xffffffff, 0x00020406,
462 0x84c, 0xffffffff, 0x06060606,
463 0x868, 0xffffffff, 0x00020406,
464 0xe00, 0xffffffff, 0x00000000,
465 0xe04, 0xffffffff, 0x00000000,
466 0xe08, 0x0000ff00, 0x00000000,
467 0x86c, 0xffffff00, 0x00000000,
468 0xe10, 0xffffffff, 0x00000000,
469 0xe14, 0xffffffff, 0x00000000,
470 0xe18, 0xffffffff, 0x00000000,
471 0xe1c, 0xffffffff, 0x00000000,
472 0x830, 0xffffffff, 0x00000000,
473 0x834, 0xffffffff, 0x00000000,
474 0x838, 0xffffff00, 0x00000000,
475 0x86c, 0x000000ff, 0x00000000,
476 0x83c, 0xffffffff, 0x00000000,
477 0x848, 0xffffffff, 0x00000000,
478 0x84c, 0xffffffff, 0x00000000,
479 0x868, 0xffffffff, 0x00000000,
480};
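RTL8192CEPHY_REG_2TARRAY and RTL8192CEPHY_REG_1TARRAY above are flat lists of (register address, 32-bit value) pairs, while RTL8192CEPHY_REG_ARRAY_PG inserts a bitmask between them, making each entry an (address, bitmask, value) triple. A sketch of walking a pair table with the register accessor used elsewhere in this patch; the driver's actual table loader is in phy.c, which is not part of this hunk, and how the three-field PG entries are consumed is likewise outside it:

/* Sketch only: step an (addr, value) pair table and write each entry through
 * the masked BB accessor; MASKDWORD selects the whole 32-bit register. */
static void write_phy_pairs(struct ieee80211_hw *hw,
			    const u32 *tbl, u32 entries)
{
	u32 i;

	for (i = 0; i + 1 < entries; i += 2)
		rtl_set_bbreg(hw, tbl[i], MASKDWORD, tbl[i + 1]);
}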
481
482u32 RTL8192CERADIOA_2TARRAY[RADIOA_2TARRAYLENGTH] = {
483 0x000, 0x00030159,
484 0x001, 0x00031284,
485 0x002, 0x00098000,
486 0x003, 0x00018c63,
487 0x004, 0x000210e7,
488 0x009, 0x0002044f,
489 0x00a, 0x0001adb0,
490 0x00b, 0x00054867,
491 0x00c, 0x0008992e,
492 0x00d, 0x0000e52c,
493 0x00e, 0x00039ce7,
494 0x00f, 0x00000451,
495 0x019, 0x00000000,
496 0x01a, 0x00010255,
497 0x01b, 0x00060a00,
498 0x01c, 0x000fc378,
499 0x01d, 0x000a1250,
500 0x01e, 0x0004445f,
501 0x01f, 0x00080001,
502 0x020, 0x0000b614,
503 0x021, 0x0006c000,
504 0x022, 0x00000000,
505 0x023, 0x00001558,
506 0x024, 0x00000060,
507 0x025, 0x00000483,
508 0x026, 0x0004f000,
509 0x027, 0x000ec7d9,
510 0x028, 0x000977c0,
511 0x029, 0x00004783,
512 0x02a, 0x00000001,
513 0x02b, 0x00021334,
514 0x02a, 0x00000000,
515 0x02b, 0x00000054,
516 0x02a, 0x00000001,
517 0x02b, 0x00000808,
518 0x02b, 0x00053333,
519 0x02c, 0x0000000c,
520 0x02a, 0x00000002,
521 0x02b, 0x00000808,
522 0x02b, 0x0005b333,
523 0x02c, 0x0000000d,
524 0x02a, 0x00000003,
525 0x02b, 0x00000808,
526 0x02b, 0x00063333,
527 0x02c, 0x0000000d,
528 0x02a, 0x00000004,
529 0x02b, 0x00000808,
530 0x02b, 0x0006b333,
531 0x02c, 0x0000000d,
532 0x02a, 0x00000005,
533 0x02b, 0x00000808,
534 0x02b, 0x00073333,
535 0x02c, 0x0000000d,
536 0x02a, 0x00000006,
537 0x02b, 0x00000709,
538 0x02b, 0x0005b333,
539 0x02c, 0x0000000d,
540 0x02a, 0x00000007,
541 0x02b, 0x00000709,
542 0x02b, 0x00063333,
543 0x02c, 0x0000000d,
544 0x02a, 0x00000008,
545 0x02b, 0x0000060a,
546 0x02b, 0x0004b333,
547 0x02c, 0x0000000d,
548 0x02a, 0x00000009,
549 0x02b, 0x0000060a,
550 0x02b, 0x00053333,
551 0x02c, 0x0000000d,
552 0x02a, 0x0000000a,
553 0x02b, 0x0000060a,
554 0x02b, 0x0005b333,
555 0x02c, 0x0000000d,
556 0x02a, 0x0000000b,
557 0x02b, 0x0000060a,
558 0x02b, 0x00063333,
559 0x02c, 0x0000000d,
560 0x02a, 0x0000000c,
561 0x02b, 0x0000060a,
562 0x02b, 0x0006b333,
563 0x02c, 0x0000000d,
564 0x02a, 0x0000000d,
565 0x02b, 0x0000060a,
566 0x02b, 0x00073333,
567 0x02c, 0x0000000d,
568 0x02a, 0x0000000e,
569 0x02b, 0x0000050b,
570 0x02b, 0x00066666,
571 0x02c, 0x0000001a,
572 0x02a, 0x000e0000,
573 0x010, 0x0004000f,
574 0x011, 0x000e31fc,
575 0x010, 0x0006000f,
576 0x011, 0x000ff9f8,
577 0x010, 0x0002000f,
578 0x011, 0x000203f9,
579 0x010, 0x0003000f,
580 0x011, 0x000ff500,
581 0x010, 0x00000000,
582 0x011, 0x00000000,
583 0x010, 0x0008000f,
584 0x011, 0x0003f100,
585 0x010, 0x0009000f,
586 0x011, 0x00023100,
587 0x012, 0x00032000,
588 0x012, 0x00071000,
589 0x012, 0x000b0000,
590 0x012, 0x000fc000,
591 0x013, 0x000287af,
592 0x013, 0x000244b7,
593 0x013, 0x000204ab,
594 0x013, 0x0001c49f,
595 0x013, 0x00018493,
596 0x013, 0x00014297,
597 0x013, 0x00010295,
598 0x013, 0x0000c298,
599 0x013, 0x0000819c,
600 0x013, 0x000040a8,
601 0x013, 0x0000001c,
602 0x014, 0x0001944c,
603 0x014, 0x00059444,
604 0x014, 0x0009944c,
605 0x014, 0x000d9444,
606 0x015, 0x0000f424,
607 0x015, 0x0004f424,
608 0x015, 0x0008f424,
609 0x015, 0x000cf424,
610 0x016, 0x000e0330,
611 0x016, 0x000a0330,
612 0x016, 0x00060330,
613 0x016, 0x00020330,
614 0x000, 0x00010159,
615 0x018, 0x0000f401,
616 0x0fe, 0x00000000,
617 0x0fe, 0x00000000,
618 0x01f, 0x00080003,
619 0x0fe, 0x00000000,
620 0x0fe, 0x00000000,
621 0x01e, 0x00044457,
622 0x01f, 0x00080000,
623 0x000, 0x00030159,
624};
625
626u32 RTL8192CE_RADIOB_2TARRAY[RADIOB_2TARRAYLENGTH] = {
627 0x000, 0x00030159,
628 0x001, 0x00031284,
629 0x002, 0x00098000,
630 0x003, 0x00018c63,
631 0x004, 0x000210e7,
632 0x009, 0x0002044f,
633 0x00a, 0x0001adb0,
634 0x00b, 0x00054867,
635 0x00c, 0x0008992e,
636 0x00d, 0x0000e52c,
637 0x00e, 0x00039ce7,
638 0x00f, 0x00000451,
639 0x012, 0x00032000,
640 0x012, 0x00071000,
641 0x012, 0x000b0000,
642 0x012, 0x000fc000,
643 0x013, 0x000287af,
644 0x013, 0x000244b7,
645 0x013, 0x000204ab,
646 0x013, 0x0001c49f,
647 0x013, 0x00018493,
648 0x013, 0x00014297,
649 0x013, 0x00010295,
650 0x013, 0x0000c298,
651 0x013, 0x0000819c,
652 0x013, 0x000040a8,
653 0x013, 0x0000001c,
654 0x014, 0x0001944c,
655 0x014, 0x00059444,
656 0x014, 0x0009944c,
657 0x014, 0x000d9444,
658 0x015, 0x0000f424,
659 0x015, 0x0004f424,
660 0x015, 0x0008f424,
661 0x015, 0x000cf424,
662 0x016, 0x000e0330,
663 0x016, 0x000a0330,
664 0x016, 0x00060330,
665 0x016, 0x00020330,
666};
667
668u32 RTL8192CE_RADIOA_1TARRAY[RADIOA_1TARRAYLENGTH] = {
669 0x000, 0x00030159,
670 0x001, 0x00031284,
671 0x002, 0x00098000,
672 0x003, 0x00018c63,
673 0x004, 0x000210e7,
674 0x009, 0x0002044f,
675 0x00a, 0x0001adb0,
676 0x00b, 0x00054867,
677 0x00c, 0x0008992e,
678 0x00d, 0x0000e52c,
679 0x00e, 0x00039ce7,
680 0x00f, 0x00000451,
681 0x019, 0x00000000,
682 0x01a, 0x00010255,
683 0x01b, 0x00060a00,
684 0x01c, 0x000fc378,
685 0x01d, 0x000a1250,
686 0x01e, 0x0004445f,
687 0x01f, 0x00080001,
688 0x020, 0x0000b614,
689 0x021, 0x0006c000,
690 0x022, 0x00000000,
691 0x023, 0x00001558,
692 0x024, 0x00000060,
693 0x025, 0x00000483,
694 0x026, 0x0004f000,
695 0x027, 0x000ec7d9,
696 0x028, 0x000977c0,
697 0x029, 0x00004783,
698 0x02a, 0x00000001,
699 0x02b, 0x00021334,
700 0x02a, 0x00000000,
701 0x02b, 0x00000054,
702 0x02a, 0x00000001,
703 0x02b, 0x00000808,
704 0x02b, 0x00053333,
705 0x02c, 0x0000000c,
706 0x02a, 0x00000002,
707 0x02b, 0x00000808,
708 0x02b, 0x0005b333,
709 0x02c, 0x0000000d,
710 0x02a, 0x00000003,
711 0x02b, 0x00000808,
712 0x02b, 0x00063333,
713 0x02c, 0x0000000d,
714 0x02a, 0x00000004,
715 0x02b, 0x00000808,
716 0x02b, 0x0006b333,
717 0x02c, 0x0000000d,
718 0x02a, 0x00000005,
719 0x02b, 0x00000808,
720 0x02b, 0x00073333,
721 0x02c, 0x0000000d,
722 0x02a, 0x00000006,
723 0x02b, 0x00000709,
724 0x02b, 0x0005b333,
725 0x02c, 0x0000000d,
726 0x02a, 0x00000007,
727 0x02b, 0x00000709,
728 0x02b, 0x00063333,
729 0x02c, 0x0000000d,
730 0x02a, 0x00000008,
731 0x02b, 0x0000060a,
732 0x02b, 0x0004b333,
733 0x02c, 0x0000000d,
734 0x02a, 0x00000009,
735 0x02b, 0x0000060a,
736 0x02b, 0x00053333,
737 0x02c, 0x0000000d,
738 0x02a, 0x0000000a,
739 0x02b, 0x0000060a,
740 0x02b, 0x0005b333,
741 0x02c, 0x0000000d,
742 0x02a, 0x0000000b,
743 0x02b, 0x0000060a,
744 0x02b, 0x00063333,
745 0x02c, 0x0000000d,
746 0x02a, 0x0000000c,
747 0x02b, 0x0000060a,
748 0x02b, 0x0006b333,
749 0x02c, 0x0000000d,
750 0x02a, 0x0000000d,
751 0x02b, 0x0000060a,
752 0x02b, 0x00073333,
753 0x02c, 0x0000000d,
754 0x02a, 0x0000000e,
755 0x02b, 0x0000050b,
756 0x02b, 0x00066666,
757 0x02c, 0x0000001a,
758 0x02a, 0x000e0000,
759 0x010, 0x0004000f,
760 0x011, 0x000e31fc,
761 0x010, 0x0006000f,
762 0x011, 0x000ff9f8,
763 0x010, 0x0002000f,
764 0x011, 0x000203f9,
765 0x010, 0x0003000f,
766 0x011, 0x000ff500,
767 0x010, 0x00000000,
768 0x011, 0x00000000,
769 0x010, 0x0008000f,
770 0x011, 0x0003f100,
771 0x010, 0x0009000f,
772 0x011, 0x00023100,
773 0x012, 0x00032000,
774 0x012, 0x00071000,
775 0x012, 0x000b0000,
776 0x012, 0x000fc000,
777 0x013, 0x000287af,
778 0x013, 0x000244b7,
779 0x013, 0x000204ab,
780 0x013, 0x0001c49f,
781 0x013, 0x00018493,
782 0x013, 0x00014297,
783 0x013, 0x00010295,
784 0x013, 0x0000c298,
785 0x013, 0x0000819c,
786 0x013, 0x000040a8,
787 0x013, 0x0000001c,
788 0x014, 0x0001944c,
789 0x014, 0x00059444,
790 0x014, 0x0009944c,
791 0x014, 0x000d9444,
792 0x015, 0x0000f424,
793 0x015, 0x0004f424,
794 0x015, 0x0008f424,
795 0x015, 0x000cf424,
796 0x016, 0x000e0330,
797 0x016, 0x000a0330,
798 0x016, 0x00060330,
799 0x016, 0x00020330,
800 0x000, 0x00010159,
801 0x018, 0x0000f401,
802 0x0fe, 0x00000000,
803 0x0fe, 0x00000000,
804 0x01f, 0x00080003,
805 0x0fe, 0x00000000,
806 0x0fe, 0x00000000,
807 0x01e, 0x00044457,
808 0x01f, 0x00080000,
809 0x000, 0x00030159,
810};
811
812u32 RTL8192CE_RADIOB_1TARRAY[RADIOB_1TARRAYLENGTH] = {
813 0x0,
814};
815
816u32 RTL8192CEMAC_2T_ARRAY[MAC_2T_ARRAYLENGTH] = {
817 0x420, 0x00000080,
818 0x423, 0x00000000,
819 0x430, 0x00000000,
820 0x431, 0x00000000,
821 0x432, 0x00000000,
822 0x433, 0x00000001,
823 0x434, 0x00000004,
824 0x435, 0x00000005,
825 0x436, 0x00000006,
826 0x437, 0x00000007,
827 0x438, 0x00000000,
828 0x439, 0x00000000,
829 0x43a, 0x00000000,
830 0x43b, 0x00000001,
831 0x43c, 0x00000004,
832 0x43d, 0x00000005,
833 0x43e, 0x00000006,
834 0x43f, 0x00000007,
835 0x440, 0x0000005d,
836 0x441, 0x00000001,
837 0x442, 0x00000000,
838 0x444, 0x00000015,
839 0x445, 0x000000f0,
840 0x446, 0x0000000f,
841 0x447, 0x00000000,
842 0x458, 0x00000041,
843 0x459, 0x000000a8,
844 0x45a, 0x00000072,
845 0x45b, 0x000000b9,
846 0x460, 0x00000088,
847 0x461, 0x00000088,
848 0x462, 0x00000006,
849 0x463, 0x00000003,
850 0x4c8, 0x00000004,
851 0x4c9, 0x00000008,
852 0x4cc, 0x00000002,
853 0x4cd, 0x00000028,
854 0x4ce, 0x00000001,
855 0x500, 0x00000026,
856 0x501, 0x000000a2,
857 0x502, 0x0000002f,
858 0x503, 0x00000000,
859 0x504, 0x00000028,
860 0x505, 0x000000a3,
861 0x506, 0x0000005e,
862 0x507, 0x00000000,
863 0x508, 0x0000002b,
864 0x509, 0x000000a4,
865 0x50a, 0x0000005e,
866 0x50b, 0x00000000,
867 0x50c, 0x0000004f,
868 0x50d, 0x000000a4,
869 0x50e, 0x00000000,
870 0x50f, 0x00000000,
871 0x512, 0x0000001c,
872 0x514, 0x0000000a,
873 0x515, 0x00000010,
874 0x516, 0x0000000a,
875 0x517, 0x00000010,
876 0x51a, 0x00000016,
877 0x524, 0x0000000f,
878 0x525, 0x0000004f,
879 0x546, 0x00000020,
880 0x547, 0x00000000,
881 0x559, 0x00000002,
882 0x55a, 0x00000002,
883 0x55d, 0x000000ff,
884 0x605, 0x00000030,
885 0x608, 0x0000000e,
886 0x609, 0x0000002a,
887 0x652, 0x00000020,
888 0x63c, 0x0000000a,
889 0x63d, 0x0000000a,
890 0x700, 0x00000021,
891 0x701, 0x00000043,
892 0x702, 0x00000065,
893 0x703, 0x00000087,
894 0x708, 0x00000021,
895 0x709, 0x00000043,
896 0x70a, 0x00000065,
897 0x70b, 0x00000087,
898};
899
900u32 RTL8192CEAGCTAB_2TARRAY[AGCTAB_2TARRAYLENGTH] = {
901 0xc78, 0x7b000001,
902 0xc78, 0x7b010001,
903 0xc78, 0x7b020001,
904 0xc78, 0x7b030001,
905 0xc78, 0x7b040001,
906 0xc78, 0x7b050001,
907 0xc78, 0x7a060001,
908 0xc78, 0x79070001,
909 0xc78, 0x78080001,
910 0xc78, 0x77090001,
911 0xc78, 0x760a0001,
912 0xc78, 0x750b0001,
913 0xc78, 0x740c0001,
914 0xc78, 0x730d0001,
915 0xc78, 0x720e0001,
916 0xc78, 0x710f0001,
917 0xc78, 0x70100001,
918 0xc78, 0x6f110001,
919 0xc78, 0x6e120001,
920 0xc78, 0x6d130001,
921 0xc78, 0x6c140001,
922 0xc78, 0x6b150001,
923 0xc78, 0x6a160001,
924 0xc78, 0x69170001,
925 0xc78, 0x68180001,
926 0xc78, 0x67190001,
927 0xc78, 0x661a0001,
928 0xc78, 0x651b0001,
929 0xc78, 0x641c0001,
930 0xc78, 0x631d0001,
931 0xc78, 0x621e0001,
932 0xc78, 0x611f0001,
933 0xc78, 0x60200001,
934 0xc78, 0x49210001,
935 0xc78, 0x48220001,
936 0xc78, 0x47230001,
937 0xc78, 0x46240001,
938 0xc78, 0x45250001,
939 0xc78, 0x44260001,
940 0xc78, 0x43270001,
941 0xc78, 0x42280001,
942 0xc78, 0x41290001,
943 0xc78, 0x402a0001,
944 0xc78, 0x262b0001,
945 0xc78, 0x252c0001,
946 0xc78, 0x242d0001,
947 0xc78, 0x232e0001,
948 0xc78, 0x222f0001,
949 0xc78, 0x21300001,
950 0xc78, 0x20310001,
951 0xc78, 0x06320001,
952 0xc78, 0x05330001,
953 0xc78, 0x04340001,
954 0xc78, 0x03350001,
955 0xc78, 0x02360001,
956 0xc78, 0x01370001,
957 0xc78, 0x00380001,
958 0xc78, 0x00390001,
959 0xc78, 0x003a0001,
960 0xc78, 0x003b0001,
961 0xc78, 0x003c0001,
962 0xc78, 0x003d0001,
963 0xc78, 0x003e0001,
964 0xc78, 0x003f0001,
965 0xc78, 0x7b400001,
966 0xc78, 0x7b410001,
967 0xc78, 0x7b420001,
968 0xc78, 0x7b430001,
969 0xc78, 0x7b440001,
970 0xc78, 0x7b450001,
971 0xc78, 0x7a460001,
972 0xc78, 0x79470001,
973 0xc78, 0x78480001,
974 0xc78, 0x77490001,
975 0xc78, 0x764a0001,
976 0xc78, 0x754b0001,
977 0xc78, 0x744c0001,
978 0xc78, 0x734d0001,
979 0xc78, 0x724e0001,
980 0xc78, 0x714f0001,
981 0xc78, 0x70500001,
982 0xc78, 0x6f510001,
983 0xc78, 0x6e520001,
984 0xc78, 0x6d530001,
985 0xc78, 0x6c540001,
986 0xc78, 0x6b550001,
987 0xc78, 0x6a560001,
988 0xc78, 0x69570001,
989 0xc78, 0x68580001,
990 0xc78, 0x67590001,
991 0xc78, 0x665a0001,
992 0xc78, 0x655b0001,
993 0xc78, 0x645c0001,
994 0xc78, 0x635d0001,
995 0xc78, 0x625e0001,
996 0xc78, 0x615f0001,
997 0xc78, 0x60600001,
998 0xc78, 0x49610001,
999 0xc78, 0x48620001,
1000 0xc78, 0x47630001,
1001 0xc78, 0x46640001,
1002 0xc78, 0x45650001,
1003 0xc78, 0x44660001,
1004 0xc78, 0x43670001,
1005 0xc78, 0x42680001,
1006 0xc78, 0x41690001,
1007 0xc78, 0x406a0001,
1008 0xc78, 0x266b0001,
1009 0xc78, 0x256c0001,
1010 0xc78, 0x246d0001,
1011 0xc78, 0x236e0001,
1012 0xc78, 0x226f0001,
1013 0xc78, 0x21700001,
1014 0xc78, 0x20710001,
1015 0xc78, 0x06720001,
1016 0xc78, 0x05730001,
1017 0xc78, 0x04740001,
1018 0xc78, 0x03750001,
1019 0xc78, 0x02760001,
1020 0xc78, 0x01770001,
1021 0xc78, 0x00780001,
1022 0xc78, 0x00790001,
1023 0xc78, 0x007a0001,
1024 0xc78, 0x007b0001,
1025 0xc78, 0x007c0001,
1026 0xc78, 0x007d0001,
1027 0xc78, 0x007e0001,
1028 0xc78, 0x007f0001,
1029 0xc78, 0x3800001e,
1030 0xc78, 0x3801001e,
1031 0xc78, 0x3802001e,
1032 0xc78, 0x3803001e,
1033 0xc78, 0x3804001e,
1034 0xc78, 0x3805001e,
1035 0xc78, 0x3806001e,
1036 0xc78, 0x3807001e,
1037 0xc78, 0x3808001e,
1038 0xc78, 0x3c09001e,
1039 0xc78, 0x3e0a001e,
1040 0xc78, 0x400b001e,
1041 0xc78, 0x440c001e,
1042 0xc78, 0x480d001e,
1043 0xc78, 0x4c0e001e,
1044 0xc78, 0x500f001e,
1045 0xc78, 0x5210001e,
1046 0xc78, 0x5611001e,
1047 0xc78, 0x5a12001e,
1048 0xc78, 0x5e13001e,
1049 0xc78, 0x6014001e,
1050 0xc78, 0x6015001e,
1051 0xc78, 0x6016001e,
1052 0xc78, 0x6217001e,
1053 0xc78, 0x6218001e,
1054 0xc78, 0x6219001e,
1055 0xc78, 0x621a001e,
1056 0xc78, 0x621b001e,
1057 0xc78, 0x621c001e,
1058 0xc78, 0x621d001e,
1059 0xc78, 0x621e001e,
1060 0xc78, 0x621f001e,
1061};
1062
1063u32 RTL8192CEAGCTAB_1TARRAY[AGCTAB_1TARRAYLENGTH] = {
1064 0xc78, 0x7b000001,
1065 0xc78, 0x7b010001,
1066 0xc78, 0x7b020001,
1067 0xc78, 0x7b030001,
1068 0xc78, 0x7b040001,
1069 0xc78, 0x7b050001,
1070 0xc78, 0x7a060001,
1071 0xc78, 0x79070001,
1072 0xc78, 0x78080001,
1073 0xc78, 0x77090001,
1074 0xc78, 0x760a0001,
1075 0xc78, 0x750b0001,
1076 0xc78, 0x740c0001,
1077 0xc78, 0x730d0001,
1078 0xc78, 0x720e0001,
1079 0xc78, 0x710f0001,
1080 0xc78, 0x70100001,
1081 0xc78, 0x6f110001,
1082 0xc78, 0x6e120001,
1083 0xc78, 0x6d130001,
1084 0xc78, 0x6c140001,
1085 0xc78, 0x6b150001,
1086 0xc78, 0x6a160001,
1087 0xc78, 0x69170001,
1088 0xc78, 0x68180001,
1089 0xc78, 0x67190001,
1090 0xc78, 0x661a0001,
1091 0xc78, 0x651b0001,
1092 0xc78, 0x641c0001,
1093 0xc78, 0x631d0001,
1094 0xc78, 0x621e0001,
1095 0xc78, 0x611f0001,
1096 0xc78, 0x60200001,
1097 0xc78, 0x49210001,
1098 0xc78, 0x48220001,
1099 0xc78, 0x47230001,
1100 0xc78, 0x46240001,
1101 0xc78, 0x45250001,
1102 0xc78, 0x44260001,
1103 0xc78, 0x43270001,
1104 0xc78, 0x42280001,
1105 0xc78, 0x41290001,
1106 0xc78, 0x402a0001,
1107 0xc78, 0x262b0001,
1108 0xc78, 0x252c0001,
1109 0xc78, 0x242d0001,
1110 0xc78, 0x232e0001,
1111 0xc78, 0x222f0001,
1112 0xc78, 0x21300001,
1113 0xc78, 0x20310001,
1114 0xc78, 0x06320001,
1115 0xc78, 0x05330001,
1116 0xc78, 0x04340001,
1117 0xc78, 0x03350001,
1118 0xc78, 0x02360001,
1119 0xc78, 0x01370001,
1120 0xc78, 0x00380001,
1121 0xc78, 0x00390001,
1122 0xc78, 0x003a0001,
1123 0xc78, 0x003b0001,
1124 0xc78, 0x003c0001,
1125 0xc78, 0x003d0001,
1126 0xc78, 0x003e0001,
1127 0xc78, 0x003f0001,
1128 0xc78, 0x7b400001,
1129 0xc78, 0x7b410001,
1130 0xc78, 0x7b420001,
1131 0xc78, 0x7b430001,
1132 0xc78, 0x7b440001,
1133 0xc78, 0x7b450001,
1134 0xc78, 0x7a460001,
1135 0xc78, 0x79470001,
1136 0xc78, 0x78480001,
1137 0xc78, 0x77490001,
1138 0xc78, 0x764a0001,
1139 0xc78, 0x754b0001,
1140 0xc78, 0x744c0001,
1141 0xc78, 0x734d0001,
1142 0xc78, 0x724e0001,
1143 0xc78, 0x714f0001,
1144 0xc78, 0x70500001,
1145 0xc78, 0x6f510001,
1146 0xc78, 0x6e520001,
1147 0xc78, 0x6d530001,
1148 0xc78, 0x6c540001,
1149 0xc78, 0x6b550001,
1150 0xc78, 0x6a560001,
1151 0xc78, 0x69570001,
1152 0xc78, 0x68580001,
1153 0xc78, 0x67590001,
1154 0xc78, 0x665a0001,
1155 0xc78, 0x655b0001,
1156 0xc78, 0x645c0001,
1157 0xc78, 0x635d0001,
1158 0xc78, 0x625e0001,
1159 0xc78, 0x615f0001,
1160 0xc78, 0x60600001,
1161 0xc78, 0x49610001,
1162 0xc78, 0x48620001,
1163 0xc78, 0x47630001,
1164 0xc78, 0x46640001,
1165 0xc78, 0x45650001,
1166 0xc78, 0x44660001,
1167 0xc78, 0x43670001,
1168 0xc78, 0x42680001,
1169 0xc78, 0x41690001,
1170 0xc78, 0x406a0001,
1171 0xc78, 0x266b0001,
1172 0xc78, 0x256c0001,
1173 0xc78, 0x246d0001,
1174 0xc78, 0x236e0001,
1175 0xc78, 0x226f0001,
1176 0xc78, 0x21700001,
1177 0xc78, 0x20710001,
1178 0xc78, 0x06720001,
1179 0xc78, 0x05730001,
1180 0xc78, 0x04740001,
1181 0xc78, 0x03750001,
1182 0xc78, 0x02760001,
1183 0xc78, 0x01770001,
1184 0xc78, 0x00780001,
1185 0xc78, 0x00790001,
1186 0xc78, 0x007a0001,
1187 0xc78, 0x007b0001,
1188 0xc78, 0x007c0001,
1189 0xc78, 0x007d0001,
1190 0xc78, 0x007e0001,
1191 0xc78, 0x007f0001,
1192 0xc78, 0x3800001e,
1193 0xc78, 0x3801001e,
1194 0xc78, 0x3802001e,
1195 0xc78, 0x3803001e,
1196 0xc78, 0x3804001e,
1197 0xc78, 0x3805001e,
1198 0xc78, 0x3806001e,
1199 0xc78, 0x3807001e,
1200 0xc78, 0x3808001e,
1201 0xc78, 0x3c09001e,
1202 0xc78, 0x3e0a001e,
1203 0xc78, 0x400b001e,
1204 0xc78, 0x440c001e,
1205 0xc78, 0x480d001e,
1206 0xc78, 0x4c0e001e,
1207 0xc78, 0x500f001e,
1208 0xc78, 0x5210001e,
1209 0xc78, 0x5611001e,
1210 0xc78, 0x5a12001e,
1211 0xc78, 0x5e13001e,
1212 0xc78, 0x6014001e,
1213 0xc78, 0x6015001e,
1214 0xc78, 0x6016001e,
1215 0xc78, 0x6217001e,
1216 0xc78, 0x6218001e,
1217 0xc78, 0x6219001e,
1218 0xc78, 0x621a001e,
1219 0xc78, 0x621b001e,
1220 0xc78, 0x621c001e,
1221 0xc78, 0x621d001e,
1222 0xc78, 0x621e001e,
1223 0xc78, 0x621f001e,
1224};
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/table.h b/drivers/net/wireless/rtlwifi/rtl8192ce/table.h
new file mode 100644
index 000000000000..3a6e8b6aeee0
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/table.h
@@ -0,0 +1,58 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Created on 2010/ 5/18, 1:41
27 *
28 * Larry Finger <Larry.Finger@lwfinger.net>
29 *
30 *****************************************************************************/
31
32#ifndef __RTL92CE_TABLE__H_
33#define __RTL92CE_TABLE__H_
34
35#include <linux/types.h>
36
37#define PHY_REG_2TARRAY_LENGTH 374
38extern u32 RTL8192CEPHY_REG_2TARRAY[PHY_REG_2TARRAY_LENGTH];
39#define PHY_REG_1TARRAY_LENGTH 374
40extern u32 RTL8192CEPHY_REG_1TARRAY[PHY_REG_1TARRAY_LENGTH];
41#define PHY_REG_ARRAY_PGLENGTH 192
42extern u32 RTL8192CEPHY_REG_ARRAY_PG[PHY_REG_ARRAY_PGLENGTH];
43#define RADIOA_2TARRAYLENGTH 282
44extern u32 RTL8192CERADIOA_2TARRAY[RADIOA_2TARRAYLENGTH];
45#define RADIOB_2TARRAYLENGTH 78
46extern u32 RTL8192CE_RADIOB_2TARRAY[RADIOB_2TARRAYLENGTH];
47#define RADIOA_1TARRAYLENGTH 282
48extern u32 RTL8192CE_RADIOA_1TARRAY[RADIOA_1TARRAYLENGTH];
49#define RADIOB_1TARRAYLENGTH 1
50extern u32 RTL8192CE_RADIOB_1TARRAY[RADIOB_1TARRAYLENGTH];
51#define MAC_2T_ARRAYLENGTH 162
52extern u32 RTL8192CEMAC_2T_ARRAY[MAC_2T_ARRAYLENGTH];
53#define AGCTAB_2TARRAYLENGTH 320
54extern u32 RTL8192CEAGCTAB_2TARRAY[AGCTAB_2TARRAYLENGTH];
55#define AGCTAB_1TARRAYLENGTH 320
56extern u32 RTL8192CEAGCTAB_1TARRAY[AGCTAB_1TARRAYLENGTH];
57
58#endif
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
new file mode 100644
index 000000000000..bf5852f2d634
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -0,0 +1,1031 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#include "../wifi.h"
31#include "../pci.h"
32#include "../base.h"
33#include "reg.h"
34#include "def.h"
35#include "phy.h"
36#include "trx.h"
37#include "led.h"
38
39static enum rtl_desc_qsel _rtl92ce_map_hwqueue_to_fwqueue(u16 fc,
40 unsigned int
41 skb_queue)
42{
43 enum rtl_desc_qsel qsel;
44
45 if (unlikely(ieee80211_is_beacon(fc))) {
46 qsel = QSLT_BEACON;
47 return qsel;
48 }
49
50 if (ieee80211_is_mgmt(fc)) {
51 qsel = QSLT_MGNT;
52 return qsel;
53 }
54
55 switch (skb_queue) {
56 case VO_QUEUE:
57 qsel = QSLT_VO;
58 break;
59 case VI_QUEUE:
60 qsel = QSLT_VI;
61 break;
62 case BE_QUEUE:
63 qsel = QSLT_BE;
64 break;
65 case BK_QUEUE:
66 qsel = QSLT_BK;
67 break;
68 default:
69 qsel = QSLT_BE;
70 RT_ASSERT(false, ("BE queue, skb_queue:%d,"
71 " set qsel = 0x%X\n", skb_queue, QSLT_BE));
72 break;
73 }
74 return qsel;
75}
76
77static int _rtl92ce_rate_mapping(bool isht, u8 desc_rate, bool first_ampdu)
78{
79 int rate_idx;
80
81 if (first_ampdu) {
82 if (false == isht) {
83 switch (desc_rate) {
84 case DESC92C_RATE1M:
85 rate_idx = 0;
86 break;
87 case DESC92C_RATE2M:
88 rate_idx = 1;
89 break;
90 case DESC92C_RATE5_5M:
91 rate_idx = 2;
92 break;
93 case DESC92C_RATE11M:
94 rate_idx = 3;
95 break;
96 case DESC92C_RATE6M:
97 rate_idx = 4;
98 break;
99 case DESC92C_RATE9M:
100 rate_idx = 5;
101 break;
102 case DESC92C_RATE12M:
103 rate_idx = 6;
104 break;
105 case DESC92C_RATE18M:
106 rate_idx = 7;
107 break;
108 case DESC92C_RATE24M:
109 rate_idx = 8;
110 break;
111 case DESC92C_RATE36M:
112 rate_idx = 9;
113 break;
114 case DESC92C_RATE48M:
115 rate_idx = 10;
116 break;
117 case DESC92C_RATE54M:
118 rate_idx = 11;
119 break;
120 default:
121 rate_idx = 0;
122 break;
123 }
124 } else {
125 rate_idx = 11;
126 }
127
128 return rate_idx;
129 }
130
131 switch (desc_rate) {
132 case DESC92C_RATE1M:
133 rate_idx = 0;
134 break;
135 case DESC92C_RATE2M:
136 rate_idx = 1;
137 break;
138 case DESC92C_RATE5_5M:
139 rate_idx = 2;
140 break;
141 case DESC92C_RATE11M:
142 rate_idx = 3;
143 break;
144 case DESC92C_RATE6M:
145 rate_idx = 4;
146 break;
147 case DESC92C_RATE9M:
148 rate_idx = 5;
149 break;
150 case DESC92C_RATE12M:
151 rate_idx = 6;
152 break;
153 case DESC92C_RATE18M:
154 rate_idx = 7;
155 break;
156 case DESC92C_RATE24M:
157 rate_idx = 8;
158 break;
159 case DESC92C_RATE36M:
160 rate_idx = 9;
161 break;
162 case DESC92C_RATE48M:
163 rate_idx = 10;
164 break;
165 case DESC92C_RATE54M:
166 rate_idx = 11;
167 break;
168 default:
169 rate_idx = 11;
170 break;
171 }
172 return rate_idx;
173}
174
175static u8 _rtl92c_query_rxpwrpercentage(char antpower)
176{
177 if ((antpower <= -100) || (antpower >= 20))
178 return 0;
179 else if (antpower >= 0)
180 return 100;
181 else
182 return 100 + antpower;
183}
184
185static u8 _rtl92c_evm_db_to_percentage(char value)
186{
187 char ret_val;
188 ret_val = value;
189
190 if (ret_val >= 0)
191 ret_val = 0;
192
193 if (ret_val <= -33)
194 ret_val = -33;
195
196 ret_val = 0 - ret_val;
197 ret_val *= 3;
198
199 if (ret_val == 99)
200 ret_val = 100;
201
202 return ret_val;
203}
204
205static long _rtl92ce_translate_todbm(struct ieee80211_hw *hw,
206 u8 signal_strength_index)
207{
208 long signal_power;
209
210 signal_power = (long)((signal_strength_index + 1) >> 1);
211 signal_power -= 95;
212 return signal_power;
213}
214
215static long _rtl92ce_signal_scale_mapping(struct ieee80211_hw *hw,
216 long currsig)
217{
218 long retsig;
219
220 if (currsig >= 61 && currsig <= 100)
221 retsig = 90 + ((currsig - 60) / 4);
222 else if (currsig >= 41 && currsig <= 60)
223 retsig = 78 + ((currsig - 40) / 2);
224 else if (currsig >= 31 && currsig <= 40)
225 retsig = 66 + (currsig - 30);
226 else if (currsig >= 21 && currsig <= 30)
227 retsig = 54 + (currsig - 20);
228 else if (currsig >= 5 && currsig <= 20)
229 retsig = 42 + (((currsig - 5) * 2) / 3);
230 else if (currsig == 4)
231 retsig = 36;
232 else if (currsig == 3)
233 retsig = 27;
234 else if (currsig == 2)
235 retsig = 18;
236 else if (currsig == 1)
237 retsig = 9;
238 else
239 retsig = currsig;
240
241 return retsig;
242}
243
244static void _rtl92ce_query_rxphystatus(struct ieee80211_hw *hw,
245 struct rtl_stats *pstats,
246 struct rx_desc_92c *pdesc,
247 struct rx_fwinfo_92c *p_drvinfo,
248 bool bpacket_match_bssid,
249 bool bpacket_toself,
250 bool b_packet_beacon)
251{
252 struct rtl_priv *rtlpriv = rtl_priv(hw);
253 struct phy_sts_cck_8192s_t *cck_buf;
254 s8 rx_pwr_all, rx_pwr[4];
255	u8 rf_rx_num = 0, evm, pwdb_all;
256 u8 i, max_spatial_stream;
257	u32 rssi, total_rssi = 0;
258 bool is_cck_rate;
259
260 is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
261 pstats->b_packet_matchbssid = bpacket_match_bssid;
262 pstats->b_packet_toself = bpacket_toself;
263	pstats->b_is_cck = is_cck_rate;
264	pstats->b_packet_beacon = b_packet_beacon;
266 pstats->rx_mimo_signalquality[0] = -1;
267 pstats->rx_mimo_signalquality[1] = -1;
268
269 if (is_cck_rate) {
270 u8 report, cck_highpwr;
271 cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
272
273 cck_highpwr = (u8) rtl_get_bbreg(hw,
274 RFPGA0_XA_HSSIPARAMETER2,
275 BIT(9));
276 if (!cck_highpwr) {
277 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
278 report = cck_buf->cck_agc_rpt & 0xc0;
279 report = report >> 6;
280 switch (report) {
281 case 0x3:
282 rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
283 break;
284 case 0x2:
285 rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
286 break;
287 case 0x1:
288 rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
289 break;
290 case 0x0:
291 rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
292 break;
293 }
294 } else {
295 u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
296 report = p_drvinfo->cfosho[0] & 0x60;
297 report = report >> 5;
298 switch (report) {
299 case 0x3:
300 rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
301 break;
302 case 0x2:
303 rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
304 break;
305 case 0x1:
306 rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
307 break;
308 case 0x0:
309 rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
310 break;
311 }
312 }
313
314 pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
315 pstats->rx_pwdb_all = pwdb_all;
316 pstats->recvsignalpower = rx_pwr_all;
317
318 if (bpacket_match_bssid) {
319 u8 sq;
320 if (pstats->rx_pwdb_all > 40)
321 sq = 100;
322 else {
323 sq = cck_buf->sq_rpt;
324 if (sq > 64)
325 sq = 0;
326 else if (sq < 20)
327 sq = 100;
328 else
329 sq = ((64 - sq) * 100) / 44;
330 }
331
332 pstats->signalquality = sq;
333 pstats->rx_mimo_signalquality[0] = sq;
334 pstats->rx_mimo_signalquality[1] = -1;
335 }
336 } else {
337 rtlpriv->dm.brfpath_rxenable[0] =
338 rtlpriv->dm.brfpath_rxenable[1] = true;
339 for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
340 if (rtlpriv->dm.brfpath_rxenable[i])
341 rf_rx_num++;
342
343 rx_pwr[i] =
344 ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
345 rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
346 total_rssi += rssi;
347 rtlpriv->stats.rx_snr_db[i] =
348 (long)(p_drvinfo->rxsnr[i] / 2);
349
350 if (bpacket_match_bssid)
351 pstats->rx_mimo_signalstrength[i] = (u8) rssi;
352 }
353
354 rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
355 pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
356 pstats->rx_pwdb_all = pwdb_all;
357 pstats->rxpower = rx_pwr_all;
358 pstats->recvsignalpower = rx_pwr_all;
359
360 if (pdesc->rxht && pdesc->rxmcs >= DESC92C_RATEMCS8 &&
361 pdesc->rxmcs <= DESC92C_RATEMCS15)
362 max_spatial_stream = 2;
363 else
364 max_spatial_stream = 1;
365
366 for (i = 0; i < max_spatial_stream; i++) {
367 evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
368
369 if (bpacket_match_bssid) {
370 if (i == 0)
371 pstats->signalquality =
372 (u8) (evm & 0xff);
373 pstats->rx_mimo_signalquality[i] =
374 (u8) (evm & 0xff);
375 }
376 }
377 }
378
379 if (is_cck_rate)
380 pstats->signalstrength =
381 (u8) (_rtl92ce_signal_scale_mapping(hw, pwdb_all));
382 else if (rf_rx_num != 0)
383 pstats->signalstrength =
384 (u8) (_rtl92ce_signal_scale_mapping
385 (hw, total_rssi /= rf_rx_num));
386}
387
388static void _rtl92ce_process_ui_rssi(struct ieee80211_hw *hw,
389 struct rtl_stats *pstats)
390{
391 struct rtl_priv *rtlpriv = rtl_priv(hw);
392 struct rtl_phy *rtlphy = &(rtlpriv->phy);
393 u8 rfpath;
394 u32 last_rssi, tmpval;
395
396 if (pstats->b_packet_toself || pstats->b_packet_beacon) {
397 rtlpriv->stats.rssi_calculate_cnt++;
398
399 if (rtlpriv->stats.ui_rssi.total_num++ >=
400 PHY_RSSI_SLID_WIN_MAX) {
401 rtlpriv->stats.ui_rssi.total_num =
402 PHY_RSSI_SLID_WIN_MAX;
403 last_rssi =
404 rtlpriv->stats.ui_rssi.elements[rtlpriv->
405 stats.ui_rssi.index];
406 rtlpriv->stats.ui_rssi.total_val -= last_rssi;
407 }
408
409 rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
410 rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
411 index++] =
412 pstats->signalstrength;
413
414 if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
415 rtlpriv->stats.ui_rssi.index = 0;
416
417 tmpval = rtlpriv->stats.ui_rssi.total_val /
418 rtlpriv->stats.ui_rssi.total_num;
419 rtlpriv->stats.signal_strength =
420 _rtl92ce_translate_todbm(hw, (u8) tmpval);
421 pstats->rssi = rtlpriv->stats.signal_strength;
422 }
423
424 if (!pstats->b_is_cck && pstats->b_packet_toself) {
425 for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
426 rfpath++) {
427
428 if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
429 continue;
430
431 if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
432 rtlpriv->stats.rx_rssi_percentage[rfpath] =
433 pstats->rx_mimo_signalstrength[rfpath];
434
435 }
436
437 if (pstats->rx_mimo_signalstrength[rfpath] >
438 rtlpriv->stats.rx_rssi_percentage[rfpath]) {
439 rtlpriv->stats.rx_rssi_percentage[rfpath] =
440 ((rtlpriv->stats.
441 rx_rssi_percentage[rfpath] *
442 (RX_SMOOTH_FACTOR - 1)) +
443 (pstats->rx_mimo_signalstrength[rfpath])) /
444 (RX_SMOOTH_FACTOR);
445
446 rtlpriv->stats.rx_rssi_percentage[rfpath] =
447 rtlpriv->stats.rx_rssi_percentage[rfpath] +
448 1;
449 } else {
450 rtlpriv->stats.rx_rssi_percentage[rfpath] =
451 ((rtlpriv->stats.
452 rx_rssi_percentage[rfpath] *
453 (RX_SMOOTH_FACTOR - 1)) +
454 (pstats->rx_mimo_signalstrength[rfpath])) /
455 (RX_SMOOTH_FACTOR);
456 }
457
458 }
459 }
460}
461
462static void _rtl92ce_update_rxsignalstatistics(struct ieee80211_hw *hw,
463 struct rtl_stats *pstats)
464{
465 struct rtl_priv *rtlpriv = rtl_priv(hw);
466	int weighting = 0;
467
468 if (rtlpriv->stats.recv_signal_power == 0)
469 rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;
470
471 if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
472 weighting = 5;
473
474 else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
475 weighting = (-5);
476
477 rtlpriv->stats.recv_signal_power =
478 (rtlpriv->stats.recv_signal_power * 5 +
479 pstats->recvsignalpower + weighting) / 6;
480}
481
482static void _rtl92ce_process_pwdb(struct ieee80211_hw *hw,
483 struct rtl_stats *pstats)
484{
485 struct rtl_priv *rtlpriv = rtl_priv(hw);
486 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
487 long undecorated_smoothed_pwdb;
488
489 if (mac->opmode == NL80211_IFTYPE_ADHOC) {
490 return;
491 } else {
492 undecorated_smoothed_pwdb =
493 rtlpriv->dm.undecorated_smoothed_pwdb;
494 }
495
496 if (pstats->b_packet_toself || pstats->b_packet_beacon) {
497 if (undecorated_smoothed_pwdb < 0)
498 undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
499
500 if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
501 undecorated_smoothed_pwdb =
502 (((undecorated_smoothed_pwdb) *
503 (RX_SMOOTH_FACTOR - 1)) +
504 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
505
506 undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
507 + 1;
508 } else {
509 undecorated_smoothed_pwdb =
510 (((undecorated_smoothed_pwdb) *
511 (RX_SMOOTH_FACTOR - 1)) +
512 (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
513 }
514
515 rtlpriv->dm.undecorated_smoothed_pwdb =
516 undecorated_smoothed_pwdb;
517 _rtl92ce_update_rxsignalstatistics(hw, pstats);
518 }
519}
520
521static void _rtl92ce_process_ui_link_quality(struct ieee80211_hw *hw,
522 struct rtl_stats *pstats)
523{
524 struct rtl_priv *rtlpriv = rtl_priv(hw);
525 u32 last_evm, n_spatialstream, tmpval;
526
527 if (pstats->signalquality != 0) {
528 if (pstats->b_packet_toself || pstats->b_packet_beacon) {
529
530 if (rtlpriv->stats.ui_link_quality.total_num++ >=
531 PHY_LINKQUALITY_SLID_WIN_MAX) {
532 rtlpriv->stats.ui_link_quality.total_num =
533 PHY_LINKQUALITY_SLID_WIN_MAX;
534 last_evm =
535 rtlpriv->stats.
536 ui_link_quality.elements[rtlpriv->
537 stats.ui_link_quality.
538 index];
539 rtlpriv->stats.ui_link_quality.total_val -=
540 last_evm;
541 }
542
543 rtlpriv->stats.ui_link_quality.total_val +=
544 pstats->signalquality;
545 rtlpriv->stats.ui_link_quality.elements[rtlpriv->stats.
546 ui_link_quality.
547 index++] =
548 pstats->signalquality;
549
550 if (rtlpriv->stats.ui_link_quality.index >=
551 PHY_LINKQUALITY_SLID_WIN_MAX)
552 rtlpriv->stats.ui_link_quality.index = 0;
553
554 tmpval = rtlpriv->stats.ui_link_quality.total_val /
555 rtlpriv->stats.ui_link_quality.total_num;
556 rtlpriv->stats.signal_quality = tmpval;
557
558 rtlpriv->stats.last_sigstrength_inpercent = tmpval;
559
560 for (n_spatialstream = 0; n_spatialstream < 2;
561 n_spatialstream++) {
562 if (pstats->
563 rx_mimo_signalquality[n_spatialstream] !=
564 -1) {
565 if (rtlpriv->stats.
566 rx_evm_percentage[n_spatialstream]
567 == 0) {
568 rtlpriv->stats.
569 rx_evm_percentage
570 [n_spatialstream] =
571 pstats->rx_mimo_signalquality
572 [n_spatialstream];
573 }
574
575 rtlpriv->stats.
576 rx_evm_percentage[n_spatialstream] =
577 ((rtlpriv->
578 stats.rx_evm_percentage
579 [n_spatialstream] *
580 (RX_SMOOTH_FACTOR - 1)) +
581 (pstats->
582 rx_mimo_signalquality
583 [n_spatialstream] * 1)) /
584 (RX_SMOOTH_FACTOR);
585 }
586 }
587 }
588 } else {
589 ;
590 }
591}
592
593static void _rtl92ce_process_phyinfo(struct ieee80211_hw *hw,
594 u8 *buffer,
595 struct rtl_stats *pcurrent_stats)
596{
597
598 if (!pcurrent_stats->b_packet_matchbssid &&
599 !pcurrent_stats->b_packet_beacon)
600 return;
601
602 _rtl92ce_process_ui_rssi(hw, pcurrent_stats);
603 _rtl92ce_process_pwdb(hw, pcurrent_stats);
604 _rtl92ce_process_ui_link_quality(hw, pcurrent_stats);
605}
606
607static void _rtl92ce_translate_rx_signal_stuff(struct ieee80211_hw *hw,
608 struct sk_buff *skb,
609 struct rtl_stats *pstats,
610 struct rx_desc_92c *pdesc,
611 struct rx_fwinfo_92c *p_drvinfo)
612{
613 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
614 struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
615
616 struct ieee80211_hdr *hdr;
617 u8 *tmp_buf;
618 u8 *praddr;
619 u8 *psaddr;
620 u16 fc, type;
621	bool b_packet_matchbssid, b_packet_toself, b_packet_beacon = false;
622
623 tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
624
625 hdr = (struct ieee80211_hdr *)tmp_buf;
626 fc = le16_to_cpu(hdr->frame_control);
627 type = WLAN_FC_GET_TYPE(fc);
628 praddr = hdr->addr1;
629 psaddr = hdr->addr2;
630
631 b_packet_matchbssid =
632 ((IEEE80211_FTYPE_CTL != type) &&
633 (!compare_ether_addr(mac->bssid,
634 (fc & IEEE80211_FCTL_TODS) ?
635 hdr->addr1 : (fc & IEEE80211_FCTL_FROMDS) ?
636 hdr->addr2 : hdr->addr3)) &&
637 (!pstats->b_hwerror) && (!pstats->b_crc) && (!pstats->b_icv));
638
639 b_packet_toself = b_packet_matchbssid &&
640 (!compare_ether_addr(praddr, rtlefuse->dev_addr));
641
642 if (ieee80211_is_beacon(fc))
643 b_packet_beacon = true;
644
645 _rtl92ce_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
646 b_packet_matchbssid, b_packet_toself,
647 b_packet_beacon);
648
649 _rtl92ce_process_phyinfo(hw, tmp_buf, pstats);
650}
651
652bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
653 struct rtl_stats *stats,
654 struct ieee80211_rx_status *rx_status,
655 u8 *p_desc, struct sk_buff *skb)
656{
657 struct rx_fwinfo_92c *p_drvinfo;
658 struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
659
660 u32 phystatus = GET_RX_DESC_PHYST(pdesc);
661 stats->length = (u16) GET_RX_DESC_PKT_LEN(pdesc);
662 stats->rx_drvinfo_size = (u8) GET_RX_DESC_DRV_INFO_SIZE(pdesc) *
663 RX_DRV_INFO_SIZE_UNIT;
664 stats->rx_bufshift = (u8) (GET_RX_DESC_SHIFT(pdesc) & 0x03);
665 stats->b_icv = (u16) GET_RX_DESC_ICV(pdesc);
666 stats->b_crc = (u16) GET_RX_DESC_CRC32(pdesc);
667 stats->b_hwerror = (stats->b_crc | stats->b_icv);
668 stats->decrypted = !GET_RX_DESC_SWDEC(pdesc);
669 stats->rate = (u8) GET_RX_DESC_RXMCS(pdesc);
670 stats->b_shortpreamble = (u16) GET_RX_DESC_SPLCP(pdesc);
671	stats->b_isampdu = (bool) ((GET_RX_DESC_PAGGR(pdesc) == 1)
672				   && (GET_RX_DESC_FAGGR(pdesc) == 1));
674 stats->timestamp_low = GET_RX_DESC_TSFL(pdesc);
675 stats->rx_is40Mhzpacket = (bool) GET_RX_DESC_BW(pdesc);
676
677 rx_status->freq = hw->conf.channel->center_freq;
678 rx_status->band = hw->conf.channel->band;
679
680 if (GET_RX_DESC_CRC32(pdesc))
681 rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
682
683 if (!GET_RX_DESC_SWDEC(pdesc))
684 rx_status->flag |= RX_FLAG_DECRYPTED;
685
686 if (GET_RX_DESC_BW(pdesc))
687 rx_status->flag |= RX_FLAG_40MHZ;
688
689 if (GET_RX_DESC_RXHT(pdesc))
690 rx_status->flag |= RX_FLAG_HT;
691
692 rx_status->flag |= RX_FLAG_TSFT;
693
694 if (stats->decrypted)
695 rx_status->flag |= RX_FLAG_DECRYPTED;
696
697 rx_status->rate_idx = _rtl92ce_rate_mapping((bool)
698 GET_RX_DESC_RXHT(pdesc),
699 (u8)
700 GET_RX_DESC_RXMCS(pdesc),
701 (bool)
702 GET_RX_DESC_PAGGR(pdesc));
703
704 rx_status->mactime = GET_RX_DESC_TSFL(pdesc);
705 if (phystatus == true) {
706 p_drvinfo = (struct rx_fwinfo_92c *)(skb->data +
707 stats->rx_bufshift);
708
709 _rtl92ce_translate_rx_signal_stuff(hw,
710 skb, stats, pdesc,
711 p_drvinfo);
712 }
713
714 /*rx_status->qual = stats->signal; */
715 rx_status->signal = stats->rssi + 10;
716 /*rx_status->noise = -stats->noise; */
717
718 return true;
719}
720
721void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
722 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
723 struct ieee80211_tx_info *info, struct sk_buff *skb,
724 unsigned int queue_index)
725{
726 struct rtl_priv *rtlpriv = rtl_priv(hw);
727 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
728 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
729 struct rtl_ps_ctl *ppsc = rtl_psc(rtl_priv(hw));
730 bool b_defaultadapter = true;
731
732 struct ieee80211_sta *sta = ieee80211_find_sta(mac->vif, mac->bssid);
733
734 u8 *pdesc = (u8 *) pdesc_tx;
735 struct rtl_tcb_desc tcb_desc;
736 u8 *qc = ieee80211_get_qos_ctl(hdr);
737 u8 tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
738 u16 seq_number;
739 u16 fc = le16_to_cpu(hdr->frame_control);
740 u8 rate_flag = info->control.rates[0].flags;
741
742 enum rtl_desc_qsel fw_qsel =
743 _rtl92ce_map_hwqueue_to_fwqueue(le16_to_cpu(hdr->frame_control),
744 queue_index);
745
746 bool b_firstseg = ((hdr->seq_ctrl &
747 cpu_to_le16(IEEE80211_SCTL_FRAG)) == 0);
748
749 bool b_lastseg = ((hdr->frame_control &
750 cpu_to_le16(IEEE80211_FCTL_MOREFRAGS)) == 0);
751
752 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
753 skb->data, skb->len,
754 PCI_DMA_TODEVICE);
755
756 seq_number = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
757
758 rtl_get_tcb_desc(hw, info, skb, &tcb_desc);
759
760 CLEAR_PCI_TX_DESC_CONTENT(pdesc, sizeof(struct tx_desc_92c));
761
762 if (b_firstseg) {
763 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
764
765 SET_TX_DESC_TX_RATE(pdesc, tcb_desc.hw_rate);
766
767 if (tcb_desc.use_shortgi || tcb_desc.use_shortpreamble)
768 SET_TX_DESC_DATA_SHORTGI(pdesc, 1);
769
770 if (mac->tids[tid].agg.agg_state == RTL_AGG_ON &&
771 info->flags & IEEE80211_TX_CTL_AMPDU) {
772 SET_TX_DESC_AGG_BREAK(pdesc, 1);
773 SET_TX_DESC_MAX_AGG_NUM(pdesc, 0x14);
774 }
775 SET_TX_DESC_SEQ(pdesc, seq_number);
776
777 SET_TX_DESC_RTS_ENABLE(pdesc, ((tcb_desc.b_rts_enable &&
778 !tcb_desc.
779 b_cts_enable) ? 1 : 0));
780 SET_TX_DESC_HW_RTS_ENABLE(pdesc,
781 ((tcb_desc.b_rts_enable
782 || tcb_desc.b_cts_enable) ? 1 : 0));
783 SET_TX_DESC_CTS2SELF(pdesc, ((tcb_desc.b_cts_enable) ? 1 : 0));
784 SET_TX_DESC_RTS_STBC(pdesc, ((tcb_desc.b_rts_stbc) ? 1 : 0));
785
786 SET_TX_DESC_RTS_RATE(pdesc, tcb_desc.rts_rate);
787 SET_TX_DESC_RTS_BW(pdesc, 0);
788 SET_TX_DESC_RTS_SC(pdesc, tcb_desc.rts_sc);
789 SET_TX_DESC_RTS_SHORT(pdesc,
790 ((tcb_desc.rts_rate <= DESC92C_RATE54M) ?
791 (tcb_desc.b_rts_use_shortpreamble ? 1 : 0)
792 : (tcb_desc.b_rts_use_shortgi ? 1 : 0)));
793
794 if (mac->bw_40) {
795 if (tcb_desc.b_packet_bw) {
796 SET_TX_DESC_DATA_BW(pdesc, 1);
797 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 3);
798 } else {
799 SET_TX_DESC_DATA_BW(pdesc, 0);
800
801 if (rate_flag & IEEE80211_TX_RC_DUP_DATA) {
802 SET_TX_DESC_TX_SUB_CARRIER(pdesc,
803 mac->cur_40_prime_sc);
804 }
805 }
806 } else {
807 SET_TX_DESC_DATA_BW(pdesc, 0);
808 SET_TX_DESC_TX_SUB_CARRIER(pdesc, 0);
809 }
810
811 SET_TX_DESC_LINIP(pdesc, 0);
812 SET_TX_DESC_PKT_SIZE(pdesc, (u16) skb->len);
813
814 if (sta) {
815 u8 ampdu_density = sta->ht_cap.ampdu_density;
816 SET_TX_DESC_AMPDU_DENSITY(pdesc, ampdu_density);
817 }
818
819 if (info->control.hw_key) {
820 struct ieee80211_key_conf *keyconf =
821 info->control.hw_key;
822
823 switch (keyconf->cipher) {
824 case WLAN_CIPHER_SUITE_WEP40:
825 case WLAN_CIPHER_SUITE_WEP104:
826 case WLAN_CIPHER_SUITE_TKIP:
827 SET_TX_DESC_SEC_TYPE(pdesc, 0x1);
828 break;
829 case WLAN_CIPHER_SUITE_CCMP:
830 SET_TX_DESC_SEC_TYPE(pdesc, 0x3);
831 break;
832 default:
833 SET_TX_DESC_SEC_TYPE(pdesc, 0x0);
834 break;
835
836 }
837 }
838
839 SET_TX_DESC_PKT_ID(pdesc, 0);
840 SET_TX_DESC_QUEUE_SEL(pdesc, fw_qsel);
841
842 SET_TX_DESC_DATA_RATE_FB_LIMIT(pdesc, 0x1F);
843 SET_TX_DESC_RTS_RATE_FB_LIMIT(pdesc, 0xF);
844 SET_TX_DESC_DISABLE_FB(pdesc, 0);
845 SET_TX_DESC_USE_RATE(pdesc, tcb_desc.use_driver_rate ? 1 : 0);
846
847 if (ieee80211_is_data_qos(fc)) {
848 if (mac->rdg_en) {
849 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE,
850 ("Enable RDG function.\n"));
851 SET_TX_DESC_RDG_ENABLE(pdesc, 1);
852 SET_TX_DESC_HTC(pdesc, 1);
853 }
854 }
855 }
856
857 SET_TX_DESC_FIRST_SEG(pdesc, (b_firstseg ? 1 : 0));
858 SET_TX_DESC_LAST_SEG(pdesc, (b_lastseg ? 1 : 0));
859
860 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) skb->len);
861
862 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
863
864 if (rtlpriv->dm.b_useramask) {
865 SET_TX_DESC_RATE_ID(pdesc, tcb_desc.ratr_index);
866 SET_TX_DESC_MACID(pdesc, tcb_desc.mac_id);
867 } else {
868 SET_TX_DESC_RATE_ID(pdesc, 0xC + tcb_desc.ratr_index);
869 SET_TX_DESC_MACID(pdesc, tcb_desc.ratr_index);
870 }
871
872 if ((!ieee80211_is_data_qos(fc)) && ppsc->b_leisure_ps &&
873 ppsc->b_fwctrl_lps) {
874 SET_TX_DESC_HWSEQ_EN(pdesc, 1);
875 SET_TX_DESC_PKT_ID(pdesc, 8);
876
877 if (!b_defaultadapter)
878 SET_TX_DESC_QOS(pdesc, 1);
879 }
880
881 SET_TX_DESC_MORE_FRAG(pdesc, (b_lastseg ? 0 : 1));
882
883 if (is_multicast_ether_addr(ieee80211_get_DA(hdr)) ||
884 is_broadcast_ether_addr(ieee80211_get_DA(hdr))) {
885 SET_TX_DESC_BMC(pdesc, 1);
886 }
887
888 RT_TRACE(rtlpriv, COMP_SEND, DBG_TRACE, ("\n"));
889}
890
891void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw,
892 u8 *pdesc, bool b_firstseg,
893 bool b_lastseg, struct sk_buff *skb)
894{
895 struct rtl_priv *rtlpriv = rtl_priv(hw);
896 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
897 u8 fw_queue = QSLT_BEACON;
898
899 dma_addr_t mapping = pci_map_single(rtlpci->pdev,
900 skb->data, skb->len,
901 PCI_DMA_TODEVICE);
902
903 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
904 u16 fc = le16_to_cpu(hdr->frame_control);
905
906 CLEAR_PCI_TX_DESC_CONTENT(pdesc, TX_DESC_SIZE);
907
908 if (b_firstseg)
909 SET_TX_DESC_OFFSET(pdesc, USB_HWDESC_HEADER_LEN);
910
911 SET_TX_DESC_TX_RATE(pdesc, DESC92C_RATE1M);
912
913 SET_TX_DESC_SEQ(pdesc, 0);
914
915 SET_TX_DESC_LINIP(pdesc, 0);
916
917 SET_TX_DESC_QUEUE_SEL(pdesc, fw_queue);
918
919 SET_TX_DESC_FIRST_SEG(pdesc, 1);
920 SET_TX_DESC_LAST_SEG(pdesc, 1);
921
922 SET_TX_DESC_TX_BUFFER_SIZE(pdesc, (u16) (skb->len));
923
924 SET_TX_DESC_TX_BUFFER_ADDRESS(pdesc, cpu_to_le32(mapping));
925
926 SET_TX_DESC_RATE_ID(pdesc, 7);
927 SET_TX_DESC_MACID(pdesc, 0);
928
929 SET_TX_DESC_OWN(pdesc, 1);
930
931 SET_TX_DESC_PKT_SIZE((u8 *) pdesc, (u16) (skb->len));
932
933 SET_TX_DESC_FIRST_SEG(pdesc, 1);
934 SET_TX_DESC_LAST_SEG(pdesc, 1);
935
936 SET_TX_DESC_OFFSET(pdesc, 0x20);
937
938 SET_TX_DESC_USE_RATE(pdesc, 1);
939
940 if (!ieee80211_is_data_qos(fc)) {
941 SET_TX_DESC_HWSEQ_EN(pdesc, 1);
942 SET_TX_DESC_PKT_ID(pdesc, 8);
943 }
944
945 RT_PRINT_DATA(rtlpriv, COMP_CMD, DBG_LOUD,
946 "H2C Tx Cmd Content\n",
947 pdesc, TX_DESC_SIZE);
948}
949
950void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val)
951{
952 if (istx == true) {
953 switch (desc_name) {
954 case HW_DESC_OWN:
955 SET_TX_DESC_OWN(pdesc, 1);
956 break;
957 case HW_DESC_TX_NEXTDESC_ADDR:
958 SET_TX_DESC_NEXT_DESC_ADDRESS(pdesc, *(u32 *) val);
959 break;
960 default:
961 RT_ASSERT(false, ("ERR txdesc :%d"
962 " not process\n", desc_name));
963 break;
964 }
965 } else {
966 switch (desc_name) {
967 case HW_DESC_RXOWN:
968 SET_RX_DESC_OWN(pdesc, 1);
969 break;
970 case HW_DESC_RXBUFF_ADDR:
971 SET_RX_DESC_BUFF_ADDR(pdesc, *(u32 *) val);
972 break;
973 case HW_DESC_RXPKT_LEN:
974 SET_RX_DESC_PKT_LEN(pdesc, *(u32 *) val);
975 break;
976 case HW_DESC_RXERO:
977 SET_RX_DESC_EOR(pdesc, 1);
978 break;
979 default:
980 RT_ASSERT(false, ("ERR rxdesc :%d "
981 "not process\n", desc_name));
982 break;
983 }
984 }
985}
986
987u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
988{
989 u32 ret = 0;
990
991 if (istx == true) {
992 switch (desc_name) {
993 case HW_DESC_OWN:
994 ret = GET_TX_DESC_OWN(p_desc);
995 break;
996 case HW_DESC_TXBUFF_ADDR:
997 ret = GET_TX_DESC_TX_BUFFER_ADDRESS(p_desc);
998 break;
999 default:
1000 RT_ASSERT(false, ("ERR txdesc :%d "
1001 "not process\n", desc_name));
1002 break;
1003 }
1004 } else {
1005 struct rx_desc_92c *pdesc = (struct rx_desc_92c *)p_desc;
1006 switch (desc_name) {
1007 case HW_DESC_OWN:
1008 ret = GET_RX_DESC_OWN(pdesc);
1009 break;
1010 case HW_DESC_RXPKT_LEN:
1011 ret = GET_RX_DESC_PKT_LEN(pdesc);
1012 break;
1013 default:
1014 RT_ASSERT(false, ("ERR rxdesc :%d "
1015 "not process\n", desc_name));
1016 break;
1017 }
1018 }
1019 return ret;
1020}
1021
1022void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue)
1023{
1024 struct rtl_priv *rtlpriv = rtl_priv(hw);
1025 if (hw_queue == BEACON_QUEUE) {
1026 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG, BIT(4));
1027 } else {
1028 rtl_write_word(rtlpriv, REG_PCIE_CTRL_REG,
1029 BIT(0) << (hw_queue));
1030 }
1031}
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
new file mode 100644
index 000000000000..53d0e0a5af5c
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.h
@@ -0,0 +1,714 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL92CE_TRX_H__
31#define __RTL92CE_TRX_H__
32
33#define TX_DESC_SIZE 64
34#define TX_DESC_AGGR_SUBFRAME_SIZE 32
35
36#define RX_DESC_SIZE 32
37#define RX_DRV_INFO_SIZE_UNIT 8
38
39#define TX_DESC_NEXT_DESC_OFFSET 40
40#define USB_HWDESC_HEADER_LEN 32
41#define CRCLENGTH 4
42
43#define SET_TX_DESC_PKT_SIZE(__pdesc, __val) \
44 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 16, __val)
45#define SET_TX_DESC_OFFSET(__pdesc, __val) \
46 SET_BITS_TO_LE_4BYTE(__pdesc, 16, 8, __val)
47#define SET_TX_DESC_BMC(__pdesc, __val) \
48 SET_BITS_TO_LE_4BYTE(__pdesc, 24, 1, __val)
49#define SET_TX_DESC_HTC(__pdesc, __val) \
50 SET_BITS_TO_LE_4BYTE(__pdesc, 25, 1, __val)
51#define SET_TX_DESC_LAST_SEG(__pdesc, __val) \
52 SET_BITS_TO_LE_4BYTE(__pdesc, 26, 1, __val)
53#define SET_TX_DESC_FIRST_SEG(__pdesc, __val) \
54 SET_BITS_TO_LE_4BYTE(__pdesc, 27, 1, __val)
55#define SET_TX_DESC_LINIP(__pdesc, __val) \
56 SET_BITS_TO_LE_4BYTE(__pdesc, 28, 1, __val)
57#define SET_TX_DESC_NO_ACM(__pdesc, __val) \
58 SET_BITS_TO_LE_4BYTE(__pdesc, 29, 1, __val)
59#define SET_TX_DESC_GF(__pdesc, __val) \
60 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
61#define SET_TX_DESC_OWN(__pdesc, __val) \
62 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
63
64#define GET_TX_DESC_PKT_SIZE(__pdesc) \
65 LE_BITS_TO_4BYTE(__pdesc, 0, 16)
66#define GET_TX_DESC_OFFSET(__pdesc) \
67 LE_BITS_TO_4BYTE(__pdesc, 16, 8)
68#define GET_TX_DESC_BMC(__pdesc) \
69 LE_BITS_TO_4BYTE(__pdesc, 24, 1)
70#define GET_TX_DESC_HTC(__pdesc) \
71 LE_BITS_TO_4BYTE(__pdesc, 25, 1)
72#define GET_TX_DESC_LAST_SEG(__pdesc) \
73 LE_BITS_TO_4BYTE(__pdesc, 26, 1)
74#define GET_TX_DESC_FIRST_SEG(__pdesc) \
75 LE_BITS_TO_4BYTE(__pdesc, 27, 1)
76#define GET_TX_DESC_LINIP(__pdesc) \
77 LE_BITS_TO_4BYTE(__pdesc, 28, 1)
78#define GET_TX_DESC_NO_ACM(__pdesc) \
79 LE_BITS_TO_4BYTE(__pdesc, 29, 1)
80#define GET_TX_DESC_GF(__pdesc) \
81 LE_BITS_TO_4BYTE(__pdesc, 30, 1)
82#define GET_TX_DESC_OWN(__pdesc) \
83 LE_BITS_TO_4BYTE(__pdesc, 31, 1)
84
85#define SET_TX_DESC_MACID(__pdesc, __val) \
86 SET_BITS_TO_LE_4BYTE(__pdesc+4, 0, 5, __val)
87#define SET_TX_DESC_AGG_BREAK(__pdesc, __val) \
88 SET_BITS_TO_LE_4BYTE(__pdesc+4, 5, 1, __val)
89#define SET_TX_DESC_BK(__pdesc, __val) \
90 SET_BITS_TO_LE_4BYTE(__pdesc+4, 6, 1, __val)
91#define SET_TX_DESC_RDG_ENABLE(__pdesc, __val) \
92 SET_BITS_TO_LE_4BYTE(__pdesc+4, 7, 1, __val)
93#define SET_TX_DESC_QUEUE_SEL(__pdesc, __val) \
94 SET_BITS_TO_LE_4BYTE(__pdesc+4, 8, 5, __val)
95#define SET_TX_DESC_RDG_NAV_EXT(__pdesc, __val) \
96 SET_BITS_TO_LE_4BYTE(__pdesc+4, 13, 1, __val)
97#define SET_TX_DESC_LSIG_TXOP_EN(__pdesc, __val) \
98 SET_BITS_TO_LE_4BYTE(__pdesc+4, 14, 1, __val)
99#define SET_TX_DESC_PIFS(__pdesc, __val) \
100 SET_BITS_TO_LE_4BYTE(__pdesc+4, 15, 1, __val)
101#define SET_TX_DESC_RATE_ID(__pdesc, __val) \
102 SET_BITS_TO_LE_4BYTE(__pdesc+4, 16, 4, __val)
103#define SET_TX_DESC_NAV_USE_HDR(__pdesc, __val) \
104 SET_BITS_TO_LE_4BYTE(__pdesc+4, 20, 1, __val)
105#define SET_TX_DESC_EN_DESC_ID(__pdesc, __val) \
106 SET_BITS_TO_LE_4BYTE(__pdesc+4, 21, 1, __val)
107#define SET_TX_DESC_SEC_TYPE(__pdesc, __val) \
108 SET_BITS_TO_LE_4BYTE(__pdesc+4, 22, 2, __val)
109#define SET_TX_DESC_PKT_OFFSET(__pdesc, __val) \
110 SET_BITS_TO_LE_4BYTE(__pdesc+4, 24, 8, __val)
111
112#define GET_TX_DESC_MACID(__pdesc) \
113 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
114#define GET_TX_DESC_AGG_ENABLE(__pdesc) \
115 LE_BITS_TO_4BYTE(__pdesc+4, 5, 1)
116#define GET_TX_DESC_AGG_BREAK(__pdesc) \
117 LE_BITS_TO_4BYTE(__pdesc+4, 6, 1)
118#define GET_TX_DESC_RDG_ENABLE(__pdesc) \
119 LE_BITS_TO_4BYTE(__pdesc+4, 7, 1)
120#define GET_TX_DESC_QUEUE_SEL(__pdesc) \
121 LE_BITS_TO_4BYTE(__pdesc+4, 8, 5)
122#define GET_TX_DESC_RDG_NAV_EXT(__pdesc) \
123 LE_BITS_TO_4BYTE(__pdesc+4, 13, 1)
124#define GET_TX_DESC_LSIG_TXOP_EN(__pdesc) \
125 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
126#define GET_TX_DESC_PIFS(__pdesc) \
127 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
128#define GET_TX_DESC_RATE_ID(__pdesc) \
129 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
130#define GET_TX_DESC_NAV_USE_HDR(__pdesc) \
131 LE_BITS_TO_4BYTE(__pdesc+4, 20, 1)
132#define GET_TX_DESC_EN_DESC_ID(__pdesc) \
133 LE_BITS_TO_4BYTE(__pdesc+4, 21, 1)
134#define GET_TX_DESC_SEC_TYPE(__pdesc) \
135 LE_BITS_TO_4BYTE(__pdesc+4, 22, 2)
136#define GET_TX_DESC_PKT_OFFSET(__pdesc) \
137 LE_BITS_TO_4BYTE(__pdesc+4, 24, 8)
138
139#define SET_TX_DESC_RTS_RC(__pdesc, __val) \
140 SET_BITS_TO_LE_4BYTE(__pdesc+8, 0, 6, __val)
141#define SET_TX_DESC_DATA_RC(__pdesc, __val) \
142 SET_BITS_TO_LE_4BYTE(__pdesc+8, 6, 6, __val)
143#define SET_TX_DESC_BAR_RTY_TH(__pdesc, __val) \
144 SET_BITS_TO_LE_4BYTE(__pdesc+8, 14, 2, __val)
145#define SET_TX_DESC_MORE_FRAG(__pdesc, __val) \
146 SET_BITS_TO_LE_4BYTE(__pdesc+8, 17, 1, __val)
147#define SET_TX_DESC_RAW(__pdesc, __val) \
148 SET_BITS_TO_LE_4BYTE(__pdesc+8, 18, 1, __val)
149#define SET_TX_DESC_CCX(__pdesc, __val) \
150 SET_BITS_TO_LE_4BYTE(__pdesc+8, 19, 1, __val)
151#define SET_TX_DESC_AMPDU_DENSITY(__pdesc, __val) \
152 SET_BITS_TO_LE_4BYTE(__pdesc+8, 20, 3, __val)
153#define SET_TX_DESC_ANTSEL_A(__pdesc, __val) \
154 SET_BITS_TO_LE_4BYTE(__pdesc+8, 24, 1, __val)
155#define SET_TX_DESC_ANTSEL_B(__pdesc, __val) \
156 SET_BITS_TO_LE_4BYTE(__pdesc+8, 25, 1, __val)
157#define SET_TX_DESC_TX_ANT_CCK(__pdesc, __val) \
158 SET_BITS_TO_LE_4BYTE(__pdesc+8, 26, 2, __val)
159#define SET_TX_DESC_TX_ANTL(__pdesc, __val) \
160 SET_BITS_TO_LE_4BYTE(__pdesc+8, 28, 2, __val)
161#define SET_TX_DESC_TX_ANT_HT(__pdesc, __val) \
162 SET_BITS_TO_LE_4BYTE(__pdesc+8, 30, 2, __val)
163
164#define GET_TX_DESC_RTS_RC(__pdesc) \
165 LE_BITS_TO_4BYTE(__pdesc+8, 0, 6)
166#define GET_TX_DESC_DATA_RC(__pdesc) \
167 LE_BITS_TO_4BYTE(__pdesc+8, 6, 6)
168#define GET_TX_DESC_BAR_RTY_TH(__pdesc) \
169 LE_BITS_TO_4BYTE(__pdesc+8, 14, 2)
170#define GET_TX_DESC_MORE_FRAG(__pdesc) \
171 LE_BITS_TO_4BYTE(__pdesc+8, 17, 1)
172#define GET_TX_DESC_RAW(__pdesc) \
173 LE_BITS_TO_4BYTE(__pdesc+8, 18, 1)
174#define GET_TX_DESC_CCX(__pdesc) \
175 LE_BITS_TO_4BYTE(__pdesc+8, 19, 1)
176#define GET_TX_DESC_AMPDU_DENSITY(__pdesc) \
177 LE_BITS_TO_4BYTE(__pdesc+8, 20, 3)
178#define GET_TX_DESC_ANTSEL_A(__pdesc) \
179 LE_BITS_TO_4BYTE(__pdesc+8, 24, 1)
180#define GET_TX_DESC_ANTSEL_B(__pdesc) \
181 LE_BITS_TO_4BYTE(__pdesc+8, 25, 1)
182#define GET_TX_DESC_TX_ANT_CCK(__pdesc) \
183 LE_BITS_TO_4BYTE(__pdesc+8, 26, 2)
184#define GET_TX_DESC_TX_ANTL(__pdesc) \
185 LE_BITS_TO_4BYTE(__pdesc+8, 28, 2)
186#define GET_TX_DESC_TX_ANT_HT(__pdesc) \
187 LE_BITS_TO_4BYTE(__pdesc+8, 30, 2)
188
189#define SET_TX_DESC_NEXT_HEAP_PAGE(__pdesc, __val) \
190 SET_BITS_TO_LE_4BYTE(__pdesc+12, 0, 8, __val)
191#define SET_TX_DESC_TAIL_PAGE(__pdesc, __val) \
192 SET_BITS_TO_LE_4BYTE(__pdesc+12, 8, 8, __val)
193#define SET_TX_DESC_SEQ(__pdesc, __val) \
194 SET_BITS_TO_LE_4BYTE(__pdesc+12, 16, 12, __val)
195#define SET_TX_DESC_PKT_ID(__pdesc, __val) \
196 SET_BITS_TO_LE_4BYTE(__pdesc+12, 28, 4, __val)
197
198#define GET_TX_DESC_NEXT_HEAP_PAGE(__pdesc) \
199 LE_BITS_TO_4BYTE(__pdesc+12, 0, 8)
200#define GET_TX_DESC_TAIL_PAGE(__pdesc) \
201 LE_BITS_TO_4BYTE(__pdesc+12, 8, 8)
202#define GET_TX_DESC_SEQ(__pdesc) \
203 LE_BITS_TO_4BYTE(__pdesc+12, 16, 12)
204#define GET_TX_DESC_PKT_ID(__pdesc) \
205 LE_BITS_TO_4BYTE(__pdesc+12, 28, 4)
206
207#define SET_TX_DESC_RTS_RATE(__pdesc, __val) \
208 SET_BITS_TO_LE_4BYTE(__pdesc+16, 0, 5, __val)
209#define SET_TX_DESC_AP_DCFE(__pdesc, __val) \
210 SET_BITS_TO_LE_4BYTE(__pdesc+16, 5, 1, __val)
211#define SET_TX_DESC_QOS(__pdesc, __val) \
212 SET_BITS_TO_LE_4BYTE(__pdesc+16, 6, 1, __val)
213#define SET_TX_DESC_HWSEQ_EN(__pdesc, __val) \
214 SET_BITS_TO_LE_4BYTE(__pdesc+16, 7, 1, __val)
215#define SET_TX_DESC_USE_RATE(__pdesc, __val) \
216 SET_BITS_TO_LE_4BYTE(__pdesc+16, 8, 1, __val)
217#define SET_TX_DESC_DISABLE_RTS_FB(__pdesc, __val) \
218 SET_BITS_TO_LE_4BYTE(__pdesc+16, 9, 1, __val)
219#define SET_TX_DESC_DISABLE_FB(__pdesc, __val) \
220 SET_BITS_TO_LE_4BYTE(__pdesc+16, 10, 1, __val)
221#define SET_TX_DESC_CTS2SELF(__pdesc, __val) \
222 SET_BITS_TO_LE_4BYTE(__pdesc+16, 11, 1, __val)
223#define SET_TX_DESC_RTS_ENABLE(__pdesc, __val) \
224 SET_BITS_TO_LE_4BYTE(__pdesc+16, 12, 1, __val)
225#define SET_TX_DESC_HW_RTS_ENABLE(__pdesc, __val) \
226 SET_BITS_TO_LE_4BYTE(__pdesc+16, 13, 1, __val)
227#define SET_TX_DESC_PORT_ID(__pdesc, __val) \
228 SET_BITS_TO_LE_4BYTE(__pdesc+16, 14, 1, __val)
229#define SET_TX_DESC_WAIT_DCTS(__pdesc, __val) \
230 SET_BITS_TO_LE_4BYTE(__pdesc+16, 18, 1, __val)
231#define SET_TX_DESC_CTS2AP_EN(__pdesc, __val) \
232 SET_BITS_TO_LE_4BYTE(__pdesc+16, 19, 1, __val)
233#define SET_TX_DESC_TX_SUB_CARRIER(__pdesc, __val) \
234 SET_BITS_TO_LE_4BYTE(__pdesc+16, 20, 2, __val)
235#define SET_TX_DESC_TX_STBC(__pdesc, __val) \
236 SET_BITS_TO_LE_4BYTE(__pdesc+16, 22, 2, __val)
237#define SET_TX_DESC_DATA_SHORT(__pdesc, __val) \
238 SET_BITS_TO_LE_4BYTE(__pdesc+16, 24, 1, __val)
239#define SET_TX_DESC_DATA_BW(__pdesc, __val) \
240 SET_BITS_TO_LE_4BYTE(__pdesc+16, 25, 1, __val)
241#define SET_TX_DESC_RTS_SHORT(__pdesc, __val) \
242 SET_BITS_TO_LE_4BYTE(__pdesc+16, 26, 1, __val)
243#define SET_TX_DESC_RTS_BW(__pdesc, __val) \
244 SET_BITS_TO_LE_4BYTE(__pdesc+16, 27, 1, __val)
245#define SET_TX_DESC_RTS_SC(__pdesc, __val) \
246 SET_BITS_TO_LE_4BYTE(__pdesc+16, 28, 2, __val)
247#define SET_TX_DESC_RTS_STBC(__pdesc, __val) \
248 SET_BITS_TO_LE_4BYTE(__pdesc+16, 30, 2, __val)
249
250#define GET_TX_DESC_RTS_RATE(__pdesc) \
251 LE_BITS_TO_4BYTE(__pdesc+16, 0, 5)
252#define GET_TX_DESC_AP_DCFE(__pdesc) \
253 LE_BITS_TO_4BYTE(__pdesc+16, 5, 1)
254#define GET_TX_DESC_QOS(__pdesc) \
255 LE_BITS_TO_4BYTE(__pdesc+16, 6, 1)
256#define GET_TX_DESC_HWSEQ_EN(__pdesc) \
257 LE_BITS_TO_4BYTE(__pdesc+16, 7, 1)
258#define GET_TX_DESC_USE_RATE(__pdesc) \
259 LE_BITS_TO_4BYTE(__pdesc+16, 8, 1)
260#define GET_TX_DESC_DISABLE_RTS_FB(__pdesc) \
261 LE_BITS_TO_4BYTE(__pdesc+16, 9, 1)
262#define GET_TX_DESC_DISABLE_FB(__pdesc) \
263 LE_BITS_TO_4BYTE(__pdesc+16, 10, 1)
264#define GET_TX_DESC_CTS2SELF(__pdesc) \
265 LE_BITS_TO_4BYTE(__pdesc+16, 11, 1)
266#define GET_TX_DESC_RTS_ENABLE(__pdesc) \
267 LE_BITS_TO_4BYTE(__pdesc+16, 12, 1)
268#define GET_TX_DESC_HW_RTS_ENABLE(__pdesc) \
269 LE_BITS_TO_4BYTE(__pdesc+16, 13, 1)
270#define GET_TX_DESC_PORT_ID(__pdesc) \
271 LE_BITS_TO_4BYTE(__pdesc+16, 14, 1)
272#define GET_TX_DESC_WAIT_DCTS(__pdesc) \
273 LE_BITS_TO_4BYTE(__pdesc+16, 18, 1)
274#define GET_TX_DESC_CTS2AP_EN(__pdesc) \
275 LE_BITS_TO_4BYTE(__pdesc+16, 19, 1)
276#define GET_TX_DESC_TX_SUB_CARRIER(__pdesc) \
277 LE_BITS_TO_4BYTE(__pdesc+16, 20, 2)
278#define GET_TX_DESC_TX_STBC(__pdesc) \
279 LE_BITS_TO_4BYTE(__pdesc+16, 22, 2)
280#define GET_TX_DESC_DATA_SHORT(__pdesc) \
281 LE_BITS_TO_4BYTE(__pdesc+16, 24, 1)
282#define GET_TX_DESC_DATA_BW(__pdesc) \
283 LE_BITS_TO_4BYTE(__pdesc+16, 25, 1)
284#define GET_TX_DESC_RTS_SHORT(__pdesc) \
285 LE_BITS_TO_4BYTE(__pdesc+16, 26, 1)
286#define GET_TX_DESC_RTS_BW(__pdesc) \
287 LE_BITS_TO_4BYTE(__pdesc+16, 27, 1)
288#define GET_TX_DESC_RTS_SC(__pdesc) \
289 LE_BITS_TO_4BYTE(__pdesc+16, 28, 2)
290#define GET_TX_DESC_RTS_STBC(__pdesc) \
291 LE_BITS_TO_4BYTE(__pdesc+16, 30, 2)
292
293#define SET_TX_DESC_TX_RATE(__pdesc, __val) \
294 SET_BITS_TO_LE_4BYTE(__pdesc+20, 0, 6, __val)
295#define SET_TX_DESC_DATA_SHORTGI(__pdesc, __val) \
296 SET_BITS_TO_LE_4BYTE(__pdesc+20, 6, 1, __val)
297#define SET_TX_DESC_CCX_TAG(__pdesc, __val) \
298 SET_BITS_TO_LE_4BYTE(__pdesc+20, 7, 1, __val)
299#define SET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc, __val) \
300 SET_BITS_TO_LE_4BYTE(__pdesc+20, 8, 5, __val)
301#define SET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc, __val) \
302 SET_BITS_TO_LE_4BYTE(__pdesc+20, 13, 4, __val)
303#define SET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc, __val) \
304 SET_BITS_TO_LE_4BYTE(__pdesc+20, 17, 1, __val)
305#define SET_TX_DESC_DATA_RETRY_LIMIT(__pdesc, __val) \
306 SET_BITS_TO_LE_4BYTE(__pdesc+20, 18, 6, __val)
307#define SET_TX_DESC_USB_TXAGG_NUM(__pdesc, __val) \
308 SET_BITS_TO_LE_4BYTE(__pdesc+20, 24, 8, __val)
309
310#define GET_TX_DESC_TX_RATE(__pdesc) \
311 LE_BITS_TO_4BYTE(__pdesc+20, 0, 6)
312#define GET_TX_DESC_DATA_SHORTGI(__pdesc) \
313 LE_BITS_TO_4BYTE(__pdesc+20, 6, 1)
314#define GET_TX_DESC_CCX_TAG(__pdesc) \
315 LE_BITS_TO_4BYTE(__pdesc+20, 7, 1)
316#define GET_TX_DESC_DATA_RATE_FB_LIMIT(__pdesc) \
317 LE_BITS_TO_4BYTE(__pdesc+20, 8, 5)
318#define GET_TX_DESC_RTS_RATE_FB_LIMIT(__pdesc) \
319 LE_BITS_TO_4BYTE(__pdesc+20, 13, 4)
320#define GET_TX_DESC_RETRY_LIMIT_ENABLE(__pdesc) \
321 LE_BITS_TO_4BYTE(__pdesc+20, 17, 1)
322#define GET_TX_DESC_DATA_RETRY_LIMIT(__pdesc) \
323 LE_BITS_TO_4BYTE(__pdesc+20, 18, 6)
324#define GET_TX_DESC_USB_TXAGG_NUM(__pdesc) \
325 LE_BITS_TO_4BYTE(__pdesc+20, 24, 8)
326
327#define SET_TX_DESC_TXAGC_A(__pdesc, __val) \
328 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 5, __val)
329#define SET_TX_DESC_TXAGC_B(__pdesc, __val) \
330 SET_BITS_TO_LE_4BYTE(__pdesc+24, 5, 5, __val)
331#define SET_TX_DESC_USE_MAX_LEN(__pdesc, __val) \
332 SET_BITS_TO_LE_4BYTE(__pdesc+24, 10, 1, __val)
333#define SET_TX_DESC_MAX_AGG_NUM(__pdesc, __val) \
334 SET_BITS_TO_LE_4BYTE(__pdesc+24, 11, 5, __val)
335#define SET_TX_DESC_MCSG1_MAX_LEN(__pdesc, __val) \
336 SET_BITS_TO_LE_4BYTE(__pdesc+24, 16, 4, __val)
337#define SET_TX_DESC_MCSG2_MAX_LEN(__pdesc, __val) \
338 SET_BITS_TO_LE_4BYTE(__pdesc+24, 20, 4, __val)
339#define SET_TX_DESC_MCSG3_MAX_LEN(__pdesc, __val) \
340 SET_BITS_TO_LE_4BYTE(__pdesc+24, 24, 4, __val)
341#define SET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc, __val) \
342 SET_BITS_TO_LE_4BYTE(__pdesc+24, 28, 4, __val)
343
344#define GET_TX_DESC_TXAGC_A(__pdesc) \
345 LE_BITS_TO_4BYTE(__pdesc+24, 0, 5)
346#define GET_TX_DESC_TXAGC_B(__pdesc) \
347 LE_BITS_TO_4BYTE(__pdesc+24, 5, 5)
348#define GET_TX_DESC_USE_MAX_LEN(__pdesc) \
349 LE_BITS_TO_4BYTE(__pdesc+24, 10, 1)
350#define GET_TX_DESC_MAX_AGG_NUM(__pdesc) \
351 LE_BITS_TO_4BYTE(__pdesc+24, 11, 5)
352#define GET_TX_DESC_MCSG1_MAX_LEN(__pdesc) \
353 LE_BITS_TO_4BYTE(__pdesc+24, 16, 4)
354#define GET_TX_DESC_MCSG2_MAX_LEN(__pdesc) \
355 LE_BITS_TO_4BYTE(__pdesc+24, 20, 4)
356#define GET_TX_DESC_MCSG3_MAX_LEN(__pdesc) \
357 LE_BITS_TO_4BYTE(__pdesc+24, 24, 4)
358#define GET_TX_DESC_MCS7_SGI_MAX_LEN(__pdesc) \
359 LE_BITS_TO_4BYTE(__pdesc+24, 28, 4)
360
361#define SET_TX_DESC_TX_BUFFER_SIZE(__pdesc, __val) \
362 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 16, __val)
363#define SET_TX_DESC_MCSG4_MAX_LEN(__pdesc, __val) \
364 SET_BITS_TO_LE_4BYTE(__pdesc+28, 16, 4, __val)
365#define SET_TX_DESC_MCSG5_MAX_LEN(__pdesc, __val) \
366 SET_BITS_TO_LE_4BYTE(__pdesc+28, 20, 4, __val)
367#define SET_TX_DESC_MCSG6_MAX_LEN(__pdesc, __val) \
368 SET_BITS_TO_LE_4BYTE(__pdesc+28, 24, 4, __val)
369#define SET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc, __val) \
370 SET_BITS_TO_LE_4BYTE(__pdesc+28, 28, 4, __val)
371
372#define GET_TX_DESC_TX_BUFFER_SIZE(__pdesc) \
373 LE_BITS_TO_4BYTE(__pdesc+28, 0, 16)
374#define GET_TX_DESC_MCSG4_MAX_LEN(__pdesc) \
375 LE_BITS_TO_4BYTE(__pdesc+28, 16, 4)
376#define GET_TX_DESC_MCSG5_MAX_LEN(__pdesc) \
377 LE_BITS_TO_4BYTE(__pdesc+28, 20, 4)
378#define GET_TX_DESC_MCSG6_MAX_LEN(__pdesc) \
379 LE_BITS_TO_4BYTE(__pdesc+28, 24, 4)
380#define GET_TX_DESC_MCS15_SGI_MAX_LEN(__pdesc) \
381 LE_BITS_TO_4BYTE(__pdesc+28, 28, 4)
382
383#define SET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc, __val) \
384 SET_BITS_TO_LE_4BYTE(__pdesc+32, 0, 32, __val)
385#define SET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc, __val) \
386 SET_BITS_TO_LE_4BYTE(__pdesc+36, 0, 32, __val)
387
388#define GET_TX_DESC_TX_BUFFER_ADDRESS(__pdesc) \
389 LE_BITS_TO_4BYTE(__pdesc+32, 0, 32)
390#define GET_TX_DESC_TX_BUFFER_ADDRESS64(__pdesc) \
391 LE_BITS_TO_4BYTE(__pdesc+36, 0, 32)
392
393#define SET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc, __val) \
394 SET_BITS_TO_LE_4BYTE(__pdesc+40, 0, 32, __val)
395#define SET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc, __val) \
396 SET_BITS_TO_LE_4BYTE(__pdesc+44, 0, 32, __val)
397
398#define GET_TX_DESC_NEXT_DESC_ADDRESS(__pdesc) \
399 LE_BITS_TO_4BYTE(__pdesc+40, 0, 32)
400#define GET_TX_DESC_NEXT_DESC_ADDRESS64(__pdesc) \
401 LE_BITS_TO_4BYTE(__pdesc+44, 0, 32)
402
403#define GET_RX_DESC_PKT_LEN(__pdesc) \
404 LE_BITS_TO_4BYTE(__pdesc, 0, 14)
405#define GET_RX_DESC_CRC32(__pdesc) \
406 LE_BITS_TO_4BYTE(__pdesc, 14, 1)
407#define GET_RX_DESC_ICV(__pdesc) \
408 LE_BITS_TO_4BYTE(__pdesc, 15, 1)
409#define GET_RX_DESC_DRV_INFO_SIZE(__pdesc) \
410 LE_BITS_TO_4BYTE(__pdesc, 16, 4)
411#define GET_RX_DESC_SECURITY(__pdesc) \
412 LE_BITS_TO_4BYTE(__pdesc, 20, 3)
413#define GET_RX_DESC_QOS(__pdesc) \
414 LE_BITS_TO_4BYTE(__pdesc, 23, 1)
415#define GET_RX_DESC_SHIFT(__pdesc) \
416 LE_BITS_TO_4BYTE(__pdesc, 24, 2)
417#define GET_RX_DESC_PHYST(__pdesc) \
418 LE_BITS_TO_4BYTE(__pdesc, 26, 1)
419#define GET_RX_DESC_SWDEC(__pdesc) \
420 LE_BITS_TO_4BYTE(__pdesc, 27, 1)
421#define GET_RX_DESC_LS(__pdesc) \
422 LE_BITS_TO_4BYTE(__pdesc, 28, 1)
423#define GET_RX_DESC_FS(__pdesc) \
424 LE_BITS_TO_4BYTE(__pdesc, 29, 1)
425#define GET_RX_DESC_EOR(__pdesc) \
426 LE_BITS_TO_4BYTE(__pdesc, 30, 1)
427#define GET_RX_DESC_OWN(__pdesc) \
428 LE_BITS_TO_4BYTE(__pdesc, 31, 1)
429
430#define SET_RX_DESC_PKT_LEN(__pdesc, __val) \
431 SET_BITS_TO_LE_4BYTE(__pdesc, 0, 14, __val)
432#define SET_RX_DESC_EOR(__pdesc, __val) \
433 SET_BITS_TO_LE_4BYTE(__pdesc, 30, 1, __val)
434#define SET_RX_DESC_OWN(__pdesc, __val) \
435 SET_BITS_TO_LE_4BYTE(__pdesc, 31, 1, __val)
436
437#define GET_RX_DESC_MACID(__pdesc) \
438 LE_BITS_TO_4BYTE(__pdesc+4, 0, 5)
439#define GET_RX_DESC_TID(__pdesc) \
440 LE_BITS_TO_4BYTE(__pdesc+4, 5, 4)
441#define GET_RX_DESC_HWRSVD(__pdesc) \
442 LE_BITS_TO_4BYTE(__pdesc+4, 9, 5)
443#define GET_RX_DESC_PAGGR(__pdesc) \
444 LE_BITS_TO_4BYTE(__pdesc+4, 14, 1)
445#define GET_RX_DESC_FAGGR(__pdesc) \
446 LE_BITS_TO_4BYTE(__pdesc+4, 15, 1)
447#define GET_RX_DESC_A1_FIT(__pdesc) \
448 LE_BITS_TO_4BYTE(__pdesc+4, 16, 4)
449#define GET_RX_DESC_A2_FIT(__pdesc) \
450 LE_BITS_TO_4BYTE(__pdesc+4, 20, 4)
451#define GET_RX_DESC_PAM(__pdesc) \
452 LE_BITS_TO_4BYTE(__pdesc+4, 24, 1)
453#define GET_RX_DESC_PWR(__pdesc) \
454 LE_BITS_TO_4BYTE(__pdesc+4, 25, 1)
455#define GET_RX_DESC_MD(__pdesc) \
456 LE_BITS_TO_4BYTE(__pdesc+4, 26, 1)
457#define GET_RX_DESC_MF(__pdesc) \
458 LE_BITS_TO_4BYTE(__pdesc+4, 27, 1)
459#define GET_RX_DESC_TYPE(__pdesc) \
460 LE_BITS_TO_4BYTE(__pdesc+4, 28, 2)
461#define GET_RX_DESC_MC(__pdesc) \
462 LE_BITS_TO_4BYTE(__pdesc+4, 30, 1)
463#define GET_RX_DESC_BC(__pdesc) \
464 LE_BITS_TO_4BYTE(__pdesc+4, 31, 1)
465#define GET_RX_DESC_SEQ(__pdesc) \
466 LE_BITS_TO_4BYTE(__pdesc+8, 0, 12)
467#define GET_RX_DESC_FRAG(__pdesc) \
468 LE_BITS_TO_4BYTE(__pdesc+8, 12, 4)
469#define GET_RX_DESC_NEXT_PKT_LEN(__pdesc) \
470 LE_BITS_TO_4BYTE(__pdesc+8, 16, 14)
471#define GET_RX_DESC_NEXT_IND(__pdesc) \
472 LE_BITS_TO_4BYTE(__pdesc+8, 30, 1)
473#define GET_RX_DESC_RSVD(__pdesc) \
474 LE_BITS_TO_4BYTE(__pdesc+8, 31, 1)
475
476#define GET_RX_DESC_RXMCS(__pdesc) \
477 LE_BITS_TO_4BYTE(__pdesc+12, 0, 6)
478#define GET_RX_DESC_RXHT(__pdesc) \
479 LE_BITS_TO_4BYTE(__pdesc+12, 6, 1)
480#define GET_RX_DESC_SPLCP(__pdesc) \
481 LE_BITS_TO_4BYTE(__pdesc+12, 8, 1)
482#define GET_RX_DESC_BW(__pdesc) \
483 LE_BITS_TO_4BYTE(__pdesc+12, 9, 1)
484#define GET_RX_DESC_HTC(__pdesc) \
485 LE_BITS_TO_4BYTE(__pdesc+12, 10, 1)
486#define GET_RX_DESC_HWPC_ERR(__pdesc) \
487 LE_BITS_TO_4BYTE(__pdesc+12, 14, 1)
488#define GET_RX_DESC_HWPC_IND(__pdesc) \
489 LE_BITS_TO_4BYTE(__pdesc+12, 15, 1)
490#define GET_RX_DESC_IV0(__pdesc) \
491 LE_BITS_TO_4BYTE(__pdesc+12, 16, 16)
492
493#define GET_RX_DESC_IV1(__pdesc) \
494 LE_BITS_TO_4BYTE(__pdesc+16, 0, 32)
495#define GET_RX_DESC_TSFL(__pdesc) \
496 LE_BITS_TO_4BYTE(__pdesc+20, 0, 32)
497
498#define GET_RX_DESC_BUFF_ADDR(__pdesc) \
499 LE_BITS_TO_4BYTE(__pdesc+24, 0, 32)
500#define GET_RX_DESC_BUFF_ADDR64(__pdesc) \
501 LE_BITS_TO_4BYTE(__pdesc+28, 0, 32)
502
503#define SET_RX_DESC_BUFF_ADDR(__pdesc, __val) \
504 SET_BITS_TO_LE_4BYTE(__pdesc+24, 0, 32, __val)
505#define SET_RX_DESC_BUFF_ADDR64(__pdesc, __val) \
506 SET_BITS_TO_LE_4BYTE(__pdesc+28, 0, 32, __val)
507
508#define CLEAR_PCI_TX_DESC_CONTENT(__pdesc, _size) \
509do { \
510 if (_size > TX_DESC_NEXT_DESC_OFFSET) \
511 memset((void *)__pdesc, 0, TX_DESC_NEXT_DESC_OFFSET); \
512 else \
513 memset((void *)__pdesc, 0, _size); \
514} while (0)
515
516#define RX_HAL_IS_CCK_RATE(_pdesc)\
517 (_pdesc->rxmcs == DESC92C_RATE1M || \
518 _pdesc->rxmcs == DESC92C_RATE2M || \
519 _pdesc->rxmcs == DESC92C_RATE5_5M || \
520 _pdesc->rxmcs == DESC92C_RATE11M)
521
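/*
 * Illustrative sketch only (not part of the original descriptor macros):
 * how a caller might program a few TX descriptor fields with the accessors
 * above, assuming the SET_BITS_TO_LE_4BYTE helpers from wifi.h are in scope.
 * "desc" is assumed to point at a zeroed, descriptor-sized buffer; the rate
 * index 0 is a placeholder.
 */
static inline void example_fill_tx_desc(u8 *desc, u32 dma_addr, u16 len)
{
        SET_TX_DESC_TX_BUFFER_ADDRESS(desc, dma_addr); /* dword 8: payload DMA address */
        SET_TX_DESC_TX_BUFFER_SIZE(desc, len);         /* dword 7, bits 0-15: payload length */
        SET_TX_DESC_TX_RATE(desc, 0);                  /* dword 5, bits 0-5: rate index */
}
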
522struct rx_fwinfo_92c {
523 u8 gain_trsw[4];
524 u8 pwdb_all;
525 u8 cfosho[4];
526 u8 cfotail[4];
527 char rxevm[2];
528 char rxsnr[4];
529 u8 pdsnr[2];
530 u8 csi_current[2];
531 u8 csi_target[2];
532 u8 sigevm;
533 u8 max_ex_pwr;
534 u8 ex_intf_flag:1;
535 u8 sgi_en:1;
536 u8 rxsc:2;
537 u8 reserve:4;
538} __packed;
539
540struct tx_desc_92c {
541 u32 pktsize:16;
542 u32 offset:8;
543 u32 bmc:1;
544 u32 htc:1;
545 u32 lastseg:1;
546 u32 firstseg:1;
547 u32 linip:1;
548 u32 noacm:1;
549 u32 gf:1;
550 u32 own:1;
551
552 u32 macid:5;
553 u32 agg_en:1;
554 u32 bk:1;
555 u32 rdg_en:1;
556 u32 queuesel:5;
557 u32 rd_nav_ext:1;
558 u32 lsig_txop_en:1;
559 u32 pifs:1;
560 u32 rateid:4;
561 u32 nav_usehdr:1;
562 u32 en_descid:1;
563 u32 sectype:2;
564 u32 pktoffset:8;
565
566 u32 rts_rc:6;
567 u32 data_rc:6;
568 u32 rsvd0:2;
569 u32 bar_retryht:2;
570 u32 rsvd1:1;
571 u32 morefrag:1;
572 u32 raw:1;
573 u32 ccx:1;
574 u32 ampdudensity:3;
575 u32 rsvd2:1;
576 u32 ant_sela:1;
577 u32 ant_selb:1;
578 u32 txant_cck:2;
579 u32 txant_l:2;
580 u32 txant_ht:2;
581
582 u32 nextheadpage:8;
583 u32 tailpage:8;
584 u32 seq:12;
585 u32 pktid:4;
586
587 u32 rtsrate:5;
588 u32 apdcfe:1;
589 u32 qos:1;
590 u32 hwseq_enable:1;
591 u32 userrate:1;
592 u32 dis_rtsfb:1;
593 u32 dis_datafb:1;
594 u32 cts2self:1;
595 u32 rts_en:1;
596 u32 hwrts_en:1;
597 u32 portid:1;
598 u32 rsvd3:3;
599 u32 waitdcts:1;
600 u32 cts2ap_en:1;
601 u32 txsc:2;
602 u32 stbc:2;
603 u32 txshort:1;
604 u32 txbw:1;
605 u32 rtsshort:1;
606 u32 rtsbw:1;
607 u32 rtssc:2;
608 u32 rtsstbc:2;
609
610 u32 txrate:6;
611 u32 shortgi:1;
612 u32 ccxt:1;
613 u32 txrate_fb_lmt:5;
614 u32 rtsrate_fb_lmt:4;
615 u32 retrylmt_en:1;
616 u32 txretrylmt:6;
617 u32 usb_txaggnum:8;
618
619 u32 txagca:5;
620 u32 txagcb:5;
621 u32 usemaxlen:1;
622 u32 maxaggnum:5;
623 u32 mcsg1maxlen:4;
624 u32 mcsg2maxlen:4;
625 u32 mcsg3maxlen:4;
626 u32 mcs7sgimaxlen:4;
627
628 u32 txbuffersize:16;
629 u32 mcsg4maxlen:4;
630 u32 mcsg5maxlen:4;
631 u32 mcsg6maxlen:4;
632 u32 mcsg15sgimaxlen:4;
633
634 u32 txbuffaddr;
635 u32 txbufferaddr64;
636 u32 nextdescaddress;
637 u32 nextdescaddress64;
638
639 u32 reserve_pass_pcie_mm_limit[4];
640} __packed;
641
642struct rx_desc_92c {
643 u32 length:14;
644 u32 crc32:1;
645 u32 icverror:1;
646 u32 drv_infosize:4;
647 u32 security:3;
648 u32 qos:1;
649 u32 shift:2;
650 u32 phystatus:1;
651 u32 swdec:1;
652 u32 lastseg:1;
653 u32 firstseg:1;
654 u32 eor:1;
655 u32 own:1;
656
657 u32 macid:5;
658 u32 tid:4;
659 u32 hwrsvd:5;
660 u32 paggr:1;
661 u32 faggr:1;
662 u32 a1_fit:4;
663 u32 a2_fit:4;
664 u32 pam:1;
665 u32 pwr:1;
666 u32 moredata:1;
667 u32 morefrag:1;
668 u32 type:2;
669 u32 mc:1;
670 u32 bc:1;
671
672 u32 seq:12;
673 u32 frag:4;
674 u32 nextpktlen:14;
675 u32 nextind:1;
676 u32 rsvd:1;
677
678 u32 rxmcs:6;
679 u32 rxht:1;
680 u32 amsdu:1;
681 u32 splcp:1;
682 u32 bandwidth:1;
683 u32 htc:1;
684 u32 tcpchk_rpt:1;
685 u32 ipcchk_rpt:1;
686 u32 tcpchk_valid:1;
687 u32 hwpcerr:1;
688 u32 hwpcind:1;
689 u32 iv0:16;
690
691 u32 iv1;
692
693 u32 tsfl;
694
695 u32 bufferaddress;
696 u32 bufferaddress64;
697
698} __packed;
699
700void rtl92ce_tx_fill_desc(struct ieee80211_hw *hw,
701 struct ieee80211_hdr *hdr,
702 u8 *pdesc, struct ieee80211_tx_info *info,
703 struct sk_buff *skb, unsigned int qsel);
704bool rtl92ce_rx_query_desc(struct ieee80211_hw *hw,
705 struct rtl_stats *stats,
706 struct ieee80211_rx_status *rx_status,
707 u8 *pdesc, struct sk_buff *skb);
708void rtl92ce_set_desc(u8 *pdesc, bool istx, u8 desc_name, u8 *val);
709u32 rtl92ce_get_desc(u8 *pdesc, bool istx, u8 desc_name);
710void rtl92ce_tx_polling(struct ieee80211_hw *hw, unsigned int hw_queue);
711void rtl92ce_tx_fill_cmddesc(struct ieee80211_hw *hw, u8 *pdesc,
712 bool b_firstseg, bool b_lastseg,
713 struct sk_buff *skb);
714#endif
diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
new file mode 100644
index 000000000000..d44d79613d2d
--- /dev/null
+++ b/drivers/net/wireless/rtlwifi/wifi.h
@@ -0,0 +1,1532 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2009-2010 Realtek Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * wlanfae <wlanfae@realtek.com>
23 * Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
24 * Hsinchu 300, Taiwan.
25 *
26 * Larry Finger <Larry.Finger@lwfinger.net>
27 *
28 *****************************************************************************/
29
30#ifndef __RTL_WIFI_H__
31#define __RTL_WIFI_H__
32
33#include <linux/sched.h>
34#include <linux/firmware.h>
35#include <linux/version.h>
36#include <linux/etherdevice.h>
37#include <net/mac80211.h>
38#include "debug.h"
39
40#define RF_CHANGE_BY_INIT 0
41#define RF_CHANGE_BY_IPS BIT(28)
42#define RF_CHANGE_BY_PS BIT(29)
43#define RF_CHANGE_BY_HW BIT(30)
44#define RF_CHANGE_BY_SW BIT(31)
45
46#define IQK_ADDA_REG_NUM 16
47#define IQK_MAC_REG_NUM 4
48
49#define MAX_KEY_LEN 61
50#define KEY_BUF_SIZE 5
51
52/* QoS related. */
 53/*aci: 0b00 Best Effort*/
 54/*aci: 0b01 Background*/
 55/*aci: 0b10 Video*/
 56/*aci: 0b11 Voice*/
57/*Max: define total number.*/
58#define AC0_BE 0
59#define AC1_BK 1
60#define AC2_VI 2
61#define AC3_VO 3
62#define AC_MAX 4
63#define QOS_QUEUE_NUM 4
64#define RTL_MAC80211_NUM_QUEUE 5
65
66#define QBSS_LOAD_SIZE 5
67#define MAX_WMMELE_LENGTH 64
68
69/*slot time for 11g. */
70#define RTL_SLOT_TIME_9 9
71#define RTL_SLOT_TIME_20 20
72
 73/*related to tcp/ip. */
 74/*if_ether.h*/
75#define ETH_P_PAE 0x888E /*Port Access Entity (IEEE 802.1X) */
76#define ETH_P_IP 0x0800 /*Internet Protocol packet */
77#define ETH_P_ARP 0x0806 /*Address Resolution packet */
78#define SNAP_SIZE 6
79#define PROTOC_TYPE_SIZE 2
80
 81/*related to 802.11 frames*/
82#define MAC80211_3ADDR_LEN 24
83#define MAC80211_4ADDR_LEN 30
84
85enum intf_type {
86 INTF_PCI = 0,
87 INTF_USB = 1,
88};
89
90enum radio_path {
91 RF90_PATH_A = 0,
92 RF90_PATH_B = 1,
93 RF90_PATH_C = 2,
94 RF90_PATH_D = 3,
95};
96
97enum rt_eeprom_type {
98 EEPROM_93C46,
99 EEPROM_93C56,
100 EEPROM_BOOT_EFUSE,
101};
102
103enum rtl_status {
104 RTL_STATUS_INTERFACE_START = 0,
105};
106
107enum hardware_type {
108 HARDWARE_TYPE_RTL8192E,
109 HARDWARE_TYPE_RTL8192U,
110 HARDWARE_TYPE_RTL8192SE,
111 HARDWARE_TYPE_RTL8192SU,
112 HARDWARE_TYPE_RTL8192CE,
113 HARDWARE_TYPE_RTL8192CU,
114 HARDWARE_TYPE_RTL8192DE,
115 HARDWARE_TYPE_RTL8192DU,
116
117 /*keep it last*/
118 HARDWARE_TYPE_NUM
119};
120
121enum scan_operation_backup_opt {
122 SCAN_OPT_BACKUP = 0,
123 SCAN_OPT_RESTORE,
124 SCAN_OPT_MAX
125};
126
127/*RF state.*/
128enum rf_pwrstate {
129 ERFON,
130 ERFSLEEP,
131 ERFOFF
132};
133
134struct bb_reg_def {
135 u32 rfintfs;
136 u32 rfintfi;
137 u32 rfintfo;
138 u32 rfintfe;
139 u32 rf3wire_offset;
140 u32 rflssi_select;
141 u32 rftxgain_stage;
142 u32 rfhssi_para1;
143 u32 rfhssi_para2;
144 u32 rfswitch_control;
145 u32 rfagc_control1;
146 u32 rfagc_control2;
147 u32 rfrxiq_imbalance;
148 u32 rfrx_afe;
149 u32 rftxiq_imbalance;
150 u32 rftx_afe;
151 u32 rflssi_readback;
152 u32 rflssi_readbackpi;
153};
154
155enum io_type {
156 IO_CMD_PAUSE_DM_BY_SCAN = 0,
157 IO_CMD_RESUME_DM_BY_SCAN = 1,
158};
159
160enum hw_variables {
161 HW_VAR_ETHER_ADDR,
162 HW_VAR_MULTICAST_REG,
163 HW_VAR_BASIC_RATE,
164 HW_VAR_BSSID,
165 HW_VAR_MEDIA_STATUS,
166 HW_VAR_SECURITY_CONF,
167 HW_VAR_BEACON_INTERVAL,
168 HW_VAR_ATIM_WINDOW,
169 HW_VAR_LISTEN_INTERVAL,
170 HW_VAR_CS_COUNTER,
171 HW_VAR_DEFAULTKEY0,
172 HW_VAR_DEFAULTKEY1,
173 HW_VAR_DEFAULTKEY2,
174 HW_VAR_DEFAULTKEY3,
175 HW_VAR_SIFS,
176 HW_VAR_DIFS,
177 HW_VAR_EIFS,
178 HW_VAR_SLOT_TIME,
179 HW_VAR_ACK_PREAMBLE,
180 HW_VAR_CW_CONFIG,
181 HW_VAR_CW_VALUES,
182 HW_VAR_RATE_FALLBACK_CONTROL,
183 HW_VAR_CONTENTION_WINDOW,
184 HW_VAR_RETRY_COUNT,
185 HW_VAR_TR_SWITCH,
186 HW_VAR_COMMAND,
187 HW_VAR_WPA_CONFIG,
188 HW_VAR_AMPDU_MIN_SPACE,
189 HW_VAR_SHORTGI_DENSITY,
190 HW_VAR_AMPDU_FACTOR,
191 HW_VAR_MCS_RATE_AVAILABLE,
192 HW_VAR_AC_PARAM,
193 HW_VAR_ACM_CTRL,
194 HW_VAR_DIS_Req_Qsize,
195 HW_VAR_CCX_CHNL_LOAD,
196 HW_VAR_CCX_NOISE_HISTOGRAM,
197 HW_VAR_CCX_CLM_NHM,
198 HW_VAR_TxOPLimit,
199 HW_VAR_TURBO_MODE,
200 HW_VAR_RF_STATE,
201 HW_VAR_RF_OFF_BY_HW,
202 HW_VAR_BUS_SPEED,
203 HW_VAR_SET_DEV_POWER,
204
205 HW_VAR_RCR,
206 HW_VAR_RATR_0,
207 HW_VAR_RRSR,
208 HW_VAR_CPU_RST,
209 HW_VAR_CECHK_BSSID,
210 HW_VAR_LBK_MODE,
211 HW_VAR_AES_11N_FIX,
212 HW_VAR_USB_RX_AGGR,
213 HW_VAR_USER_CONTROL_TURBO_MODE,
214 HW_VAR_RETRY_LIMIT,
215 HW_VAR_INIT_TX_RATE,
216 HW_VAR_TX_RATE_REG,
217 HW_VAR_EFUSE_USAGE,
218 HW_VAR_EFUSE_BYTES,
219 HW_VAR_AUTOLOAD_STATUS,
220 HW_VAR_RF_2R_DISABLE,
221 HW_VAR_SET_RPWM,
222 HW_VAR_H2C_FW_PWRMODE,
223 HW_VAR_H2C_FW_JOINBSSRPT,
224 HW_VAR_FW_PSMODE_STATUS,
225 HW_VAR_1X1_RECV_COMBINE,
226 HW_VAR_STOP_SEND_BEACON,
227 HW_VAR_TSF_TIMER,
228 HW_VAR_IO_CMD,
229
230 HW_VAR_RF_RECOVERY,
231 HW_VAR_H2C_FW_UPDATE_GTK,
232 HW_VAR_WF_MASK,
233 HW_VAR_WF_CRC,
234 HW_VAR_WF_IS_MAC_ADDR,
235 HW_VAR_H2C_FW_OFFLOAD,
236 HW_VAR_RESET_WFCRC,
237
238 HW_VAR_HANDLE_FW_C2H,
239 HW_VAR_DL_FW_RSVD_PAGE,
240 HW_VAR_AID,
241 HW_VAR_HW_SEQ_ENABLE,
242 HW_VAR_CORRECT_TSF,
243 HW_VAR_BCN_VALID,
244 HW_VAR_FWLPS_RF_ON,
245 HW_VAR_DUAL_TSF_RST,
246 HW_VAR_SWITCH_EPHY_WoWLAN,
247 HW_VAR_INT_MIGRATION,
248 HW_VAR_INT_AC,
249 HW_VAR_RF_TIMING,
250
251 HW_VAR_MRC,
252
253 HW_VAR_MGT_FILTER,
254 HW_VAR_CTRL_FILTER,
255 HW_VAR_DATA_FILTER,
256};
257
258enum _RT_MEDIA_STATUS {
259 RT_MEDIA_DISCONNECT = 0,
260 RT_MEDIA_CONNECT = 1
261};
262
263enum rt_oem_id {
264 RT_CID_DEFAULT = 0,
265 RT_CID_8187_ALPHA0 = 1,
266 RT_CID_8187_SERCOMM_PS = 2,
267 RT_CID_8187_HW_LED = 3,
268 RT_CID_8187_NETGEAR = 4,
269 RT_CID_WHQL = 5,
270 RT_CID_819x_CAMEO = 6,
271 RT_CID_819x_RUNTOP = 7,
272 RT_CID_819x_Senao = 8,
273 RT_CID_TOSHIBA = 9,
274 RT_CID_819x_Netcore = 10,
275 RT_CID_Nettronix = 11,
276 RT_CID_DLINK = 12,
277 RT_CID_PRONET = 13,
278 RT_CID_COREGA = 14,
279 RT_CID_819x_ALPHA = 15,
280 RT_CID_819x_Sitecom = 16,
281 RT_CID_CCX = 17,
282 RT_CID_819x_Lenovo = 18,
283 RT_CID_819x_QMI = 19,
284 RT_CID_819x_Edimax_Belkin = 20,
285 RT_CID_819x_Sercomm_Belkin = 21,
286 RT_CID_819x_CAMEO1 = 22,
287 RT_CID_819x_MSI = 23,
288 RT_CID_819x_Acer = 24,
289 RT_CID_819x_HP = 27,
290 RT_CID_819x_CLEVO = 28,
291 RT_CID_819x_Arcadyan_Belkin = 29,
292 RT_CID_819x_SAMSUNG = 30,
293 RT_CID_819x_WNC_COREGA = 31,
294 RT_CID_819x_Foxcoon = 32,
295 RT_CID_819x_DELL = 33,
296};
297
298enum hw_descs {
299 HW_DESC_OWN,
300 HW_DESC_RXOWN,
301 HW_DESC_TX_NEXTDESC_ADDR,
302 HW_DESC_TXBUFF_ADDR,
303 HW_DESC_RXBUFF_ADDR,
304 HW_DESC_RXPKT_LEN,
305 HW_DESC_RXERO,
306};
307
308enum prime_sc {
309 PRIME_CHNL_OFFSET_DONT_CARE = 0,
310 PRIME_CHNL_OFFSET_LOWER = 1,
311 PRIME_CHNL_OFFSET_UPPER = 2,
312};
313
314enum rf_type {
315 RF_1T1R = 0,
316 RF_1T2R = 1,
317 RF_2T2R = 2,
318};
319
320enum ht_channel_width {
321 HT_CHANNEL_WIDTH_20 = 0,
322 HT_CHANNEL_WIDTH_20_40 = 1,
323};
324
 325/* Ref: 802.11i spec D10.0 7.3.2.25.1
326Cipher Suites Encryption Algorithms */
327enum rt_enc_alg {
328 NO_ENCRYPTION = 0,
329 WEP40_ENCRYPTION = 1,
330 TKIP_ENCRYPTION = 2,
331 RSERVED_ENCRYPTION = 3,
332 AESCCMP_ENCRYPTION = 4,
333 WEP104_ENCRYPTION = 5,
334};
335
336enum rtl_hal_state {
337 _HAL_STATE_STOP = 0,
338 _HAL_STATE_START = 1,
339};
340
341enum rtl_var_map {
342 /*reg map */
343 SYS_ISO_CTRL = 0,
344 SYS_FUNC_EN,
345 SYS_CLK,
346 MAC_RCR_AM,
347 MAC_RCR_AB,
348 MAC_RCR_ACRC32,
349 MAC_RCR_ACF,
350 MAC_RCR_AAP,
351
352 /*efuse map */
353 EFUSE_TEST,
354 EFUSE_CTRL,
355 EFUSE_CLK,
356 EFUSE_CLK_CTRL,
357 EFUSE_PWC_EV12V,
358 EFUSE_FEN_ELDR,
359 EFUSE_LOADER_CLK_EN,
360 EFUSE_ANA8M,
361 EFUSE_HWSET_MAX_SIZE,
362
363 /*CAM map */
364 RWCAM,
365 WCAMI,
366 RCAMO,
367 CAMDBG,
368 SECR,
369 SEC_CAM_NONE,
370 SEC_CAM_WEP40,
371 SEC_CAM_TKIP,
372 SEC_CAM_AES,
373 SEC_CAM_WEP104,
374
375 /*IMR map */
376 RTL_IMR_BCNDMAINT6, /*Beacon DMA Interrupt 6 */
377 RTL_IMR_BCNDMAINT5, /*Beacon DMA Interrupt 5 */
378 RTL_IMR_BCNDMAINT4, /*Beacon DMA Interrupt 4 */
379 RTL_IMR_BCNDMAINT3, /*Beacon DMA Interrupt 3 */
380 RTL_IMR_BCNDMAINT2, /*Beacon DMA Interrupt 2 */
381 RTL_IMR_BCNDMAINT1, /*Beacon DMA Interrupt 1 */
 382 RTL_IMR_BCNDOK8, /*Beacon Queue DMA OK Interrupt 8 */
 383 RTL_IMR_BCNDOK7, /*Beacon Queue DMA OK Interrupt 7 */
 384 RTL_IMR_BCNDOK6, /*Beacon Queue DMA OK Interrupt 6 */
 385 RTL_IMR_BCNDOK5, /*Beacon Queue DMA OK Interrupt 5 */
 386 RTL_IMR_BCNDOK4, /*Beacon Queue DMA OK Interrupt 4 */
 387 RTL_IMR_BCNDOK3, /*Beacon Queue DMA OK Interrupt 3 */
 388 RTL_IMR_BCNDOK2, /*Beacon Queue DMA OK Interrupt 2 */
 389 RTL_IMR_BCNDOK1, /*Beacon Queue DMA OK Interrupt 1 */
390 RTL_IMR_TIMEOUT2, /*Timeout interrupt 2 */
391 RTL_IMR_TIMEOUT1, /*Timeout interrupt 1 */
392 RTL_IMR_TXFOVW, /*Transmit FIFO Overflow */
393 RTL_IMR_PSTIMEOUT, /*Power save time out interrupt */
394 RTL_IMR_BcnInt, /*Beacon DMA Interrupt 0 */
395 RTL_IMR_RXFOVW, /*Receive FIFO Overflow */
396 RTL_IMR_RDU, /*Receive Descriptor Unavailable */
397 RTL_IMR_ATIMEND, /*For 92C,ATIM Window End Interrupt */
 398 RTL_IMR_BDOK, /*Beacon Queue DMA OK Interrupt */
 399 RTL_IMR_HIGHDOK, /*High Queue DMA OK Interrupt */
 400 RTL_IMR_TBDOK, /*Transmit Beacon OK interrupt */
401 RTL_IMR_MGNTDOK, /*Management Queue DMA OK Interrupt */
402 RTL_IMR_TBDER, /*For 92C,Transmit Beacon Error Interrupt */
403 RTL_IMR_BKDOK, /*AC_BK DMA OK Interrupt */
404 RTL_IMR_BEDOK, /*AC_BE DMA OK Interrupt */
405 RTL_IMR_VIDOK, /*AC_VI DMA OK Interrupt */
406 RTL_IMR_VODOK, /*AC_VO DMA Interrupt */
407 RTL_IMR_ROK, /*Receive DMA OK Interrupt */
408 RTL_IBSS_INT_MASKS, /*(RTL_IMR_BcnInt|RTL_IMR_TBDOK|RTL_IMR_TBDER)*/
409
410 /*CCK Rates, TxHT = 0 */
411 RTL_RC_CCK_RATE1M,
412 RTL_RC_CCK_RATE2M,
413 RTL_RC_CCK_RATE5_5M,
414 RTL_RC_CCK_RATE11M,
415
416 /*OFDM Rates, TxHT = 0 */
417 RTL_RC_OFDM_RATE6M,
418 RTL_RC_OFDM_RATE9M,
419 RTL_RC_OFDM_RATE12M,
420 RTL_RC_OFDM_RATE18M,
421 RTL_RC_OFDM_RATE24M,
422 RTL_RC_OFDM_RATE36M,
423 RTL_RC_OFDM_RATE48M,
424 RTL_RC_OFDM_RATE54M,
425
426 RTL_RC_HT_RATEMCS7,
427 RTL_RC_HT_RATEMCS15,
428
429 /*keep it last */
430 RTL_VAR_MAP_MAX,
431};
432
 433/*Firmware PS modes for controlling LPS.*/
434enum _fw_ps_mode {
435 FW_PS_ACTIVE_MODE = 0,
436 FW_PS_MIN_MODE = 1,
437 FW_PS_MAX_MODE = 2,
438 FW_PS_DTIM_MODE = 3,
439 FW_PS_VOIP_MODE = 4,
440 FW_PS_UAPSD_WMM_MODE = 5,
441 FW_PS_UAPSD_MODE = 6,
442 FW_PS_IBSS_MODE = 7,
443 FW_PS_WWLAN_MODE = 8,
444 FW_PS_PM_Radio_Off = 9,
445 FW_PS_PM_Card_Disable = 10,
446};
447
448enum rt_psmode {
449 EACTIVE, /*Active/Continuous access. */
450 EMAXPS, /*Max power save mode. */
451 EFASTPS, /*Fast power save mode. */
452 EAUTOPS, /*Auto power save mode. */
453};
454
455/*LED related.*/
456enum led_ctl_mode {
457 LED_CTL_POWER_ON = 1,
458 LED_CTL_LINK = 2,
459 LED_CTL_NO_LINK = 3,
460 LED_CTL_TX = 4,
461 LED_CTL_RX = 5,
462 LED_CTL_SITE_SURVEY = 6,
463 LED_CTL_POWER_OFF = 7,
464 LED_CTL_START_TO_LINK = 8,
465 LED_CTL_START_WPS = 9,
466 LED_CTL_STOP_WPS = 10,
467};
468
469enum rtl_led_pin {
470 LED_PIN_GPIO0,
471 LED_PIN_LED0,
472 LED_PIN_LED1,
473 LED_PIN_LED2
474};
475
476/*QoS related.*/
477/*acm implementation method.*/
478enum acm_method {
479 eAcmWay0_SwAndHw = 0,
480 eAcmWay1_HW = 1,
481 eAcmWay2_SW = 2,
482};
483
484/*aci/aifsn Field.
485Ref: WMM spec 2.2.2: WME Parameter Element, p.12.*/
486union aci_aifsn {
487 u8 char_data;
488
489 struct {
490 u8 aifsn:4;
491 u8 acm:1;
492 u8 aci:2;
493 u8 reserved:1;
494 } f; /* Field */
495};
496
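/*
 * Decoding sketch (illustrative; the raw value is a made-up example): the
 * aci_aifsn union overlays the one-byte ACI/AIFSN field of a WMM AC
 * parameter record, assuming the usual little-endian bitfield layout.
 */
static inline u8 example_aci_from_byte(u8 raw)
{
        union aci_aifsn param;

        param.char_data = raw;  /* e.g. 0x42 -> aifsn 2, acm 0, aci 2 (Video) */
        return param.f.aci;
}
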
497/*mlme related.*/
498enum wireless_mode {
499 WIRELESS_MODE_UNKNOWN = 0x00,
500 WIRELESS_MODE_A = 0x01,
501 WIRELESS_MODE_B = 0x02,
502 WIRELESS_MODE_G = 0x04,
503 WIRELESS_MODE_AUTO = 0x08,
504 WIRELESS_MODE_N_24G = 0x10,
505 WIRELESS_MODE_N_5G = 0x20
506};
507
508enum ratr_table_mode {
509 RATR_INX_WIRELESS_NGB = 0,
510 RATR_INX_WIRELESS_NG = 1,
511 RATR_INX_WIRELESS_NB = 2,
512 RATR_INX_WIRELESS_N = 3,
513 RATR_INX_WIRELESS_GB = 4,
514 RATR_INX_WIRELESS_G = 5,
515 RATR_INX_WIRELESS_B = 6,
516 RATR_INX_WIRELESS_MC = 7,
517 RATR_INX_WIRELESS_A = 8,
518};
519
520enum rtl_link_state {
521 MAC80211_NOLINK = 0,
522 MAC80211_LINKING = 1,
523 MAC80211_LINKED = 2,
524 MAC80211_LINKED_SCANNING = 3,
525};
526
527enum act_category {
528 ACT_CAT_QOS = 1,
529 ACT_CAT_DLS = 2,
530 ACT_CAT_BA = 3,
531 ACT_CAT_HT = 7,
532 ACT_CAT_WMM = 17,
533};
534
535enum ba_action {
536 ACT_ADDBAREQ = 0,
537 ACT_ADDBARSP = 1,
538 ACT_DELBA = 2,
539};
540
541struct octet_string {
542 u8 *octet;
543 u16 length;
544};
545
546struct rtl_hdr_3addr {
547 __le16 frame_ctl;
548 __le16 duration_id;
549 u8 addr1[ETH_ALEN];
550 u8 addr2[ETH_ALEN];
551 u8 addr3[ETH_ALEN];
552 __le16 seq_ctl;
553 u8 payload[0];
554} __packed;
555
556struct rtl_info_element {
557 u8 id;
558 u8 len;
559 u8 data[0];
560} __packed;
561
562struct rtl_probe_rsp {
563 struct rtl_hdr_3addr header;
564 u32 time_stamp[2];
565 __le16 beacon_interval;
566 __le16 capability;
567 /*SSID, supported rates, FH params, DS params,
568 CF params, IBSS params, TIM (if beacon), RSN */
569 struct rtl_info_element info_element[0];
570} __packed;
571
572/*LED related.*/
 573/*ledpin identifies how this SW LED is implemented.*/
574struct rtl_led {
575 void *hw;
576 enum rtl_led_pin ledpin;
577 bool b_ledon;
578};
579
580struct rtl_led_ctl {
581 bool bled_opendrain;
582 struct rtl_led sw_led0;
583 struct rtl_led sw_led1;
584};
585
586struct rtl_qos_parameters {
587 __le16 cw_min;
588 __le16 cw_max;
589 u8 aifs;
590 u8 flag;
591 __le16 tx_op;
592} __packed;
593
594struct rt_smooth_data {
595 u32 elements[100]; /*array to store values */
 596 u32 index; /*index of the next slot to store into */
597 u32 total_num; /*num of valid elements */
598 u32 total_val; /*sum of valid elements */
599};
600
601struct false_alarm_statistics {
602 u32 cnt_parity_fail;
603 u32 cnt_rate_illegal;
604 u32 cnt_crc8_fail;
605 u32 cnt_mcs_fail;
606 u32 cnt_ofdm_fail;
607 u32 cnt_cck_fail;
608 u32 cnt_all;
609};
610
611struct init_gain {
612 u8 xaagccore1;
613 u8 xbagccore1;
614 u8 xcagccore1;
615 u8 xdagccore1;
616 u8 cca;
617
618};
619
620struct wireless_stats {
621 unsigned long txbytesunicast;
622 unsigned long txbytesmulticast;
623 unsigned long txbytesbroadcast;
624 unsigned long rxbytesunicast;
625
626 long rx_snr_db[4];
 627 /*Correct smoothed ss in dBm, only used
 628 in the driver to report real power now. */
629 long recv_signal_power;
630 long signal_quality;
631 long last_sigstrength_inpercent;
632
633 u32 rssi_calculate_cnt;
634
 635 /*Transformed, in dBm. Beautified signal
 636 strength for UI, not correct. */
637 long signal_strength;
638
639 u8 rx_rssi_percentage[4];
640 u8 rx_evm_percentage[2];
641
642 struct rt_smooth_data ui_rssi;
643 struct rt_smooth_data ui_link_quality;
644};
645
646struct rate_adaptive {
647 u8 rate_adaptive_disabled;
648 u8 ratr_state;
649 u16 reserve;
650
651 u32 high_rssi_thresh_for_ra;
652 u32 high2low_rssi_thresh_for_ra;
653 u8 low2high_rssi_thresh_for_ra40m;
654 u32 low_rssi_thresh_for_ra40M;
655 u8 low2high_rssi_thresh_for_ra20m;
656 u32 low_rssi_thresh_for_ra20M;
657 u32 upper_rssi_threshold_ratr;
658 u32 middleupper_rssi_threshold_ratr;
659 u32 middle_rssi_threshold_ratr;
660 u32 middlelow_rssi_threshold_ratr;
661 u32 low_rssi_threshold_ratr;
662 u32 ultralow_rssi_threshold_ratr;
663 u32 low_rssi_threshold_ratr_40m;
664 u32 low_rssi_threshold_ratr_20m;
665 u8 ping_rssi_enable;
666 u32 ping_rssi_ratr;
667 u32 ping_rssi_thresh_for_ra;
668 u32 last_ratr;
669 u8 pre_ratr_state;
670};
671
672struct regd_pair_mapping {
673 u16 reg_dmnenum;
674 u16 reg_5ghz_ctl;
675 u16 reg_2ghz_ctl;
676};
677
678struct rtl_regulatory {
679 char alpha2[2];
680 u16 country_code;
681 u16 max_power_level;
682 u32 tp_scale;
683 u16 current_rd;
684 u16 current_rd_ext;
685 int16_t power_limit;
686 struct regd_pair_mapping *regpair;
687};
688
689struct rtl_rfkill {
690 bool rfkill_state; /*0 is off, 1 is on */
691};
692
693struct rtl_phy {
694 struct bb_reg_def phyreg_def[4]; /*Radio A/B/C/D */
695 struct init_gain initgain_backup;
696 enum io_type current_io_type;
697
698 u8 rf_mode;
699 u8 rf_type;
700 u8 current_chan_bw;
701 u8 set_bwmode_inprogress;
702 u8 sw_chnl_inprogress;
703 u8 sw_chnl_stage;
704 u8 sw_chnl_step;
705 u8 current_channel;
706 u8 h2c_box_num;
707 u8 set_io_inprogress;
708
709 /*record for power tracking*/
710 s32 reg_e94;
711 s32 reg_e9c;
712 s32 reg_ea4;
713 s32 reg_eac;
714 s32 reg_eb4;
715 s32 reg_ebc;
716 s32 reg_ec4;
717 s32 reg_ecc;
718 u8 rfpienable;
719 u8 reserve_0;
720 u16 reserve_1;
721 u32 reg_c04, reg_c08, reg_874;
722 u32 adda_backup[16];
723 u32 iqk_mac_backup[IQK_MAC_REG_NUM];
724 u32 iqk_bb_backup[10];
725
726 bool b_rfpi_enable;
727
728 u8 pwrgroup_cnt;
729 u8 bcck_high_power;
730 /* 3 groups of pwr diff by rates*/
731 u32 mcs_txpwrlevel_origoffset[4][16];
732 u8 default_initialgain[4];
733
734 /*the current Tx power level*/
735 u8 cur_cck_txpwridx;
736 u8 cur_ofdm24g_txpwridx;
737
738 u32 rfreg_chnlval[2];
739 bool b_apk_done;
740
741 /*fsync*/
742 u8 framesync;
743 u32 framesync_c34;
744
745 u8 num_total_rfpath;
746};
747
748#define MAX_TID_COUNT 9
749#define RTL_AGG_OFF 0
750#define RTL_AGG_ON 1
751#define RTL_AGG_EMPTYING_HW_QUEUE_ADDBA 2
752#define RTL_AGG_EMPTYING_HW_QUEUE_DELBA 3
753
754struct rtl_ht_agg {
755 u16 txq_id;
756 u16 wait_for_ba;
757 u16 start_idx;
758 u64 bitmap;
759 u32 rate_n_flags;
760 u8 agg_state;
761};
762
763struct rtl_tid_data {
764 u16 seq_number;
765 struct rtl_ht_agg agg;
766};
767
768struct rtl_priv;
769struct rtl_io {
770 struct device *dev;
771
772 /*PCI MEM map */
773 unsigned long pci_mem_end; /*shared mem end */
774 unsigned long pci_mem_start; /*shared mem start */
775
776 /*PCI IO map */
777 unsigned long pci_base_addr; /*device I/O address */
778
779 void (*write8_async) (struct rtl_priv *rtlpriv, u32 addr, u8 val);
780 void (*write16_async) (struct rtl_priv *rtlpriv, u32 addr, u16 val);
781 void (*write32_async) (struct rtl_priv *rtlpriv, u32 addr, u32 val);
782
783 u8(*read8_sync) (struct rtl_priv *rtlpriv, u32 addr);
784 u16(*read16_sync) (struct rtl_priv *rtlpriv, u32 addr);
785 u32(*read32_sync) (struct rtl_priv *rtlpriv, u32 addr);
786
787};
788
789struct rtl_mac {
790 u8 mac_addr[ETH_ALEN];
791 u8 mac80211_registered;
792 u8 beacon_enabled;
793
794 u32 tx_ss_num;
795 u32 rx_ss_num;
796
797 struct ieee80211_supported_band bands[IEEE80211_NUM_BANDS];
798 struct ieee80211_hw *hw;
799 struct ieee80211_vif *vif;
800 enum nl80211_iftype opmode;
801
802 /*Probe Beacon management */
803 struct rtl_tid_data tids[MAX_TID_COUNT];
804 enum rtl_link_state link_state;
805
806 int n_channels;
807 int n_bitrates;
808
809 /*filters */
810 u32 rx_conf;
811 u16 rx_mgt_filter;
812 u16 rx_ctrl_filter;
813 u16 rx_data_filter;
814
815 bool act_scanning;
816 u8 cnt_after_linked;
817
818 /*RDG*/ bool rdg_en;
819
820 /*AP*/ u8 bssid[6];
821 u8 mcs[16]; /*16 bytes mcs for HT rates.*/
822 u32 basic_rates; /*b/g rates*/
823 u8 ht_enable;
824 u8 sgi_40;
825 u8 sgi_20;
826 u8 bw_40;
827 u8 mode; /*wireless mode*/
828 u8 slot_time;
829 u8 short_preamble;
830 u8 use_cts_protect;
831 u8 cur_40_prime_sc;
832 u8 cur_40_prime_sc_bk;
833 u64 tsf;
834 u8 retry_short;
835 u8 retry_long;
836 u16 assoc_id;
837
838 /*IBSS*/ int beacon_interval;
839
840 /*AMPDU*/ u8 min_space_cfg; /*For Min spacing configurations */
841 u8 max_mss_density;
842 u8 current_ampdu_factor;
843 u8 current_ampdu_density;
844
845 /*QOS & EDCA */
846 struct ieee80211_tx_queue_params edca_param[RTL_MAC80211_NUM_QUEUE];
847 struct rtl_qos_parameters ac[AC_MAX];
848};
849
850struct rtl_hal {
851 struct ieee80211_hw *hw;
852
853 enum intf_type interface;
854 u16 hw_type; /*92c or 92d or 92s and so on */
855 u8 oem_id;
856 u8 version; /*version of chip */
857 u8 state; /*stop 0, start 1 */
858
859 /*firmware */
860 u8 *pfirmware;
861 bool b_h2c_setinprogress;
862 u8 last_hmeboxnum;
863 bool bfw_ready;
864 /*Reserve page start offset except beacon in TxQ. */
865 u8 fw_rsvdpage_startoffset;
866};
867
868struct rtl_security {
869 /*default 0 */
870 bool use_sw_sec;
871
872 bool being_setkey;
873 bool use_defaultkey;
874 /*Encryption Algorithm for Unicast Packet */
875 enum rt_enc_alg pairwise_enc_algorithm;
 876 /*Encryption Algorithm for Broadcast/Multicast */
877 enum rt_enc_alg group_enc_algorithm;
878
 879 /*local key buffer, index 0 is for the
 880 pairwise key, 1-4 are for group keys. */
881 u8 key_buf[KEY_BUF_SIZE][MAX_KEY_LEN];
882 u8 key_len[KEY_BUF_SIZE];
883
 884 /*Pointer to the pairwise key;
 885 it always points to key_buf[4] */
886 u8 *pairwise_key;
887};
888
889struct rtl_dm {
890 /*PHY status for DM */
891 long entry_min_undecoratedsmoothed_pwdb;
892 long undecorated_smoothed_pwdb; /*out dm */
893 long entry_max_undecoratedsmoothed_pwdb;
894 bool b_dm_initialgain_enable;
895 bool bdynamic_txpower_enable;
896 bool bcurrent_turbo_edca;
897 bool bis_any_nonbepkts; /*out dm */
898 bool bis_cur_rdlstate;
899 bool btxpower_trackingInit;
900 bool b_disable_framebursting;
901 bool b_cck_inch14;
902 bool btxpower_tracking;
903 bool b_useramask;
904 bool brfpath_rxenable[4];
905
906 u8 thermalvalue_iqk;
907 u8 thermalvalue_lck;
908 u8 thermalvalue;
909 u8 last_dtp_lvl;
910 u8 dynamic_txhighpower_lvl; /*Tx high power level */
 911 u8 dm_flag; /*Indicates each dynamic mechanism's status. */
912 u8 dm_type;
913 u8 txpower_track_control;
914
915 char ofdm_index[2];
916 char cck_index;
917};
918
919#define EFUSE_MAX_LOGICAL_SIZE 128
920
921struct rtl_efuse {
922 bool bautoLoad_ok;
923 bool bootfromefuse;
924 u16 max_physical_size;
925 u8 contents[EFUSE_MAX_LOGICAL_SIZE];
926
927 u8 efuse_map[2][EFUSE_MAX_LOGICAL_SIZE];
928 u16 efuse_usedbytes;
929 u8 efuse_usedpercentage;
930
931 u8 autoload_failflag;
932
933 short epromtype;
934 u16 eeprom_vid;
935 u16 eeprom_did;
936 u16 eeprom_svid;
937 u16 eeprom_smid;
938 u8 eeprom_oemid;
939 u16 eeprom_channelplan;
940 u8 eeprom_version;
941
942 u8 dev_addr[6];
943
944 bool b_txpwr_fromeprom;
945 u8 eeprom_tssi[2];
946 u8 eeprom_pwrlimit_ht20[3];
947 u8 eeprom_pwrlimit_ht40[3];
948 u8 eeprom_chnlarea_txpwr_cck[2][3];
949 u8 eeprom_chnlarea_txpwr_ht40_1s[2][3];
950 u8 eeprom_chnlarea_txpwr_ht40_2sdiif[2][3];
951 u8 txpwrlevel_cck[2][14];
952 u8 txpwrlevel_ht40_1s[2][14]; /*For HT 40MHZ pwr */
953 u8 txpwrlevel_ht40_2s[2][14]; /*For HT 40MHZ pwr */
954
955 /*For power group */
956 u8 pwrgroup_ht20[2][14];
957 u8 pwrgroup_ht40[2][14];
958
959 char txpwr_ht20diff[2][14]; /*HT 20<->40 Pwr diff */
960 u8 txpwr_legacyhtdiff[2][14]; /*For HT<->legacy pwr diff */
961
962 u8 eeprom_regulatory;
963 u8 eeprom_thermalmeter;
964 /*ThermalMeter, index 0 for RFIC0, and 1 for RFIC1 */
965 u8 thermalmeter[2];
966
967 u8 legacy_ht_txpowerdiff; /*Legacy to HT rate power diff */
968 bool b_apk_thermalmeterignore;
969};
970
971struct rtl_ps_ctl {
972 bool set_rfpowerstate_inprogress;
973 bool b_in_powersavemode;
974 bool rfchange_inprogress;
975 bool b_swrf_processing;
976 bool b_hwradiooff;
977
978 u32 last_sleep_jiffies;
979 u32 last_awake_jiffies;
980 u32 last_delaylps_stamp_jiffies;
981
982 /*
983 * just for PCIE ASPM
984 * If it supports ASPM, Offset[560h] = 0x40,
985 * otherwise Offset[560h] = 0x00.
986 * */
987 bool b_support_aspm;
988 bool b_support_backdoor;
989
990 /*for LPS */
991 enum rt_psmode dot11_psmode; /*Power save mode configured. */
992 bool b_leisure_ps;
993 bool b_fwctrl_lps;
994 u8 fwctrl_psmode;
995 /*For Fw control LPS mode */
996 u8 b_reg_fwctrl_lps;
997 /*Record Fw PS mode status. */
998 bool b_fw_current_inpsmode;
999 u8 reg_max_lps_awakeintvl;
1000 bool report_linked;
1001
1002 /*for IPS */
1003 bool b_inactiveps;
1004
1005 u32 rfoff_reason;
1006
1007 /*RF OFF Level */
1008 u32 cur_ps_level;
1009 u32 reg_rfps_level;
1010
1011 /*just for PCIE ASPM */
1012 u8 const_amdpci_aspm;
1013
1014 enum rf_pwrstate inactive_pwrstate;
1015 enum rf_pwrstate rfpwr_state; /*cur power state */
1016};
1017
1018struct rtl_stats {
1019 u32 mac_time[2];
1020 s8 rssi;
1021 u8 signal;
1022 u8 noise;
1023 u16 rate; /*in 100 kbps */
1024 u8 received_channel;
1025 u8 control;
1026 u8 mask;
1027 u8 freq;
1028 u16 len;
1029 u64 tsf;
1030 u32 beacon_time;
1031 u8 nic_type;
1032 u16 length;
1033 u8 signalquality; /*in 0-100 index. */
1034 /*
1035 * Real power in dBm for this packet,
1036 * no beautification and aggregation.
1037 * */
1038 s32 recvsignalpower;
1039 s8 rxpower; /*in dBm Translate from PWdB */
1040 u8 signalstrength; /*in 0-100 index. */
1041 u16 b_hwerror:1;
1042 u16 b_crc:1;
1043 u16 b_icv:1;
1044 u16 b_shortpreamble:1;
1045 u16 antenna:1;
1046 u16 decrypted:1;
1047 u16 wakeup:1;
1048 u32 timestamp_low;
1049 u32 timestamp_high;
1050
1051 u8 rx_drvinfo_size;
1052 u8 rx_bufshift;
1053 bool b_isampdu;
1054 bool rx_is40Mhzpacket;
1055 u32 rx_pwdb_all;
1056 u8 rx_mimo_signalstrength[4]; /*in 0~100 index */
1057 s8 rx_mimo_signalquality[2];
1058 bool b_packet_matchbssid;
1059 bool b_is_cck;
1060 bool b_packet_toself;
1061 bool b_packet_beacon; /*for rssi */
1062 char cck_adc_pwdb[4]; /*for rx path selection */
1063};
1064
1065struct rt_link_detect {
1066 u32 num_tx_in4period[4];
1067 u32 num_rx_in4period[4];
1068
1069 u32 num_tx_inperiod;
1070 u32 num_rx_inperiod;
1071
1072 bool b_busytraffic;
1073 bool b_higher_busytraffic;
1074 bool b_higher_busyrxtraffic;
1075};
1076
1077struct rtl_tcb_desc {
1078 u8 b_packet_bw:1;
1079 u8 b_multicast:1;
1080 u8 b_broadcast:1;
1081
1082 u8 b_rts_stbc:1;
1083 u8 b_rts_enable:1;
1084 u8 b_cts_enable:1;
1085 u8 b_rts_use_shortpreamble:1;
1086 u8 b_rts_use_shortgi:1;
1087 u8 rts_sc:1;
1088 u8 b_rts_bw:1;
1089 u8 rts_rate;
1090
1091 u8 use_shortgi:1;
1092 u8 use_shortpreamble:1;
1093 u8 use_driver_rate:1;
1094 u8 disable_ratefallback:1;
1095
1096 u8 ratr_index;
1097 u8 mac_id;
1098 u8 hw_rate;
1099};
1100
1101struct rtl_hal_ops {
1102 int (*init_sw_vars) (struct ieee80211_hw *hw);
1103 void (*deinit_sw_vars) (struct ieee80211_hw *hw);
1104 void (*read_eeprom_info) (struct ieee80211_hw *hw);
1105 void (*interrupt_recognized) (struct ieee80211_hw *hw,
1106 u32 *p_inta, u32 *p_intb);
1107 int (*hw_init) (struct ieee80211_hw *hw);
1108 void (*hw_disable) (struct ieee80211_hw *hw);
1109 void (*enable_interrupt) (struct ieee80211_hw *hw);
1110 void (*disable_interrupt) (struct ieee80211_hw *hw);
1111 int (*set_network_type) (struct ieee80211_hw *hw,
1112 enum nl80211_iftype type);
1113 void (*set_bw_mode) (struct ieee80211_hw *hw,
1114 enum nl80211_channel_type ch_type);
1115 u8(*switch_channel) (struct ieee80211_hw *hw);
1116 void (*set_qos) (struct ieee80211_hw *hw, int aci);
1117 void (*set_bcn_reg) (struct ieee80211_hw *hw);
1118 void (*set_bcn_intv) (struct ieee80211_hw *hw);
1119 void (*update_interrupt_mask) (struct ieee80211_hw *hw,
1120 u32 add_msr, u32 rm_msr);
1121 void (*get_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
1122 void (*set_hw_reg) (struct ieee80211_hw *hw, u8 variable, u8 *val);
1123 void (*update_rate_table) (struct ieee80211_hw *hw);
1124 void (*update_rate_mask) (struct ieee80211_hw *hw, u8 rssi_level);
1125 void (*fill_tx_desc) (struct ieee80211_hw *hw,
1126 struct ieee80211_hdr *hdr, u8 *pdesc_tx,
1127 struct ieee80211_tx_info *info,
1128 struct sk_buff *skb, unsigned int queue_index);
1129 void (*fill_tx_cmddesc) (struct ieee80211_hw *hw, u8 *pdesc,
1130 bool b_firstseg, bool b_lastseg,
1131 struct sk_buff *skb);
1132 bool(*query_rx_desc) (struct ieee80211_hw *hw,
1133 struct rtl_stats *stats,
1134 struct ieee80211_rx_status *rx_status,
1135 u8 *pdesc, struct sk_buff *skb);
1136 void (*set_channel_access) (struct ieee80211_hw *hw);
1137 bool(*radio_onoff_checking) (struct ieee80211_hw *hw, u8 *valid);
1138 void (*dm_watchdog) (struct ieee80211_hw *hw);
1139 void (*scan_operation_backup) (struct ieee80211_hw *hw, u8 operation);
1140 bool(*set_rf_power_state) (struct ieee80211_hw *hw,
1141 enum rf_pwrstate rfpwr_state);
1142 void (*led_control) (struct ieee80211_hw *hw,
1143 enum led_ctl_mode ledaction);
1144 void (*set_desc) (u8 *pdesc, bool istx, u8 desc_name, u8 *val);
1145 u32(*get_desc) (u8 *pdesc, bool istx, u8 desc_name);
1146 void (*tx_polling) (struct ieee80211_hw *hw, unsigned int hw_queue);
1147 void (*enable_hw_sec) (struct ieee80211_hw *hw);
1148 void (*set_key) (struct ieee80211_hw *hw, u32 key_index,
1149 u8 *p_macaddr, bool is_group, u8 enc_algo,
1150 bool is_wepkey, bool clear_all);
1151 void (*init_sw_leds) (struct ieee80211_hw *hw);
1152 void (*deinit_sw_leds) (struct ieee80211_hw *hw);
1153 u32(*get_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask);
1154 void (*set_bbreg) (struct ieee80211_hw *hw, u32 regaddr, u32 bitmask,
1155 u32 data);
1156 u32(*get_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1157 u32 regaddr, u32 bitmask);
1158 void (*set_rfreg) (struct ieee80211_hw *hw, enum radio_path rfpath,
1159 u32 regaddr, u32 bitmask, u32 data);
1160};
1161
1162struct rtl_intf_ops {
1163 /*com */
1164 int (*adapter_start) (struct ieee80211_hw *hw);
1165 void (*adapter_stop) (struct ieee80211_hw *hw);
1166
1167 int (*adapter_tx) (struct ieee80211_hw *hw, struct sk_buff *skb);
1168 int (*reset_trx_ring) (struct ieee80211_hw *hw);
1169
1170 /*pci */
1171 void (*disable_aspm) (struct ieee80211_hw *hw);
1172 void (*enable_aspm) (struct ieee80211_hw *hw);
1173
1174 /*usb */
1175};
1176
1177struct rtl_mod_params {
1178 /* default: 0 = using hardware encryption */
1179 int sw_crypto;
1180};
1181
1182struct rtl_hal_cfg {
1183 char *name;
1184 char *fw_name;
1185 struct rtl_hal_ops *ops;
1186 struct rtl_mod_params *mod_params;
1187
 1188 /*this map is used for some registers or vars
 1189 defined in the HAL but used in MAIN */
1190 u32 maps[RTL_VAR_MAP_MAX];
1191
1192};
1193
1194struct rtl_locks {
1195 /* mutex */
1196 struct mutex conf_mutex;
1197
1198 /*spin lock */
1199 spinlock_t ips_lock;
1200 spinlock_t irq_th_lock;
1201 spinlock_t h2c_lock;
1202 spinlock_t rf_ps_lock;
1203 spinlock_t rf_lock;
1204 spinlock_t lps_lock;
1205};
1206
1207struct rtl_works {
1208 struct ieee80211_hw *hw;
1209
1210 /*timer */
1211 struct timer_list watchdog_timer;
1212
1213 /*task */
1214 struct tasklet_struct irq_tasklet;
1215 struct tasklet_struct irq_prepare_bcn_tasklet;
1216
1217 /*work queue */
1218 struct workqueue_struct *rtl_wq;
1219 struct delayed_work watchdog_wq;
1220 struct delayed_work ips_nic_off_wq;
1221};
1222
1223struct rtl_debug {
1224 u32 dbgp_type[DBGP_TYPE_MAX];
1225 u32 global_debuglevel;
1226 u64 global_debugcomponents;
1227};
1228
1229struct rtl_priv {
1230 struct rtl_locks locks;
1231 struct rtl_works works;
1232 struct rtl_mac mac80211;
1233 struct rtl_hal rtlhal;
1234 struct rtl_regulatory regd;
1235 struct rtl_rfkill rfkill;
1236 struct rtl_io io;
1237 struct rtl_phy phy;
1238 struct rtl_dm dm;
1239 struct rtl_security sec;
1240 struct rtl_efuse efuse;
1241
1242 struct rtl_ps_ctl psc;
1243 struct rate_adaptive ra;
1244 struct wireless_stats stats;
1245 struct rt_link_detect link_info;
1246 struct false_alarm_statistics falsealm_cnt;
1247
1248 struct rtl_rate_priv *rate_priv;
1249
1250 struct rtl_debug dbg;
1251
1252 /*
1253 *hal_cfg : for diff cards
 1254 *intf_ops : for diff interfaces (usb/pcie)
1255 */
1256 struct rtl_hal_cfg *cfg;
1257 struct rtl_intf_ops *intf_ops;
1258
 1259 /*this var will be set by set_bit,
 1260 and is used to indicate the status of
 1261 the interface or hardware */
1262 unsigned long status;
1263
1264 /*This must be the last item so
1265 that it points to the data allocated
1266 beyond this structure like:
1267 rtl_pci_priv or rtl_usb_priv */
1268 u8 priv[0];
1269};
1270
1271#define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv))
1272#define rtl_mac(rtlpriv) (&((rtlpriv)->mac80211))
1273#define rtl_hal(rtlpriv) (&((rtlpriv)->rtlhal))
1274#define rtl_efuse(rtlpriv) (&((rtlpriv)->efuse))
1275#define rtl_psc(rtlpriv) (&((rtlpriv)->psc))
1276
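/*
 * Usage sketch (illustrative only): the bus glue allocates struct rtl_priv
 * plus its own private data in one ieee80211_alloc_hw() call, so the
 * trailing priv[0] member is where e.g. rtl_pci_priv lives, and the helper
 * macros above recover the embedded sub-structures from hw->priv.
 */
static inline struct rtl_mac *example_hw_to_mac(struct ieee80211_hw *hw)
{
        struct rtl_priv *rtlpriv = rtl_priv(hw);
        void *bus_priv = rtlpriv->priv;         /* bus-specific private area */

        (void)bus_priv;
        return rtl_mac(rtlpriv);
}
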
1277/****************************************
1278 mem access macro define start
 1279 Call the endian-free functions when:
 1280 1. Reading/writing packet content.
 1281 2. Before writing an integer to IO.
 1282 3. After reading an integer from IO.
1283****************************************/
 1284/* Convert little-endian data to host byte order */
1285#define EF1BYTE(_val) \
1286 ((u8)(_val))
1287#define EF2BYTE(_val) \
1288 (le16_to_cpu(_val))
1289#define EF4BYTE(_val) \
1290 (le32_to_cpu(_val))
1291
1292/* Read data from memory */
1293#define READEF1BYTE(_ptr) \
1294 EF1BYTE(*((u8 *)(_ptr)))
1295#define READEF2BYTE(_ptr) \
1296 EF2BYTE(*((u16 *)(_ptr)))
1297#define READEF4BYTE(_ptr) \
1298 EF4BYTE(*((u32 *)(_ptr)))
1299
1300/* Write data to memory */
1301#define WRITEEF1BYTE(_ptr, _val) \
1302 (*((u8 *)(_ptr))) = EF1BYTE(_val)
1303#define WRITEEF2BYTE(_ptr, _val) \
1304 (*((u16 *)(_ptr))) = EF2BYTE(_val)
1305#define WRITEEF4BYTE(_ptr, _val) \
1306 (*((u32 *)(_ptr))) = EF4BYTE(_val)
1307
1308/*Example:
1309BIT_LEN_MASK_32(0) => 0x00000000
1310BIT_LEN_MASK_32(1) => 0x00000001
1311BIT_LEN_MASK_32(2) => 0x00000003
1312BIT_LEN_MASK_32(32) => 0xFFFFFFFF*/
1313#define BIT_LEN_MASK_32(__bitlen) \
1314 (0xFFFFFFFF >> (32 - (__bitlen)))
1315#define BIT_LEN_MASK_16(__bitlen) \
1316 (0xFFFF >> (16 - (__bitlen)))
1317#define BIT_LEN_MASK_8(__bitlen) \
1318 (0xFF >> (8 - (__bitlen)))
1319
1320/*Example:
1321BIT_OFFSET_LEN_MASK_32(0, 2) => 0x00000003
1322BIT_OFFSET_LEN_MASK_32(16, 2) => 0x00030000*/
1323#define BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen) \
1324 (BIT_LEN_MASK_32(__bitlen) << (__bitoffset))
1325#define BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen) \
1326 (BIT_LEN_MASK_16(__bitlen) << (__bitoffset))
1327#define BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen) \
1328 (BIT_LEN_MASK_8(__bitlen) << (__bitoffset))
1329
1330/*Description:
1331Return 4-byte value in host byte ordering from
13324-byte pointer in little-endian system.*/
1333#define LE_P4BYTE_TO_HOST_4BYTE(__pstart) \
1334 (EF4BYTE(*((u32 *)(__pstart))))
1335#define LE_P2BYTE_TO_HOST_2BYTE(__pstart) \
1336 (EF2BYTE(*((u16 *)(__pstart))))
1337#define LE_P1BYTE_TO_HOST_1BYTE(__pstart) \
1338 (EF1BYTE(*((u8 *)(__pstart))))
1339
1340/*Description:
1341Translate subfield (continuous bits in little-endian) of 4-byte
1342value to host byte ordering.*/
1343#define LE_BITS_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
1344 ( \
1345 (LE_P4BYTE_TO_HOST_4BYTE(__pstart) >> (__bitoffset)) & \
1346 BIT_LEN_MASK_32(__bitlen) \
1347 )
1348#define LE_BITS_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
1349 ( \
1350 (LE_P2BYTE_TO_HOST_2BYTE(__pstart) >> (__bitoffset)) & \
1351 BIT_LEN_MASK_16(__bitlen) \
1352 )
1353#define LE_BITS_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
1354 ( \
1355 (LE_P1BYTE_TO_HOST_1BYTE(__pstart) >> (__bitoffset)) & \
1356 BIT_LEN_MASK_8(__bitlen) \
1357 )
1358
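/*
 * Worked example (illustrative, not from the original source): for a buffer
 * holding the little-endian dword 0x12345678, i.e. the bytes
 * {0x78, 0x56, 0x34, 0x12}, the extraction macros behave as follows.
 */
static inline void example_le_bits_extract(void)
{
        u8 buf[4] = {0x78, 0x56, 0x34, 0x12};
        u32 mid_byte = LE_BITS_TO_4BYTE(buf, 8, 8);     /* bits 8..15 -> 0x56 */
        u32 low_nibble = LE_BITS_TO_4BYTE(buf, 0, 4);   /* bits 0..3  -> 0x8  */

        (void)mid_byte;
        (void)low_nibble;
}
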
1359/*Description:
1360Mask subfield (continuous bits in little-endian) of 4-byte value
1361and return the result in 4-byte value in host byte ordering.*/
1362#define LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) \
1363 ( \
1364 LE_P4BYTE_TO_HOST_4BYTE(__pstart) & \
1365 (~BIT_OFFSET_LEN_MASK_32(__bitoffset, __bitlen)) \
1366 )
1367#define LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) \
1368 ( \
1369 LE_P2BYTE_TO_HOST_2BYTE(__pstart) & \
1370 (~BIT_OFFSET_LEN_MASK_16(__bitoffset, __bitlen)) \
1371 )
1372#define LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) \
1373 ( \
1374 LE_P1BYTE_TO_HOST_1BYTE(__pstart) & \
1375 (~BIT_OFFSET_LEN_MASK_8(__bitoffset, __bitlen)) \
1376 )
1377
1378/*Description:
1379Set subfield of little-endian 4-byte value to specified value. */
1380#define SET_BITS_TO_LE_4BYTE(__pstart, __bitoffset, __bitlen, __val) \
1381 *((u32 *)(__pstart)) = EF4BYTE \
1382 ( \
1383 LE_BITS_CLEARED_TO_4BYTE(__pstart, __bitoffset, __bitlen) | \
1384 ((((u32)__val) & BIT_LEN_MASK_32(__bitlen)) << (__bitoffset)) \
1385 );
1386#define SET_BITS_TO_LE_2BYTE(__pstart, __bitoffset, __bitlen, __val) \
1387 *((u16 *)(__pstart)) = EF2BYTE \
1388 ( \
1389 LE_BITS_CLEARED_TO_2BYTE(__pstart, __bitoffset, __bitlen) | \
1390 ((((u16)__val) & BIT_LEN_MASK_16(__bitlen)) << (__bitoffset)) \
1391 );
1392#define SET_BITS_TO_LE_1BYTE(__pstart, __bitoffset, __bitlen, __val) \
1393 *((u8 *)(__pstart)) = EF1BYTE \
1394 ( \
1395 LE_BITS_CLEARED_TO_1BYTE(__pstart, __bitoffset, __bitlen) | \
1396 ((((u8)__val) & BIT_LEN_MASK_8(__bitlen)) << (__bitoffset)) \
1397 );
1398
1399/****************************************
1400 mem access macro define end
1401****************************************/
1402
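/*
 * A minimal round-trip sketch (illustrative): writing a sub-field with
 * SET_BITS_TO_LE_4BYTE() and reading it back with LE_BITS_TO_4BYTE(), which
 * is exactly what the descriptor accessors in the per-chip trx.h headers do.
 */
static inline void example_le_bits_roundtrip(void)
{
        u8 dword[4] = {0, 0, 0, 0};
        u32 readback;

        SET_BITS_TO_LE_4BYTE(dword, 4, 8, 0xAB);        /* bits 4..11 := 0xAB */
        readback = LE_BITS_TO_4BYTE(dword, 4, 8);       /* readback == 0xAB */
        (void)readback;
}
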
1403#define packet_get_type(_packet) (EF1BYTE((_packet).octet[0]) & 0xFC)
1404#define RTL_WATCH_DOG_TIME 2000
1405#define MSECS(t) msecs_to_jiffies(t)
1406#define WLAN_FC_GET_VERS(fc) ((fc) & IEEE80211_FCTL_VERS)
1407#define WLAN_FC_GET_TYPE(fc) ((fc) & IEEE80211_FCTL_FTYPE)
1408#define WLAN_FC_GET_STYPE(fc) ((fc) & IEEE80211_FCTL_STYPE)
1409#define WLAN_FC_MORE_DATA(fc) ((fc) & IEEE80211_FCTL_MOREDATA)
1410#define SEQ_TO_SN(seq) (((seq) & IEEE80211_SCTL_SEQ) >> 4)
1411#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
1412#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
1413
1414#define RT_RF_OFF_LEVL_ASPM BIT(0) /*PCI ASPM */
1415#define RT_RF_OFF_LEVL_CLK_REQ BIT(1) /*PCI clock request */
1416#define RT_RF_OFF_LEVL_PCI_D3 BIT(2) /*PCI D3 mode */
1417/*NIC halt, re-initialize hw parameters*/
1418#define RT_RF_OFF_LEVL_HALT_NIC BIT(3)
1419#define RT_RF_OFF_LEVL_FREE_FW BIT(4) /*FW free, re-download the FW */
1420#define RT_RF_OFF_LEVL_FW_32K BIT(5) /*FW in 32k */
1421/*Always enable ASPM and Clock Req in initialization.*/
1422#define RT_RF_PS_LEVEL_ALWAYS_ASPM BIT(6)
 1423/*When LPS is on, disable 2R if no packet is received or transmitted.*/
1424#define RT_RF_LPS_DISALBE_2R BIT(30)
1425#define RT_RF_LPS_LEVEL_ASPM BIT(31) /*LPS with ASPM */
1426#define RT_IN_PS_LEVEL(ppsc, _ps_flg) \
1427 ((ppsc->cur_ps_level & _ps_flg) ? true : false)
1428#define RT_CLEAR_PS_LEVEL(ppsc, _ps_flg) \
1429 (ppsc->cur_ps_level &= (~(_ps_flg)))
1430#define RT_SET_PS_LEVEL(ppsc, _ps_flg) \
1431 (ppsc->cur_ps_level |= _ps_flg)
1432
1433#define container_of_dwork_rtl(x, y, z) \
1434 container_of(container_of(x, struct delayed_work, work), y, z)
1435
1436#define FILL_OCTET_STRING(_os, _octet, _len) \
1437 (_os).octet = (u8 *)(_octet); \
1438 (_os).length = (_len);
1439
1440#define CP_MACADDR(des, src) \
1441 ((des)[0] = (src)[0], (des)[1] = (src)[1],\
1442 (des)[2] = (src)[2], (des)[3] = (src)[3],\
1443 (des)[4] = (src)[4], (des)[5] = (src)[5])
1444
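/*
 * Sketch of container_of_dwork_rtl() in a work handler (the handler name is
 * hypothetical): it maps the work_struct embedded in a delayed_work back to
 * the structure that owns the delayed_work member, here struct rtl_works.
 */
static inline struct rtl_works *example_work_to_works(struct work_struct *work)
{
        return container_of_dwork_rtl(work, struct rtl_works, watchdog_wq);
}
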
1445static inline u8 rtl_read_byte(struct rtl_priv *rtlpriv, u32 addr)
1446{
1447 return rtlpriv->io.read8_sync(rtlpriv, addr);
1448}
1449
1450static inline u16 rtl_read_word(struct rtl_priv *rtlpriv, u32 addr)
1451{
1452 return rtlpriv->io.read16_sync(rtlpriv, addr);
1453}
1454
1455static inline u32 rtl_read_dword(struct rtl_priv *rtlpriv, u32 addr)
1456{
1457 return rtlpriv->io.read32_sync(rtlpriv, addr);
1458}
1459
1460static inline void rtl_write_byte(struct rtl_priv *rtlpriv, u32 addr, u8 val8)
1461{
1462 rtlpriv->io.write8_async(rtlpriv, addr, val8);
1463}
1464
1465static inline void rtl_write_word(struct rtl_priv *rtlpriv, u32 addr, u16 val16)
1466{
1467 rtlpriv->io.write16_async(rtlpriv, addr, val16);
1468}
1469
1470static inline void rtl_write_dword(struct rtl_priv *rtlpriv,
1471 u32 addr, u32 val32)
1472{
1473 rtlpriv->io.write32_async(rtlpriv, addr, val32);
1474}
1475
1476static inline u32 rtl_get_bbreg(struct ieee80211_hw *hw,
1477 u32 regaddr, u32 bitmask)
1478{
1479 return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_bbreg(hw,
1480 regaddr,
1481 bitmask);
1482}
1483
1484static inline void rtl_set_bbreg(struct ieee80211_hw *hw, u32 regaddr,
1485 u32 bitmask, u32 data)
1486{
1487 ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_bbreg(hw,
1488 regaddr, bitmask,
1489 data);
1490
1491}
1492
1493static inline u32 rtl_get_rfreg(struct ieee80211_hw *hw,
1494 enum radio_path rfpath, u32 regaddr,
1495 u32 bitmask)
1496{
1497 return ((struct rtl_priv *)(hw)->priv)->cfg->ops->get_rfreg(hw,
1498 rfpath,
1499 regaddr,
1500 bitmask);
1501}
1502
1503static inline void rtl_set_rfreg(struct ieee80211_hw *hw,
1504 enum radio_path rfpath, u32 regaddr,
1505 u32 bitmask, u32 data)
1506{
1507 ((struct rtl_priv *)(hw)->priv)->cfg->ops->set_rfreg(hw,
1508 rfpath, regaddr,
1509 bitmask, data);
1510}
1511
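/*
 * Usage sketch (the register address 0x0800 and BIT(24) are placeholders,
 * not real register definitions): the inline wrappers above route register
 * access through the per-chip HAL ops, so common code never touches the bus
 * directly.
 */
static inline void example_toggle_bb_bit(struct ieee80211_hw *hw)
{
        u32 val = rtl_get_bbreg(hw, 0x0800, BIT(24));   /* read one bit field */

        rtl_set_bbreg(hw, 0x0800, BIT(24), !val);       /* write it back toggled */
}
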
1512static inline bool is_hal_stop(struct rtl_hal *rtlhal)
1513{
1514 return (_HAL_STATE_STOP == rtlhal->state);
1515}
1516
1517static inline void set_hal_start(struct rtl_hal *rtlhal)
1518{
1519 rtlhal->state = _HAL_STATE_START;
1520}
1521
1522static inline void set_hal_stop(struct rtl_hal *rtlhal)
1523{
1524 rtlhal->state = _HAL_STATE_STOP;
1525}
1526
1527static inline u8 get_rf_type(struct rtl_phy *rtlphy)
1528{
1529 return rtlphy->rf_type;
1530}
1531
1532#endif
diff --git a/drivers/net/wireless/wl1251/boot.c b/drivers/net/wireless/wl1251/boot.c
index 61572dfa1f60..d729daf8e841 100644
--- a/drivers/net/wireless/wl1251/boot.c
+++ b/drivers/net/wireless/wl1251/boot.c
@@ -19,7 +19,6 @@
19 * 19 *
20 */ 20 */
21 21
22#include <linux/gpio.h>
23#include <linux/slab.h> 22#include <linux/slab.h>
24 23
25#include "reg.h" 24#include "reg.h"
diff --git a/drivers/net/wireless/wl1251/main.c b/drivers/net/wireless/wl1251/main.c
index 7a8762553cdc..012e1a4016fe 100644
--- a/drivers/net/wireless/wl1251/main.c
+++ b/drivers/net/wireless/wl1251/main.c
@@ -52,14 +52,14 @@ void wl1251_disable_interrupts(struct wl1251 *wl)
52 wl->if_ops->disable_irq(wl); 52 wl->if_ops->disable_irq(wl);
53} 53}
54 54
55static void wl1251_power_off(struct wl1251 *wl) 55static int wl1251_power_off(struct wl1251 *wl)
56{ 56{
57 wl->set_power(false); 57 return wl->if_ops->power(wl, false);
58} 58}
59 59
60static void wl1251_power_on(struct wl1251 *wl) 60static int wl1251_power_on(struct wl1251 *wl)
61{ 61{
62 wl->set_power(true); 62 return wl->if_ops->power(wl, true);
63} 63}
64 64
65static int wl1251_fetch_firmware(struct wl1251 *wl) 65static int wl1251_fetch_firmware(struct wl1251 *wl)
@@ -152,9 +152,12 @@ static void wl1251_fw_wakeup(struct wl1251 *wl)
152 152
153static int wl1251_chip_wakeup(struct wl1251 *wl) 153static int wl1251_chip_wakeup(struct wl1251 *wl)
154{ 154{
155 int ret = 0; 155 int ret;
156
157 ret = wl1251_power_on(wl);
158 if (ret < 0)
159 return ret;
156 160
157 wl1251_power_on(wl);
158 msleep(WL1251_POWER_ON_SLEEP); 161 msleep(WL1251_POWER_ON_SLEEP);
159 wl->if_ops->reset(wl); 162 wl->if_ops->reset(wl);
160 163
diff --git a/drivers/net/wireless/wl1251/sdio.c b/drivers/net/wireless/wl1251/sdio.c
index 74ba9ced5393..d550b5e68d3c 100644
--- a/drivers/net/wireless/wl1251/sdio.c
+++ b/drivers/net/wireless/wl1251/sdio.c
@@ -26,6 +26,7 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/wl12xx.h> 27#include <linux/wl12xx.h>
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <linux/pm_runtime.h>
29 30
30#include "wl1251.h" 31#include "wl1251.h"
31 32
@@ -42,8 +43,6 @@ struct wl1251_sdio {
42 u32 elp_val; 43 u32 elp_val;
43}; 44};
44 45
45static struct wl12xx_platform_data *wl12xx_board_data;
46
47static struct sdio_func *wl_to_func(struct wl1251 *wl) 46static struct sdio_func *wl_to_func(struct wl1251 *wl)
48{ 47{
49 struct wl1251_sdio *wl_sdio = wl->if_priv; 48 struct wl1251_sdio *wl_sdio = wl->if_priv;
@@ -171,8 +170,42 @@ static void wl1251_disable_line_irq(struct wl1251 *wl)
171 return disable_irq(wl->irq); 170 return disable_irq(wl->irq);
172} 171}
173 172
174static void wl1251_sdio_set_power(bool enable) 173static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
175{ 174{
175 struct sdio_func *func = wl_to_func(wl);
176 int ret;
177
178 if (enable) {
179 /*
180 * Power is controlled by runtime PM, but we still call board
181 * callback in case it wants to do any additional setup,
182 * for example enabling clock buffer for the module.
183 */
184 if (wl->set_power)
185 wl->set_power(true);
186
187 ret = pm_runtime_get_sync(&func->dev);
188 if (ret < 0)
189 goto out;
190
191 sdio_claim_host(func);
192 sdio_enable_func(func);
193 sdio_release_host(func);
194 } else {
195 sdio_claim_host(func);
196 sdio_disable_func(func);
197 sdio_release_host(func);
198
199 ret = pm_runtime_put_sync(&func->dev);
200 if (ret < 0)
201 goto out;
202
203 if (wl->set_power)
204 wl->set_power(false);
205 }
206
207out:
208 return ret;
176} 209}
177 210
178static struct wl1251_if_operations wl1251_sdio_ops = { 211static struct wl1251_if_operations wl1251_sdio_ops = {
@@ -181,30 +214,7 @@ static struct wl1251_if_operations wl1251_sdio_ops = {
181 .write_elp = wl1251_sdio_write_elp, 214 .write_elp = wl1251_sdio_write_elp,
182 .read_elp = wl1251_sdio_read_elp, 215 .read_elp = wl1251_sdio_read_elp,
183 .reset = wl1251_sdio_reset, 216 .reset = wl1251_sdio_reset,
184}; 217 .power = wl1251_sdio_set_power,
185
186static int wl1251_platform_probe(struct platform_device *pdev)
187{
188 if (pdev->id != -1) {
189 wl1251_error("can only handle single device");
190 return -ENODEV;
191 }
192
193 wl12xx_board_data = pdev->dev.platform_data;
194 return 0;
195}
196
197/*
198 * Dummy platform_driver for passing platform_data to this driver,
199 * until we have a way to pass this through SDIO subsystem or
200 * some other way.
201 */
202static struct platform_driver wl1251_platform_driver = {
203 .driver = {
204 .name = "wl1251_data",
205 .owner = THIS_MODULE,
206 },
207 .probe = wl1251_platform_probe,
208}; 218};
209 219
210static int wl1251_sdio_probe(struct sdio_func *func, 220static int wl1251_sdio_probe(struct sdio_func *func,
@@ -214,6 +224,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
214 struct wl1251 *wl; 224 struct wl1251 *wl;
215 struct ieee80211_hw *hw; 225 struct ieee80211_hw *hw;
216 struct wl1251_sdio *wl_sdio; 226 struct wl1251_sdio *wl_sdio;
227 const struct wl12xx_platform_data *wl12xx_board_data;
217 228
218 hw = wl1251_alloc_hw(); 229 hw = wl1251_alloc_hw();
219 if (IS_ERR(hw)) 230 if (IS_ERR(hw))
@@ -239,9 +250,9 @@ static int wl1251_sdio_probe(struct sdio_func *func,
239 wl_sdio->func = func; 250 wl_sdio->func = func;
240 wl->if_priv = wl_sdio; 251 wl->if_priv = wl_sdio;
241 wl->if_ops = &wl1251_sdio_ops; 252 wl->if_ops = &wl1251_sdio_ops;
242 wl->set_power = wl1251_sdio_set_power;
243 253
244 if (wl12xx_board_data != NULL) { 254 wl12xx_board_data = wl12xx_get_platform_data();
255 if (!IS_ERR(wl12xx_board_data)) {
245 wl->set_power = wl12xx_board_data->set_power; 256 wl->set_power = wl12xx_board_data->set_power;
246 wl->irq = wl12xx_board_data->irq; 257 wl->irq = wl12xx_board_data->irq;
247 wl->use_eeprom = wl12xx_board_data->use_eeprom; 258 wl->use_eeprom = wl12xx_board_data->use_eeprom;
@@ -273,6 +284,10 @@ static int wl1251_sdio_probe(struct sdio_func *func,
273 goto out_free_irq; 284 goto out_free_irq;
274 285
275 sdio_set_drvdata(func, wl); 286 sdio_set_drvdata(func, wl);
287
288 /* Tell PM core that we don't need the card to be powered now */
289 pm_runtime_put_noidle(&func->dev);
290
276 return ret; 291 return ret;
277 292
278out_free_irq: 293out_free_irq:
@@ -294,6 +309,9 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
294 struct wl1251 *wl = sdio_get_drvdata(func); 309 struct wl1251 *wl = sdio_get_drvdata(func);
295 struct wl1251_sdio *wl_sdio = wl->if_priv; 310 struct wl1251_sdio *wl_sdio = wl->if_priv;
296 311
 312 /* Undo the usage count decrement done in wl1251_sdio_probe */
313 pm_runtime_get_noresume(&func->dev);
314
297 if (wl->irq) 315 if (wl->irq)
298 free_irq(wl->irq, wl); 316 free_irq(wl->irq, wl);
299 kfree(wl_sdio); 317 kfree(wl_sdio);
@@ -305,23 +323,37 @@ static void __devexit wl1251_sdio_remove(struct sdio_func *func)
305 sdio_release_host(func); 323 sdio_release_host(func);
306} 324}
307 325
326static int wl1251_suspend(struct device *dev)
327{
328 /*
329 * Tell MMC/SDIO core it's OK to power down the card
330 * (if it isn't already), but not to remove it completely.
331 */
332 return 0;
333}
334
335static int wl1251_resume(struct device *dev)
336{
337 return 0;
338}
339
340static const struct dev_pm_ops wl1251_sdio_pm_ops = {
341 .suspend = wl1251_suspend,
342 .resume = wl1251_resume,
343};
344
308static struct sdio_driver wl1251_sdio_driver = { 345static struct sdio_driver wl1251_sdio_driver = {
309 .name = "wl1251_sdio", 346 .name = "wl1251_sdio",
310 .id_table = wl1251_devices, 347 .id_table = wl1251_devices,
311 .probe = wl1251_sdio_probe, 348 .probe = wl1251_sdio_probe,
312 .remove = __devexit_p(wl1251_sdio_remove), 349 .remove = __devexit_p(wl1251_sdio_remove),
350 .drv.pm = &wl1251_sdio_pm_ops,
313}; 351};
314 352
315static int __init wl1251_sdio_init(void) 353static int __init wl1251_sdio_init(void)
316{ 354{
317 int err; 355 int err;
318 356
319 err = platform_driver_register(&wl1251_platform_driver);
320 if (err) {
321 wl1251_error("failed to register platform driver: %d", err);
322 return err;
323 }
324
325 err = sdio_register_driver(&wl1251_sdio_driver); 357 err = sdio_register_driver(&wl1251_sdio_driver);
326 if (err) 358 if (err)
327 wl1251_error("failed to register sdio driver: %d", err); 359 wl1251_error("failed to register sdio driver: %d", err);
@@ -331,7 +363,6 @@ static int __init wl1251_sdio_init(void)
331static void __exit wl1251_sdio_exit(void) 363static void __exit wl1251_sdio_exit(void)
332{ 364{
333 sdio_unregister_driver(&wl1251_sdio_driver); 365 sdio_unregister_driver(&wl1251_sdio_driver);
334 platform_driver_unregister(&wl1251_platform_driver);
335 wl1251_notice("unloaded"); 366 wl1251_notice("unloaded");
336} 367}
337 368
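The probe/remove and power callbacks above follow the usual runtime PM usage-count balancing for an SDIO function driver. Below is a minimal sketch of that pattern, assuming only <linux/pm_runtime.h>, <linux/mmc/sdio_func.h> and the MMC core behaviour noted in the comments above; it is an illustration, not a drop-in replacement for the code in this patch.

	#include <linux/pm_runtime.h>
	#include <linux/mmc/sdio_func.h>

	/* probe: the MMC core powered the card for us; drop that reference so
	 * the card may be powered down while the function is idle */
	static void example_probe_done(struct sdio_func *func)
	{
		pm_runtime_put_noidle(&func->dev);
	}

	/* power up: take a reference (powers the card if needed), then enable
	 * the SDIO function while holding the host lock */
	static int example_power_on(struct sdio_func *func)
	{
		int ret = pm_runtime_get_sync(&func->dev);
		if (ret < 0)
			return ret;

		sdio_claim_host(func);
		ret = sdio_enable_func(func);
		sdio_release_host(func);
		return ret;
	}

	/* remove: re-take the reference dropped at probe time so the usage
	 * count stays balanced for the MMC core */
	static void example_remove(struct sdio_func *func)
	{
		pm_runtime_get_noresume(&func->dev);
	}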
diff --git a/drivers/net/wireless/wl1251/spi.c b/drivers/net/wireless/wl1251/spi.c
index 88fa8e69d0d1..ac872b38960f 100644
--- a/drivers/net/wireless/wl1251/spi.c
+++ b/drivers/net/wireless/wl1251/spi.c
@@ -215,12 +215,21 @@ static void wl1251_spi_disable_irq(struct wl1251 *wl)
215 return disable_irq(wl->irq); 215 return disable_irq(wl->irq);
216} 216}
217 217
218static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
219{
220 if (wl->set_power)
221 wl->set_power(enable);
222
223 return 0;
224}
225
218static const struct wl1251_if_operations wl1251_spi_ops = { 226static const struct wl1251_if_operations wl1251_spi_ops = {
219 .read = wl1251_spi_read, 227 .read = wl1251_spi_read,
220 .write = wl1251_spi_write, 228 .write = wl1251_spi_write,
221 .reset = wl1251_spi_reset_wake, 229 .reset = wl1251_spi_reset_wake,
222 .enable_irq = wl1251_spi_enable_irq, 230 .enable_irq = wl1251_spi_enable_irq,
223 .disable_irq = wl1251_spi_disable_irq, 231 .disable_irq = wl1251_spi_disable_irq,
232 .power = wl1251_spi_set_power,
224}; 233};
225 234
226static int __devinit wl1251_spi_probe(struct spi_device *spi) 235static int __devinit wl1251_spi_probe(struct spi_device *spi)
diff --git a/drivers/net/wireless/wl1251/wl1251.h b/drivers/net/wireless/wl1251/wl1251.h
index 47d2653baccd..c0ce2c8b43b8 100644
--- a/drivers/net/wireless/wl1251/wl1251.h
+++ b/drivers/net/wireless/wl1251/wl1251.h
@@ -256,6 +256,7 @@ struct wl1251_if_operations {
256 void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len); 256 void (*write)(struct wl1251 *wl, int addr, void *buf, size_t len);
257 void (*read_elp)(struct wl1251 *wl, int addr, u32 *val); 257 void (*read_elp)(struct wl1251 *wl, int addr, u32 *val);
258 void (*write_elp)(struct wl1251 *wl, int addr, u32 val); 258 void (*write_elp)(struct wl1251 *wl, int addr, u32 val);
259 int (*power)(struct wl1251 *wl, bool enable);
259 void (*reset)(struct wl1251 *wl); 260 void (*reset)(struct wl1251 *wl);
260 void (*enable_irq)(struct wl1251 *wl); 261 void (*enable_irq)(struct wl1251 *wl);
261 void (*disable_irq)(struct wl1251 *wl); 262 void (*disable_irq)(struct wl1251 *wl);
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index b447559f1db5..0e65bce457d6 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,46 +1,68 @@
1menuconfig WL12XX 1menuconfig WL12XX_MENU
2 tristate "TI wl12xx driver support" 2 tristate "TI wl12xx driver support"
3 depends on MAC80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable TI wl12xx driver support. The drivers make 5 This will enable TI wl12xx driver support for the following chips:
6 use of the mac80211 stack. 6 wl1271 and wl1273.
7 The drivers make use of the mac80211 stack.
7 8
8config WL1271 9config WL12XX
9 tristate "TI wl1271 support" 10 tristate "TI wl12xx support"
10 depends on WL12XX && GENERIC_HARDIRQS 11 depends on WL12XX_MENU && GENERIC_HARDIRQS
11 depends on INET 12 depends on INET
12 select FW_LOADER 13 select FW_LOADER
13 select CRC7 14 select CRC7
14 ---help--- 15 ---help---
15 This module adds support for wireless adapters based on the 16 This module adds support for wireless adapters based on TI wl1271 and
16 TI wl1271 chipset. 17 TI wl1273 chipsets. This module does *not* include support for wl1251.
18 For wl1251 support, use the separate homonymous driver instead.
17 19
18 If you choose to build a module, it'll be called wl1271. Say N if 20 If you choose to build a module, it will be called wl12xx. Say N if
19 unsure. 21 unsure.
20 22
21config WL1271_SPI 23config WL12XX_HT
22 tristate "TI wl1271 SPI support" 24 bool "TI wl12xx 802.11 HT support (EXPERIMENTAL)"
23 depends on WL1271 && SPI_MASTER 25 depends on WL12XX && EXPERIMENTAL
26 default n
27 ---help---
28 This will enable 802.11 HT support in the wl12xx module.
29
 30 This option is temporary, as the code is incomplete and still
 31 being tested.
32
33config WL12XX_SPI
34 tristate "TI wl12xx SPI support"
35 depends on WL12XX && SPI_MASTER
24 ---help--- 36 ---help---
25 This module adds support for the SPI interface of adapters using 37 This module adds support for the SPI interface of adapters using
26 TI wl1271 chipset. Select this if your platform is using 38 TI wl12xx chipsets. Select this if your platform is using
27 the SPI bus. 39 the SPI bus.
28 40
29 If you choose to build a module, it'll be called wl1251_spi. 41 If you choose to build a module, it'll be called wl12xx_spi.
30 Say N if unsure. 42 Say N if unsure.
31 43
32config WL1271_SDIO 44config WL12XX_SDIO
33 tristate "TI wl1271 SDIO support" 45 tristate "TI wl12xx SDIO support"
34 depends on WL1271 && MMC 46 depends on WL12XX && MMC
35 ---help--- 47 ---help---
36 This module adds support for the SDIO interface of adapters using 48 This module adds support for the SDIO interface of adapters using
37 TI wl1271 chipset. Select this if your platform is using 49 TI wl12xx chipsets. Select this if your platform is using
38 the SDIO bus. 50 the SDIO bus.
39 51
40 If you choose to build a module, it'll be called 52 If you choose to build a module, it'll be called wl12xx_sdio.
41 wl1271_sdio. Say N if unsure. 53 Say N if unsure.
54
55config WL12XX_SDIO_TEST
56 tristate "TI wl12xx SDIO testing support"
57 depends on WL12XX && MMC
58 default n
59 ---help---
 60 This module adds support for testing the SDIO bus with the
61 TI wl12xx chipsets. You probably don't want this unless you are
62 testing a new hardware platform. Select this if you want to test the
63 SDIO bus which is connected to the wl12xx chip.
42 64
43config WL12XX_PLATFORM_DATA 65config WL12XX_PLATFORM_DATA
44 bool 66 bool
45 depends on WL1271_SDIO != n 67 depends on WL12XX_SDIO != n || WL1251_SDIO != n
46 default y 68 default y
diff --git a/drivers/net/wireless/wl12xx/Makefile b/drivers/net/wireless/wl12xx/Makefile
index 3a807444b2af..521c0414e52e 100644
--- a/drivers/net/wireless/wl12xx/Makefile
+++ b/drivers/net/wireless/wl12xx/Makefile
@@ -1,12 +1,16 @@
1wl1271-objs = wl1271_main.o wl1271_cmd.o wl1271_io.o \ 1wl12xx-objs = main.o cmd.o io.o event.o tx.o rx.o ps.o acx.o \
2 wl1271_event.o wl1271_tx.o wl1271_rx.o \ 2 boot.o init.o debugfs.o scan.o
3 wl1271_ps.o wl1271_acx.o wl1271_boot.o \ 3
4 wl1271_init.o wl1271_debugfs.o wl1271_scan.o 4wl12xx_spi-objs = spi.o
5 5wl12xx_sdio-objs = sdio.o
6wl1271-$(CONFIG_NL80211_TESTMODE) += wl1271_testmode.o 6wl12xx_sdio_test-objs = sdio_test.o
7obj-$(CONFIG_WL1271) += wl1271.o 7
8obj-$(CONFIG_WL1271_SPI) += wl1271_spi.o 8wl12xx-$(CONFIG_NL80211_TESTMODE) += testmode.o
9obj-$(CONFIG_WL1271_SDIO) += wl1271_sdio.o 9obj-$(CONFIG_WL12XX) += wl12xx.o
10obj-$(CONFIG_WL12XX_SPI) += wl12xx_spi.o
11obj-$(CONFIG_WL12XX_SDIO) += wl12xx_sdio.o
12
13obj-$(CONFIG_WL12XX_SDIO_TEST) += wl12xx_sdio_test.o
10 14
11# small builtin driver bit 15# small builtin driver bit
12obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o 16obj-$(CONFIG_WL12XX_PLATFORM_DATA) += wl12xx_platform_data.o
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/acx.c
index 618993405262..cc4068d2b4a8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/acx.c
@@ -21,7 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl1271_acx.h" 24#include "acx.h"
25 25
26#include <linux/module.h> 26#include <linux/module.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
@@ -29,10 +29,10 @@
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31 31
32#include "wl1271.h" 32#include "wl12xx.h"
33#include "wl12xx_80211.h" 33#include "wl12xx_80211.h"
34#include "wl1271_reg.h" 34#include "reg.h"
35#include "wl1271_ps.h" 35#include "ps.h"
36 36
37int wl1271_acx_wake_up_conditions(struct wl1271 *wl) 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
38{ 38{
@@ -862,7 +862,7 @@ out:
862 return ret; 862 return ret;
863} 863}
864 864
865int wl1271_acx_frag_threshold(struct wl1271 *wl) 865int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold)
866{ 866{
867 struct acx_frag_threshold *acx; 867 struct acx_frag_threshold *acx;
868 int ret = 0; 868 int ret = 0;
@@ -876,7 +876,7 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl)
876 goto out; 876 goto out;
877 } 877 }
878 878
879 acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold); 879 acx->frag_threshold = cpu_to_le16(frag_threshold);
880 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); 880 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
881 if (ret < 0) { 881 if (ret < 0) {
882 wl1271_warning("Setting of frag threshold failed: %d", ret); 882 wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -1041,7 +1041,7 @@ out:
1041 return ret; 1041 return ret;
1042} 1042}
1043 1043
1044int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address) 1044int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address)
1045{ 1045{
1046 struct wl1271_acx_arp_filter *acx; 1046 struct wl1271_acx_arp_filter *acx;
1047 int ret; 1047 int ret;
@@ -1057,7 +1057,7 @@ int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address)
1057 acx->version = ACX_IPV4_VERSION; 1057 acx->version = ACX_IPV4_VERSION;
1058 acx->enable = enable; 1058 acx->enable = enable;
1059 1059
1060 if (enable == true) 1060 if (enable)
1061 memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE); 1061 memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
1062 1062
1063 ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER, 1063 ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
@@ -1226,6 +1226,89 @@ out:
1226 return ret; 1226 return ret;
1227} 1227}
1228 1228
1229int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1230 struct ieee80211_sta_ht_cap *ht_cap,
1231 bool allow_ht_operation)
1232{
1233 struct wl1271_acx_ht_capabilities *acx;
1234 u8 mac_address[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
1235 int ret = 0;
1236
1237 wl1271_debug(DEBUG_ACX, "acx ht capabilities setting");
1238
1239 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1240 if (!acx) {
1241 ret = -ENOMEM;
1242 goto out;
1243 }
1244
1245 /* Allow HT Operation ? */
1246 if (allow_ht_operation) {
1247 acx->ht_capabilites =
1248 WL1271_ACX_FW_CAP_HT_OPERATION;
1249 if (ht_cap->cap & IEEE80211_HT_CAP_GRN_FLD)
1250 acx->ht_capabilites |=
1251 WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT;
1252 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
1253 acx->ht_capabilites |=
1254 WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS;
1255 if (ht_cap->cap & IEEE80211_HT_CAP_LSIG_TXOP_PROT)
1256 acx->ht_capabilites |=
1257 WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION;
1258
1259 /* get data from A-MPDU parameters field */
1260 acx->ampdu_max_length = ht_cap->ampdu_factor;
1261 acx->ampdu_min_spacing = ht_cap->ampdu_density;
1262
1263 memcpy(acx->mac_address, mac_address, ETH_ALEN);
1264 } else { /* HT operations are not allowed */
1265 acx->ht_capabilites = 0;
1266 }
1267
1268 ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
1269 if (ret < 0) {
1270 wl1271_warning("acx ht capabilities setting failed: %d", ret);
1271 goto out;
1272 }
1273
1274out:
1275 kfree(acx);
1276 return ret;
1277}
1278
1279int wl1271_acx_set_ht_information(struct wl1271 *wl,
1280 u16 ht_operation_mode)
1281{
1282 struct wl1271_acx_ht_information *acx;
1283 int ret = 0;
1284
1285 wl1271_debug(DEBUG_ACX, "acx ht information setting");
1286
1287 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1288 if (!acx) {
1289 ret = -ENOMEM;
1290 goto out;
1291 }
1292
1293 acx->ht_protection =
1294 (u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
1295 acx->rifs_mode = 0;
1296 acx->gf_protection = 0;
1297 acx->ht_tx_burst_limit = 0;
1298 acx->dual_cts_protection = 0;
1299
1300 ret = wl1271_cmd_configure(wl, ACX_HT_BSS_OPERATION, acx, sizeof(*acx));
1301
1302 if (ret < 0) {
1303 wl1271_warning("acx ht information setting failed: %d", ret);
1304 goto out;
1305 }
1306
1307out:
1308 kfree(acx);
1309 return ret;
1310}
1311
1229int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime) 1312int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime)
1230{ 1313{
1231 struct wl1271_acx_fw_tsf_information *tsf_info; 1314 struct wl1271_acx_fw_tsf_information *tsf_info;
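A hypothetical caller of the two new HT ACX helpers might look like the sketch below. The mac80211 field names used here (sta->ht_cap.ht_supported and bss_conf->ht_operation_mode) are assumptions made for illustration and are not part of this patch.

	static int example_enable_ht(struct wl1271 *wl, struct ieee80211_sta *sta,
				     struct ieee80211_bss_conf *bss_conf)
	{
		int ret;

		/* declare the peer's HT capabilities to the firmware */
		ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap,
						     sta->ht_cap.ht_supported);
		if (ret < 0)
			return ret;

		/* apply the BSS HT protection / operation rules */
		return wl1271_acx_set_ht_information(wl,
						     bss_conf->ht_operation_mode);
	}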
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/acx.h
index 0ce845fb6629..7bd8e4db4a71 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/acx.h
@@ -22,11 +22,11 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_ACX_H__ 25#ifndef __ACX_H__
26#define __WL1271_ACX_H__ 26#define __ACX_H__
27 27
28#include "wl1271.h" 28#include "wl12xx.h"
29#include "wl1271_cmd.h" 29#include "cmd.h"
30 30
31/************************************************************************* 31/*************************************************************************
32 32
@@ -61,7 +61,8 @@
61 WL1271_ACX_INTR_HW_AVAILABLE | \ 61 WL1271_ACX_INTR_HW_AVAILABLE | \
62 WL1271_ACX_INTR_DATA) 62 WL1271_ACX_INTR_DATA)
63 63
64#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \ 64#define WL1271_INTR_MASK (WL1271_ACX_INTR_WATCHDOG | \
65 WL1271_ACX_INTR_EVENT_A | \
65 WL1271_ACX_INTR_EVENT_B | \ 66 WL1271_ACX_INTR_EVENT_B | \
66 WL1271_ACX_INTR_HW_AVAILABLE | \ 67 WL1271_ACX_INTR_HW_AVAILABLE | \
67 WL1271_ACX_INTR_DATA) 68 WL1271_ACX_INTR_DATA)
@@ -867,10 +868,15 @@ struct wl1271_acx_bet_enable {
867#define ACX_IPV4_VERSION 4 868#define ACX_IPV4_VERSION 4
868#define ACX_IPV6_VERSION 6 869#define ACX_IPV6_VERSION 6
869#define ACX_IPV4_ADDR_SIZE 4 870#define ACX_IPV4_ADDR_SIZE 4
871
872/* bitmap of enabled arp_filter features */
873#define ACX_ARP_FILTER_ARP_FILTERING BIT(0)
874#define ACX_ARP_FILTER_AUTO_ARP BIT(1)
875
870struct wl1271_acx_arp_filter { 876struct wl1271_acx_arp_filter {
871 struct acx_header header; 877 struct acx_header header;
872 u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */ 878 u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
873 u8 enable; /* 1 to enable ARP filtering, 0 to disable */ 879 u8 enable; /* bitmap of enabled ARP filtering features */
874 u8 padding[2]; 880 u8 padding[2];
875 u8 address[16]; /* The configured device IP address - all ARP 881 u8 address[16]; /* The configured device IP address - all ARP
876 requests directed to this IP address will pass 882 requests directed to this IP address will pass
@@ -964,6 +970,87 @@ struct wl1271_acx_rssi_snr_avg_weights {
964 u8 snr_data; 970 u8 snr_data;
965}; 971};
966 972
973/*
974 * ACX_PEER_HT_CAP
975 * Configure HT capabilities - declare the capabilities of the peer
976 * we are connected to.
977 */
978struct wl1271_acx_ht_capabilities {
979 struct acx_header header;
980
981 /*
982 * bit 0 - Allow HT Operation
983 * bit 1 - Allow Greenfield format in TX
984 * bit 2 - Allow Short GI in TX
985 * bit 3 - Allow L-SIG TXOP Protection in TX
986 * bit 4 - Allow HT Control fields in TX.
 987 * Note: the driver will still leave space for HT control in packets
 988 * regardless of the value of this field. The FW is responsible for
 989 * dropping the HT field from any frame when this bit is set to 0.
 990 * bit 5 - Allow RD initiation in TXOP. FW is allowed to initiate RD.
 991 * Exact policy settings for this feature are TBD.
 992 * Note: this bit can only be set to 1 if bit 3 is set to 1.
993 */
994 __le32 ht_capabilites;
995
996 /*
997 * Indicates to which peer these capabilities apply.
 998 * For infrastructure, use ff:ff:ff:ff:ff:ff, which indicates relevance
 999 * to all peers.
1000 * Only valid for IBSS/DLS operation.
1001 */
1002 u8 mac_address[ETH_ALEN];
1003
1004 /*
 1005 * This is the maximum A-MPDU length supported by the AP. The FW may not
 1006 * exceed this length when sending A-MPDUs.
1007 */
1008 u8 ampdu_max_length;
1009
 1010 /* This is the minimum spacing required when sending A-MPDUs to the AP */
1011 u8 ampdu_min_spacing;
1012} __packed;
1013
 1014/* HT Capabilities FW Bit Mask Mapping */
1015#define WL1271_ACX_FW_CAP_HT_OPERATION BIT(0)
1016#define WL1271_ACX_FW_CAP_GREENFIELD_FRAME_FORMAT BIT(1)
1017#define WL1271_ACX_FW_CAP_SHORT_GI_FOR_20MHZ_PACKETS BIT(2)
1018#define WL1271_ACX_FW_CAP_LSIG_TXOP_PROTECTION BIT(3)
1019#define WL1271_ACX_FW_CAP_HT_CONTROL_FIELDS BIT(4)
1020#define WL1271_ACX_FW_CAP_RD_INITIATION BIT(5)
1021
1022
1023/*
1024 * ACX_HT_BSS_OPERATION
1025 * Configure HT capabilities - AP rules for behavior in the BSS.
1026 */
1027struct wl1271_acx_ht_information {
1028 struct acx_header header;
1029
1030 /* Values: 0 - RIFS not allowed, 1 - RIFS allowed */
1031 u8 rifs_mode;
1032
1033 /* Values: 0 - 3 like in spec */
1034 u8 ht_protection;
1035
1036 /* Values: 0 - GF protection not required, 1 - GF protection required */
1037 u8 gf_protection;
1038
 1039 /* Values: 0 - TX Burst Limit not required, 1 - TX Burst Limit required */
1040 u8 ht_tx_burst_limit;
1041
1042 /*
1043 * Values: 0 - Dual CTS protection not required,
1044 * 1 - Dual CTS Protection required
 1045 * Note: when this value is set to 1, the FW will protect all TXOPs with
 1046 * RTS frames and will not use CTS-to-self, regardless of the value of
 1047 * the ACX_CTS_PROTECTION information element.
1048 */
1049 u8 dual_cts_protection;
1050
1051 u8 padding[3];
1052} __packed;
1053
967struct wl1271_acx_fw_tsf_information { 1054struct wl1271_acx_fw_tsf_information {
968 struct acx_header header; 1055 struct acx_header header;
969 1056
@@ -1079,20 +1166,25 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl, u8 ac, u8 cw_min, u16 cw_max,
1079int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type, 1166int wl1271_acx_tid_cfg(struct wl1271 *wl, u8 queue_id, u8 channel_type,
1080 u8 tsid, u8 ps_scheme, u8 ack_policy, 1167 u8 tsid, u8 ps_scheme, u8 ack_policy,
1081 u32 apsd_conf0, u32 apsd_conf1); 1168 u32 apsd_conf0, u32 apsd_conf1);
1082int wl1271_acx_frag_threshold(struct wl1271 *wl); 1169int wl1271_acx_frag_threshold(struct wl1271 *wl, u16 frag_threshold);
1083int wl1271_acx_tx_config_options(struct wl1271 *wl); 1170int wl1271_acx_tx_config_options(struct wl1271 *wl);
1084int wl1271_acx_mem_cfg(struct wl1271 *wl); 1171int wl1271_acx_mem_cfg(struct wl1271 *wl);
1085int wl1271_acx_init_mem_config(struct wl1271 *wl); 1172int wl1271_acx_init_mem_config(struct wl1271 *wl);
1086int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1173int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1087int wl1271_acx_smart_reflex(struct wl1271 *wl); 1174int wl1271_acx_smart_reflex(struct wl1271 *wl);
1088int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable); 1175int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1089int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, __be32 address); 1176int wl1271_acx_arp_ip_filter(struct wl1271 *wl, u8 enable, __be32 address);
1090int wl1271_acx_pm_config(struct wl1271 *wl); 1177int wl1271_acx_pm_config(struct wl1271 *wl);
1091int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable); 1178int wl1271_acx_keep_alive_mode(struct wl1271 *wl, bool enable);
1092int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid); 1179int wl1271_acx_keep_alive_config(struct wl1271 *wl, u8 index, u8 tpl_valid);
1093int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable, 1180int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, bool enable,
1094 s16 thold, u8 hyst); 1181 s16 thold, u8 hyst);
1095int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl); 1182int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl);
1183int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
1184 struct ieee80211_sta_ht_cap *ht_cap,
1185 bool allow_ht_operation);
1186int wl1271_acx_set_ht_information(struct wl1271 *wl,
1187 u16 ht_operation_mode);
1096int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime); 1188int wl1271_acx_tsf_info(struct wl1271 *wl, u64 *mactime);
1097 1189
1098#endif /* __WL1271_ACX_H__ */ 1190#endif /* __WL1271_ACX_H__ */
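With the enable field now a feature bitmap, callers of wl1271_acx_arp_ip_filter() are expected to pass a combination of the ACX_ARP_FILTER_* bits rather than a plain boolean. A small hypothetical helper for illustration (only the prototype and the bit definitions come from this patch):

	static int example_configure_arp_filter(struct wl1271 *wl, __be32 ip_addr)
	{
		u8 enable = 0;

		/* enable filtering only when an IPv4 address is configured */
		if (ip_addr)
			enable = ACX_ARP_FILTER_ARP_FILTERING;

		return wl1271_acx_arp_ip_filter(wl, enable, ip_addr);
	}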
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/boot.c
index b91021242098..4df04f84d7f1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/boot.c
@@ -21,14 +21,13 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/gpio.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
26 25
27#include "wl1271_acx.h" 26#include "acx.h"
28#include "wl1271_reg.h" 27#include "reg.h"
29#include "wl1271_boot.h" 28#include "boot.h"
30#include "wl1271_io.h" 29#include "io.h"
31#include "wl1271_event.h" 30#include "event.h"
32 31
33static struct wl1271_partition_set part_table[PART_TABLE_LEN] = { 32static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
34 [PART_DOWN] = { 33 [PART_DOWN] = {
@@ -467,24 +466,24 @@ static void wl1271_boot_hw_version(struct wl1271 *wl)
467 wl->hw_pg_ver = (s8)fuse; 466 wl->hw_pg_ver = (s8)fuse;
468} 467}
469 468
470int wl1271_boot(struct wl1271 *wl) 469/* uploads NVS and firmware */
470int wl1271_load_firmware(struct wl1271 *wl)
471{ 471{
472 int ret = 0; 472 int ret = 0;
473 u32 tmp, clk, pause; 473 u32 tmp, clk, pause;
474 int ref_clock = wl->ref_clock;
475 474
476 wl1271_boot_hw_version(wl); 475 wl1271_boot_hw_version(wl);
477 476
478 if (ref_clock == 0 || ref_clock == 2 || ref_clock == 4) 477 if (wl->ref_clock == 0 || wl->ref_clock == 2 || wl->ref_clock == 4)
479 /* ref clk: 19.2/38.4/38.4-XTAL */ 478 /* ref clk: 19.2/38.4/38.4-XTAL */
480 clk = 0x3; 479 clk = 0x3;
481 else if (ref_clock == 1 || ref_clock == 3) 480 else if (wl->ref_clock == 1 || wl->ref_clock == 3)
482 /* ref clk: 26/52 */ 481 /* ref clk: 26/52 */
483 clk = 0x5; 482 clk = 0x5;
484 else 483 else
485 return -EINVAL; 484 return -EINVAL;
486 485
487 if (ref_clock != 0) { 486 if (wl->ref_clock != 0) {
488 u16 val; 487 u16 val;
489 /* Set clock type (open drain) */ 488 /* Set clock type (open drain) */
490 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE); 489 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
@@ -529,8 +528,7 @@ int wl1271_boot(struct wl1271 *wl)
529 528
530 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 529 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
531 530
532 /* 2 */ 531 clk |= (wl->ref_clock << 1) << 4;
533 clk |= (ref_clock << 1) << 4;
534 wl1271_write32(wl, DRPW_SCRATCH_START, clk); 532 wl1271_write32(wl, DRPW_SCRATCH_START, clk);
535 533
536 wl1271_set_partition(wl, &part_table[PART_WORK]); 534 wl1271_set_partition(wl, &part_table[PART_WORK]);
@@ -574,6 +572,20 @@ int wl1271_boot(struct wl1271 *wl)
574 if (ret < 0) 572 if (ret < 0)
575 goto out; 573 goto out;
576 574
575out:
576 return ret;
577}
578EXPORT_SYMBOL_GPL(wl1271_load_firmware);
579
580int wl1271_boot(struct wl1271 *wl)
581{
582 int ret;
583
584 /* upload NVS and firmware */
585 ret = wl1271_load_firmware(wl);
586 if (ret)
587 return ret;
588
577 /* 10.5 start firmware */ 589 /* 10.5 start firmware */
578 ret = wl1271_boot_run_firmware(wl); 590 ret = wl1271_boot_run_firmware(wl);
579 if (ret < 0) 591 if (ret < 0)
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/boot.h
index f73b0b15a280..d67dcffa31eb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/boot.h
@@ -24,9 +24,10 @@
24#ifndef __BOOT_H__ 24#ifndef __BOOT_H__
25#define __BOOT_H__ 25#define __BOOT_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28 28
29int wl1271_boot(struct wl1271 *wl); 29int wl1271_boot(struct wl1271 *wl);
30int wl1271_load_firmware(struct wl1271 *wl);
30 31
31#define WL1271_NO_SUBBANDS 8 32#define WL1271_NO_SUBBANDS 8
32#define WL1271_NO_POWER_LEVELS 4 33#define WL1271_NO_POWER_LEVELS 4
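Splitting wl1271_boot() and exporting wl1271_load_firmware() lets other modules, presumably such as the new wl12xx_sdio_test module added by this series, upload the NVS and firmware without going through the full boot path. A hypothetical user, sketched for illustration only:

	static int example_test_bring_up(struct wl1271 *wl)
	{
		int ret;

		/* uploads NVS and firmware, but does not start it */
		ret = wl1271_load_firmware(wl);
		if (ret < 0)
			return ret;

		/* ... module-specific testing against the loaded firmware ... */
		return 0;
	}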
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/cmd.c
index 5d3e8485ea4e..0106628aa5a2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/cmd.c
@@ -29,13 +29,13 @@
29#include <linux/ieee80211.h> 29#include <linux/ieee80211.h>
30#include <linux/slab.h> 30#include <linux/slab.h>
31 31
32#include "wl1271.h" 32#include "wl12xx.h"
33#include "wl1271_reg.h" 33#include "reg.h"
34#include "wl1271_io.h" 34#include "io.h"
35#include "wl1271_acx.h" 35#include "acx.h"
36#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
37#include "wl1271_cmd.h" 37#include "cmd.h"
38#include "wl1271_event.h" 38#include "event.h"
39 39
40#define WL1271_CMD_FAST_POLL_COUNT 50 40#define WL1271_CMD_FAST_POLL_COUNT 50
41 41
@@ -611,6 +611,75 @@ out:
611 return ret; 611 return ret;
612} 612}
613 613
614struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
615 struct sk_buff *skb)
616{
617 int ret;
618
619 if (!skb)
620 skb = ieee80211_ap_probereq_get(wl->hw, wl->vif);
621 if (!skb)
622 goto out;
623
624 wl1271_dump(DEBUG_SCAN, "AP PROBE REQ: ", skb->data, skb->len);
625
626 if (wl->band == IEEE80211_BAND_2GHZ)
627 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
628 skb->data, skb->len, 0,
629 wl->conf.tx.basic_rate);
630 else
631 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
632 skb->data, skb->len, 0,
633 wl->conf.tx.basic_rate_5);
634
635 if (ret < 0)
636 wl1271_error("Unable to set ap probe request template.");
637
638out:
639 return skb;
640}
641
642int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr)
643{
644 int ret;
645 struct wl12xx_arp_rsp_template tmpl;
646 struct ieee80211_hdr_3addr *hdr;
647 struct arphdr *arp_hdr;
648
649 memset(&tmpl, 0, sizeof(tmpl));
650
651 /* mac80211 header */
652 hdr = &tmpl.hdr;
653 hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
654 IEEE80211_STYPE_DATA |
655 IEEE80211_FCTL_TODS);
656 memcpy(hdr->addr1, wl->vif->bss_conf.bssid, ETH_ALEN);
657 memcpy(hdr->addr2, wl->vif->addr, ETH_ALEN);
658 memset(hdr->addr3, 0xff, ETH_ALEN);
659
660 /* llc layer */
661 memcpy(tmpl.llc_hdr, rfc1042_header, sizeof(rfc1042_header));
662 tmpl.llc_type = htons(ETH_P_ARP);
663
664 /* arp header */
665 arp_hdr = &tmpl.arp_hdr;
666 arp_hdr->ar_hrd = htons(ARPHRD_ETHER);
667 arp_hdr->ar_pro = htons(ETH_P_IP);
668 arp_hdr->ar_hln = ETH_ALEN;
669 arp_hdr->ar_pln = 4;
670 arp_hdr->ar_op = htons(ARPOP_REPLY);
671
672 /* arp payload */
673 memcpy(tmpl.sender_hw, wl->vif->addr, ETH_ALEN);
674 tmpl.sender_ip = ip_addr;
675
676 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP,
677 &tmpl, sizeof(tmpl), 0,
678 wl->basic_rate);
679
680 return ret;
681}
682
614int wl1271_build_qos_null_data(struct wl1271 *wl) 683int wl1271_build_qos_null_data(struct wl1271 *wl)
615{ 684{
616 struct ieee80211_qos_hdr template; 685 struct ieee80211_qos_hdr template;
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/cmd.h
index a0caf4fc37b1..2a1d9db7ceb8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/cmd.h
@@ -22,10 +22,10 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_CMD_H__ 25#ifndef __CMD_H__
26#define __WL1271_CMD_H__ 26#define __CMD_H__
27 27
28#include "wl1271.h" 28#include "wl12xx.h"
29 29
30struct acx_header; 30struct acx_header;
31 31
@@ -49,6 +49,9 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
49int wl1271_cmd_build_probe_req(struct wl1271 *wl, 49int wl1271_cmd_build_probe_req(struct wl1271 *wl,
50 const u8 *ssid, size_t ssid_len, 50 const u8 *ssid, size_t ssid_len,
51 const u8 *ie, size_t ie_len, u8 band); 51 const u8 *ie, size_t ie_len, u8 band);
52struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
53 struct sk_buff *skb);
54int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, __be32 ip_addr);
52int wl1271_build_qos_null_data(struct wl1271 *wl); 55int wl1271_build_qos_null_data(struct wl1271 *wl);
53int wl1271_cmd_build_klv_null_data(struct wl1271 *wl); 56int wl1271_cmd_build_klv_null_data(struct wl1271 *wl);
54int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); 57int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
@@ -122,6 +125,7 @@ enum cmd_templ {
122 CMD_TEMPL_CTS, /* 125 CMD_TEMPL_CTS, /*
123 * For CTS-to-self (FastCTS) mechanism 126 * For CTS-to-self (FastCTS) mechanism
124 * for BT/WLAN coexistence (SoftGemini). */ 127 * for BT/WLAN coexistence (SoftGemini). */
128 CMD_TEMPL_ARP_RSP,
125 CMD_TEMPL_MAX = 0xff 129 CMD_TEMPL_MAX = 0xff
126}; 130};
127 131
@@ -327,9 +331,6 @@ enum wl1271_channel_tune_bands {
327 331
328#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0 332#define WL1271_PD_REFERENCE_POINT_BAND_B_G 0
329 333
330#define TEST_CMD_P2G_CAL 0x02
331#define TEST_CMD_CHANNEL_TUNE 0x0d
332#define TEST_CMD_UPDATE_PD_REFERENCE_POINT 0x1d
333#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19 334#define TEST_CMD_INI_FILE_RADIO_PARAM 0x19
334#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E 335#define TEST_CMD_INI_FILE_GENERAL_PARAM 0x1E
335#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26 336#define TEST_CMD_INI_FILE_RF_EXTENDED_PARAM 0x26
@@ -375,51 +376,6 @@ struct wl1271_ext_radio_parms_cmd {
375 u8 padding[3]; 376 u8 padding[3];
376} __packed; 377} __packed;
377 378
378struct wl1271_cmd_cal_channel_tune {
379 struct wl1271_cmd_header header;
380
381 struct wl1271_cmd_test_header test;
382
383 u8 band;
384 u8 channel;
385
386 __le16 radio_status;
387} __packed;
388
389struct wl1271_cmd_cal_update_ref_point {
390 struct wl1271_cmd_header header;
391
392 struct wl1271_cmd_test_header test;
393
394 __le32 ref_power;
395 __le32 ref_detector;
396 u8 sub_band;
397 u8 padding[3];
398} __packed;
399
400#define MAX_TLV_LENGTH 400
401#define MAX_NVS_VERSION_LENGTH 12
402
403#define WL1271_CAL_P2G_BAND_B_G BIT(0)
404
405struct wl1271_cmd_cal_p2g {
406 struct wl1271_cmd_header header;
407
408 struct wl1271_cmd_test_header test;
409
410 __le16 len;
411 u8 buf[MAX_TLV_LENGTH];
412 u8 type;
413 u8 padding;
414
415 __le16 radio_status;
416 u8 nvs_version[MAX_NVS_VERSION_LENGTH];
417
418 u8 sub_band_mask;
419 u8 padding2;
420} __packed;
421
422
423/* 379/*
424 * There are three types of disconnections: 380 * There are three types of disconnections:
425 * 381 *
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/conf.h
index 5f78a6cb1433..a16b3616e430 100644
--- a/drivers/net/wireless/wl12xx/wl1271_conf.h
+++ b/drivers/net/wireless/wl12xx/conf.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_CONF_H__ 24#ifndef __CONF_H__
25#define __WL1271_CONF_H__ 25#define __CONF_H__
26 26
27enum { 27enum {
28 CONF_HW_BIT_RATE_1MBPS = BIT(0), 28 CONF_HW_BIT_RATE_1MBPS = BIT(0),
diff --git a/drivers/net/wireless/wl12xx/debugfs.c b/drivers/net/wireless/wl12xx/debugfs.c
new file mode 100644
index 000000000000..ec6077760157
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/debugfs.c
@@ -0,0 +1,480 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include "debugfs.h"
25
26#include <linux/skbuff.h>
27#include <linux/slab.h>
28
29#include "wl12xx.h"
30#include "acx.h"
31#include "ps.h"
32#include "io.h"
33
34/* ms */
35#define WL1271_DEBUGFS_STATS_LIFETIME 1000
36
37/* debugfs macros idea from mac80211 */
38#define DEBUGFS_FORMAT_BUFFER_SIZE 100
39static int wl1271_format_buffer(char __user *userbuf, size_t count,
40 loff_t *ppos, char *fmt, ...)
41{
42 va_list args;
43 char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
44 int res;
45
46 va_start(args, fmt);
47 res = vscnprintf(buf, sizeof(buf), fmt, args);
48 va_end(args);
49
50 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
51}
52
53#define DEBUGFS_READONLY_FILE(name, fmt, value...) \
54static ssize_t name## _read(struct file *file, char __user *userbuf, \
55 size_t count, loff_t *ppos) \
56{ \
57 struct wl1271 *wl = file->private_data; \
58 return wl1271_format_buffer(userbuf, count, ppos, \
59 fmt "\n", ##value); \
60} \
61 \
62static const struct file_operations name## _ops = { \
63 .read = name## _read, \
64 .open = wl1271_open_file_generic, \
65 .llseek = generic_file_llseek, \
66};
67
68#define DEBUGFS_ADD(name, parent) \
69 entry = debugfs_create_file(#name, 0400, parent, \
70 wl, &name## _ops); \
71 if (!entry || IS_ERR(entry)) \
72 goto err; \
73
74#define DEBUGFS_FWSTATS_FILE(sub, name, fmt) \
75static ssize_t sub## _ ##name## _read(struct file *file, \
76 char __user *userbuf, \
77 size_t count, loff_t *ppos) \
78{ \
79 struct wl1271 *wl = file->private_data; \
80 \
81 wl1271_debugfs_update_stats(wl); \
82 \
83 return wl1271_format_buffer(userbuf, count, ppos, fmt "\n", \
84 wl->stats.fw_stats->sub.name); \
85} \
86 \
87static const struct file_operations sub## _ ##name## _ops = { \
88 .read = sub## _ ##name## _read, \
89 .open = wl1271_open_file_generic, \
90 .llseek = generic_file_llseek, \
91};
92
93#define DEBUGFS_FWSTATS_ADD(sub, name) \
94 DEBUGFS_ADD(sub## _ ##name, stats)
95
96static void wl1271_debugfs_update_stats(struct wl1271 *wl)
97{
98 int ret;
99
100 mutex_lock(&wl->mutex);
101
102 ret = wl1271_ps_elp_wakeup(wl, false);
103 if (ret < 0)
104 goto out;
105
106 if (wl->state == WL1271_STATE_ON &&
107 time_after(jiffies, wl->stats.fw_stats_update +
108 msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
109 wl1271_acx_statistics(wl, wl->stats.fw_stats);
110 wl->stats.fw_stats_update = jiffies;
111 }
112
113 wl1271_ps_elp_sleep(wl);
114
115out:
116 mutex_unlock(&wl->mutex);
117}
118
119static int wl1271_open_file_generic(struct inode *inode, struct file *file)
120{
121 file->private_data = inode->i_private;
122 return 0;
123}
124
125DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
126
127DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
128DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
129DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
130DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
131DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
132DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
133DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
134DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
135
136DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
137DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
138DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
139DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
140
141DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
142DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
143DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
144DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
145DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
146DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
147DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
148DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
149DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
150DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
151DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
152DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
153DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
154DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
155DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
156DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
157DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
158DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
159
160DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
161DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
162/* skipping wep.reserved */
163DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
164DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
165DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
166DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
167
168DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
169DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
170DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
171DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
172DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
173DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
174DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
175DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
176DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
177DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
178DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
179DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
180/* skipping cont_miss_bcns_spread for now */
181DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
182
183DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
184DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
185
186DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
187DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
188DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
189DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
190DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
191DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
192
193DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
194DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
195DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
196DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
197DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
198DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
199DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
200DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
201
202DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
203DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
204DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
205DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
206DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
207DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
208DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
209
210DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
211DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
212DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data, "%u");
213DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
214DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
215
216DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
217DEBUGFS_READONLY_FILE(excessive_retries, "%u",
218 wl->stats.excessive_retries);
219
220static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
221 size_t count, loff_t *ppos)
222{
223 struct wl1271 *wl = file->private_data;
224 u32 queue_len;
225 char buf[20];
226 int res;
227
228 queue_len = wl->tx_queue_count;
229
230 res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
231 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
232}
233
234static const struct file_operations tx_queue_len_ops = {
235 .read = tx_queue_len_read,
236 .open = wl1271_open_file_generic,
237 .llseek = default_llseek,
238};
239
240static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
241 size_t count, loff_t *ppos)
242{
243 struct wl1271 *wl = file->private_data;
244 bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
245
246 int res;
247 char buf[10];
248
249 res = scnprintf(buf, sizeof(buf), "%d\n", state);
250
251 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
252}
253
254static ssize_t gpio_power_write(struct file *file,
255 const char __user *user_buf,
256 size_t count, loff_t *ppos)
257{
258 struct wl1271 *wl = file->private_data;
259 char buf[10];
260 size_t len;
261 unsigned long value;
262 int ret;
263
264 mutex_lock(&wl->mutex);
265
266 len = min(count, sizeof(buf) - 1);
267 if (copy_from_user(buf, user_buf, len)) {
268 ret = -EFAULT;
269 goto out;
270 }
271 buf[len] = '\0';
272
273 ret = strict_strtoul(buf, 0, &value);
274 if (ret < 0) {
275 wl1271_warning("illegal value in gpio_power");
276 goto out;
277 }
278
279 if (value)
280 wl1271_power_on(wl);
281 else
282 wl1271_power_off(wl);
283
284out:
285 mutex_unlock(&wl->mutex);
286 return count;
287}
288
289static const struct file_operations gpio_power_ops = {
290 .read = gpio_power_read,
291 .write = gpio_power_write,
292 .open = wl1271_open_file_generic,
293 .llseek = default_llseek,
294};
295
296static int wl1271_debugfs_add_files(struct wl1271 *wl)
297{
298 int ret = 0;
299 struct dentry *entry, *stats;
300
301 stats = debugfs_create_dir("fw-statistics", wl->rootdir);
302 if (!stats || IS_ERR(stats)) {
303 entry = stats;
304 goto err;
305 }
306
307 DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
308
309 DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
310 DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
311 DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
312 DEBUGFS_FWSTATS_ADD(rx, dropped);
313 DEBUGFS_FWSTATS_ADD(rx, fcs_err);
314 DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
315 DEBUGFS_FWSTATS_ADD(rx, path_reset);
316 DEBUGFS_FWSTATS_ADD(rx, reset_counter);
317
318 DEBUGFS_FWSTATS_ADD(dma, rx_requested);
319 DEBUGFS_FWSTATS_ADD(dma, rx_errors);
320 DEBUGFS_FWSTATS_ADD(dma, tx_requested);
321 DEBUGFS_FWSTATS_ADD(dma, tx_errors);
322
323 DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
324 DEBUGFS_FWSTATS_ADD(isr, fiqs);
325 DEBUGFS_FWSTATS_ADD(isr, rx_headers);
326 DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
327 DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
328 DEBUGFS_FWSTATS_ADD(isr, irqs);
329 DEBUGFS_FWSTATS_ADD(isr, tx_procs);
330 DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
331 DEBUGFS_FWSTATS_ADD(isr, dma0_done);
332 DEBUGFS_FWSTATS_ADD(isr, dma1_done);
333 DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
334 DEBUGFS_FWSTATS_ADD(isr, commands);
335 DEBUGFS_FWSTATS_ADD(isr, rx_procs);
336 DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
337 DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
338 DEBUGFS_FWSTATS_ADD(isr, pci_pm);
339 DEBUGFS_FWSTATS_ADD(isr, wakeups);
340 DEBUGFS_FWSTATS_ADD(isr, low_rssi);
341
342 DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
343 DEBUGFS_FWSTATS_ADD(wep, default_key_count);
344 /* skipping wep.reserved */
345 DEBUGFS_FWSTATS_ADD(wep, key_not_found);
346 DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
347 DEBUGFS_FWSTATS_ADD(wep, packets);
348 DEBUGFS_FWSTATS_ADD(wep, interrupt);
349
350 DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
351 DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
352 DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
353 DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
354 DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
355 DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
356 DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
357 DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
358 DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
359 DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
360 DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
361 DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
362 /* skipping cont_miss_bcns_spread for now */
363 DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
364
365 DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
366 DEBUGFS_FWSTATS_ADD(mic, calc_failure);
367
368 DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
369 DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
370 DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
371 DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
372 DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
373 DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
374
375 DEBUGFS_FWSTATS_ADD(event, heart_beat);
376 DEBUGFS_FWSTATS_ADD(event, calibration);
377 DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
378 DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
379 DEBUGFS_FWSTATS_ADD(event, rx_pool);
380 DEBUGFS_FWSTATS_ADD(event, oom_late);
381 DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
382 DEBUGFS_FWSTATS_ADD(event, tx_stuck);
383
384 DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
385 DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
386 DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
387 DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
388 DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
389 DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
390 DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
391
392 DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
393 DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
394 DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
395 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
396 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
397
398 DEBUGFS_ADD(tx_queue_len, wl->rootdir);
399 DEBUGFS_ADD(retry_count, wl->rootdir);
400 DEBUGFS_ADD(excessive_retries, wl->rootdir);
401
402 DEBUGFS_ADD(gpio_power, wl->rootdir);
403
404 entry = debugfs_create_x32("debug_level", 0600, wl->rootdir,
405 &wl12xx_debug_level);
406 if (!entry || IS_ERR(entry))
407 goto err;
408
409 return 0;
410
411err:
412 if (IS_ERR(entry))
413 ret = PTR_ERR(entry);
414 else
415 ret = -ENOMEM;
416
417 return ret;
418}
419
420void wl1271_debugfs_reset(struct wl1271 *wl)
421{
422 if (!wl->rootdir)
423 return;
424
425 memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
426 wl->stats.retry_count = 0;
427 wl->stats.excessive_retries = 0;
428}
429
430int wl1271_debugfs_init(struct wl1271 *wl)
431{
432 int ret;
433
434 wl->rootdir = debugfs_create_dir(KBUILD_MODNAME,
435 wl->hw->wiphy->debugfsdir);
436
437 if (IS_ERR(wl->rootdir)) {
438 ret = PTR_ERR(wl->rootdir);
439 wl->rootdir = NULL;
440 goto err;
441 }
442
443 wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
444 GFP_KERNEL);
445
446 if (!wl->stats.fw_stats) {
447 ret = -ENOMEM;
448 goto err_fw;
449 }
450
451 wl->stats.fw_stats_update = jiffies;
452
453 ret = wl1271_debugfs_add_files(wl);
454
455 if (ret < 0)
456 goto err_file;
457
458 return 0;
459
460err_file:
461 kfree(wl->stats.fw_stats);
462 wl->stats.fw_stats = NULL;
463
464err_fw:
465 debugfs_remove_recursive(wl->rootdir);
466 wl->rootdir = NULL;
467
468err:
469 return ret;
470}
471
472void wl1271_debugfs_exit(struct wl1271 *wl)
473{
474 kfree(wl->stats.fw_stats);
475 wl->stats.fw_stats = NULL;
476
477 debugfs_remove_recursive(wl->rootdir);
478 wl->rootdir = NULL;
479
480}
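For reference, a single DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u") invocation above expands to roughly the following (whitespace simplified):

	static ssize_t tx_internal_desc_overflow_read(struct file *file,
						      char __user *userbuf,
						      size_t count, loff_t *ppos)
	{
		struct wl1271 *wl = file->private_data;

		wl1271_debugfs_update_stats(wl);

		return wl1271_format_buffer(userbuf, count, ppos, "%u\n",
				wl->stats.fw_stats->tx.internal_desc_overflow);
	}

	static const struct file_operations tx_internal_desc_overflow_ops = {
		.read = tx_internal_desc_overflow_read,
		.open = wl1271_open_file_generic,
		.llseek = generic_file_llseek,
	};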
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.h b/drivers/net/wireless/wl12xx/debugfs.h
index 00a45b2669ad..254c5b292cf6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.h
+++ b/drivers/net/wireless/wl12xx/debugfs.h
@@ -21,10 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef WL1271_DEBUGFS_H 24#ifndef __DEBUGFS_H__
25#define WL1271_DEBUGFS_H 25#define __DEBUGFS_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28 28
29int wl1271_debugfs_init(struct wl1271 *wl); 29int wl1271_debugfs_init(struct wl1271 *wl);
30void wl1271_debugfs_exit(struct wl1271 *wl); 30void wl1271_debugfs_exit(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/event.c
index 7b3f50382963..f9146f5242fb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/event.c
@@ -21,12 +21,12 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl1271.h" 24#include "wl12xx.h"
25#include "wl1271_reg.h" 25#include "reg.h"
26#include "wl1271_io.h" 26#include "io.h"
27#include "wl1271_event.h" 27#include "event.h"
28#include "wl1271_ps.h" 28#include "ps.h"
29#include "wl1271_scan.h" 29#include "scan.h"
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31 31
32void wl1271_pspoll_work(struct work_struct *work) 32void wl1271_pspoll_work(struct work_struct *work)
@@ -134,8 +134,6 @@ static int wl1271_event_ps_report(struct wl1271 *wl,
134 134
135 /* go to extremely low power mode */ 135 /* go to extremely low power mode */
136 wl1271_ps_elp_sleep(wl); 136 wl1271_ps_elp_sleep(wl);
137 if (ret < 0)
138 break;
139 break; 137 break;
140 case EVENT_EXIT_POWER_SAVE_FAIL: 138 case EVENT_EXIT_POWER_SAVE_FAIL:
141 wl1271_debug(DEBUG_PSM, "PSM exit failed"); 139 wl1271_debug(DEBUG_PSM, "PSM exit failed");
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/event.h
index e4751667cf5e..6cce0143adb5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/event.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_EVENT_H__ 25#ifndef __EVENT_H__
26#define __WL1271_EVENT_H__ 26#define __EVENT_H__
27 27
28/* 28/*
29 * Mbox events 29 * Mbox events
diff --git a/drivers/net/wireless/wl12xx/wl1271_ini.h b/drivers/net/wireless/wl12xx/ini.h
index 2313047d4015..c330a2583dfd 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ini.h
+++ b/drivers/net/wireless/wl12xx/ini.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_INI_H__ 24#ifndef __INI_H__
25#define __WL1271_INI_H__ 25#define __INI_H__
26 26
27#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16 27#define WL1271_INI_MAX_SMART_REFLEX_PARAM 16
28 28
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/init.c
index 8044bba70ee7..785a5304bfc4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/init.c
@@ -25,11 +25,11 @@
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27 27
28#include "wl1271_init.h" 28#include "init.h"
29#include "wl12xx_80211.h" 29#include "wl12xx_80211.h"
30#include "wl1271_acx.h" 30#include "acx.h"
31#include "wl1271_cmd.h" 31#include "cmd.h"
32#include "wl1271_reg.h" 32#include "reg.h"
33 33
34static int wl1271_init_hwenc_config(struct wl1271 *wl) 34static int wl1271_init_hwenc_config(struct wl1271 *wl)
35{ 35{
@@ -53,18 +53,16 @@ static int wl1271_init_hwenc_config(struct wl1271 *wl)
53int wl1271_init_templates_config(struct wl1271 *wl) 53int wl1271_init_templates_config(struct wl1271 *wl)
54{ 54{
55 int ret, i; 55 int ret, i;
56 size_t size;
57 56
58 /* send empty templates for fw memory reservation */ 57 /* send empty templates for fw memory reservation */
59 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL, 58 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, NULL,
60 sizeof(struct wl12xx_probe_req_template), 59 WL1271_CMD_TEMPL_MAX_SIZE,
61 0, WL1271_RATE_AUTOMATIC); 60 0, WL1271_RATE_AUTOMATIC);
62 if (ret < 0) 61 if (ret < 0)
63 return ret; 62 return ret;
64 63
65 size = sizeof(struct wl12xx_probe_req_template);
66 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5, 64 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
67 NULL, size, 0, 65 NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
68 WL1271_RATE_AUTOMATIC); 66 WL1271_RATE_AUTOMATIC);
69 if (ret < 0) 67 if (ret < 0)
70 return ret; 68 return ret;
@@ -102,6 +100,13 @@ int wl1271_init_templates_config(struct wl1271 *wl)
102 if (ret < 0) 100 if (ret < 0)
103 return ret; 101 return ret;
104 102
103 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_ARP_RSP, NULL,
104 sizeof
105 (struct wl12xx_arp_rsp_template),
106 0, WL1271_RATE_AUTOMATIC);
107 if (ret < 0)
108 return ret;
109
105 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) { 110 for (i = 0; i < CMD_TEMPL_KLV_IDX_MAX; i++) {
106 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL, 111 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_KLV, NULL,
107 WL1271_CMD_TEMPL_MAX_SIZE, i, 112 WL1271_CMD_TEMPL_MAX_SIZE, i,
@@ -290,7 +295,7 @@ int wl1271_hw_init(struct wl1271 *wl)
290 goto out_free_memmap; 295 goto out_free_memmap;
291 296
292 /* Default fragmentation threshold */ 297 /* Default fragmentation threshold */
293 ret = wl1271_acx_frag_threshold(wl); 298 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
294 if (ret < 0) 299 if (ret < 0)
295 goto out_free_memmap; 300 goto out_free_memmap;
296 301
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/init.h
index bc26f8c53b91..7762421f8602 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/init.h
@@ -21,10 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_INIT_H__ 24#ifndef __INIT_H__
25#define __WL1271_INIT_H__ 25#define __INIT_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28 28
29int wl1271_hw_init_power_auth(struct wl1271 *wl); 29int wl1271_hw_init_power_auth(struct wl1271 *wl);
30int wl1271_init_templates_config(struct wl1271 *wl); 30int wl1271_init_templates_config(struct wl1271 *wl);
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.c b/drivers/net/wireless/wl12xx/io.c
index c8759acef131..d557f73e7c19 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.c
+++ b/drivers/net/wireless/wl12xx/io.c
@@ -26,9 +26,9 @@
26#include <linux/crc7.h> 26#include <linux/crc7.h>
27#include <linux/spi/spi.h> 27#include <linux/spi/spi.h>
28 28
29#include "wl1271.h" 29#include "wl12xx.h"
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "wl1271_io.h" 31#include "io.h"
32 32
33#define OCP_CMD_LOOP 32 33#define OCP_CMD_LOOP 32
34 34
@@ -113,6 +113,7 @@ int wl1271_set_partition(struct wl1271 *wl,
113 113
114 return 0; 114 return 0;
115} 115}
116EXPORT_SYMBOL_GPL(wl1271_set_partition);
116 117
117void wl1271_io_reset(struct wl1271 *wl) 118void wl1271_io_reset(struct wl1271 *wl)
118{ 119{
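[Note] wl1271_set_partition() gains EXPORT_SYMBOL_GPL here so that code built as a separate module (the new sdio_test driver later in this series) can call it. A minimal sketch of that provider/consumer pattern with hypothetical names, not the driver's actual layout:

    /* provider.c -- exports a setup helper so other GPL modules can reuse it */
    #include <linux/module.h>

    int example_set_partition(unsigned int start, unsigned int size)
    {
        /* program the chip's address-translation window here */
        return 0;
    }
    EXPORT_SYMBOL_GPL(example_set_partition);

    MODULE_LICENSE("GPL");

    /* consumer.c -- a separate module resolving the symbol at load time */
    #include <linux/module.h>

    extern int example_set_partition(unsigned int start, unsigned int size);

    static int __init consumer_init(void)
    {
        return example_set_partition(0x0, 0x8800);
    }
    module_init(consumer_init);

    static void __exit consumer_exit(void) { }
    module_exit(consumer_exit);

    MODULE_LICENSE("GPL");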
diff --git a/drivers/net/wireless/wl12xx/wl1271_io.h b/drivers/net/wireless/wl12xx/io.h
index c1f92e65ded0..844b32b170bb 100644
--- a/drivers/net/wireless/wl12xx/wl1271_io.h
+++ b/drivers/net/wireless/wl12xx/io.h
@@ -22,10 +22,10 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_IO_H__ 25#ifndef __IO_H__
26#define __WL1271_IO_H__ 26#define __IO_H__
27 27
28#include "wl1271_reg.h" 28#include "reg.h"
29 29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31 31
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/main.c
index 48a4b9961ae6..062247ef3ad2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/main.c
@@ -31,20 +31,20 @@
31#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33 33
34#include "wl1271.h" 34#include "wl12xx.h"
35#include "wl12xx_80211.h" 35#include "wl12xx_80211.h"
36#include "wl1271_reg.h" 36#include "reg.h"
37#include "wl1271_io.h" 37#include "io.h"
38#include "wl1271_event.h" 38#include "event.h"
39#include "wl1271_tx.h" 39#include "tx.h"
40#include "wl1271_rx.h" 40#include "rx.h"
41#include "wl1271_ps.h" 41#include "ps.h"
42#include "wl1271_init.h" 42#include "init.h"
43#include "wl1271_debugfs.h" 43#include "debugfs.h"
44#include "wl1271_cmd.h" 44#include "cmd.h"
45#include "wl1271_boot.h" 45#include "boot.h"
46#include "wl1271_testmode.h" 46#include "testmode.h"
47#include "wl1271_scan.h" 47#include "scan.h"
48 48
49#define WL1271_BOOT_RETRIES 3 49#define WL1271_BOOT_RETRIES 3
50 50
@@ -335,6 +335,28 @@ out:
335 return NOTIFY_OK; 335 return NOTIFY_OK;
336} 336}
337 337
338static int wl1271_reg_notify(struct wiphy *wiphy,
339 struct regulatory_request *request)
340{
341 struct ieee80211_supported_band *band;
342 struct ieee80211_channel *ch;
343 int i;
344
345 band = wiphy->bands[IEEE80211_BAND_5GHZ];
346 for (i = 0; i < band->n_channels; i++) {
347 ch = &band->channels[i];
348 if (ch->flags & IEEE80211_CHAN_DISABLED)
349 continue;
350
351 if (ch->flags & IEEE80211_CHAN_RADAR)
352 ch->flags |= IEEE80211_CHAN_NO_IBSS |
353 IEEE80211_CHAN_PASSIVE_SCAN;
354
355 }
356
357 return 0;
358}
359
338static void wl1271_conf_init(struct wl1271 *wl) 360static void wl1271_conf_init(struct wl1271 *wl)
339{ 361{
340 362
@@ -404,7 +426,7 @@ static int wl1271_plt_init(struct wl1271 *wl)
404 goto out_free_memmap; 426 goto out_free_memmap;
405 427
406 /* Default fragmentation threshold */ 428 /* Default fragmentation threshold */
407 ret = wl1271_acx_frag_threshold(wl); 429 ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold);
408 if (ret < 0) 430 if (ret < 0)
409 goto out_free_memmap; 431 goto out_free_memmap;
410 432
@@ -481,9 +503,9 @@ static void wl1271_fw_status(struct wl1271 *wl,
481 total += cnt; 503 total += cnt;
482 } 504 }
483 505
484 /* if more blocks are available now, schedule some tx work */ 506 /* if more blocks are available now, tx work can be scheduled */
485 if (total && !skb_queue_empty(&wl->tx_queue)) 507 if (total)
486 ieee80211_queue_work(wl->hw, &wl->tx_work); 508 clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
487 509
488 /* update the host-chipset time offset */ 510 /* update the host-chipset time offset */
489 getnstimeofday(&ts); 511 getnstimeofday(&ts);
@@ -529,6 +551,15 @@ static void wl1271_irq_work(struct work_struct *work)
529 551
530 intr &= WL1271_INTR_MASK; 552 intr &= WL1271_INTR_MASK;
531 553
554 if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
555 wl1271_error("watchdog interrupt received! "
556 "starting recovery.");
557 ieee80211_queue_work(wl->hw, &wl->recovery_work);
558
559 /* restarting the chip. ignore any other interrupt. */
560 goto out;
561 }
562
532 if (intr & WL1271_ACX_INTR_DATA) { 563 if (intr & WL1271_ACX_INTR_DATA) {
533 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 564 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
534 565
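[Note] On a firmware watchdog interrupt the work handler now queues recovery_work and skips the rest of the interrupt processing, since the chip is about to be restarted anyway. The shape of that hand-off, sketched as a small kernel-style fragment (names and interrupt bits assumed):

    #include <linux/workqueue.h>
    #include <linux/types.h>

    #define INTR_WATCHDOG 0x01
    #define INTR_DATA     0x02

    struct chip_ctx {
        struct work_struct recovery_work;
    };

    /* on a fatal watchdog interrupt, defer to the recovery worker and skip
     * all other handling -- the chip is going to be reset anyway */
    static void handle_intr(struct chip_ctx *ctx, u32 intr)
    {
        if (intr & INTR_WATCHDOG) {
            schedule_work(&ctx->recovery_work);
            return;
        }

        if (intr & INTR_DATA) {
            /* normal RX / TX-completion processing would run here */
        }
    }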
@@ -537,6 +568,16 @@ static void wl1271_irq_work(struct work_struct *work)
537 (wl->tx_results_count & 0xff)) 568 (wl->tx_results_count & 0xff))
538 wl1271_tx_complete(wl); 569 wl1271_tx_complete(wl);
539 570
571 /* Check if any tx blocks were freed */
572 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
573 wl->tx_queue_count) {
574 /*
575 * In order to avoid starvation of the TX path,
576 * call the work function directly.
577 */
578 wl1271_tx_work_locked(wl);
579 }
580
540 wl1271_rx(wl, wl->fw_status); 581 wl1271_rx(wl, wl->fw_status);
541 } 582 }
542 583
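[Note] Instead of always deferring TX to the workqueue, the interrupt path now calls wl1271_tx_work_locked() directly when the firmware has freed blocks and frames are still queued, so the TX pipe stays fed under load. A toy user-space model of that "clear the busy gate, then drain inline" idea (purely illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    /* toy model of the FW_TX_BUSY gating added in this patch */
    struct txctx {
        bool fw_tx_busy;   /* firmware has no free blocks */
        int queued;        /* frames waiting in the driver */
    };

    static void tx_work(struct txctx *c)
    {
        while (!c->fw_tx_busy && c->queued > 0) {
            c->queued--;   /* "hand one frame to the firmware" */
            printf("sent, %d left\n", c->queued);
        }
    }

    /* interrupt path: blocks were freed, so clear the gate and drain inline
     * rather than waiting for the workqueue to be scheduled */
    static void on_blocks_freed(struct txctx *c)
    {
        c->fw_tx_busy = false;
        if (c->queued)
            tx_work(c);
    }

    int main(void)
    {
        struct txctx c = { .fw_tx_busy = true, .queued = 3 };
        on_blocks_freed(&c);
        return 0;
    }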
@@ -850,30 +891,54 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
850 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb); 891 struct ieee80211_tx_info *txinfo = IEEE80211_SKB_CB(skb);
851 struct ieee80211_sta *sta = txinfo->control.sta; 892 struct ieee80211_sta *sta = txinfo->control.sta;
852 unsigned long flags; 893 unsigned long flags;
894 int q;
853 895
854 /* peek into the rates configured in the STA entry */ 896 /*
897 * peek into the rates configured in the STA entry.
 898  * The rates are configured after association. The first block handles
 899  * only the BG bits of sta_rate_set; the second block adds the HT (MCS)
 900  * rates when the station supports HT.
901 */
855 spin_lock_irqsave(&wl->wl_lock, flags); 902 spin_lock_irqsave(&wl->wl_lock, flags);
856 if (sta && sta->supp_rates[conf->channel->band] != wl->sta_rate_set) { 903 if (sta &&
904 (sta->supp_rates[conf->channel->band] !=
905 (wl->sta_rate_set & HW_BG_RATES_MASK))) {
857 wl->sta_rate_set = sta->supp_rates[conf->channel->band]; 906 wl->sta_rate_set = sta->supp_rates[conf->channel->band];
858 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags); 907 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
859 } 908 }
909
910#ifdef CONFIG_WL12XX_HT
911 if (sta &&
912 sta->ht_cap.ht_supported &&
913 ((wl->sta_rate_set >> HW_HT_RATES_OFFSET) !=
914 sta->ht_cap.mcs.rx_mask[0])) {
915 /* Clean MCS bits before setting them */
916 wl->sta_rate_set &= HW_BG_RATES_MASK;
917 wl->sta_rate_set |=
918 (sta->ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET);
919 set_bit(WL1271_FLAG_STA_RATES_CHANGED, &wl->flags);
920 }
921#endif
922 wl->tx_queue_count++;
860 spin_unlock_irqrestore(&wl->wl_lock, flags); 923 spin_unlock_irqrestore(&wl->wl_lock, flags);
861 924
862 /* queue the packet */ 925 /* queue the packet */
863 skb_queue_tail(&wl->tx_queue, skb); 926 q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
927 skb_queue_tail(&wl->tx_queue[q], skb);
864 928
865 /* 929 /*
866 * The chip specific setup must run before the first TX packet - 930 * The chip specific setup must run before the first TX packet -
867 * before that, the tx_work will not be initialized! 931 * before that, the tx_work will not be initialized!
868 */ 932 */
869 933
870 ieee80211_queue_work(wl->hw, &wl->tx_work); 934 if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
935 ieee80211_queue_work(wl->hw, &wl->tx_work);
871 936
872 /* 937 /*
873 * The workqueue is slow to process the tx_queue and we need stop 938 * The workqueue is slow to process the tx_queue and we need stop
874 * the queue here, otherwise the queue will get too long. 939 * the queue here, otherwise the queue will get too long.
875 */ 940 */
876 if (skb_queue_len(&wl->tx_queue) >= WL1271_TX_QUEUE_HIGH_WATERMARK) { 941 if (wl->tx_queue_count >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
877 wl1271_debug(DEBUG_TX, "op_tx: stopping queues"); 942 wl1271_debug(DEBUG_TX, "op_tx: stopping queues");
878 943
879 spin_lock_irqsave(&wl->wl_lock, flags); 944 spin_lock_irqsave(&wl->wl_lock, flags);
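[Note] op_tx now maps each skb's mac80211 queue index to one of NUM_TX_QUEUES per-AC skb queues, tracks pending frames in tx_queue_count for the high-watermark check, and packs the station's HT MCS mask above the legacy BG rate bits in sta_rate_set. A small standalone sketch of the queue mapping and bit packing, assuming the legacy rates occupy bits 0-15 (as the HW_BG_RATES_MASK / HW_HT_RATES_OFFSET names suggest):

    #include <stdint.h>
    #include <stdio.h>

    #define NUM_TX_QUEUES   4
    #define BG_RATES_MASK   0x0000ffffu   /* assumed: legacy rates in bits 0-15 */
    #define HT_RATES_OFFSET 16            /* assumed: MCS 0-7 mask above them   */

    /* mac80211 queue index -> driver AC queue (identity here; the driver's
     * wl1271_tx_get_queue() does its own mapping) */
    static int tx_get_queue(int mac80211_queue)
    {
        return mac80211_queue % NUM_TX_QUEUES;
    }

    static uint32_t pack_rates(uint32_t bg_rates, uint8_t mcs_rx_mask)
    {
        uint32_t set = bg_rates & BG_RATES_MASK;   /* clear any stale MCS bits */
        set |= (uint32_t)mcs_rx_mask << HT_RATES_OFFSET;
        return set;
    }

    int main(void)
    {
        printf("queue %d\n", tx_get_queue(2));
        printf("rate set 0x%08x\n", pack_rates(0x1ff, 0xff));
        return 0;
    }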
@@ -919,18 +984,19 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
919 struct wiphy *wiphy = hw->wiphy; 984 struct wiphy *wiphy = hw->wiphy;
920 int retries = WL1271_BOOT_RETRIES; 985 int retries = WL1271_BOOT_RETRIES;
921 int ret = 0; 986 int ret = 0;
987 bool booted = false;
922 988
923 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM", 989 wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
924 vif->type, vif->addr); 990 vif->type, vif->addr);
925 991
926 mutex_lock(&wl->mutex); 992 mutex_lock(&wl->mutex);
927 if (wl->vif) { 993 if (wl->vif) {
994 wl1271_debug(DEBUG_MAC80211,
995 "multiple vifs are not supported yet");
928 ret = -EBUSY; 996 ret = -EBUSY;
929 goto out; 997 goto out;
930 } 998 }
931 999
932 wl->vif = vif;
933
934 switch (vif->type) { 1000 switch (vif->type) {
935 case NL80211_IFTYPE_STATION: 1001 case NL80211_IFTYPE_STATION:
936 wl->bss_type = BSS_TYPE_STA_BSS; 1002 wl->bss_type = BSS_TYPE_STA_BSS;
@@ -968,15 +1034,8 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
968 if (ret < 0) 1034 if (ret < 0)
969 goto irq_disable; 1035 goto irq_disable;
970 1036
971 wl->state = WL1271_STATE_ON; 1037 booted = true;
972 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 1038 break;
973
974 /* update hw/fw version info in wiphy struct */
975 wiphy->hw_version = wl->chip.id;
976 strncpy(wiphy->fw_version, wl->chip.fw_ver,
977 sizeof(wiphy->fw_version));
978
979 goto out;
980 1039
981irq_disable: 1040irq_disable:
982 wl1271_disable_interrupts(wl); 1041 wl1271_disable_interrupts(wl);
@@ -994,8 +1053,31 @@ power_off:
994 wl1271_power_off(wl); 1053 wl1271_power_off(wl);
995 } 1054 }
996 1055
997 wl1271_error("firmware boot failed despite %d retries", 1056 if (!booted) {
998 WL1271_BOOT_RETRIES); 1057 wl1271_error("firmware boot failed despite %d retries",
1058 WL1271_BOOT_RETRIES);
1059 goto out;
1060 }
1061
1062 wl->vif = vif;
1063 wl->state = WL1271_STATE_ON;
1064 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
1065
1066 /* update hw/fw version info in wiphy struct */
1067 wiphy->hw_version = wl->chip.id;
1068 strncpy(wiphy->fw_version, wl->chip.fw_ver,
1069 sizeof(wiphy->fw_version));
1070
1071 /*
1072 * Now we know if 11a is supported (info from the NVS), so disable
1073 * 11a channels if not supported
1074 */
1075 if (!wl->enable_11a)
1076 wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;
1077
1078 wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
1079 wl->enable_11a ? "" : "not ");
1080
999out: 1081out:
1000 mutex_unlock(&wl->mutex); 1082 mutex_unlock(&wl->mutex);
1001 1083
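[Note] add_interface is restructured so the boot-retry loop only sets a booted flag; the success-path work (assigning wl->vif, entering STATE_ON, publishing the firmware version, dropping 5 GHz channels when 11a is unsupported) runs once after the loop, and failure collapses into a single "if (!booted)" check. A compact model of that retry-then-commit shape (names illustrative):

    #include <stdbool.h>
    #include <stdio.h>

    #define BOOT_RETRIES 3

    static bool try_boot(int attempt)
    {
        return attempt == 2;   /* pretend the chip comes up on the third try */
    }

    static int add_interface(void)
    {
        bool booted = false;
        int i;

        for (i = 0; i < BOOT_RETRIES; i++) {
            if (try_boot(i)) {
                booted = true;
                break;
            }
            /* power-cycle / cleanup between attempts would go here */
        }

        if (!booted) {
            fprintf(stderr, "boot failed despite %d retries\n", BOOT_RETRIES);
            return -1;
        }

        /* commit state only once, on the success path */
        printf("interface up after %d attempt(s)\n", i + 1);
        return 0;
    }

    int main(void)
    {
        return add_interface() ? 1 : 0;
    }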
@@ -1025,6 +1107,7 @@ static void __wl1271_op_remove_interface(struct wl1271 *wl)
1025 wl->scan.state = WL1271_SCAN_STATE_IDLE; 1107 wl->scan.state = WL1271_SCAN_STATE_IDLE;
1026 kfree(wl->scan.scanned_ch); 1108 kfree(wl->scan.scanned_ch);
1027 wl->scan.scanned_ch = NULL; 1109 wl->scan.scanned_ch = NULL;
1110 wl->scan.req = NULL;
1028 ieee80211_scan_completed(wl->hw, true); 1111 ieee80211_scan_completed(wl->hw, true);
1029 } 1112 }
1030 1113
@@ -1088,10 +1171,16 @@ static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
1088 struct wl1271 *wl = hw->priv; 1171 struct wl1271 *wl = hw->priv;
1089 1172
1090 mutex_lock(&wl->mutex); 1173 mutex_lock(&wl->mutex);
1091 WARN_ON(wl->vif != vif); 1174 /*
1092 __wl1271_op_remove_interface(wl); 1175 * wl->vif can be null here if someone shuts down the interface
1093 mutex_unlock(&wl->mutex); 1176 * just when hardware recovery has been started.
1177 */
1178 if (wl->vif) {
1179 WARN_ON(wl->vif != vif);
1180 __wl1271_op_remove_interface(wl);
1181 }
1094 1182
1183 mutex_unlock(&wl->mutex);
1095 cancel_work_sync(&wl->recovery_work); 1184 cancel_work_sync(&wl->recovery_work);
1096} 1185}
1097 1186
@@ -1312,8 +1401,10 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
1312 1401
1313 mutex_lock(&wl->mutex); 1402 mutex_lock(&wl->mutex);
1314 1403
1315 if (unlikely(wl->state == WL1271_STATE_OFF)) 1404 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1405 ret = -EAGAIN;
1316 goto out; 1406 goto out;
1407 }
1317 1408
1318 ret = wl1271_ps_elp_wakeup(wl, false); 1409 ret = wl1271_ps_elp_wakeup(wl, false);
1319 if (ret < 0) 1410 if (ret < 0)
@@ -1536,6 +1627,11 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
1536 1627
1537 mutex_lock(&wl->mutex); 1628 mutex_lock(&wl->mutex);
1538 1629
1630 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1631 ret = -EAGAIN;
1632 goto out_unlock;
1633 }
1634
1539 ret = wl1271_ps_elp_wakeup(wl, false); 1635 ret = wl1271_ps_elp_wakeup(wl, false);
1540 if (ret < 0) 1636 if (ret < 0)
1541 goto out_unlock; 1637 goto out_unlock;
@@ -1645,6 +1741,16 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
1645 1741
1646 mutex_lock(&wl->mutex); 1742 mutex_lock(&wl->mutex);
1647 1743
1744 if (wl->state == WL1271_STATE_OFF) {
1745 /*
1746 * We cannot return -EBUSY here because cfg80211 will expect
1747 * a call to ieee80211_scan_completed if we do - in this case
1748 * there won't be any call.
1749 */
1750 ret = -EAGAIN;
1751 goto out;
1752 }
1753
1648 ret = wl1271_ps_elp_wakeup(wl, false); 1754 ret = wl1271_ps_elp_wakeup(wl, false);
1649 if (ret < 0) 1755 if (ret < 0)
1650 goto out; 1756 goto out;
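[Note] hw_scan, and several other callbacks in this patch, gain the same preamble: take the driver mutex, fail fast with -EAGAIN if the chip is off (for example mid-recovery), wake the chip out of ELP, do the work, let it sleep, unlock. A condensed user-space model of that shared guard pattern, with stubbed helpers standing in for the ELP calls:

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct wldev {
        pthread_mutex_t lock;
        bool powered_on;
    };

    static int  elp_wakeup(struct wldev *d) { return d->powered_on ? 0 : -EIO; }
    static void elp_sleep(struct wldev *d)  { (void)d; }

    static int op_example(struct wldev *d)
    {
        int ret;

        pthread_mutex_lock(&d->lock);

        if (!d->powered_on) {
            ret = -EAGAIN;   /* chip is down (e.g. mid-recovery): retry later */
            goto out;
        }

        ret = elp_wakeup(d);
        if (ret < 0)
            goto out;

        /* ... issue the firmware command here ... */

        elp_sleep(d);
    out:
        pthread_mutex_unlock(&d->lock);
        return ret;
    }

    int main(void)
    {
        struct wldev dev = { .lock = PTHREAD_MUTEX_INITIALIZER, .powered_on = false };
        return op_example(&dev) == -EAGAIN ? 0 : 1;
    }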
@@ -1659,6 +1765,34 @@ out:
1659 return ret; 1765 return ret;
1660} 1766}
1661 1767
1768static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
1769{
1770 struct wl1271 *wl = hw->priv;
1771 int ret = 0;
1772
1773 mutex_lock(&wl->mutex);
1774
1775 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1776 ret = -EAGAIN;
1777 goto out;
1778 }
1779
1780 ret = wl1271_ps_elp_wakeup(wl, false);
1781 if (ret < 0)
1782 goto out;
1783
1784 ret = wl1271_acx_frag_threshold(wl, (u16)value);
1785 if (ret < 0)
1786 wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
1787
1788 wl1271_ps_elp_sleep(wl);
1789
1790out:
1791 mutex_unlock(&wl->mutex);
1792
1793 return ret;
1794}
1795
1662static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 1796static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1663{ 1797{
1664 struct wl1271 *wl = hw->priv; 1798 struct wl1271 *wl = hw->priv;
@@ -1666,8 +1800,10 @@ static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
1666 1800
1667 mutex_lock(&wl->mutex); 1801 mutex_lock(&wl->mutex);
1668 1802
1669 if (unlikely(wl->state == WL1271_STATE_OFF)) 1803 if (unlikely(wl->state == WL1271_STATE_OFF)) {
1804 ret = -EAGAIN;
1670 goto out; 1805 goto out;
1806 }
1671 1807
1672 ret = wl1271_ps_elp_wakeup(wl, false); 1808 ret = wl1271_ps_elp_wakeup(wl, false);
1673 if (ret < 0) 1809 if (ret < 0)
@@ -1685,21 +1821,21 @@ out:
1685 return ret; 1821 return ret;
1686} 1822}
1687 1823
1688static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *beacon) 1824static void wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb,
1825 int offset)
1689{ 1826{
1690 u8 *ptr = beacon->data + 1827 u8 *ptr = skb->data + offset;
1691 offsetof(struct ieee80211_mgmt, u.beacon.variable);
1692 1828
1693 /* find the location of the ssid in the beacon */ 1829 /* find the location of the ssid in the beacon */
1694 while (ptr < beacon->data + beacon->len) { 1830 while (ptr < skb->data + skb->len) {
1695 if (ptr[0] == WLAN_EID_SSID) { 1831 if (ptr[0] == WLAN_EID_SSID) {
1696 wl->ssid_len = ptr[1]; 1832 wl->ssid_len = ptr[1];
1697 memcpy(wl->ssid, ptr+2, wl->ssid_len); 1833 memcpy(wl->ssid, ptr+2, wl->ssid_len);
1698 return; 1834 return;
1699 } 1835 }
1700 ptr += ptr[1]; 1836 ptr += (ptr[1] + 2);
1701 } 1837 }
1702 wl1271_error("ad-hoc beacon template has no SSID!\n"); 1838 wl1271_error("No SSID in IEs!\n");
1703} 1839}
1704 1840
1705static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 1841static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
@@ -1709,6 +1845,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
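[Note] wl1271_ssid_set is generalized to take an IE offset (so it works for both beacons and probe requests) and its element walk is fixed: each 802.11 information element is a tag byte, a length byte, and then length bytes of data, so the cursor must advance by ptr[1] + 2, not ptr[1]. A standalone version of the corrected walk:

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    #define WLAN_EID_SSID 0

    /* walk 802.11 information elements: each is | id | len | len bytes | */
    static int find_ssid(const uint8_t *ies, size_t len, char *out, size_t outlen)
    {
        const uint8_t *ptr = ies, *end = ies + len;

        while (ptr + 2 <= end && ptr + 2 + ptr[1] <= end) {
            if (ptr[0] == WLAN_EID_SSID) {
                size_t n = ptr[1] < outlen - 1 ? ptr[1] : outlen - 1;
                memcpy(out, ptr + 2, n);
                out[n] = '\0';
                return 0;
            }
            ptr += ptr[1] + 2;   /* skip the id and length bytes too (the fix) */
        }
        return -1;
    }

    int main(void)
    {
        /* a supported-rates IE followed by an SSID IE */
        const uint8_t ies[] = { 1, 2, 0x82, 0x84, 0, 4, 't', 'e', 's', 't' };
        char ssid[33];

        if (find_ssid(ies, sizeof(ies), ssid, sizeof(ssid)) == 0)
            printf("ssid: %s\n", ssid);
        return 0;
    }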
1709{ 1845{
1710 enum wl1271_cmd_ps_mode mode; 1846 enum wl1271_cmd_ps_mode mode;
1711 struct wl1271 *wl = hw->priv; 1847 struct wl1271 *wl = hw->priv;
1848 struct ieee80211_sta *sta = ieee80211_find_sta(vif, bss_conf->bssid);
1712 bool do_join = false; 1849 bool do_join = false;
1713 bool set_assoc = false; 1850 bool set_assoc = false;
1714 int ret; 1851 int ret;
@@ -1717,6 +1854,9 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1717 1854
1718 mutex_lock(&wl->mutex); 1855 mutex_lock(&wl->mutex);
1719 1856
1857 if (unlikely(wl->state == WL1271_STATE_OFF))
1858 goto out;
1859
1720 ret = wl1271_ps_elp_wakeup(wl, false); 1860 ret = wl1271_ps_elp_wakeup(wl, false);
1721 if (ret < 0) 1861 if (ret < 0)
1722 goto out; 1862 goto out;
@@ -1738,8 +1878,11 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1738 1878
1739 if (beacon) { 1879 if (beacon) {
1740 struct ieee80211_hdr *hdr; 1880 struct ieee80211_hdr *hdr;
1881 int ieoffset = offsetof(struct ieee80211_mgmt,
1882 u.beacon.variable);
1883
1884 wl1271_ssid_set(wl, beacon, ieoffset);
1741 1885
1742 wl1271_ssid_set(wl, beacon);
1743 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 1886 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
1744 beacon->data, 1887 beacon->data,
1745 beacon->len, 0, 1888 beacon->len, 0,
@@ -1819,6 +1962,7 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1819 if (changed & BSS_CHANGED_ASSOC) { 1962 if (changed & BSS_CHANGED_ASSOC) {
1820 if (bss_conf->assoc) { 1963 if (bss_conf->assoc) {
1821 u32 rates; 1964 u32 rates;
1965 int ieoffset;
1822 wl->aid = bss_conf->aid; 1966 wl->aid = bss_conf->aid;
1823 set_assoc = true; 1967 set_assoc = true;
1824 1968
@@ -1847,13 +1991,13 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1847 goto out_sleep; 1991 goto out_sleep;
1848 1992
1849 /* 1993 /*
1850 * The SSID is intentionally set to NULL here - the 1994 * Get a template for hardware connection maintenance
1851 * firmware will set the probe request with a
1852 * broadcast SSID regardless of what we set in the
1853 * template.
1854 */ 1995 */
1855 ret = wl1271_cmd_build_probe_req(wl, NULL, 0, 1996 dev_kfree_skb(wl->probereq);
1856 NULL, 0, wl->band); 1997 wl->probereq = wl1271_cmd_build_ap_probe_req(wl, NULL);
1998 ieoffset = offsetof(struct ieee80211_mgmt,
1999 u.probe_req.variable);
2000 wl1271_ssid_set(wl, wl->probereq, ieoffset);
1857 2001
1858 /* enable the connection monitoring feature */ 2002 /* enable the connection monitoring feature */
1859 ret = wl1271_acx_conn_monit_params(wl, true); 2003 ret = wl1271_acx_conn_monit_params(wl, true);
@@ -1876,6 +2020,10 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1876 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags); 2020 clear_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);
1877 wl->aid = 0; 2021 wl->aid = 0;
1878 2022
2023 /* free probe-request template */
2024 dev_kfree_skb(wl->probereq);
2025 wl->probereq = NULL;
2026
1879 /* re-enable dynamic ps - just in case */ 2027 /* re-enable dynamic ps - just in case */
1880 ieee80211_enable_dyn_ps(wl->vif); 2028 ieee80211_enable_dyn_ps(wl->vif);
1881 2029
@@ -1891,9 +2039,12 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1891 2039
1892 /* Disable the keep-alive feature */ 2040 /* Disable the keep-alive feature */
1893 ret = wl1271_acx_keep_alive_mode(wl, false); 2041 ret = wl1271_acx_keep_alive_mode(wl, false);
1894
1895 if (ret < 0) 2042 if (ret < 0)
1896 goto out_sleep; 2043 goto out_sleep;
2044
2045 /* restore the bssid filter and go to dummy bssid */
2046 wl1271_unjoin(wl);
2047 wl1271_dummy_join(wl);
1897 } 2048 }
1898 2049
1899 } 2050 }
@@ -1927,14 +2078,61 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1927 } 2078 }
1928 } 2079 }
1929 2080
2081 /*
2082 * Takes care of: New association with HT enable,
2083 * HT information change in beacon.
2084 */
2085 if (sta &&
2086 (changed & BSS_CHANGED_HT) &&
2087 (bss_conf->channel_type != NL80211_CHAN_NO_HT)) {
2088 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, true);
2089 if (ret < 0) {
2090 wl1271_warning("Set ht cap true failed %d", ret);
2091 goto out_sleep;
2092 }
2093 ret = wl1271_acx_set_ht_information(wl,
2094 bss_conf->ht_operation_mode);
2095 if (ret < 0) {
2096 wl1271_warning("Set ht information failed %d", ret);
2097 goto out_sleep;
2098 }
2099 }
2100 /*
2101 * Takes care of: New association without HT,
2102 * Disassociation.
2103 */
2104 else if (sta && (changed & BSS_CHANGED_ASSOC)) {
2105 ret = wl1271_acx_set_ht_capabilities(wl, &sta->ht_cap, false);
2106 if (ret < 0) {
2107 wl1271_warning("Set ht cap false failed %d", ret);
2108 goto out_sleep;
2109 }
2110 }
2111
1930 if (changed & BSS_CHANGED_ARP_FILTER) { 2112 if (changed & BSS_CHANGED_ARP_FILTER) {
1931 __be32 addr = bss_conf->arp_addr_list[0]; 2113 __be32 addr = bss_conf->arp_addr_list[0];
1932 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS); 2114 WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
1933 2115
1934 if (bss_conf->arp_addr_cnt == 1 && bss_conf->arp_filter_enabled) 2116 if (bss_conf->arp_addr_cnt == 1 &&
1935 ret = wl1271_acx_arp_ip_filter(wl, true, addr); 2117 bss_conf->arp_filter_enabled) {
1936 else 2118 /*
1937 ret = wl1271_acx_arp_ip_filter(wl, false, addr); 2119 * The template should have been configured only upon
2120  * association. However, it seems that the correct IP
2121  * isn't always set when sending, so we have to
2122  * reconfigure the template on every IP change.
2123 */
2124 ret = wl1271_cmd_build_arp_rsp(wl, addr);
2125 if (ret < 0) {
2126 wl1271_warning("build arp rsp failed: %d", ret);
2127 goto out_sleep;
2128 }
2129
2130 ret = wl1271_acx_arp_ip_filter(wl,
2131 (ACX_ARP_FILTER_ARP_FILTERING |
2132 ACX_ARP_FILTER_AUTO_ARP),
2133 addr);
2134 } else
2135 ret = wl1271_acx_arp_ip_filter(wl, 0, addr);
1938 2136
1939 if (ret < 0) 2137 if (ret < 0)
1940 goto out_sleep; 2138 goto out_sleep;
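[Note] With an IPv4 address configured, the driver now rebuilds the firmware ARP-response template on every address change and enables both ARP filtering and auto-ARP (the firmware answers ARP requests on its own); with no address it passes 0 to disable filtering. For orientation, a sketch of the ARP reply body such a template carries, using the standard RFC 826 field layout; the exact 802.11/LLC framing around it is firmware specific and not shown:

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>
    #include <arpa/inet.h>

    struct arp_reply {
        uint16_t hw_type;      /* 1 = Ethernet */
        uint16_t proto_type;   /* 0x0800 = IPv4 */
        uint8_t  hw_len;       /* 6 */
        uint8_t  proto_len;    /* 4 */
        uint16_t opcode;       /* 2 = reply */
        uint8_t  sender_mac[6];
        uint8_t  sender_ip[4];
        uint8_t  target_mac[6];
        uint8_t  target_ip[4];
    } __attribute__((packed));

    static void build_arp_reply(struct arp_reply *r, const uint8_t mac[6],
                                const uint8_t ip[4])
    {
        memset(r, 0, sizeof(*r));
        r->hw_type    = htons(1);
        r->proto_type = htons(0x0800);
        r->hw_len     = 6;
        r->proto_len  = 4;
        r->opcode     = htons(2);
        memcpy(r->sender_mac, mac, 6);
        memcpy(r->sender_ip, ip, 4);
        /* target MAC/IP are filled from the incoming request by the responder */
    }

    int main(void)
    {
        const uint8_t mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
        const uint8_t ip[4]  = { 192, 168, 1, 10 };
        struct arp_reply r;

        build_arp_reply(&r, mac, ip);
        printf("arp reply body is %zu bytes\n", sizeof(r));
        return 0;
    }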
@@ -1966,6 +2164,11 @@ static int wl1271_op_conf_tx(struct ieee80211_hw *hw, u16 queue,
1966 2164
1967 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue); 2165 wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
1968 2166
2167 if (unlikely(wl->state == WL1271_STATE_OFF)) {
2168 ret = -EAGAIN;
2169 goto out;
2170 }
2171
1969 ret = wl1271_ps_elp_wakeup(wl, false); 2172 ret = wl1271_ps_elp_wakeup(wl, false);
1970 if (ret < 0) 2173 if (ret < 0)
1971 goto out; 2174 goto out;
@@ -2009,6 +2212,9 @@ static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw)
2009 2212
2010 mutex_lock(&wl->mutex); 2213 mutex_lock(&wl->mutex);
2011 2214
2215 if (unlikely(wl->state == WL1271_STATE_OFF))
2216 goto out;
2217
2012 ret = wl1271_ps_elp_wakeup(wl, false); 2218 ret = wl1271_ps_elp_wakeup(wl, false);
2013 if (ret < 0) 2219 if (ret < 0)
2014 goto out; 2220 goto out;
@@ -2030,14 +2236,14 @@ static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
2030{ 2236{
2031 struct wl1271 *wl = hw->priv; 2237 struct wl1271 *wl = hw->priv;
2032 struct ieee80211_conf *conf = &hw->conf; 2238 struct ieee80211_conf *conf = &hw->conf;
2033 2239
2034 if (idx != 0) 2240 if (idx != 0)
2035 return -ENOENT; 2241 return -ENOENT;
2036 2242
2037 survey->channel = conf->channel; 2243 survey->channel = conf->channel;
2038 survey->filled = SURVEY_INFO_NOISE_DBM; 2244 survey->filled = SURVEY_INFO_NOISE_DBM;
2039 survey->noise = wl->noise; 2245 survey->noise = wl->noise;
2040 2246
2041 return 0; 2247 return 0;
2042} 2248}
2043 2249
@@ -2084,37 +2290,34 @@ static struct ieee80211_rate wl1271_rates[] = {
2084 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 2290 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
2085}; 2291};
2086 2292
2087/* 2293/* can't be const, mac80211 writes to this */
2088 * Can't be const, mac80211 writes to this. The order of the channels here
2089 * is designed to improve scanning.
2090 */
2091static struct ieee80211_channel wl1271_channels[] = { 2294static struct ieee80211_channel wl1271_channels[] = {
2092 { .hw_value = 1, .center_freq = 2412, .max_power = 25 }, 2295 { .hw_value = 1, .center_freq = 2412, .max_power = 25 },
2093 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
2094 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
2095 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
2096 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
2097 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
2098 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
2099 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
2100 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
2101 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
2102 { .hw_value = 2, .center_freq = 2417, .max_power = 25 }, 2296 { .hw_value = 2, .center_freq = 2417, .max_power = 25 },
2297 { .hw_value = 3, .center_freq = 2422, .max_power = 25 },
2298 { .hw_value = 4, .center_freq = 2427, .max_power = 25 },
2299 { .hw_value = 5, .center_freq = 2432, .max_power = 25 },
2103 { .hw_value = 6, .center_freq = 2437, .max_power = 25 }, 2300 { .hw_value = 6, .center_freq = 2437, .max_power = 25 },
2301 { .hw_value = 7, .center_freq = 2442, .max_power = 25 },
2302 { .hw_value = 8, .center_freq = 2447, .max_power = 25 },
2303 { .hw_value = 9, .center_freq = 2452, .max_power = 25 },
2104 { .hw_value = 10, .center_freq = 2457, .max_power = 25 }, 2304 { .hw_value = 10, .center_freq = 2457, .max_power = 25 },
2305 { .hw_value = 11, .center_freq = 2462, .max_power = 25 },
2306 { .hw_value = 12, .center_freq = 2467, .max_power = 25 },
2307 { .hw_value = 13, .center_freq = 2472, .max_power = 25 },
2105}; 2308};
2106 2309
2107/* mapping to indexes for wl1271_rates */ 2310/* mapping to indexes for wl1271_rates */
2108static const u8 wl1271_rate_to_idx_2ghz[] = { 2311static const u8 wl1271_rate_to_idx_2ghz[] = {
2109 /* MCS rates are used only with 11n */ 2312 /* MCS rates are used only with 11n */
2110 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */ 2313 7, /* CONF_HW_RXTX_RATE_MCS7 */
2111 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */ 2314 6, /* CONF_HW_RXTX_RATE_MCS6 */
2112 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */ 2315 5, /* CONF_HW_RXTX_RATE_MCS5 */
2113 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */ 2316 4, /* CONF_HW_RXTX_RATE_MCS4 */
2114 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */ 2317 3, /* CONF_HW_RXTX_RATE_MCS3 */
2115 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */ 2318 2, /* CONF_HW_RXTX_RATE_MCS2 */
2116 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */ 2319 1, /* CONF_HW_RXTX_RATE_MCS1 */
2117 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */ 2320 0, /* CONF_HW_RXTX_RATE_MCS0 */
2118 2321
2119 11, /* CONF_HW_RXTX_RATE_54 */ 2322 11, /* CONF_HW_RXTX_RATE_54 */
2120 10, /* CONF_HW_RXTX_RATE_48 */ 2323 10, /* CONF_HW_RXTX_RATE_48 */
@@ -2134,12 +2337,34 @@ static const u8 wl1271_rate_to_idx_2ghz[] = {
2134 0 /* CONF_HW_RXTX_RATE_1 */ 2337 0 /* CONF_HW_RXTX_RATE_1 */
2135}; 2338};
2136 2339
2340/* 11n STA capabilities */
2341#define HW_RX_HIGHEST_RATE 72
2342
2343#ifdef CONFIG_WL12XX_HT
2344#define WL12XX_HT_CAP { \
2345 .cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20, \
2346 .ht_supported = true, \
2347 .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, \
2348 .ampdu_density = IEEE80211_HT_MPDU_DENSITY_8, \
2349 .mcs = { \
2350 .rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, }, \
2351 .rx_highest = cpu_to_le16(HW_RX_HIGHEST_RATE), \
2352 .tx_params = IEEE80211_HT_MCS_TX_DEFINED, \
2353 }, \
2354}
2355#else
2356#define WL12XX_HT_CAP { \
2357 .ht_supported = false, \
2358}
2359#endif
2360
2137/* can't be const, mac80211 writes to this */ 2361/* can't be const, mac80211 writes to this */
2138static struct ieee80211_supported_band wl1271_band_2ghz = { 2362static struct ieee80211_supported_band wl1271_band_2ghz = {
2139 .channels = wl1271_channels, 2363 .channels = wl1271_channels,
2140 .n_channels = ARRAY_SIZE(wl1271_channels), 2364 .n_channels = ARRAY_SIZE(wl1271_channels),
2141 .bitrates = wl1271_rates, 2365 .bitrates = wl1271_rates,
2142 .n_bitrates = ARRAY_SIZE(wl1271_rates), 2366 .n_bitrates = ARRAY_SIZE(wl1271_rates),
2367 .ht_cap = WL12XX_HT_CAP,
2143}; 2368};
2144 2369
2145/* 5 GHz data rates for WL1273 */ 2370/* 5 GHz data rates for WL1273 */
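[Note] The bands gain an ht_cap initializer chosen at compile time: with CONFIG_WL12XX_HT the macro advertises greenfield and short GI-20, 8K A-MPDUs and a single MCS 0-7 RX stream; without it, ht_supported stays false. A minimal model of that compile-time capability selection (generic C, not the mac80211 structures):

    #include <stdbool.h>
    #include <stdio.h>

    /* toggle at build time, e.g. -DENABLE_HT, standing in for CONFIG_WL12XX_HT */
    struct ht_caps {
        bool supported;
        unsigned char mcs_rx_mask;   /* one spatial stream: MCS 0-7 */
    };

    #ifdef ENABLE_HT
    #define BAND_HT_CAP { .supported = true, .mcs_rx_mask = 0xff }
    #else
    #define BAND_HT_CAP { .supported = false }
    #endif

    struct band {
        const char *name;
        struct ht_caps ht;
    };

    static struct band band_2ghz = { .name = "2.4 GHz", .ht = BAND_HT_CAP };

    int main(void)
    {
        printf("%s: HT %s\n", band_2ghz.name,
               band_2ghz.ht.supported ? "enabled" : "disabled");
        return 0;
    }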
@@ -2170,66 +2395,55 @@ static struct ieee80211_rate wl1271_rates_5ghz[] = {
2170 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, }, 2395 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
2171}; 2396};
2172 2397
2173/* 2398/* 5 GHz band channels for WL1273 */
2174 * 5 GHz band channels for WL1273 - can't be const, mac80211 writes to this.
2175 * The order of the channels here is designed to improve scanning.
2176 */
2177static struct ieee80211_channel wl1271_channels_5ghz[] = { 2399static struct ieee80211_channel wl1271_channels_5ghz[] = {
2178 { .hw_value = 183, .center_freq = 4915}, 2400 { .hw_value = 7, .center_freq = 5035},
2179 { .hw_value = 188, .center_freq = 4940},
2180 { .hw_value = 8, .center_freq = 5040}, 2401 { .hw_value = 8, .center_freq = 5040},
2181 { .hw_value = 34, .center_freq = 5170},
2182 { .hw_value = 44, .center_freq = 5220},
2183 { .hw_value = 60, .center_freq = 5300},
2184 { .hw_value = 112, .center_freq = 5560},
2185 { .hw_value = 132, .center_freq = 5660},
2186 { .hw_value = 157, .center_freq = 5785},
2187 { .hw_value = 184, .center_freq = 4920},
2188 { .hw_value = 189, .center_freq = 4945},
2189 { .hw_value = 9, .center_freq = 5045}, 2402 { .hw_value = 9, .center_freq = 5045},
2190 { .hw_value = 36, .center_freq = 5180},
2191 { .hw_value = 46, .center_freq = 5230},
2192 { .hw_value = 64, .center_freq = 5320},
2193 { .hw_value = 116, .center_freq = 5580},
2194 { .hw_value = 136, .center_freq = 5680},
2195 { .hw_value = 192, .center_freq = 4960},
2196 { .hw_value = 11, .center_freq = 5055}, 2403 { .hw_value = 11, .center_freq = 5055},
2197 { .hw_value = 38, .center_freq = 5190},
2198 { .hw_value = 48, .center_freq = 5240},
2199 { .hw_value = 100, .center_freq = 5500},
2200 { .hw_value = 120, .center_freq = 5600},
2201 { .hw_value = 140, .center_freq = 5700},
2202 { .hw_value = 185, .center_freq = 4925},
2203 { .hw_value = 196, .center_freq = 4980},
2204 { .hw_value = 12, .center_freq = 5060}, 2404 { .hw_value = 12, .center_freq = 5060},
2205 { .hw_value = 40, .center_freq = 5200},
2206 { .hw_value = 52, .center_freq = 5260},
2207 { .hw_value = 104, .center_freq = 5520},
2208 { .hw_value = 124, .center_freq = 5620},
2209 { .hw_value = 149, .center_freq = 5745},
2210 { .hw_value = 161, .center_freq = 5805},
2211 { .hw_value = 187, .center_freq = 4935},
2212 { .hw_value = 7, .center_freq = 5035},
2213 { .hw_value = 16, .center_freq = 5080}, 2405 { .hw_value = 16, .center_freq = 5080},
2406 { .hw_value = 34, .center_freq = 5170},
2407 { .hw_value = 36, .center_freq = 5180},
2408 { .hw_value = 38, .center_freq = 5190},
2409 { .hw_value = 40, .center_freq = 5200},
2214 { .hw_value = 42, .center_freq = 5210}, 2410 { .hw_value = 42, .center_freq = 5210},
2411 { .hw_value = 44, .center_freq = 5220},
2412 { .hw_value = 46, .center_freq = 5230},
2413 { .hw_value = 48, .center_freq = 5240},
2414 { .hw_value = 52, .center_freq = 5260},
2215 { .hw_value = 56, .center_freq = 5280}, 2415 { .hw_value = 56, .center_freq = 5280},
2416 { .hw_value = 60, .center_freq = 5300},
2417 { .hw_value = 64, .center_freq = 5320},
2418 { .hw_value = 100, .center_freq = 5500},
2419 { .hw_value = 104, .center_freq = 5520},
2216 { .hw_value = 108, .center_freq = 5540}, 2420 { .hw_value = 108, .center_freq = 5540},
2421 { .hw_value = 112, .center_freq = 5560},
2422 { .hw_value = 116, .center_freq = 5580},
2423 { .hw_value = 120, .center_freq = 5600},
2424 { .hw_value = 124, .center_freq = 5620},
2217 { .hw_value = 128, .center_freq = 5640}, 2425 { .hw_value = 128, .center_freq = 5640},
2426 { .hw_value = 132, .center_freq = 5660},
2427 { .hw_value = 136, .center_freq = 5680},
2428 { .hw_value = 140, .center_freq = 5700},
2429 { .hw_value = 149, .center_freq = 5745},
2218 { .hw_value = 153, .center_freq = 5765}, 2430 { .hw_value = 153, .center_freq = 5765},
2431 { .hw_value = 157, .center_freq = 5785},
2432 { .hw_value = 161, .center_freq = 5805},
2219 { .hw_value = 165, .center_freq = 5825}, 2433 { .hw_value = 165, .center_freq = 5825},
2220}; 2434};
2221 2435
2222/* mapping to indexes for wl1271_rates_5ghz */ 2436/* mapping to indexes for wl1271_rates_5ghz */
2223static const u8 wl1271_rate_to_idx_5ghz[] = { 2437static const u8 wl1271_rate_to_idx_5ghz[] = {
2224 /* MCS rates are used only with 11n */ 2438 /* MCS rates are used only with 11n */
2225 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS7 */ 2439 7, /* CONF_HW_RXTX_RATE_MCS7 */
2226 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS6 */ 2440 6, /* CONF_HW_RXTX_RATE_MCS6 */
2227 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS5 */ 2441 5, /* CONF_HW_RXTX_RATE_MCS5 */
2228 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS4 */ 2442 4, /* CONF_HW_RXTX_RATE_MCS4 */
2229 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS3 */ 2443 3, /* CONF_HW_RXTX_RATE_MCS3 */
2230 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS2 */ 2444 2, /* CONF_HW_RXTX_RATE_MCS2 */
2231 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS1 */ 2445 1, /* CONF_HW_RXTX_RATE_MCS1 */
2232 CONF_HW_RXTX_RATE_UNSUPPORTED, /* CONF_HW_RXTX_RATE_MCS0 */ 2446 0, /* CONF_HW_RXTX_RATE_MCS0 */
2233 2447
2234 7, /* CONF_HW_RXTX_RATE_54 */ 2448 7, /* CONF_HW_RXTX_RATE_54 */
2235 6, /* CONF_HW_RXTX_RATE_48 */ 2449 6, /* CONF_HW_RXTX_RATE_48 */
@@ -2254,6 +2468,7 @@ static struct ieee80211_supported_band wl1271_band_5ghz = {
2254 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz), 2468 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
2255 .bitrates = wl1271_rates_5ghz, 2469 .bitrates = wl1271_rates_5ghz,
2256 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz), 2470 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
2471 .ht_cap = WL12XX_HT_CAP,
2257}; 2472};
2258 2473
2259static const u8 *wl1271_band_rate_to_idx[] = { 2474static const u8 *wl1271_band_rate_to_idx[] = {
@@ -2273,6 +2488,7 @@ static const struct ieee80211_ops wl1271_ops = {
2273 .set_key = wl1271_op_set_key, 2488 .set_key = wl1271_op_set_key,
2274 .hw_scan = wl1271_op_hw_scan, 2489 .hw_scan = wl1271_op_hw_scan,
2275 .bss_info_changed = wl1271_op_bss_info_changed, 2490 .bss_info_changed = wl1271_op_bss_info_changed,
2491 .set_frag_threshold = wl1271_op_set_frag_threshold,
2276 .set_rts_threshold = wl1271_op_set_rts_threshold, 2492 .set_rts_threshold = wl1271_op_set_rts_threshold,
2277 .conf_tx = wl1271_op_conf_tx, 2493 .conf_tx = wl1271_op_conf_tx,
2278 .get_tsf = wl1271_op_get_tsf, 2494 .get_tsf = wl1271_op_get_tsf,
@@ -2281,18 +2497,18 @@ static const struct ieee80211_ops wl1271_ops = {
2281}; 2497};
2282 2498
2283 2499
2284u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate) 2500u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band)
2285{ 2501{
2286 u8 idx; 2502 u8 idx;
2287 2503
2288 BUG_ON(wl->band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *)); 2504 BUG_ON(band >= sizeof(wl1271_band_rate_to_idx)/sizeof(u8 *));
2289 2505
2290 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) { 2506 if (unlikely(rate >= CONF_HW_RXTX_RATE_MAX)) {
2291 wl1271_error("Illegal RX rate from HW: %d", rate); 2507 wl1271_error("Illegal RX rate from HW: %d", rate);
2292 return 0; 2508 return 0;
2293 } 2509 }
2294 2510
2295 idx = wl1271_band_rate_to_idx[wl->band][rate]; 2511 idx = wl1271_band_rate_to_idx[band][rate];
2296 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) { 2512 if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
2297 wl1271_error("Unsupported RX rate from HW: %d", rate); 2513 wl1271_error("Unsupported RX rate from HW: %d", rate);
2298 return 0; 2514 return 0;
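[Note] wl1271_rate_to_idx no longer needs the wl context: the caller passes the band and the function indexes a per-band lookup table, which now also maps MCS 0-7 to bitrate indexes 0-7 instead of "unsupported". A compact standalone version of that table-lookup pattern (the table values below are placeholders, not the driver's):

    #include <stdio.h>
    #include <stdint.h>

    enum band { BAND_2GHZ, BAND_5GHZ, NUM_BANDS };

    #define RATE_UNSUPPORTED 0xff
    #define RATE_MAX 4                     /* tiny example table, not the real one */

    /* hardware rate code -> index into the band's bitrate array */
    static const uint8_t rate_to_idx_2ghz[RATE_MAX] = { 0, 1, 2, 3 };
    static const uint8_t rate_to_idx_5ghz[RATE_MAX] = { RATE_UNSUPPORTED, 0, 1, 2 };

    static const uint8_t *band_rate_to_idx[NUM_BANDS] = {
        [BAND_2GHZ] = rate_to_idx_2ghz,
        [BAND_5GHZ] = rate_to_idx_5ghz,
    };

    static uint8_t rate_to_idx(int rate, enum band band)
    {
        uint8_t idx;

        if (rate >= RATE_MAX)
            return 0;                      /* illegal rate from hardware */
        idx = band_rate_to_idx[band][rate];
        return idx == RATE_UNSUPPORTED ? 0 : idx;
    }

    int main(void)
    {
        printf("idx = %u\n", rate_to_idx(2, BAND_5GHZ));
        return 0;
    }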
@@ -2401,6 +2617,8 @@ int wl1271_register_hw(struct wl1271 *wl)
2401 2617
2402 wl->mac80211_registered = true; 2618 wl->mac80211_registered = true;
2403 2619
2620 wl1271_debugfs_init(wl);
2621
2404 register_netdevice_notifier(&wl1271_dev_notifier); 2622 register_netdevice_notifier(&wl1271_dev_notifier);
2405 2623
2406 wl1271_notice("loaded"); 2624 wl1271_notice("loaded");
@@ -2451,12 +2669,21 @@ int wl1271_init_ieee80211(struct wl1271 *wl)
2451 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 2669 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
2452 BIT(NL80211_IFTYPE_ADHOC); 2670 BIT(NL80211_IFTYPE_ADHOC);
2453 wl->hw->wiphy->max_scan_ssids = 1; 2671 wl->hw->wiphy->max_scan_ssids = 1;
2672 /*
2673  * The maximum length of IEs that fit in a scan probe request template
2674  * is the maximum template size minus the template's IEEE 802.11
2675  * header.
2676 */
2677 wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
2678 sizeof(struct ieee80211_header);
2454 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 2679 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
2455 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz; 2680 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
2456 2681
2457 wl->hw->queues = 4; 2682 wl->hw->queues = 4;
2458 wl->hw->max_rates = 1; 2683 wl->hw->max_rates = 1;
2459 2684
2685 wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
2686
2460 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl)); 2687 SET_IEEE80211_DEV(wl->hw, wl1271_wl_to_dev(wl));
2461 2688
2462 return 0; 2689 return 0;
@@ -2495,7 +2722,8 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2495 wl->hw = hw; 2722 wl->hw = hw;
2496 wl->plat_dev = plat_dev; 2723 wl->plat_dev = plat_dev;
2497 2724
2498 skb_queue_head_init(&wl->tx_queue); 2725 for (i = 0; i < NUM_TX_QUEUES; i++)
2726 skb_queue_head_init(&wl->tx_queue[i]);
2499 2727
2500 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work); 2728 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
2501 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work); 2729 INIT_DELAYED_WORK(&wl->pspoll_work, wl1271_pspoll_work);
@@ -2521,6 +2749,7 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2521 wl->sg_enabled = true; 2749 wl->sg_enabled = true;
2522 wl->hw_pg_ver = -1; 2750 wl->hw_pg_ver = -1;
2523 2751
2752 memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
2524 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 2753 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
2525 wl->tx_frames[i] = NULL; 2754 wl->tx_frames[i] = NULL;
2526 2755
@@ -2532,8 +2761,6 @@ struct ieee80211_hw *wl1271_alloc_hw(void)
2532 /* Apply default driver configuration. */ 2761 /* Apply default driver configuration. */
2533 wl1271_conf_init(wl); 2762 wl1271_conf_init(wl);
2534 2763
2535 wl1271_debugfs_init(wl);
2536
2537 order = get_order(WL1271_AGGR_BUFFER_SIZE); 2764 order = get_order(WL1271_AGGR_BUFFER_SIZE);
2538 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order); 2765 wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
2539 if (!wl->aggr_buf) { 2766 if (!wl->aggr_buf) {
@@ -2610,6 +2837,11 @@ int wl1271_free_hw(struct wl1271 *wl)
2610} 2837}
2611EXPORT_SYMBOL_GPL(wl1271_free_hw); 2838EXPORT_SYMBOL_GPL(wl1271_free_hw);
2612 2839
2840u32 wl12xx_debug_level;
2841EXPORT_SYMBOL_GPL(wl12xx_debug_level);
2842module_param_named(debug_level, wl12xx_debug_level, uint, DEBUG_NONE);
2843MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
2844
2613MODULE_LICENSE("GPL"); 2845MODULE_LICENSE("GPL");
2614MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 2846MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
2615MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>"); 2847MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
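[Note] main.c now exposes wl12xx_debug_level as a module parameter and exports it so the other wl12xx modules can test it. A generic sketch of the same mechanism in a standalone module; the names are illustrative, and unlike the patch above this sketch uses explicit 0600 sysfs permissions so the value can be changed at runtime by writing to /sys/module/<module name>/parameters/debug_level:

    #include <linux/module.h>
    #include <linux/moduleparam.h>
    #include <linux/printk.h>

    /* sketch of a runtime-tunable debug level */
    static unsigned int example_debug_level;
    module_param_named(debug_level, example_debug_level, uint, 0600);
    MODULE_PARM_DESC(debug_level, "bitmask selecting which debug messages to print");

    static int __init example_init(void)
    {
        pr_info("example loaded, debug_level=0x%x\n", example_debug_level);
        return 0;
    }
    module_init(example_init);

    static void __exit example_exit(void)
    {
    }
    module_exit(example_exit);

    MODULE_LICENSE("GPL");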
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/ps.c
index e3c332e2f97c..60a3738eadb0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/ps.c
@@ -21,9 +21,9 @@
21 * 21 *
22 */ 22 */
23 23
24#include "wl1271_reg.h" 24#include "reg.h"
25#include "wl1271_ps.h" 25#include "ps.h"
26#include "wl1271_io.h" 26#include "io.h"
27 27
28#define WL1271_WAKEUP_TIMEOUT 500 28#define WL1271_WAKEUP_TIMEOUT 500
29 29
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/ps.h
index 6ba7b032736f..8415060f08e5 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/ps.h
@@ -21,11 +21,11 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_PS_H__ 24#ifndef __PS_H__
25#define __WL1271_PS_H__ 25#define __PS_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28#include "wl1271_acx.h" 28#include "acx.h"
29 29
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode, 30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode,
31 u32 rates, bool send); 31 u32 rates, bool send);
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/reg.h
index 990960771528..990960771528 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/reg.h
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/rx.c
index bea133b6e489..682304c30b81 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/rx.c
@@ -23,11 +23,11 @@
23 23
24#include <linux/gfp.h> 24#include <linux/gfp.h>
25 25
26#include "wl1271.h" 26#include "wl12xx.h"
27#include "wl1271_acx.h" 27#include "acx.h"
28#include "wl1271_reg.h" 28#include "reg.h"
29#include "wl1271_rx.h" 29#include "rx.h"
30#include "wl1271_io.h" 30#include "io.h"
31 31
32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 32static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
33 u32 drv_rx_counter) 33 u32 drv_rx_counter)
@@ -48,10 +48,24 @@ static void wl1271_rx_status(struct wl1271 *wl,
48 struct ieee80211_rx_status *status, 48 struct ieee80211_rx_status *status,
49 u8 beacon) 49 u8 beacon)
50{ 50{
51 enum ieee80211_band desc_band;
52
51 memset(status, 0, sizeof(struct ieee80211_rx_status)); 53 memset(status, 0, sizeof(struct ieee80211_rx_status));
52 54
53 status->band = wl->band; 55 status->band = wl->band;
54 status->rate_idx = wl1271_rate_to_idx(wl, desc->rate); 56
57 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
58 desc_band = IEEE80211_BAND_2GHZ;
59 else
60 desc_band = IEEE80211_BAND_5GHZ;
61
62 status->rate_idx = wl1271_rate_to_idx(desc->rate, desc_band);
63
64#ifdef CONFIG_WL12XX_HT
65 /* 11n support */
66 if (desc->rate <= CONF_HW_RXTX_RATE_MCS0)
67 status->flag |= RX_FLAG_HT;
68#endif
55 69
56 status->signal = desc->rssi; 70 status->signal = desc->rssi;
57 71
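[Note] The RX status path now derives the band from the RX descriptor flags instead of trusting wl->band, feeds that band into wl1271_rate_to_idx(), and (with CONFIG_WL12XX_HT) tags MCS frames with RX_FLAG_HT. A small standalone sketch of the flag decoding, with an assumed bit layout; the real mask values live in rx.h:

    #include <stdio.h>
    #include <stdint.h>

    /* assumed layout: band carried in the low bits of the descriptor flags */
    #define RX_DESC_BAND_MASK 0x03
    #define RX_DESC_BAND_BG   0x00

    enum band { BAND_2GHZ, BAND_5GHZ };

    static enum band desc_band(uint8_t flags)
    {
        return (flags & RX_DESC_BAND_MASK) == RX_DESC_BAND_BG ? BAND_2GHZ
                                                               : BAND_5GHZ;
    }

    int main(void)
    {
        printf("band=%d\n", desc_band(0x01));   /* -> BAND_5GHZ */
        return 0;
    }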
@@ -170,10 +184,14 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
170 while (pkt_offset < buf_size) { 184 while (pkt_offset < buf_size) {
171 pkt_length = wl1271_rx_get_buf_size(status, 185 pkt_length = wl1271_rx_get_buf_size(status,
172 drv_rx_counter); 186 drv_rx_counter);
173 if (wl1271_rx_handle_data(wl, 187 /*
174 wl->aggr_buf + pkt_offset, 188 * the handle data call can only fail in memory-outage
175 pkt_length) < 0) 189 * conditions, in that case the received frame will just
176 break; 190 * be dropped.
191 */
192 wl1271_rx_handle_data(wl,
193 wl->aggr_buf + pkt_offset,
194 pkt_length);
177 wl->rx_counter++; 195 wl->rx_counter++;
178 drv_rx_counter++; 196 drv_rx_counter++;
179 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK; 197 drv_rx_counter &= NUM_RX_PKT_DESC_MOD_MASK;
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/rx.h
index 13a232333b13..3abb26fe0364 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/rx.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_RX_H__ 25#ifndef __RX_H__
26#define __WL1271_RX_H__ 26#define __RX_H__
27 27
28#include <linux/bitops.h> 28#include <linux/bitops.h>
29 29
@@ -116,6 +116,6 @@ struct wl1271_rx_descriptor {
116} __packed; 116} __packed;
117 117
118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status); 118void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status);
119u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); 119u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
120 120
121#endif 121#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.c b/drivers/net/wireless/wl12xx/scan.c
index 909bb47995b6..6f897b9d90ca 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.c
+++ b/drivers/net/wireless/wl12xx/scan.c
@@ -23,10 +23,10 @@
23 23
24#include <linux/ieee80211.h> 24#include <linux/ieee80211.h>
25 25
26#include "wl1271.h" 26#include "wl12xx.h"
27#include "wl1271_cmd.h" 27#include "cmd.h"
28#include "wl1271_scan.h" 28#include "scan.h"
29#include "wl1271_acx.h" 29#include "acx.h"
30 30
31void wl1271_scan_complete_work(struct work_struct *work) 31void wl1271_scan_complete_work(struct work_struct *work)
32{ 32{
@@ -48,14 +48,19 @@ void wl1271_scan_complete_work(struct work_struct *work)
48 wl->scan.state = WL1271_SCAN_STATE_IDLE; 48 wl->scan.state = WL1271_SCAN_STATE_IDLE;
49 kfree(wl->scan.scanned_ch); 49 kfree(wl->scan.scanned_ch);
50 wl->scan.scanned_ch = NULL; 50 wl->scan.scanned_ch = NULL;
51 mutex_unlock(&wl->mutex); 51 wl->scan.req = NULL;
52
53 ieee80211_scan_completed(wl->hw, false); 52 ieee80211_scan_completed(wl->hw, false);
54 53
54 /* restore hardware connection monitoring template */
55 if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
56 wl1271_cmd_build_ap_probe_req(wl, wl->probereq);
57
55 if (wl->scan.failed) { 58 if (wl->scan.failed) {
56 wl1271_info("Scan completed due to error."); 59 wl1271_info("Scan completed due to error.");
57 ieee80211_queue_work(wl->hw, &wl->recovery_work); 60 ieee80211_queue_work(wl->hw, &wl->recovery_work);
58 } 61 }
62 mutex_unlock(&wl->mutex);
63
59} 64}
60 65
61 66
diff --git a/drivers/net/wireless/wl12xx/wl1271_scan.h b/drivers/net/wireless/wl12xx/scan.h
index 6d57127b5e6b..421a750add5a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_scan.h
+++ b/drivers/net/wireless/wl12xx/scan.h
@@ -21,10 +21,10 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_SCAN_H__ 24#ifndef __SCAN_H__
25#define __WL1271_SCAN_H__ 25#define __SCAN_H__
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28 28
29int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len, 29int wl1271_scan(struct wl1271 *wl, const u8 *ssid, size_t ssid_len,
30 struct cfg80211_scan_request *req); 30 struct cfg80211_scan_request *req);
diff --git a/drivers/net/wireless/wl12xx/wl1271_sdio.c b/drivers/net/wireless/wl12xx/sdio.c
index 784ef3432641..93cbb8d5aba9 100644
--- a/drivers/net/wireless/wl12xx/wl1271_sdio.c
+++ b/drivers/net/wireless/wl12xx/sdio.c
@@ -32,9 +32,9 @@
32#include <linux/wl12xx.h> 32#include <linux/wl12xx.h>
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34 34
35#include "wl1271.h" 35#include "wl12xx.h"
36#include "wl12xx_80211.h" 36#include "wl12xx_80211.h"
37#include "wl1271_io.h" 37#include "io.h"
38 38
39#ifndef SDIO_VENDOR_ID_TI 39#ifndef SDIO_VENDOR_ID_TI
40#define SDIO_VENDOR_ID_TI 0x0097 40#define SDIO_VENDOR_ID_TI 0x0097
diff --git a/drivers/net/wireless/wl12xx/sdio_test.c b/drivers/net/wireless/wl12xx/sdio_test.c
new file mode 100644
index 000000000000..9fcbd3dd8490
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/sdio_test.c
@@ -0,0 +1,520 @@
1/*
2 * SDIO testing driver for wl12xx
3 *
4 * Copyright (C) 2010 Nokia Corporation
5 *
6 * Contact: Roger Quadros <roger.quadros@nokia.com>
7 *
8 * wl12xx read/write routines taken from the main module
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
22 * 02110-1301 USA
23 *
24 */
25
26#include <linux/irq.h>
27#include <linux/module.h>
28#include <linux/crc7.h>
29#include <linux/vmalloc.h>
30#include <linux/mmc/sdio_func.h>
31#include <linux/mmc/sdio_ids.h>
32#include <linux/mmc/card.h>
33#include <linux/gpio.h>
34#include <linux/wl12xx.h>
35#include <linux/kthread.h>
36#include <linux/firmware.h>
37#include <linux/pm_runtime.h>
38
39#include "wl12xx.h"
40#include "io.h"
41#include "boot.h"
42
43#ifndef SDIO_VENDOR_ID_TI
44#define SDIO_VENDOR_ID_TI 0x0097
45#endif
46
47#ifndef SDIO_DEVICE_ID_TI_WL1271
48#define SDIO_DEVICE_ID_TI_WL1271 0x4076
49#endif
50
51static bool rx, tx;
52
53module_param(rx, bool, S_IRUGO | S_IWUSR);
54MODULE_PARM_DESC(rx, "Perform rx test. Default (0). "
55 "This test continuously reads data from the SDIO device.\n");
56
57module_param(tx, bool, S_IRUGO | S_IWUSR);
58MODULE_PARM_DESC(tx, "Perform tx test. Default (0). "
59 "This test continuously writes data to the SDIO device.\n");
60
61struct wl1271_test {
62 struct wl1271 wl;
63 struct task_struct *test_task;
64};
65
66static const struct sdio_device_id wl1271_devices[] = {
67 { SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
68 {}
69};
70
71static inline struct sdio_func *wl_to_func(struct wl1271 *wl)
72{
73 return wl->if_priv;
74}
75
76static struct device *wl1271_sdio_wl_to_dev(struct wl1271 *wl)
77{
78 return &(wl_to_func(wl)->dev);
79}
80
81static void wl1271_sdio_raw_read(struct wl1271 *wl, int addr, void *buf,
82 size_t len, bool fixed)
83{
84 int ret = 0;
85 struct sdio_func *func = wl_to_func(wl);
86
87 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
88 ((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
89 wl1271_debug(DEBUG_SDIO, "sdio read 52 addr 0x%x, byte 0x%02x",
90 addr, ((u8 *)buf)[0]);
91 } else {
92 if (fixed)
93 ret = sdio_readsb(func, buf, addr, len);
94 else
95 ret = sdio_memcpy_fromio(func, buf, addr, len);
96
97 wl1271_debug(DEBUG_SDIO, "sdio read 53 addr 0x%x, %zu bytes",
98 addr, len);
99 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
100 }
101
102 if (ret)
103 wl1271_error("sdio read failed (%d)", ret);
104}
105
106static void wl1271_sdio_raw_write(struct wl1271 *wl, int addr, void *buf,
107 size_t len, bool fixed)
108{
109 int ret = 0;
110 struct sdio_func *func = wl_to_func(wl);
111
112 if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG_ADDR)) {
113 sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
114 wl1271_debug(DEBUG_SDIO, "sdio write 52 addr 0x%x, byte 0x%02x",
115 addr, ((u8 *)buf)[0]);
116 } else {
117 wl1271_debug(DEBUG_SDIO, "sdio write 53 addr 0x%x, %zu bytes",
118 addr, len);
119 wl1271_dump_ascii(DEBUG_SDIO, "data: ", buf, len);
120
121 if (fixed)
122 ret = sdio_writesb(func, addr, buf, len);
123 else
124 ret = sdio_memcpy_toio(func, addr, buf, len);
125 }
126 if (ret)
127 wl1271_error("sdio write failed (%d)", ret);
128
129}
130
131static int wl1271_sdio_set_power(struct wl1271 *wl, bool enable)
132{
133 struct sdio_func *func = wl_to_func(wl);
134 int ret;
135
136 /* Let the SDIO stack handle wlan_enable control, so we
137 * keep host claimed while wlan is in use to keep wl1271
138 * alive.
139 */
140 if (enable) {
141 /* Power up the card */
142 ret = pm_runtime_get_sync(&func->dev);
143 if (ret < 0)
144 goto out;
145 sdio_claim_host(func);
146 sdio_enable_func(func);
147 sdio_release_host(func);
148 } else {
149 sdio_claim_host(func);
150 sdio_disable_func(func);
151 sdio_release_host(func);
152
153 /* Power down the card */
154 ret = pm_runtime_put_sync(&func->dev);
155 }
156
157out:
158 return ret;
159}
160
161static void wl1271_sdio_disable_interrupts(struct wl1271 *wl)
162{
163}
164
165static void wl1271_sdio_enable_interrupts(struct wl1271 *wl)
166{
167}
168
169
170static struct wl1271_if_operations sdio_ops = {
171 .read = wl1271_sdio_raw_read,
172 .write = wl1271_sdio_raw_write,
173 .power = wl1271_sdio_set_power,
174 .dev = wl1271_sdio_wl_to_dev,
175 .enable_irq = wl1271_sdio_enable_interrupts,
176 .disable_irq = wl1271_sdio_disable_interrupts,
177};
178
179static void wl1271_fw_wakeup(struct wl1271 *wl)
180{
181 u32 elp_reg;
182
183 elp_reg = ELPCTRL_WAKE_UP;
184 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
185}
186
187static int wl1271_fetch_firmware(struct wl1271 *wl)
188{
189 const struct firmware *fw;
190 int ret;
191
192 ret = request_firmware(&fw, WL1271_FW_NAME, wl1271_wl_to_dev(wl));
193
194 if (ret < 0) {
195 wl1271_error("could not get firmware: %d", ret);
196 return ret;
197 }
198
199 if (fw->size % 4) {
200 wl1271_error("firmware size is not a multiple of 32 bits: %zu",
201 fw->size);
202 ret = -EILSEQ;
203 goto out;
204 }
205
206 wl->fw_len = fw->size;
207 wl->fw = vmalloc(wl->fw_len);
208
209 if (!wl->fw) {
210 wl1271_error("could not allocate memory for the firmware");
211 ret = -ENOMEM;
212 goto out;
213 }
214
215 memcpy(wl->fw, fw->data, wl->fw_len);
216
217 ret = 0;
218
219out:
220 release_firmware(fw);
221
222 return ret;
223}
224
225static int wl1271_fetch_nvs(struct wl1271 *wl)
226{
227 const struct firmware *fw;
228 int ret;
229
230 ret = request_firmware(&fw, WL1271_NVS_NAME, wl1271_wl_to_dev(wl));
231
232 if (ret < 0) {
233 wl1271_error("could not get nvs file: %d", ret);
234 return ret;
235 }
236
237 wl->nvs = kmemdup(fw->data, sizeof(struct wl1271_nvs_file), GFP_KERNEL);
238
239 if (!wl->nvs) {
240 wl1271_error("could not allocate memory for the nvs file");
241 ret = -ENOMEM;
242 goto out;
243 }
244
245 wl->nvs_len = fw->size;
246
247out:
248 release_firmware(fw);
249
250 return ret;
251}
252
253static int wl1271_chip_wakeup(struct wl1271 *wl)
254{
255 struct wl1271_partition_set partition;
256 int ret;
257
258 msleep(WL1271_PRE_POWER_ON_SLEEP);
259 ret = wl1271_power_on(wl);
260 if (ret)
261 return ret;
262
263 msleep(WL1271_POWER_ON_SLEEP);
264
265 /* We don't need a real memory partition here, because we only want
266 * to use the registers at this point. */
267 memset(&partition, 0, sizeof(partition));
268 partition.reg.start = REGISTERS_BASE;
269 partition.reg.size = REGISTERS_DOWN_SIZE;
270 wl1271_set_partition(wl, &partition);
271
272 /* ELP module wake up */
273 wl1271_fw_wakeup(wl);
274
275 /* whal_FwCtrl_BootSm() */
276
277 /* 0. read chip id from CHIP_ID */
278 wl->chip.id = wl1271_read32(wl, CHIP_ID_B);
279
280 /* 1. check if chip id is valid */
281
282 switch (wl->chip.id) {
283 case CHIP_ID_1271_PG10:
284 wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
285 wl->chip.id);
286 break;
287 case CHIP_ID_1271_PG20:
288 wl1271_notice("chip id 0x%x (1271 PG20)",
289 wl->chip.id);
290 break;
291 default:
292 wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
293 return -ENODEV;
294 }
295
296 return ret;
297}
298
299static struct wl1271_partition_set part_down = {
300 .mem = {
301 .start = 0x00000000,
302 .size = 0x000177c0
303 },
304 .reg = {
305 .start = REGISTERS_BASE,
306 .size = 0x00008800
307 },
308 .mem2 = {
309 .start = 0x00000000,
310 .size = 0x00000000
311 },
312 .mem3 = {
313 .start = 0x00000000,
314 .size = 0x00000000
315 },
316};
317
318static int tester(void *data)
319{
320 struct wl1271 *wl = data;
321 struct sdio_func *func = wl_to_func(wl);
322 struct device *pdev = &func->dev;
323 int ret = 0;
324 bool rx_started = 0;
325 bool tx_started = 0;
326 uint8_t *tx_buf, *rx_buf;
327 int test_size = PAGE_SIZE;
328 u32 addr = 0;
329 struct wl1271_partition_set partition;
330
331 /* We assume chip is powered up and firmware fetched */
332
333 memcpy(&partition, &part_down, sizeof(partition));
334 partition.mem.start = addr;
335 wl1271_set_partition(wl, &partition);
336
337 tx_buf = kmalloc(test_size, GFP_KERNEL);
338 rx_buf = kmalloc(test_size, GFP_KERNEL);
339 if (!tx_buf || !rx_buf) {
340 dev_err(pdev,
341 "Could not allocate memory. Test will not run.\n");
342 ret = -ENOMEM;
343 goto free;
344 }
345
346 memset(tx_buf, 0x5a, test_size);
347
348 /* write something in data area so we can read it back */
349 wl1271_write(wl, addr, tx_buf, test_size, false);
350
351 while (!kthread_should_stop()) {
352 if (rx && !rx_started) {
353 dev_info(pdev, "starting rx test\n");
354 rx_started = 1;
355 } else if (!rx && rx_started) {
356 dev_info(pdev, "stopping rx test\n");
357 rx_started = 0;
358 }
359
360 if (tx && !tx_started) {
361 dev_info(pdev, "starting tx test\n");
362 tx_started = 1;
363 } else if (!tx && tx_started) {
364 dev_info(pdev, "stopping tx test\n");
365 tx_started = 0;
366 }
367
368 if (rx_started)
369 wl1271_read(wl, addr, rx_buf, test_size, false);
370
371 if (tx_started)
372 wl1271_write(wl, addr, tx_buf, test_size, false);
373
374 if (!rx_started && !tx_started)
375 msleep(100);
376 }
377
378free:
379 kfree(tx_buf);
380 kfree(rx_buf);
381 return ret;
382}
383
384static int __devinit wl1271_probe(struct sdio_func *func,
385 const struct sdio_device_id *id)
386{
387 const struct wl12xx_platform_data *wlan_data;
388 struct wl1271 *wl;
389 struct wl1271_test *wl_test;
390 int ret = 0;
391
392 /* wl1271 has 2 sdio functions we handle just the wlan part */
393 if (func->num != 0x02)
394 return -ENODEV;
395
396 wl_test = kzalloc(sizeof(struct wl1271_test), GFP_KERNEL);
397 if (!wl_test) {
398 dev_err(&func->dev, "Could not allocate memory\n");
399 return -ENOMEM;
400 }
401
402 wl = &wl_test->wl;
403
404 wl->if_priv = func;
405 wl->if_ops = &sdio_ops;
406
407 /* Grab access to FN0 for ELP reg. */
408 func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
409
410 wlan_data = wl12xx_get_platform_data();
411 if (IS_ERR(wlan_data)) {
412 ret = PTR_ERR(wlan_data);
413 dev_err(&func->dev, "missing wlan platform data: %d\n", ret);
414 goto out_free;
415 }
416
417 wl->irq = wlan_data->irq;
418 wl->ref_clock = wlan_data->board_ref_clock;
419
420 sdio_set_drvdata(func, wl_test);
421
422
423 /* power up the device */
424 ret = wl1271_chip_wakeup(wl);
425 if (ret) {
426 dev_err(&func->dev, "could not wake up chip\n");
427 goto out_free;
428 }
429
430 if (wl->fw == NULL) {
431 ret = wl1271_fetch_firmware(wl);
432 if (ret < 0) {
433 dev_err(&func->dev, "firmware fetch error\n");
434 goto out_off;
435 }
436 }
437
438 /* fetch NVS */
439 if (wl->nvs == NULL) {
440 ret = wl1271_fetch_nvs(wl);
441 if (ret < 0) {
442 dev_err(&func->dev, "NVS fetch error\n");
443 goto out_off;
444 }
445 }
446
447 ret = wl1271_load_firmware(wl);
448 if (ret < 0) {
449 dev_err(&func->dev, "firmware load error: %d\n", ret);
450 goto out_free;
451 }
452
453 dev_info(&func->dev, "initialized\n");
454
455 /* I/O testing will be done in the tester thread */
456
457 wl_test->test_task = kthread_run(tester, wl, "sdio_tester");
458 if (IS_ERR(wl_test->test_task)) {
459 dev_err(&func->dev, "unable to create kernel thread\n");
460 ret = PTR_ERR(wl_test->test_task);
461 goto out_free;
462 }
463
464 return 0;
465
466out_off:
467 /* power off the chip */
468 wl1271_power_off(wl);
469
470out_free:
471 kfree(wl_test);
472 return ret;
473}
474
475static void __devexit wl1271_remove(struct sdio_func *func)
476{
477 struct wl1271_test *wl_test = sdio_get_drvdata(func);
478
479 /* stop the I/O test thread */
480 kthread_stop(wl_test->test_task);
481
482 /* power off the chip */
483 wl1271_power_off(&wl_test->wl);
484
485 vfree(wl_test->wl.fw);
486 wl_test->wl.fw = NULL;
487 kfree(wl_test->wl.nvs);
488 wl_test->wl.nvs = NULL;
489
490 kfree(wl_test);
491}
492
493static struct sdio_driver wl1271_sdio_driver = {
494 .name = "wl12xx_sdio_test",
495 .id_table = wl1271_devices,
496 .probe = wl1271_probe,
497 .remove = __devexit_p(wl1271_remove),
498};
499
500static int __init wl1271_init(void)
501{
502 int ret;
503
504 ret = sdio_register_driver(&wl1271_sdio_driver);
505 if (ret < 0)
506 pr_err("failed to register sdio driver: %d\n", ret);
507
508 return ret;
509}
510module_init(wl1271_init);
511
512static void __exit wl1271_exit(void)
513{
514 sdio_unregister_driver(&wl1271_sdio_driver);
515}
516module_exit(wl1271_exit);
517
518MODULE_LICENSE("GPL");
519MODULE_AUTHOR("Roger Quadros <roger.quadros@nokia.com>");
520
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/spi.c
index ef801680773f..46714910f98c 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/spi.c
@@ -28,11 +28,11 @@
28#include <linux/wl12xx.h> 28#include <linux/wl12xx.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30 30
31#include "wl1271.h" 31#include "wl12xx.h"
32#include "wl12xx_80211.h" 32#include "wl12xx_80211.h"
33#include "wl1271_io.h" 33#include "io.h"
34 34
35#include "wl1271_reg.h" 35#include "reg.h"
36 36
37#define WSPI_CMD_READ 0x40000000 37#define WSPI_CMD_READ 0x40000000
38#define WSPI_CMD_WRITE 0x00000000 38#define WSPI_CMD_WRITE 0x00000000
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.c b/drivers/net/wireless/wl12xx/testmode.c
index a3aa84386c88..e64403b6896d 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.c
+++ b/drivers/net/wireless/wl12xx/testmode.c
@@ -20,13 +20,13 @@
20 * 02110-1301 USA 20 * 02110-1301 USA
21 * 21 *
22 */ 22 */
23#include "wl1271_testmode.h" 23#include "testmode.h"
24 24
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <net/genetlink.h> 26#include <net/genetlink.h>
27 27
28#include "wl1271.h" 28#include "wl12xx.h"
29#include "wl1271_acx.h" 29#include "acx.h"
30 30
31#define WL1271_TM_MAX_DATA_LENGTH 1024 31#define WL1271_TM_MAX_DATA_LENGTH 1024
32 32
@@ -37,6 +37,7 @@ enum wl1271_tm_commands {
37 WL1271_TM_CMD_CONFIGURE, 37 WL1271_TM_CMD_CONFIGURE,
38 WL1271_TM_CMD_NVS_PUSH, 38 WL1271_TM_CMD_NVS_PUSH,
39 WL1271_TM_CMD_SET_PLT_MODE, 39 WL1271_TM_CMD_SET_PLT_MODE,
40 WL1271_TM_CMD_RECOVER,
40 41
41 __WL1271_TM_CMD_AFTER_LAST 42 __WL1271_TM_CMD_AFTER_LAST
42}; 43};
@@ -248,6 +249,15 @@ static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
248 return ret; 249 return ret;
249} 250}
250 251
252static int wl1271_tm_cmd_recover(struct wl1271 *wl, struct nlattr *tb[])
253{
254 wl1271_debug(DEBUG_TESTMODE, "testmode cmd recover");
255
256 ieee80211_queue_work(wl->hw, &wl->recovery_work);
257
258 return 0;
259}
260
251int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len) 261int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
252{ 262{
253 struct wl1271 *wl = hw->priv; 263 struct wl1271 *wl = hw->priv;
@@ -272,6 +282,8 @@ int wl1271_tm_cmd(struct ieee80211_hw *hw, void *data, int len)
272 return wl1271_tm_cmd_nvs_push(wl, tb); 282 return wl1271_tm_cmd_nvs_push(wl, tb);
273 case WL1271_TM_CMD_SET_PLT_MODE: 283 case WL1271_TM_CMD_SET_PLT_MODE:
274 return wl1271_tm_cmd_set_plt_mode(wl, tb); 284 return wl1271_tm_cmd_set_plt_mode(wl, tb);
285 case WL1271_TM_CMD_RECOVER:
286 return wl1271_tm_cmd_recover(wl, tb);
275 default: 287 default:
276 return -EOPNOTSUPP; 288 return -EOPNOTSUPP;
277 } 289 }
diff --git a/drivers/net/wireless/wl12xx/wl1271_testmode.h b/drivers/net/wireless/wl12xx/testmode.h
index c196d28f9d9d..8071654259ea 100644
--- a/drivers/net/wireless/wl12xx/wl1271_testmode.h
+++ b/drivers/net/wireless/wl12xx/testmode.h
@@ -21,8 +21,8 @@
21 * 21 *
22 */ 22 */
23 23
24#ifndef __WL1271_TESTMODE_H__ 24#ifndef __TESTMODE_H__
25#define __WL1271_TESTMODE_H__ 25#define __TESTMODE_H__
26 26
27#include <net/mac80211.h> 27#include <net/mac80211.h>
28 28
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/tx.c
index e3dc13c4d01a..b44c75cd8c1e 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/tx.c
@@ -24,23 +24,32 @@
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26 26
27#include "wl1271.h" 27#include "wl12xx.h"
28#include "wl1271_io.h" 28#include "io.h"
29#include "wl1271_reg.h" 29#include "reg.h"
30#include "wl1271_ps.h" 30#include "ps.h"
31#include "wl1271_tx.h" 31#include "tx.h"
32 32
33static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb) 33static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
34{ 34{
35 int i; 35 int id;
36 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 36
37 if (wl->tx_frames[i] == NULL) { 37 id = find_first_zero_bit(wl->tx_frames_map, ACX_TX_DESCRIPTORS);
38 wl->tx_frames[i] = skb; 38 if (id >= ACX_TX_DESCRIPTORS)
39 wl->tx_frames_cnt++; 39 return -EBUSY;
40 return i;
41 }
42 40
43 return -EBUSY; 41 __set_bit(id, wl->tx_frames_map);
42 wl->tx_frames[id] = skb;
43 wl->tx_frames_cnt++;
44 return id;
45}
46
47static void wl1271_free_tx_id(struct wl1271 *wl, int id)
48{
49 if (__test_and_clear_bit(id, wl->tx_frames_map)) {
50 wl->tx_frames[id] = NULL;
51 wl->tx_frames_cnt--;
52 }
44} 53}
45 54
46static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra, 55static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
@@ -52,10 +61,10 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
52 int id, ret = -EBUSY; 61 int id, ret = -EBUSY;
53 62
54 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE) 63 if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
55 return -EBUSY; 64 return -EAGAIN;
56 65
57 /* allocate free identifier for the packet */ 66 /* allocate free identifier for the packet */
58 id = wl1271_tx_id(wl, skb); 67 id = wl1271_alloc_tx_id(wl, skb);
59 if (id < 0) 68 if (id < 0)
60 return id; 69 return id;
61 70
@@ -79,8 +88,7 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra,
79 "tx_allocate: size: %d, blocks: %d, id: %d", 88 "tx_allocate: size: %d, blocks: %d, id: %d",
80 total_len, total_blocks, id); 89 total_len, total_blocks, id);
81 } else { 90 } else {
82 wl->tx_frames[id] = NULL; 91 wl1271_free_tx_id(wl, id);
83 wl->tx_frames_cnt--;
84 } 92 }
85 93
86 return ret; 94 return ret;
@@ -117,7 +125,6 @@ static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
117 /* queue (we use same identifiers for tid's and ac's */ 125 /* queue (we use same identifiers for tid's and ac's */
118 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb)); 126 ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
119 desc->tid = ac; 127 desc->tid = ac;
120
121 desc->aid = TX_HW_DEFAULT_AID; 128 desc->aid = TX_HW_DEFAULT_AID;
122 desc->reserved = 0; 129 desc->reserved = 0;
123 130
@@ -201,42 +208,105 @@ u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set)
201 rate_set >>= 1; 208 rate_set >>= 1;
202 } 209 }
203 210
211#ifdef CONFIG_WL12XX_HT
212 /* MCS rates indication are on bits 16 - 23 */
213 rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
214
215 for (bit = 0; bit < 8; bit++) {
216 if (rate_set & 0x1)
217 enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
218 rate_set >>= 1;
219 }
220#endif
221
204 return enabled_rates; 222 return enabled_rates;
205} 223}
206 224
207void wl1271_tx_work(struct work_struct *work) 225static void handle_tx_low_watermark(struct wl1271 *wl)
226{
227 unsigned long flags;
228
229 if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
230 wl->tx_queue_count <= WL1271_TX_QUEUE_LOW_WATERMARK) {
231 /* firmware buffer has space, restart queues */
232 spin_lock_irqsave(&wl->wl_lock, flags);
233 ieee80211_wake_queues(wl->hw);
234 clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
235 spin_unlock_irqrestore(&wl->wl_lock, flags);
236 }
237}
238
239static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
240{
241 struct sk_buff *skb = NULL;
242 unsigned long flags;
243
244 skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VO]);
245 if (skb)
246 goto out;
247 skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_VI]);
248 if (skb)
249 goto out;
250 skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BE]);
251 if (skb)
252 goto out;
253 skb = skb_dequeue(&wl->tx_queue[CONF_TX_AC_BK]);
254
255out:
256 if (skb) {
257 spin_lock_irqsave(&wl->wl_lock, flags);
258 wl->tx_queue_count--;
259 spin_unlock_irqrestore(&wl->wl_lock, flags);
260 }
261
262 return skb;
263}
264
265static void wl1271_skb_queue_head(struct wl1271 *wl, struct sk_buff *skb)
266{
267 unsigned long flags;
268 int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
269
270 skb_queue_head(&wl->tx_queue[q], skb);
271 spin_lock_irqsave(&wl->wl_lock, flags);
272 wl->tx_queue_count++;
273 spin_unlock_irqrestore(&wl->wl_lock, flags);
274}
275
276void wl1271_tx_work_locked(struct wl1271 *wl)
208{ 277{
209 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
210 struct sk_buff *skb; 278 struct sk_buff *skb;
211 bool woken_up = false; 279 bool woken_up = false;
212 u32 sta_rates = 0; 280 u32 sta_rates = 0;
213 u32 buf_offset; 281 u32 buf_offset = 0;
282 bool sent_packets = false;
214 int ret; 283 int ret;
215 284
216 /* check if the rates supported by the AP have changed */ 285 /* check if the rates supported by the AP have changed */
217 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED, 286 if (unlikely(test_and_clear_bit(WL1271_FLAG_STA_RATES_CHANGED,
218 &wl->flags))) { 287 &wl->flags))) {
219 unsigned long flags; 288 unsigned long flags;
289
220 spin_lock_irqsave(&wl->wl_lock, flags); 290 spin_lock_irqsave(&wl->wl_lock, flags);
221 sta_rates = wl->sta_rate_set; 291 sta_rates = wl->sta_rate_set;
222 spin_unlock_irqrestore(&wl->wl_lock, flags); 292 spin_unlock_irqrestore(&wl->wl_lock, flags);
223 } 293 }
224 294
225 mutex_lock(&wl->mutex);
226
227 if (unlikely(wl->state == WL1271_STATE_OFF)) 295 if (unlikely(wl->state == WL1271_STATE_OFF))
228 goto out; 296 goto out;
229 297
230 /* if rates have changed, re-configure the rate policy */ 298 /* if rates have changed, re-configure the rate policy */
231 if (unlikely(sta_rates)) { 299 if (unlikely(sta_rates)) {
300 ret = wl1271_ps_elp_wakeup(wl, false);
301 if (ret < 0)
302 goto out;
303 woken_up = true;
304
232 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates); 305 wl->rate_set = wl1271_tx_enabled_rates_get(wl, sta_rates);
233 wl1271_acx_rate_policies(wl); 306 wl1271_acx_rate_policies(wl);
234 } 307 }
235 308
236 /* Prepare the transfer buffer, by aggregating all 309 while ((skb = wl1271_skb_dequeue(wl))) {
237 * available packets */
238 buf_offset = 0;
239 while ((skb = skb_dequeue(&wl->tx_queue))) {
240 if (!woken_up) { 310 if (!woken_up) {
241 ret = wl1271_ps_elp_wakeup(wl, false); 311 ret = wl1271_ps_elp_wakeup(wl, false);
242 if (ret < 0) 312 if (ret < 0)
@@ -245,13 +315,25 @@ void wl1271_tx_work(struct work_struct *work)
245 } 315 }
246 316
247 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset); 317 ret = wl1271_prepare_tx_frame(wl, skb, buf_offset);
248 if (ret == -EBUSY) { 318 if (ret == -EAGAIN) {
319 /*
320 * Aggregation buffer is full.
321 * Flush buffer and try again.
322 */
323 wl1271_skb_queue_head(wl, skb);
324 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
325 buf_offset, true);
326 sent_packets = true;
327 buf_offset = 0;
328 continue;
329 } else if (ret == -EBUSY) {
249 /* 330 /*
250 * Either the firmware buffer is full, or the 331 * Firmware buffer is full.
251 * aggregation buffer is.
252 * Queue back last skb, and stop aggregating. 332 * Queue back last skb, and stop aggregating.
253 */ 333 */
254 skb_queue_head(&wl->tx_queue, skb); 334 wl1271_skb_queue_head(wl, skb);
335 /* No work left, avoid scheduling redundant tx work */
336 set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
255 goto out_ack; 337 goto out_ack;
256 } else if (ret < 0) { 338 } else if (ret < 0) {
257 dev_kfree_skb(skb); 339 dev_kfree_skb(skb);
@@ -265,14 +347,25 @@ out_ack:
265 if (buf_offset) { 347 if (buf_offset) {
266 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf, 348 wl1271_write(wl, WL1271_SLV_MEM_DATA, wl->aggr_buf,
267 buf_offset, true); 349 buf_offset, true);
350 sent_packets = true;
351 }
352 if (sent_packets) {
268 /* interrupt the firmware with the new packets */ 353 /* interrupt the firmware with the new packets */
269 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 354 wl1271_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
355 handle_tx_low_watermark(wl);
270 } 356 }
271 357
272out: 358out:
273 if (woken_up) 359 if (woken_up)
274 wl1271_ps_elp_sleep(wl); 360 wl1271_ps_elp_sleep(wl);
361}
275 362
363void wl1271_tx_work(struct work_struct *work)
364{
365 struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
366
367 mutex_lock(&wl->mutex);
368 wl1271_tx_work_locked(wl);
276 mutex_unlock(&wl->mutex); 369 mutex_unlock(&wl->mutex);
277} 370}
278 371
@@ -298,7 +391,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
298 if (result->status == TX_SUCCESS) { 391 if (result->status == TX_SUCCESS) {
299 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) 392 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
300 info->flags |= IEEE80211_TX_STAT_ACK; 393 info->flags |= IEEE80211_TX_STAT_ACK;
301 rate = wl1271_rate_to_idx(wl, result->rate_class_index); 394 rate = wl1271_rate_to_idx(result->rate_class_index, wl->band);
302 retries = result->ack_failures; 395 retries = result->ack_failures;
303 } else if (result->status == TX_RETRY_EXCEEDED) { 396 } else if (result->status == TX_RETRY_EXCEEDED) {
304 wl->stats.excessive_retries++; 397 wl->stats.excessive_retries++;
@@ -335,8 +428,7 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
335 428
336 /* return the packet to the stack */ 429 /* return the packet to the stack */
337 ieee80211_tx_status(wl->hw, skb); 430 ieee80211_tx_status(wl->hw, skb);
338 wl->tx_frames[result->id] = NULL; 431 wl1271_free_tx_id(wl, result->id);
339 wl->tx_frames_cnt--;
340} 432}
341 433
342/* Called upon reception of a TX complete interrupt */ 434/* Called upon reception of a TX complete interrupt */
@@ -375,19 +467,6 @@ void wl1271_tx_complete(struct wl1271 *wl)
375 467
376 wl->tx_results_count++; 468 wl->tx_results_count++;
377 } 469 }
378
379 if (test_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags) &&
380 skb_queue_len(&wl->tx_queue) <= WL1271_TX_QUEUE_LOW_WATERMARK) {
381 unsigned long flags;
382
383 /* firmware buffer has space, restart queues */
384 wl1271_debug(DEBUG_TX, "tx_complete: waking queues");
385 spin_lock_irqsave(&wl->wl_lock, flags);
386 ieee80211_wake_queues(wl->hw);
387 clear_bit(WL1271_FLAG_TX_QUEUE_STOPPED, &wl->flags);
388 spin_unlock_irqrestore(&wl->wl_lock, flags);
389 ieee80211_queue_work(wl->hw, &wl->tx_work);
390 }
391} 470}
392 471
393/* caller must hold wl->mutex */ 472/* caller must hold wl->mutex */
@@ -397,19 +476,27 @@ void wl1271_tx_reset(struct wl1271 *wl)
397 struct sk_buff *skb; 476 struct sk_buff *skb;
398 477
399 /* TX failure */ 478 /* TX failure */
400 while ((skb = skb_dequeue(&wl->tx_queue))) { 479 for (i = 0; i < NUM_TX_QUEUES; i++) {
401 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 480 while ((skb = skb_dequeue(&wl->tx_queue[i]))) {
402 ieee80211_tx_status(wl->hw, skb); 481 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
482 ieee80211_tx_status(wl->hw, skb);
483 }
403 } 484 }
485 wl->tx_queue_count = 0;
486
487 /*
488 * Make sure the driver is at a consistent state, in case this
489 * function is called from a context other than interface removal.
490 */
491 handle_tx_low_watermark(wl);
404 492
405 for (i = 0; i < ACX_TX_DESCRIPTORS; i++) 493 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
406 if (wl->tx_frames[i] != NULL) { 494 if (wl->tx_frames[i] != NULL) {
407 skb = wl->tx_frames[i]; 495 skb = wl->tx_frames[i];
408 wl->tx_frames[i] = NULL; 496 wl1271_free_tx_id(wl, i);
409 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb); 497 wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
410 ieee80211_tx_status(wl->hw, skb); 498 ieee80211_tx_status(wl->hw, skb);
411 } 499 }
412 wl->tx_frames_cnt = 0;
413} 500}
414 501
415#define WL1271_TX_FLUSH_TIMEOUT 500000 502#define WL1271_TX_FLUSH_TIMEOUT 500000
@@ -424,8 +511,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
424 mutex_lock(&wl->mutex); 511 mutex_lock(&wl->mutex);
425 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d", 512 wl1271_debug(DEBUG_TX, "flushing tx buffer: %d",
426 wl->tx_frames_cnt); 513 wl->tx_frames_cnt);
427 if ((wl->tx_frames_cnt == 0) && 514 if ((wl->tx_frames_cnt == 0) && (wl->tx_queue_count == 0)) {
428 skb_queue_empty(&wl->tx_queue)) {
429 mutex_unlock(&wl->mutex); 515 mutex_unlock(&wl->mutex);
430 return; 516 return;
431 } 517 }
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/tx.h
index d12a129ad11c..903e5dc69b7a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/tx.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_TX_H__ 25#ifndef __TX_H__
26#define __WL1271_TX_H__ 26#define __TX_H__
27 27
28#define TX_HW_BLOCK_SPARE 2 28#define TX_HW_BLOCK_SPARE 2
29#define TX_HW_BLOCK_SIZE 252 29#define TX_HW_BLOCK_SIZE 252
@@ -140,10 +140,11 @@ static inline int wl1271_tx_get_queue(int queue)
140} 140}
141 141
142void wl1271_tx_work(struct work_struct *work); 142void wl1271_tx_work(struct work_struct *work);
143void wl1271_tx_work_locked(struct wl1271 *wl);
143void wl1271_tx_complete(struct wl1271 *wl); 144void wl1271_tx_complete(struct wl1271 *wl);
144void wl1271_tx_reset(struct wl1271 *wl); 145void wl1271_tx_reset(struct wl1271 *wl);
145void wl1271_tx_flush(struct wl1271 *wl); 146void wl1271_tx_flush(struct wl1271 *wl);
146u8 wl1271_rate_to_idx(struct wl1271 *wl, int rate); 147u8 wl1271_rate_to_idx(int rate, enum ieee80211_band band);
147u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set); 148u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set);
148 149
149#endif 150#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_debugfs.c b/drivers/net/wireless/wl12xx/wl1271_debugfs.c
deleted file mode 100644
index 66c2b90ddfd4..000000000000
--- a/drivers/net/wireless/wl12xx/wl1271_debugfs.c
+++ /dev/null
@@ -1,583 +0,0 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include "wl1271_debugfs.h"
25
26#include <linux/skbuff.h>
27#include <linux/slab.h>
28
29#include "wl1271.h"
30#include "wl1271_acx.h"
31#include "wl1271_ps.h"
32#include "wl1271_io.h"
33
34/* ms */
35#define WL1271_DEBUGFS_STATS_LIFETIME 1000
36
37/* debugfs macros idea from mac80211 */
38
39#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
40static ssize_t name## _read(struct file *file, char __user *userbuf, \
41 size_t count, loff_t *ppos) \
42{ \
43 struct wl1271 *wl = file->private_data; \
44 char buf[buflen]; \
45 int res; \
46 \
47 res = scnprintf(buf, buflen, fmt "\n", ##value); \
48 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
49} \
50 \
51static const struct file_operations name## _ops = { \
52 .read = name## _read, \
53 .open = wl1271_open_file_generic, \
54 .llseek = generic_file_llseek, \
55};
56
57#define DEBUGFS_ADD(name, parent) \
58 wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \
59 wl, &name## _ops); \
60 if (IS_ERR(wl->debugfs.name)) { \
61 ret = PTR_ERR(wl->debugfs.name); \
62 wl->debugfs.name = NULL; \
63 goto out; \
64 }
65
66#define DEBUGFS_DEL(name) \
67 do { \
68 debugfs_remove(wl->debugfs.name); \
69 wl->debugfs.name = NULL; \
70 } while (0)
71
72#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \
73static ssize_t sub## _ ##name## _read(struct file *file, \
74 char __user *userbuf, \
75 size_t count, loff_t *ppos) \
76{ \
77 struct wl1271 *wl = file->private_data; \
78 char buf[buflen]; \
79 int res; \
80 \
81 wl1271_debugfs_update_stats(wl); \
82 \
83 res = scnprintf(buf, buflen, fmt "\n", \
84 wl->stats.fw_stats->sub.name); \
85 return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
86} \
87 \
88static const struct file_operations sub## _ ##name## _ops = { \
89 .read = sub## _ ##name## _read, \
90 .open = wl1271_open_file_generic, \
91 .llseek = generic_file_llseek, \
92};
93
94#define DEBUGFS_FWSTATS_ADD(sub, name) \
95 DEBUGFS_ADD(sub## _ ##name, wl->debugfs.fw_statistics)
96
97#define DEBUGFS_FWSTATS_DEL(sub, name) \
98 DEBUGFS_DEL(sub## _ ##name)
99
100static void wl1271_debugfs_update_stats(struct wl1271 *wl)
101{
102 int ret;
103
104 mutex_lock(&wl->mutex);
105
106 ret = wl1271_ps_elp_wakeup(wl, false);
107 if (ret < 0)
108 goto out;
109
110 if (wl->state == WL1271_STATE_ON &&
111 time_after(jiffies, wl->stats.fw_stats_update +
112 msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
113 wl1271_acx_statistics(wl, wl->stats.fw_stats);
114 wl->stats.fw_stats_update = jiffies;
115 }
116
117 wl1271_ps_elp_sleep(wl);
118
119out:
120 mutex_unlock(&wl->mutex);
121}
122
123static int wl1271_open_file_generic(struct inode *inode, struct file *file)
124{
125 file->private_data = inode->i_private;
126 return 0;
127}
128
129DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u");
130
131DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u");
132DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u");
133DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u");
134DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u");
135DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u");
136DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u");
137DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u");
138DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u");
139
140DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u");
141DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u");
142DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u");
143DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u");
144
145DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u");
146DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u");
147DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u");
148DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u");
149DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u");
150DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u");
151DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u");
152DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u");
153DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u");
154DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u");
155DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u");
156DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u");
157DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u");
158DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u");
159DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u");
160DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u");
161DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u");
162DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u");
163
164DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u");
165DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u");
166/* skipping wep.reserved */
167DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u");
168DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u");
169DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u");
170DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u");
171
172DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u");
173DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u");
174DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u");
175DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u");
176DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u");
177DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u");
178DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u");
179DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u");
180DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u");
181DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u");
182DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u");
183DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u");
184/* skipping cont_miss_bcns_spread for now */
185DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u");
186
187DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u");
188DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u");
189
190DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u");
191DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u");
192DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u");
193DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u");
194DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u");
195DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u");
196
197DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u");
198DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u");
199DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u");
200DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u");
201DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u");
202DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u");
203DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u");
204DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u");
205
206DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u");
207DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u");
208DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u");
209DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u");
210DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u");
211DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u");
212DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u");
213
214DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u");
215DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u");
216DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
217 20, "%u");
218DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u");
219DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u");
220
221DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count);
222DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u",
223 wl->stats.excessive_retries);
224
225static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
226 size_t count, loff_t *ppos)
227{
228 struct wl1271 *wl = file->private_data;
229 u32 queue_len;
230 char buf[20];
231 int res;
232
233 queue_len = skb_queue_len(&wl->tx_queue);
234
235 res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
236 return simple_read_from_buffer(userbuf, count, ppos, buf, res);
237}
238
239static const struct file_operations tx_queue_len_ops = {
240 .read = tx_queue_len_read,
241 .open = wl1271_open_file_generic,
242 .llseek = default_llseek,
243};
244
245static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
246 size_t count, loff_t *ppos)
247{
248 struct wl1271 *wl = file->private_data;
249 bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
250
251 int res;
252 char buf[10];
253
254 res = scnprintf(buf, sizeof(buf), "%d\n", state);
255
256 return simple_read_from_buffer(user_buf, count, ppos, buf, res);
257}
258
259static ssize_t gpio_power_write(struct file *file,
260 const char __user *user_buf,
261 size_t count, loff_t *ppos)
262{
263 struct wl1271 *wl = file->private_data;
264 char buf[10];
265 size_t len;
266 unsigned long value;
267 int ret;
268
269 mutex_lock(&wl->mutex);
270
271 len = min(count, sizeof(buf) - 1);
272 if (copy_from_user(buf, user_buf, len)) {
273 ret = -EFAULT;
274 goto out;
275 }
276 buf[len] = '\0';
277
278 ret = strict_strtoul(buf, 0, &value);
279 if (ret < 0) {
280 wl1271_warning("illegal value in gpio_power");
281 goto out;
282 }
283
284 if (value)
285 wl1271_power_on(wl);
286 else
287 wl1271_power_off(wl);
288
289out:
290 mutex_unlock(&wl->mutex);
291 return count;
292}
293
294static const struct file_operations gpio_power_ops = {
295 .read = gpio_power_read,
296 .write = gpio_power_write,
297 .open = wl1271_open_file_generic,
298 .llseek = default_llseek,
299};
300
301static void wl1271_debugfs_delete_files(struct wl1271 *wl)
302{
303 DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
304
305 DEBUGFS_FWSTATS_DEL(rx, out_of_mem);
306 DEBUGFS_FWSTATS_DEL(rx, hdr_overflow);
307 DEBUGFS_FWSTATS_DEL(rx, hw_stuck);
308 DEBUGFS_FWSTATS_DEL(rx, dropped);
309 DEBUGFS_FWSTATS_DEL(rx, fcs_err);
310 DEBUGFS_FWSTATS_DEL(rx, xfr_hint_trig);
311 DEBUGFS_FWSTATS_DEL(rx, path_reset);
312 DEBUGFS_FWSTATS_DEL(rx, reset_counter);
313
314 DEBUGFS_FWSTATS_DEL(dma, rx_requested);
315 DEBUGFS_FWSTATS_DEL(dma, rx_errors);
316 DEBUGFS_FWSTATS_DEL(dma, tx_requested);
317 DEBUGFS_FWSTATS_DEL(dma, tx_errors);
318
319 DEBUGFS_FWSTATS_DEL(isr, cmd_cmplt);
320 DEBUGFS_FWSTATS_DEL(isr, fiqs);
321 DEBUGFS_FWSTATS_DEL(isr, rx_headers);
322 DEBUGFS_FWSTATS_DEL(isr, rx_mem_overflow);
323 DEBUGFS_FWSTATS_DEL(isr, rx_rdys);
324 DEBUGFS_FWSTATS_DEL(isr, irqs);
325 DEBUGFS_FWSTATS_DEL(isr, tx_procs);
326 DEBUGFS_FWSTATS_DEL(isr, decrypt_done);
327 DEBUGFS_FWSTATS_DEL(isr, dma0_done);
328 DEBUGFS_FWSTATS_DEL(isr, dma1_done);
329 DEBUGFS_FWSTATS_DEL(isr, tx_exch_complete);
330 DEBUGFS_FWSTATS_DEL(isr, commands);
331 DEBUGFS_FWSTATS_DEL(isr, rx_procs);
332 DEBUGFS_FWSTATS_DEL(isr, hw_pm_mode_changes);
333 DEBUGFS_FWSTATS_DEL(isr, host_acknowledges);
334 DEBUGFS_FWSTATS_DEL(isr, pci_pm);
335 DEBUGFS_FWSTATS_DEL(isr, wakeups);
336 DEBUGFS_FWSTATS_DEL(isr, low_rssi);
337
338 DEBUGFS_FWSTATS_DEL(wep, addr_key_count);
339 DEBUGFS_FWSTATS_DEL(wep, default_key_count);
340 /* skipping wep.reserved */
341 DEBUGFS_FWSTATS_DEL(wep, key_not_found);
342 DEBUGFS_FWSTATS_DEL(wep, decrypt_fail);
343 DEBUGFS_FWSTATS_DEL(wep, packets);
344 DEBUGFS_FWSTATS_DEL(wep, interrupt);
345
346 DEBUGFS_FWSTATS_DEL(pwr, ps_enter);
347 DEBUGFS_FWSTATS_DEL(pwr, elp_enter);
348 DEBUGFS_FWSTATS_DEL(pwr, missing_bcns);
349 DEBUGFS_FWSTATS_DEL(pwr, wake_on_host);
350 DEBUGFS_FWSTATS_DEL(pwr, wake_on_timer_exp);
351 DEBUGFS_FWSTATS_DEL(pwr, tx_with_ps);
352 DEBUGFS_FWSTATS_DEL(pwr, tx_without_ps);
353 DEBUGFS_FWSTATS_DEL(pwr, rcvd_beacons);
354 DEBUGFS_FWSTATS_DEL(pwr, power_save_off);
355 DEBUGFS_FWSTATS_DEL(pwr, enable_ps);
356 DEBUGFS_FWSTATS_DEL(pwr, disable_ps);
357 DEBUGFS_FWSTATS_DEL(pwr, fix_tsf_ps);
358 /* skipping cont_miss_bcns_spread for now */
359 DEBUGFS_FWSTATS_DEL(pwr, rcvd_awake_beacons);
360
361 DEBUGFS_FWSTATS_DEL(mic, rx_pkts);
362 DEBUGFS_FWSTATS_DEL(mic, calc_failure);
363
364 DEBUGFS_FWSTATS_DEL(aes, encrypt_fail);
365 DEBUGFS_FWSTATS_DEL(aes, decrypt_fail);
366 DEBUGFS_FWSTATS_DEL(aes, encrypt_packets);
367 DEBUGFS_FWSTATS_DEL(aes, decrypt_packets);
368 DEBUGFS_FWSTATS_DEL(aes, encrypt_interrupt);
369 DEBUGFS_FWSTATS_DEL(aes, decrypt_interrupt);
370
371 DEBUGFS_FWSTATS_DEL(event, heart_beat);
372 DEBUGFS_FWSTATS_DEL(event, calibration);
373 DEBUGFS_FWSTATS_DEL(event, rx_mismatch);
374 DEBUGFS_FWSTATS_DEL(event, rx_mem_empty);
375 DEBUGFS_FWSTATS_DEL(event, rx_pool);
376 DEBUGFS_FWSTATS_DEL(event, oom_late);
377 DEBUGFS_FWSTATS_DEL(event, phy_transmit_error);
378 DEBUGFS_FWSTATS_DEL(event, tx_stuck);
379
380 DEBUGFS_FWSTATS_DEL(ps, pspoll_timeouts);
381 DEBUGFS_FWSTATS_DEL(ps, upsd_timeouts);
382 DEBUGFS_FWSTATS_DEL(ps, upsd_max_sptime);
383 DEBUGFS_FWSTATS_DEL(ps, upsd_max_apturn);
384 DEBUGFS_FWSTATS_DEL(ps, pspoll_max_apturn);
385 DEBUGFS_FWSTATS_DEL(ps, pspoll_utilization);
386 DEBUGFS_FWSTATS_DEL(ps, upsd_utilization);
387
388 DEBUGFS_FWSTATS_DEL(rxpipe, rx_prep_beacon_drop);
389 DEBUGFS_FWSTATS_DEL(rxpipe, descr_host_int_trig_rx_data);
390 DEBUGFS_FWSTATS_DEL(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
391 DEBUGFS_FWSTATS_DEL(rxpipe, missed_beacon_host_int_trig_rx_data);
392 DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
393
394 DEBUGFS_DEL(tx_queue_len);
395 DEBUGFS_DEL(retry_count);
396 DEBUGFS_DEL(excessive_retries);
397
398 DEBUGFS_DEL(gpio_power);
399}
400
401static int wl1271_debugfs_add_files(struct wl1271 *wl)
402{
403 int ret = 0;
404
405 DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
406
407 DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
408 DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
409 DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
410 DEBUGFS_FWSTATS_ADD(rx, dropped);
411 DEBUGFS_FWSTATS_ADD(rx, fcs_err);
412 DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
413 DEBUGFS_FWSTATS_ADD(rx, path_reset);
414 DEBUGFS_FWSTATS_ADD(rx, reset_counter);
415
416 DEBUGFS_FWSTATS_ADD(dma, rx_requested);
417 DEBUGFS_FWSTATS_ADD(dma, rx_errors);
418 DEBUGFS_FWSTATS_ADD(dma, tx_requested);
419 DEBUGFS_FWSTATS_ADD(dma, tx_errors);
420
421 DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
422 DEBUGFS_FWSTATS_ADD(isr, fiqs);
423 DEBUGFS_FWSTATS_ADD(isr, rx_headers);
424 DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
425 DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
426 DEBUGFS_FWSTATS_ADD(isr, irqs);
427 DEBUGFS_FWSTATS_ADD(isr, tx_procs);
428 DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
429 DEBUGFS_FWSTATS_ADD(isr, dma0_done);
430 DEBUGFS_FWSTATS_ADD(isr, dma1_done);
431 DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
432 DEBUGFS_FWSTATS_ADD(isr, commands);
433 DEBUGFS_FWSTATS_ADD(isr, rx_procs);
434 DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
435 DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
436 DEBUGFS_FWSTATS_ADD(isr, pci_pm);
437 DEBUGFS_FWSTATS_ADD(isr, wakeups);
438 DEBUGFS_FWSTATS_ADD(isr, low_rssi);
439
440 DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
441 DEBUGFS_FWSTATS_ADD(wep, default_key_count);
442 /* skipping wep.reserved */
443 DEBUGFS_FWSTATS_ADD(wep, key_not_found);
444 DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
445 DEBUGFS_FWSTATS_ADD(wep, packets);
446 DEBUGFS_FWSTATS_ADD(wep, interrupt);
447
448 DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
449 DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
450 DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
451 DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
452 DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
453 DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
454 DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
455 DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
456 DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
457 DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
458 DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
459 DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
460 /* skipping cont_miss_bcns_spread for now */
461 DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
462
463 DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
464 DEBUGFS_FWSTATS_ADD(mic, calc_failure);
465
466 DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
467 DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
468 DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
469 DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
470 DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
471 DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
472
473 DEBUGFS_FWSTATS_ADD(event, heart_beat);
474 DEBUGFS_FWSTATS_ADD(event, calibration);
475 DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
476 DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
477 DEBUGFS_FWSTATS_ADD(event, rx_pool);
478 DEBUGFS_FWSTATS_ADD(event, oom_late);
479 DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
480 DEBUGFS_FWSTATS_ADD(event, tx_stuck);
481
482 DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
483 DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
484 DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
485 DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
486 DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
487 DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
488 DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
489
490 DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
491 DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
492 DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
493 DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
494 DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
495
496 DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
497 DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
498 DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
499
500 DEBUGFS_ADD(gpio_power, wl->debugfs.rootdir);
501
502out:
503 if (ret < 0)
504 wl1271_debugfs_delete_files(wl);
505
506 return ret;
507}
508
509void wl1271_debugfs_reset(struct wl1271 *wl)
510{
511 memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
512 wl->stats.retry_count = 0;
513 wl->stats.excessive_retries = 0;
514}
515
516int wl1271_debugfs_init(struct wl1271 *wl)
517{
518 int ret;
519
520 wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
521
522 if (IS_ERR(wl->debugfs.rootdir)) {
523 ret = PTR_ERR(wl->debugfs.rootdir);
524 wl->debugfs.rootdir = NULL;
525 goto err;
526 }
527
528 wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics",
529 wl->debugfs.rootdir);
530
531 if (IS_ERR(wl->debugfs.fw_statistics)) {
532 ret = PTR_ERR(wl->debugfs.fw_statistics);
533 wl->debugfs.fw_statistics = NULL;
534 goto err_root;
535 }
536
537 wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats),
538 GFP_KERNEL);
539
540 if (!wl->stats.fw_stats) {
541 ret = -ENOMEM;
542 goto err_fw;
543 }
544
545 wl->stats.fw_stats_update = jiffies;
546
547 ret = wl1271_debugfs_add_files(wl);
548
549 if (ret < 0)
550 goto err_file;
551
552 return 0;
553
554err_file:
555 kfree(wl->stats.fw_stats);
556 wl->stats.fw_stats = NULL;
557
558err_fw:
559 debugfs_remove(wl->debugfs.fw_statistics);
560 wl->debugfs.fw_statistics = NULL;
561
562err_root:
563 debugfs_remove(wl->debugfs.rootdir);
564 wl->debugfs.rootdir = NULL;
565
566err:
567 return ret;
568}
569
570void wl1271_debugfs_exit(struct wl1271 *wl)
571{
572 wl1271_debugfs_delete_files(wl);
573
574 kfree(wl->stats.fw_stats);
575 wl->stats.fw_stats = NULL;
576
577 debugfs_remove(wl->debugfs.fw_statistics);
578 wl->debugfs.fw_statistics = NULL;
579
580 debugfs_remove(wl->debugfs.rootdir);
581 wl->debugfs.rootdir = NULL;
582
583}
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl12xx.h
index 046acb816d2f..9050dd9b62d2 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl12xx.h
@@ -22,8 +22,8 @@
22 * 22 *
23 */ 23 */
24 24
25#ifndef __WL1271_H__ 25#ifndef __WL12XX_H__
26#define __WL1271_H__ 26#define __WL12XX_H__
27 27
28#include <linux/mutex.h> 28#include <linux/mutex.h>
29#include <linux/completion.h> 29#include <linux/completion.h>
@@ -32,8 +32,8 @@
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "wl1271_conf.h" 35#include "conf.h"
36#include "wl1271_ini.h" 36#include "ini.h"
37 37
38#define DRIVER_NAME "wl1271" 38#define DRIVER_NAME "wl1271"
39#define DRIVER_PREFIX DRIVER_NAME ": " 39#define DRIVER_PREFIX DRIVER_NAME ": "
@@ -60,31 +60,32 @@ enum {
60 DEBUG_ALL = ~0, 60 DEBUG_ALL = ~0,
61}; 61};
62 62
63#define DEBUG_LEVEL (DEBUG_NONE) 63extern u32 wl12xx_debug_level;
64 64
65#define DEBUG_DUMP_LIMIT 1024 65#define DEBUG_DUMP_LIMIT 1024
66 66
67#define wl1271_error(fmt, arg...) \ 67#define wl1271_error(fmt, arg...) \
68 printk(KERN_ERR DRIVER_PREFIX "ERROR " fmt "\n", ##arg) 68 pr_err(DRIVER_PREFIX "ERROR " fmt "\n", ##arg)
69 69
70#define wl1271_warning(fmt, arg...) \ 70#define wl1271_warning(fmt, arg...) \
71 printk(KERN_WARNING DRIVER_PREFIX "WARNING " fmt "\n", ##arg) 71 pr_warning(DRIVER_PREFIX "WARNING " fmt "\n", ##arg)
72 72
73#define wl1271_notice(fmt, arg...) \ 73#define wl1271_notice(fmt, arg...) \
74 printk(KERN_INFO DRIVER_PREFIX fmt "\n", ##arg) 74 pr_info(DRIVER_PREFIX fmt "\n", ##arg)
75 75
76#define wl1271_info(fmt, arg...) \ 76#define wl1271_info(fmt, arg...) \
77 printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg) 77 pr_info(DRIVER_PREFIX fmt "\n", ##arg)
78 78
79#define wl1271_debug(level, fmt, arg...) \ 79#define wl1271_debug(level, fmt, arg...) \
80 do { \ 80 do { \
81 if (level & DEBUG_LEVEL) \ 81 if (level & wl12xx_debug_level) \
82 printk(KERN_DEBUG DRIVER_PREFIX fmt "\n", ##arg); \ 82 pr_debug(DRIVER_PREFIX fmt "\n", ##arg); \
83 } while (0) 83 } while (0)
84 84
85/* TODO: use pr_debug_hex_dump when it will be available */
85#define wl1271_dump(level, prefix, buf, len) \ 86#define wl1271_dump(level, prefix, buf, len) \
86 do { \ 87 do { \
87 if (level & DEBUG_LEVEL) \ 88 if (level & wl12xx_debug_level) \
88 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ 89 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
89 DUMP_PREFIX_OFFSET, 16, 1, \ 90 DUMP_PREFIX_OFFSET, 16, 1, \
90 buf, \ 91 buf, \
@@ -94,7 +95,7 @@ enum {
94 95
95#define wl1271_dump_ascii(level, prefix, buf, len) \ 96#define wl1271_dump_ascii(level, prefix, buf, len) \
96 do { \ 97 do { \
97 if (level & DEBUG_LEVEL) \ 98 if (level & wl12xx_debug_level) \
98 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \ 99 print_hex_dump(KERN_DEBUG, DRIVER_PREFIX prefix, \
99 DUMP_PREFIX_OFFSET, 16, 1, \ 100 DUMP_PREFIX_OFFSET, 16, 1, \
100 buf, \ 101 buf, \
@@ -174,108 +175,6 @@ struct wl1271_stats {
174 unsigned int excessive_retries; 175 unsigned int excessive_retries;
175}; 176};
176 177
177struct wl1271_debugfs {
178 struct dentry *rootdir;
179 struct dentry *fw_statistics;
180
181 struct dentry *tx_internal_desc_overflow;
182
183 struct dentry *rx_out_of_mem;
184 struct dentry *rx_hdr_overflow;
185 struct dentry *rx_hw_stuck;
186 struct dentry *rx_dropped;
187 struct dentry *rx_fcs_err;
188 struct dentry *rx_xfr_hint_trig;
189 struct dentry *rx_path_reset;
190 struct dentry *rx_reset_counter;
191
192 struct dentry *dma_rx_requested;
193 struct dentry *dma_rx_errors;
194 struct dentry *dma_tx_requested;
195 struct dentry *dma_tx_errors;
196
197 struct dentry *isr_cmd_cmplt;
198 struct dentry *isr_fiqs;
199 struct dentry *isr_rx_headers;
200 struct dentry *isr_rx_mem_overflow;
201 struct dentry *isr_rx_rdys;
202 struct dentry *isr_irqs;
203 struct dentry *isr_tx_procs;
204 struct dentry *isr_decrypt_done;
205 struct dentry *isr_dma0_done;
206 struct dentry *isr_dma1_done;
207 struct dentry *isr_tx_exch_complete;
208 struct dentry *isr_commands;
209 struct dentry *isr_rx_procs;
210 struct dentry *isr_hw_pm_mode_changes;
211 struct dentry *isr_host_acknowledges;
212 struct dentry *isr_pci_pm;
213 struct dentry *isr_wakeups;
214 struct dentry *isr_low_rssi;
215
216 struct dentry *wep_addr_key_count;
217 struct dentry *wep_default_key_count;
218 /* skipping wep.reserved */
219 struct dentry *wep_key_not_found;
220 struct dentry *wep_decrypt_fail;
221 struct dentry *wep_packets;
222 struct dentry *wep_interrupt;
223
224 struct dentry *pwr_ps_enter;
225 struct dentry *pwr_elp_enter;
226 struct dentry *pwr_missing_bcns;
227 struct dentry *pwr_wake_on_host;
228 struct dentry *pwr_wake_on_timer_exp;
229 struct dentry *pwr_tx_with_ps;
230 struct dentry *pwr_tx_without_ps;
231 struct dentry *pwr_rcvd_beacons;
232 struct dentry *pwr_power_save_off;
233 struct dentry *pwr_enable_ps;
234 struct dentry *pwr_disable_ps;
235 struct dentry *pwr_fix_tsf_ps;
236 /* skipping cont_miss_bcns_spread for now */
237 struct dentry *pwr_rcvd_awake_beacons;
238
239 struct dentry *mic_rx_pkts;
240 struct dentry *mic_calc_failure;
241
242 struct dentry *aes_encrypt_fail;
243 struct dentry *aes_decrypt_fail;
244 struct dentry *aes_encrypt_packets;
245 struct dentry *aes_decrypt_packets;
246 struct dentry *aes_encrypt_interrupt;
247 struct dentry *aes_decrypt_interrupt;
248
249 struct dentry *event_heart_beat;
250 struct dentry *event_calibration;
251 struct dentry *event_rx_mismatch;
252 struct dentry *event_rx_mem_empty;
253 struct dentry *event_rx_pool;
254 struct dentry *event_oom_late;
255 struct dentry *event_phy_transmit_error;
256 struct dentry *event_tx_stuck;
257
258 struct dentry *ps_pspoll_timeouts;
259 struct dentry *ps_upsd_timeouts;
260 struct dentry *ps_upsd_max_sptime;
261 struct dentry *ps_upsd_max_apturn;
262 struct dentry *ps_pspoll_max_apturn;
263 struct dentry *ps_pspoll_utilization;
264 struct dentry *ps_upsd_utilization;
265
266 struct dentry *rxpipe_rx_prep_beacon_drop;
267 struct dentry *rxpipe_descr_host_int_trig_rx_data;
268 struct dentry *rxpipe_beacon_buffer_thres_host_int_trig_rx_data;
269 struct dentry *rxpipe_missed_beacon_host_int_trig_rx_data;
270 struct dentry *rxpipe_tx_xfr_host_int_trig_rx_data;
271
272 struct dentry *tx_queue_len;
273
274 struct dentry *retry_count;
275 struct dentry *excessive_retries;
276 struct dentry *gpio_power;
277};
278
279#define NUM_TX_QUEUES 4 178#define NUM_TX_QUEUES 4
280#define NUM_RX_PKT_DESC 8 179#define NUM_RX_PKT_DESC 8
281 180
@@ -351,6 +250,7 @@ struct wl1271 {
351#define WL1271_FLAG_IDLE_REQUESTED (11) 250#define WL1271_FLAG_IDLE_REQUESTED (11)
352#define WL1271_FLAG_PSPOLL_FAILURE (12) 251#define WL1271_FLAG_PSPOLL_FAILURE (12)
353#define WL1271_FLAG_STA_STATE_SENT (13) 252#define WL1271_FLAG_STA_STATE_SENT (13)
253#define WL1271_FLAG_FW_TX_BUSY (14)
354 unsigned long flags; 254 unsigned long flags;
355 255
356 struct wl1271_partition_set part; 256 struct wl1271_partition_set part;
@@ -392,11 +292,13 @@ struct wl1271 {
392 int session_counter; 292 int session_counter;
393 293
394 /* Frames scheduled for transmission, not handled yet */ 294 /* Frames scheduled for transmission, not handled yet */
395 struct sk_buff_head tx_queue; 295 struct sk_buff_head tx_queue[NUM_TX_QUEUES];
296 int tx_queue_count;
396 297
397 struct work_struct tx_work; 298 struct work_struct tx_work;
398 299
399 /* Pending TX frames */ 300 /* Pending TX frames */
301 unsigned long tx_frames_map[BITS_TO_LONGS(ACX_TX_DESCRIPTORS)];
400 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS]; 302 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
401 int tx_frames_cnt; 303 int tx_frames_cnt;
402 304
@@ -429,10 +331,18 @@ struct wl1271 {
429 struct wl1271_scan scan; 331 struct wl1271_scan scan;
430 struct delayed_work scan_complete_work; 332 struct delayed_work scan_complete_work;
431 333
334 /* probe-req template for the current AP */
335 struct sk_buff *probereq;
336
432 /* Our association ID */ 337 /* Our association ID */
433 u16 aid; 338 u16 aid;
434 339
435 /* currently configured rate set */ 340 /*
341 * currently configured rate set:
342 * bits 0-15 - 802.11abg rates
343 * bits 16-23 - 802.11n MCS index mask
344 * support only 1 stream, thus only 8 bits for the MCS rates (0-7).
345 */
436 u32 sta_rate_set; 346 u32 sta_rate_set;
437 u32 basic_rate_set; 347 u32 basic_rate_set;
438 u32 basic_rate; 348 u32 basic_rate;
@@ -468,7 +378,7 @@ struct wl1271 {
468 int last_rssi_event; 378 int last_rssi_event;
469 379
470 struct wl1271_stats stats; 380 struct wl1271_stats stats;
471 struct wl1271_debugfs debugfs; 381 struct dentry *rootdir;
472 382
473 __le32 buffer_32; 383 __le32 buffer_32;
474 u32 buffer_cmd; 384 u32 buffer_cmd;
@@ -509,4 +419,8 @@ int wl1271_plt_stop(struct wl1271 *wl);
509#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */ 419#define WL1271_PRE_POWER_ON_SLEEP 20 /* in milliseconds */
510#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */ 420#define WL1271_POWER_ON_SLEEP 200 /* in milliseconds */
511 421
422/* Macros to handle wl1271.sta_rate_set */
423#define HW_BG_RATES_MASK 0xffff
424#define HW_HT_RATES_OFFSET 16
425
512#endif 426#endif
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 184628027213..be21032f4dc1 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -2,6 +2,7 @@
2#define __WL12XX_80211_H__ 2#define __WL12XX_80211_H__
3 3
4#include <linux/if_ether.h> /* ETH_ALEN */ 4#include <linux/if_ether.h> /* ETH_ALEN */
5#include <linux/if_arp.h>
5 6
6/* RATES */ 7/* RATES */
7#define IEEE80211_CCK_RATE_1MB 0x02 8#define IEEE80211_CCK_RATE_1MB 0x02
@@ -133,11 +134,17 @@ struct wl12xx_qos_null_data_template {
133 __le16 qos_ctl; 134 __le16 qos_ctl;
134} __packed; 135} __packed;
135 136
136struct wl12xx_probe_req_template { 137struct wl12xx_arp_rsp_template {
137 struct ieee80211_header header; 138 struct ieee80211_hdr_3addr hdr;
138 struct wl12xx_ie_ssid ssid; 139
139 struct wl12xx_ie_rates rates; 140 u8 llc_hdr[sizeof(rfc1042_header)];
140 struct wl12xx_ie_rates ext_rates; 141 u16 llc_type;
142
143 struct arphdr arp_hdr;
144 u8 sender_hw[ETH_ALEN];
145 u32 sender_ip;
146 u8 target_hw[ETH_ALEN];
147 u32 target_ip;
141} __packed; 148} __packed;
142 149
143 150
diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c
index 390d77f762c4..415eec401e2e 100644
--- a/drivers/net/wireless/zd1201.c
+++ b/drivers/net/wireless/zd1201.c
@@ -30,6 +30,7 @@ static struct usb_device_id zd1201_table[] = {
30 {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */ 30 {USB_DEVICE(0x0ace, 0x1201)}, /* ZyDAS ZD1201 Wireless USB Adapter */
31 {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */ 31 {USB_DEVICE(0x050d, 0x6051)}, /* Belkin F5D6051 usb adapter */
32 {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */ 32 {USB_DEVICE(0x0db0, 0x6823)}, /* MSI UB11B usb adapter */
33 {USB_DEVICE(0x1044, 0x8004)}, /* Gigabyte GN-WLBZ101 */
33 {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */ 34 {USB_DEVICE(0x1044, 0x8005)}, /* GIGABYTE GN-WLBZ201 usb adapter */
34 {} 35 {}
35}; 36};
@@ -1829,7 +1830,7 @@ err_zd:
1829 1830
1830static void zd1201_disconnect(struct usb_interface *interface) 1831static void zd1201_disconnect(struct usb_interface *interface)
1831{ 1832{
1832 struct zd1201 *zd=(struct zd1201 *)usb_get_intfdata(interface); 1833 struct zd1201 *zd = usb_get_intfdata(interface);
1833 struct hlist_node *node, *node2; 1834 struct hlist_node *node, *node2;
1834 struct zd1201_frag *frag; 1835 struct zd1201_frag *frag;
1835 1836
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 87a95bcfee57..6a9b66051cf7 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -117,6 +117,7 @@ int zd_ioread32v_locked(struct zd_chip *chip, u32 *values, const zd_addr_t *addr
117 117
118 /* Allocate a single memory block for values and addresses. */ 118 /* Allocate a single memory block for values and addresses. */
119 count16 = 2*count; 119 count16 = 2*count;
120 /* zd_addr_t is __nocast, so the kmalloc needs an explicit cast */
120 a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)), 121 a16 = (zd_addr_t *) kmalloc(count16 * (sizeof(zd_addr_t) + sizeof(u16)),
121 GFP_KERNEL); 122 GFP_KERNEL);
122 if (!a16) { 123 if (!a16) {
@@ -1448,7 +1449,7 @@ int zd_rfwritev_locked(struct zd_chip *chip,
1448 */ 1449 */
1449int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value) 1450int zd_rfwrite_cr_locked(struct zd_chip *chip, u32 value)
1450{ 1451{
1451 struct zd_ioreq16 ioreqs[] = { 1452 const struct zd_ioreq16 ioreqs[] = {
1452 { CR244, (value >> 16) & 0xff }, 1453 { CR244, (value >> 16) & 0xff },
1453 { CR243, (value >> 8) & 0xff }, 1454 { CR243, (value >> 8) & 0xff },
1454 { CR242, value & 0xff }, 1455 { CR242, value & 0xff },
@@ -1475,7 +1476,7 @@ int zd_rfwritev_cr_locked(struct zd_chip *chip,
1475int zd_chip_set_multicast_hash(struct zd_chip *chip, 1476int zd_chip_set_multicast_hash(struct zd_chip *chip,
1476 struct zd_mc_hash *hash) 1477 struct zd_mc_hash *hash)
1477{ 1478{
1478 struct zd_ioreq32 ioreqs[] = { 1479 const struct zd_ioreq32 ioreqs[] = {
1479 { CR_GROUP_HASH_P1, hash->low }, 1480 { CR_GROUP_HASH_P1, hash->low },
1480 { CR_GROUP_HASH_P2, hash->high }, 1481 { CR_GROUP_HASH_P2, hash->high },
1481 }; 1482 };
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 43307bd42a69..6107304cb94c 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -1207,7 +1207,6 @@ static void housekeeping_enable(struct zd_mac *mac)
1207static void housekeeping_disable(struct zd_mac *mac) 1207static void housekeeping_disable(struct zd_mac *mac)
1208{ 1208{
1209 dev_dbg_f(zd_mac_dev(mac), "\n"); 1209 dev_dbg_f(zd_mac_dev(mac), "\n");
1210 cancel_rearming_delayed_workqueue(zd_workqueue, 1210 cancel_delayed_work_sync(&mac->housekeeping.link_led_work);
1211 &mac->housekeeping.link_led_work);
1212 zd_chip_control_leds(&mac->chip, ZD_LED_OFF); 1211 zd_chip_control_leds(&mac->chip, ZD_LED_OFF);
1213} 1212}
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 818e1480ca93..06041cb1c422 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -55,6 +55,7 @@ static struct usb_device_id usb_ids[] = {
55 { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 }, 55 { USB_DEVICE(0x129b, 0x1666), .driver_info = DEVICE_ZD1211 },
56 { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 }, 56 { USB_DEVICE(0x13b1, 0x001e), .driver_info = DEVICE_ZD1211 },
57 { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 }, 57 { USB_DEVICE(0x1435, 0x0711), .driver_info = DEVICE_ZD1211 },
58 { USB_DEVICE(0x14ea, 0xab10), .driver_info = DEVICE_ZD1211 },
58 { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 }, 59 { USB_DEVICE(0x14ea, 0xab13), .driver_info = DEVICE_ZD1211 },
59 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 }, 60 { USB_DEVICE(0x157e, 0x300a), .driver_info = DEVICE_ZD1211 },
60 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 }, 61 { USB_DEVICE(0x157e, 0x300b), .driver_info = DEVICE_ZD1211 },
@@ -92,6 +93,7 @@ static struct usb_device_id usb_ids[] = {
92 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B }, 93 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
93 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B }, 94 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
94 { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B }, 95 { USB_DEVICE(0x2019, 0x5303), .driver_info = DEVICE_ZD1211B },
96 { USB_DEVICE(0x2019, 0xed01), .driver_info = DEVICE_ZD1211B },
95 /* "Driverless" devices that need ejecting */ 97 /* "Driverless" devices that need ejecting */
96 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER }, 98 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
97 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER }, 99 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cdbeec9f83ea..546de5749824 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -488,7 +488,7 @@ static int xennet_start_xmit(struct sk_buff *skb, struct net_device *dev)
488 488
489 if (unlikely(!netif_carrier_ok(dev) || 489 if (unlikely(!netif_carrier_ok(dev) ||
490 (frags > 1 && !xennet_can_sg(dev)) || 490 (frags > 1 && !xennet_can_sg(dev)) ||
491 netif_needs_gso(dev, skb))) { 491 netif_needs_gso(skb, netif_skb_features(skb)))) {
492 spin_unlock_irq(&np->tx_lock); 492 spin_unlock_irq(&np->tx_lock);
493 goto drop; 493 goto drop;
494 } 494 }
diff --git a/drivers/net/xilinx_emaclite.c b/drivers/net/xilinx_emaclite.c
index 14f0955eca68..cad66ce1640b 100644
--- a/drivers/net/xilinx_emaclite.c
+++ b/drivers/net/xilinx_emaclite.c
@@ -24,6 +24,7 @@
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
 #include <linux/of_mdio.h>
+#include <linux/of_net.h>
 #include <linux/phy.h>
 
 #define DRIVER_NAME "xilinx_emaclite"
@@ -515,7 +516,7 @@ static void xemaclite_update_address(struct net_local *drvdata,
  */
 static int xemaclite_set_mac_address(struct net_device *dev, void *address)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sockaddr *addr = address;
 
 	if (netif_running(dev))
@@ -534,7 +535,7 @@ static int xemaclite_set_mac_address(struct net_device *dev, void *address)
  */
 static void xemaclite_tx_timeout(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	unsigned long flags;
 
 	dev_err(&lp->ndev->dev, "Exceeded transmit timeout of %lu ms\n",
@@ -578,7 +579,7 @@ static void xemaclite_tx_timeout(struct net_device *dev)
  */
 static void xemaclite_tx_handler(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 
 	dev->stats.tx_packets++;
 	if (lp->deferred_skb) {
@@ -605,7 +606,7 @@ static void xemaclite_tx_handler(struct net_device *dev)
  */
 static void xemaclite_rx_handler(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sk_buff *skb;
 	unsigned int align;
 	u32 len;
@@ -661,7 +662,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 {
 	bool tx_complete = 0;
 	struct net_device *dev = dev_id;
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	void __iomem *base_addr = lp->base_addr;
 	u32 tx_status;
 
@@ -918,7 +919,7 @@ void xemaclite_adjust_link(struct net_device *ndev)
  */
 static int xemaclite_open(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	int retval;
 
 	/* Just to be safe, stop the device first */
@@ -987,7 +988,7 @@ static int xemaclite_open(struct net_device *dev)
  */
 static int xemaclite_close(struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 
 	netif_stop_queue(dev);
 	xemaclite_disable_interrupts(lp);
@@ -1001,21 +1002,6 @@ static int xemaclite_close(struct net_device *dev)
 }
 
 /**
- * xemaclite_get_stats - Get the stats for the net_device
- * @dev: Pointer to the network device
- *
- * This function returns the address of the 'net_device_stats' structure for the
- * given network device. This structure holds usage statistics for the network
- * device.
- *
- * Return: Pointer to the net_device_stats structure.
- */
-static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
-{
-	return &dev->stats;
-}
-
-/**
  * xemaclite_send - Transmit a frame
  * @orig_skb: Pointer to the socket buffer to be transmitted
  * @dev: Pointer to the network device
@@ -1031,7 +1017,7 @@ static struct net_device_stats *xemaclite_get_stats(struct net_device *dev)
  */
 static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 {
-	struct net_local *lp = (struct net_local *) netdev_priv(dev);
+	struct net_local *lp = netdev_priv(dev);
 	struct sk_buff *new_skb;
 	unsigned int len;
 	unsigned long flags;
@@ -1068,7 +1054,7 @@ static int xemaclite_send(struct sk_buff *orig_skb, struct net_device *dev)
 static void xemaclite_remove_ndev(struct net_device *ndev)
 {
 	if (ndev) {
-		struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+		struct net_local *lp = netdev_priv(ndev);
 
 		if (lp->base_addr)
 			iounmap((void __iomem __force *) (lp->base_addr));
@@ -1245,7 +1231,7 @@ static int __devexit xemaclite_of_remove(struct platform_device *of_dev)
 	struct device *dev = &of_dev->dev;
 	struct net_device *ndev = dev_get_drvdata(dev);
 
-	struct net_local *lp = (struct net_local *) netdev_priv(ndev);
+	struct net_local *lp = netdev_priv(ndev);
 
 	/* Un-register the mii_bus, if configured */
 	if (lp->has_mdio) {
@@ -1285,7 +1271,6 @@ static struct net_device_ops xemaclite_netdev_ops = {
 	.ndo_start_xmit = xemaclite_send,
 	.ndo_set_mac_address = xemaclite_set_mac_address,
 	.ndo_tx_timeout = xemaclite_tx_timeout,
-	.ndo_get_stats = xemaclite_get_stats,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller = xemaclite_poll_controller,
 #endif
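Two recurring clean-ups run through the xilinx_emaclite hunks: netdev_priv() returns void *, so the explicit (struct net_local *) casts add nothing, and a ndo_get_stats hook that merely returns &dev->stats can be removed because the networking core falls back to dev->stats when that hook is NULL. A small sketch of the cast-free pattern, using hypothetical names (demo_priv, demo_open):

#include <linux/netdevice.h>

struct demo_priv {
	void __iomem *base_addr;	/* per-device private state */
};

static int demo_open(struct net_device *dev)
{
	/* netdev_priv() returns void *, which converts implicitly to any
	 * object pointer type in C, so no cast is required */
	struct demo_priv *lp = netdev_priv(dev);

	(void)lp;
	return 0;
}

The private area is allocated together with the net_device (alloc_etherdev(sizeof(struct demo_priv))), which is why netdev_priv() is just pointer arithmetic rather than a lookup.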
diff --git a/drivers/net/yellowfin.c b/drivers/net/yellowfin.c
index cd1b3dcd61db..ec47e22fa186 100644
--- a/drivers/net/yellowfin.c
+++ b/drivers/net/yellowfin.c
@@ -744,7 +744,7 @@ static int yellowfin_init_ring(struct net_device *dev)
 	}
 
 	for (i = 0; i < RX_RING_SIZE; i++) {
-		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+		struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
 		yp->rx_skbuff[i] = skb;
 		if (skb == NULL)
 			break;
@@ -1157,7 +1157,7 @@ static int yellowfin_rx(struct net_device *dev)
 	for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) {
 		entry = yp->dirty_rx % RX_RING_SIZE;
 		if (yp->rx_skbuff[entry] == NULL) {
-			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz);
+			struct sk_buff *skb = dev_alloc_skb(yp->rx_buf_sz + 2);
 			if (skb == NULL)
 				break;	/* Better luck next round. */
 			yp->rx_skbuff[entry] = skb;
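Both RX allocation sites grow the buffer by two bytes, most likely to account for the two bytes the driver already reserves at the head of each receive skb so that the IP header following the 14-byte Ethernet header is 4-byte aligned; without the extra allocation, that reserve eats into the usable rx_buf_sz. A sketch of the usual allocate-then-reserve pattern (demo_rx_alloc is a placeholder, not the yellowfin helper):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *demo_rx_alloc(unsigned int rx_buf_sz)
{
	/* over-allocate by 2 so reserving 2 still leaves rx_buf_sz usable */
	struct sk_buff *skb = dev_alloc_skb(rx_buf_sz + 2);

	if (!skb)
		return NULL;
	skb_reserve(skb, 2);	/* IP header lands on a 4-byte boundary */
	return skb;
}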
diff --git a/drivers/net/znet.c b/drivers/net/znet.c
index c3a329204511..ae07b3dfbcc1 100644
--- a/drivers/net/znet.c
+++ b/drivers/net/znet.c
@@ -124,7 +124,7 @@ MODULE_LICENSE("GPL");
 #define TX_BUF_SIZE 8192
 #define DMA_BUF_SIZE (RX_BUF_SIZE + 16)	/* 8k + 16 bytes for trailers */
 
-#define TX_TIMEOUT	10
+#define TX_TIMEOUT	(HZ/10)
 
 struct znet_private {
 	int rx_dma, tx_dma;
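The transmit watchdog timeout (net_device::watchdog_timeo) is measured in jiffies, so a bare 10 means ten timer ticks, anywhere from 10 ms to 100 ms depending on CONFIG_HZ, whereas HZ/10 pins it at roughly 100 ms on every configuration. A sketch of where the constant typically ends up, with a placeholder demo_setup():

#include <linux/jiffies.h>
#include <linux/netdevice.h>

#define TX_TIMEOUT	(HZ / 10)	/* ~100 ms regardless of CONFIG_HZ */

static void demo_setup(struct net_device *dev)
{
	/* the core calls ndo_tx_timeout if a queued packet has not been
	 * completed within this many jiffies */
	dev->watchdog_timeo = TX_TIMEOUT;
}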